| content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
|---|---|---|---|---|---|---|---|---|
| stringlengths 7-928k | float64 3.5-33.8k | int64 6-139k | float64 0.08-0.96 | sequence | stringlengths 7-104 | stringlengths 4-230 | int64 7-928k | stringclasses 1 value |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import copy
import json
import requests
import pytz
import time
from inky.inky_uc8159 import Inky, DESATURATED_PALETTE
from datetime import datetime
from PIL import Image, ImageFont, ImageDraw
import io
import apikey
import os
import signal
import RPi.GPIO as GPIO
path = os.path.dirname(os.path.realpath(__file__))
ICON_SIZE = 100
TILE_WIDTH = 150
TILE_HEIGHT = 200
FONT_SIZE = 25
SPACE = 2
ROTATE = 0 # 180 = flip display
USE_INKY = True
SHOW_CLOCK = False
SLEEP_TIME = 3600
colors = ['Black', 'White', 'Green', 'Blue', 'Red', 'Yellow', 'Orange']
precipitation_colour = colors[0]
temperature_colour = colors[4]
day_colour = colors[3]
#BUTTONS = [5, 6, 16, 24]
LABELS = ['A','B','C','D']
GPIO.setmode(GPIO.BCM)
#GPIO.setup(Buttons, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#def handle_button(pin):
# label = LABELS[BUTTONS.index(pin)]
time_colour = colors[4]
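# Map OpenWeatherMap condition ids to weather-icons filenames.
# Id ranges: 2xx thunderstorm, 3xx drizzle, 5xx rain, 6xx snow,
# 7xx atmosphere, 800 clear, 80x clouds, 9xx extreme/additional.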
general_map = {
200: "thunderstorm.PNG8",
201: "thunderstorm.PNG8",
202: "thunderstorm.PNG8",
210: "lightning.PNG8",
211: "lightning.PNG8",
212: "lightning.PNG8",
221: "lightning.PNG8",
230: "thunderstorm.PNG8",
231: "thunderstorm.PNG8",
232: "thunderstorm.PNG8",
300: "sprinkle.PNG8",
301: "sprinkle.PNG8",
302: "rain.PNG8",
310: "rain-mix.PNG8",
311: "rain.PNG8",
312: "rain.PNG8",
313: "showers.PNG8",
314: "rain.PNG8",
321: "sprinkle.PNG8",
500: "sprinkle.PNG8",
501: "rain.PNG8",
502: "rain.PNG8",
503: "rain.PNG8",
504: "rain.PNG8",
511: "rain-mix.PNG8",
520: "showers.PNG8",
521: "showers.PNG8",
522: "showers.PNG8",
531: "storm-showers.PNG8",
600: "snow.PNG8",
601: "snow.PNG8",
602: "sleet.PNG8",
611: "rain-mix.PNG8",
612: "rain-mix.PNG8",
615: "rain-mix.PNG8",
616: "rain-mix.PNG8",
620: "rain-mix.PNG8",
621: "snow.PNG8",
622: "snow.PNG8",
701: "showers.PNG8",
711: "smoke.PNG8",
721: "day-haze.PNG8",
731: "dust.PNG8",
741: "fog.PNG8",
761: "dust.PNG8",
762: "dust.PNG8",
771: "cloudy-gusts.PNG8",
781: "tornado.PNG8",
800: "day-sunny.PNG8",
801: "cloudy-gusts.PNG8",
802: "cloudy-gusts.PNG8",
803: "cloudy-gusts.PNG8",
804: "cloudy.PNG8",
900: "tornado.PNG8",
901: "storm-showers.PNG8",
902: "hurricane.PNG8",
903: "snowflake-cold.PNG8",
904: "hot.PNG8",
905: "windy.PNG8",
906: "hail.PNG8",
957: "strong-wind.PNG8"}
day_map = {
200: "day-thunderstorm.PNG8",
201: "day-thunderstorm.PNG8",
202: "day-thunderstorm.PNG8",
210: "day-lightning.PNG8",
211: "day-lightning.PNG8",
212: "day-lightning.PNG8",
221: "day-lightning.PNG8",
230: "day-thunderstorm.PNG8",
231: "day-thunderstorm.PNG8",
232: "day-thunderstorm.PNG8",
300: "day-sprinkle.PNG8",
301: "day-sprinkle.PNG8",
302: "day-rain.PNG8",
310: "day-rain.PNG8",
311: "day-rain.PNG8",
312: "day-rain.PNG8",
313: "day-rain.PNG8",
314: "day-rain.PNG8",
321: "day-sprinkle.PNG8",
500: "day-sprinkle.PNG8",
501: "day-rain.PNG8",
502: "day-rain.PNG8",
503: "day-rain.PNG8",
504: "day-rain.PNG8",
511: "day-rain-mix.PNG8",
520: "day-showers.PNG8",
521: "day-showers.PNG8",
522: "day-showers.PNG8",
531: "day-storm-showers.PNG8",
600: "day-snow.PNG8",
601: "day-sleet.PNG8",
602: "day-snow.PNG8",
611: "day-rain-mix.PNG8",
612: "day-rain-mix.PNG8",
615: "day-rain-mix.PNG8",
616: "day-rain-mix.PNG8",
620: "day-rain-mix.PNG8",
621: "day-snow.PNG8",
622: "day-snow.PNG8",
701: "day-showers.PNG8",
711: "smoke.PNG8",
721: "day-haze.PNG8",
731: "dust.PNG8",
741: "day-fog.PNG8",
761: "dust.PNG8",
762: "dust.PNG8",
781: "tornado.PNG8",
800: "day-sunny.PNG8",
801: "day-cloudy-gusts.PNG8",
802: "day-cloudy-gusts.PNG8",
803: "day-cloudy-gusts.PNG8",
804: "day-sunny-overcast.PNG8",
900: "tornado.PNG8",
902: "hurricane.PNG8",
903: "snowflake-cold.PNG8",
904: "hot.PNG8",
906: "day-hail.PNG8",
957: "strong-wind.PNG8"}
night_map = {
200: "night-alt-thunderstorm.PNG8",
201: "night-alt-thunderstorm.PNG8",
202: "night-alt-thunderstorm.PNG8",
210: "night-alt-lightning.PNG8",
211: "night-alt-lightning.PNG8",
212: "night-alt-lightning.PNG8",
221: "night-alt-lightning.PNG8",
230: "night-alt-thunderstorm.PNG8",
231: "night-alt-thunderstorm.PNG8",
232: "night-alt-thunderstorm.PNG8",
300: "night-alt-sprinkle.PNG8",
301: "night-alt-sprinkle.PNG8",
302: "night-alt-rain.PNG8",
310: "night-alt-rain.PNG8",
311: "night-alt-rain.PNG8",
312: "night-alt-rain.PNG8",
313: "night-alt-rain.PNG8",
314: "night-alt-rain.PNG8",
321: "night-alt-sprinkle.PNG8",
500: "night-alt-sprinkle.PNG8",
501: "night-alt-rain.PNG8",
502: "night-alt-rain.PNG8",
503: "night-alt-rain.PNG8",
504: "night-alt-rain.PNG8",
511: "night-alt-rain-mix.PNG8",
520: "night-alt-showers.PNG8",
521: "night-alt-showers.PNG8",
522: "night-alt-showers.PNG8",
531: "night-alt-storm-showers.PNG8",
600: "night-alt-snow.PNG8",
601: "night-alt-sleet.PNG8",
602: "night-alt-snow.PNG8",
611: "night-alt-rain-mix.PNG8",
612: "night-alt-rain-mix.PNG8",
615: "night-alt-rain-mix.PNG8",
616: "night-alt-rain-mix.PNG8",
620: "night-alt-rain-mix.PNG8",
621: "night-alt-snow.PNG8",
622: "night-alt-snow.PNG8",
701: "night-alt-showers.PNG8",
711: "smoke.PNG8",
721: "day-haze.PNG8",
731: "dust.PNG8",
741: "night-fog.PNG8",
761: "dust.PNG8",
762: "dust.PNG8",
781: "tornado.PNG8",
800: "night-clear.PNG8",
801: "night-alt-cloudy-gusts.PNG8",
802: "night-alt-cloudy-gusts.PNG8",
803: "night-alt-cloudy-gusts.PNG8",
804: "night-alt-cloudy.PNG8",
900: "tornado.PNG8",
902: "hurricane.PNG8",
903: "snowflake-cold.PNG8",
904: "hot.PNG8",
906: "night-alt-hail.PNG8",
957: "strong-wind.PNG8"}
class Day:
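    """One forecast day: rounded min/max temperature, precipitation
    probability, OpenWeatherMap condition id, sun times, pressure and date."""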
def __init__(self, min, max, pop, id, sunrise, sunset, pressure, dt):
self.min = int(min + 0.5)
self.max = int(max + 0.5)
self.pop = pop
self.id = id
self.sunrise = sunrise
self.sunset = sunset
self.pressure = pressure
self.dt = dt
def get_icon(name):
return Image.open(name).convert("RGBA")
def day_lists_not_identical(days, other_days):
if (len(days) != len(other_days)):
return True
for i in range(len(days)):
if (days[i].min != other_days[i].min):
return True
if (days[i].max != other_days[i].max):
return True
if (days[i].pop != other_days[i].pop):
return True
if (days[i].id != other_days[i].id):
return True
    return False
api_key = apikey.api_key
if (api_key == "<your API key>"):
print("You forgot to enter your API key")
exit()
lat = apikey.lat
lon = apikey.lon
url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&exclude=hourly&appid=%s&units=metric" % (
lat, lon, api_key)
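# The One Call "daily" array provides temp.min/temp.max, pop, weather[0].id,
# sunrise, sunset, pressure and dt for each day -- exactly the fields the
# Day objects below consume.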
palette_colors = [(c[0] / 255.0, c[1] / 255.0, c[2] / 255.0) for c in DESATURATED_PALETTE[2:6] + [(0, 0, 0)]]
tile_positions = []
for i in range(2):
for j in range(4):
tile_positions.append((j * TILE_WIDTH, i * TILE_HEIGHT))
inky_display = Inky()
saturation = 0
y_top = int(inky_display.height)
y_bottom = y_top + int(inky_display.height * (4.0 / 10.0))
font = ImageFont.truetype(path + "/fonts/BungeeColor-Regular_colr_Windows.ttf", FONT_SIZE)
old_days = []
while True:
    try:
        response = requests.get(url)
        data = json.loads(response.text)
    except (requests.RequestException, ValueError):
        # On a network or JSON error, wait and retry instead of falling
        # through with undefined `data` below.
        time.sleep(SLEEP_TIME)
        continue
days = []
daily = data["daily"]
for day in daily:
min = day["temp"]["min"]
max = day["temp"]["max"]
pop = day["pop"]
id = day["weather"][0]["id"]
sunrise = int(day["sunrise"])
sunset = int(day["sunset"])
dt = int(day["dt"])
pressure = int(day["pressure"])
days.append(Day(min, max, pop, id, sunrise, sunset, pressure, dt))
#pressure = int(day["pressure"])
#print(day["pressure"])
if (day_lists_not_identical(days, old_days)):
old_days = copy.deepcopy(days)
img = Image.new("RGBA", inky_display.resolution, colors[1])
draw = ImageDraw.Draw(img)
for i in range(8):
name = path+"/icons/wi-"
if (i == 0):
t = int(time.time())
if (t < days[i].sunset):
name += day_map[days[i].id]
else:
name += night_map[days[i].id]
else:
name += general_map[days[i].id]
icon = get_icon(name)
x = tile_positions[i][0] + (TILE_WIDTH - ICON_SIZE) // 2
y = tile_positions[i][1]
img.paste(icon, (x, y))
text = str(int(100 * days[i].pop)) + "%"
w, h = font.getsize(text)
x = tile_positions[i][0] + (TILE_WIDTH - w) // 2
y = tile_positions[i][1] + ICON_SIZE + SPACE
            draw.text((x, y), text, precipitation_colour, font)
text = str(days[i].min) + "°|" + str(days[i].max) + "°"
w, h = font.getsize(text)
x = tile_positions[i][0] + (TILE_WIDTH - w) // 2
y += FONT_SIZE
            draw.text((x, y), text, temperature_colour, font)
press = str(days[i].pressure)
text = str(press)+"hPa"
w, h = font.getsize(text)
x = tile_positions[i][0] + (TILE_WIDTH - w) // 2
y += FONT_SIZE
draw.text((x, y), text, day_colour, font)
ts = time.gmtime(days[i].dt)
day_name = time.strftime("%a", ts)
text = day_name
w, h = font.getsize(text)
x = tile_positions[i][0] + (TILE_WIDTH - w) // 2
y += FONT_SIZE
draw.text((x, y), text, day_colour, font)
        if SHOW_CLOCK:
now = datetime.now()
current_time = now.strftime("%H:%M")
draw.text((245, 410), current_time, time_colour, font)
if (USE_INKY):
inky_display.set_border(colors[4])
            inky_display.set_image(img.rotate(ROTATE), saturation=saturation)
inky_display.show()
else:
img.show()
time.sleep(SLEEP_TIME)
print("loop")
| 28.917808 | 110 | 0.573283 | ["MIT"] | vwillcox/Inky-Impression-Weather-Station | weather.py | 10,557 | Python |
"""
This module contains the cli functions.
Split them out into separate files if required.
"""
import sys
import os
import subprocess
import pickle
from cheapskate_bal import balance as bal
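# External data-collector binary and its sample rate (Hz), used by
# grab_data() and batch_process() below.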
__collector__ = {'exe': "collect_3008", 'samp_rate': 2000}
def csbal_process():
"""
This method is run when the `csbal` script is called.
    It can be used to check a single file (e.g. the balance state after adjusting).
    Args: file stem, freq (Hz = rpm/60, float), samp_rate (of the data collector).
"""
args = sys.argv[1:]
stem = args[0]
freq = float(args[1])
samp_rate = float(args[2])
df = bal.read_data_files(stem, freq, samp_rate)
bal.graph_data(df)
bal.process_data(df, freq, samp_rate, True)
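# Example invocation (stem and values are hypothetical):
#   csbal /data/run1/t0 25.0 2000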
def grab_data(tests, stem):
for t in tests:
msg, tag = t
print("\n\n==================================")
print(msg)
print("start DUT now")
input("Press Enter to start data capture...")
cp = subprocess.run(["taskset", "-c", "3", "nice", "-20", __collector__['exe'], stem+tag],
capture_output=True, universal_newlines=True)
summary = cp.stdout.splitlines()[-5:]
        print(*summary, sep='\n')
def batch_process(tests, stem, freq):
results = []
for t in tests:
tag = t[1]
sr = __collector__['samp_rate']
df = bal.read_data_files(stem+tag, freq, sr)
results.append(bal.process_data(df, freq, sr))
return results
def csbal_single():
"""
This method performs the whole process for a single plane balance
Four data files are captured, and the results are emitted
args are file stem, freq(Hz), shift angle of test mass (deg), test mass """
args = sys.argv[1:]
    if len(args) < 4:
        print("args are stem, freq, shift_ang, test_mass")
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
offset_1_ang = 360
offset_2_ang = 360 # these should not both be 0, as there is a div by their sum
if len(args) > 5:
offset_1_ang = float(args[4])
offset_2_ang = float(args[5])
# make sure the stem looks like a directory
if stem[-1] != os.path.sep:
stem = stem + os.path.sep
tests = [('T0: initial unbalanced state', 't0'),
('T1: test mass at 0 deg ref', 't1'),
('T2: test mass at positive angle', 't2'),
('T3: test mass at negative angle', 't3'), ]
grab_data(tests, stem)
print("Processing captured data...")
results = batch_process(tests, stem, freq)
print("Balace Results:")
bal.single_balance(results, tmass, shift_ang, offset_1_ang, offset_2_ang)
def csbal_dual_init():
"""
    This method performs the whole process for a dual plane balance.
Three files are captured and the results are emitted
args are file stem, freq(Hz), shift angle of test mass (deg), test mass """
args = sys.argv[1:]
    if len(args) < 4:
        print("args are stem, freq, shift_ang, test_mass")
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
# make sure the stem looks like a directory
if stem[-1] != os.path.sep:
stem = stem + os.path.sep
tests = [('T0: initial unbalanced state', 't0'),
('TA: test mass on bearing 1 at shift angle', 'ta'),
('TB: test mass on bearing 2 at shift angle', 'tb')]
grab_data(tests, stem)
print("Processing captured data...")
results = batch_process(tests, stem, freq)
print("Dual Plane Balance Results")
influence, correction = bal.dual_compute_influence(results, tmass, shift_ang)
# write the influence params to a file
inf_file = stem+"influence"
with open(inf_file, 'wb') as filehandle:
pickle.dump(influence, filehandle)
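# Typical dual-plane session (entry-point names and values are hypothetical):
#   1. run the init entry point once to measure the influence coefficients,
#   2. run the iteration entry point repeatedly; it reloads <stem>/influence.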
def csbal_dual_iter():
"""
This method performs an iteration of dual plane balance, once the
influence params are known. One file is captured and the results
are emitted
args are file stem, tag, freq
"""
args = sys.argv[1:]
    if len(args) < 3:
        print("args are: filestem, tag, freq")
        return
stem = args[0]
tag = args[1]
freq = float(args[2])
# make sure the stem looks like a directory
if stem[-1] != os.path.sep:
stem = stem + os.path.sep
# get the influence from file
influence = []
inf_file = stem+"influence"
with open(inf_file, 'rb') as filehandle:
influence = pickle.load(filehandle)
tests = [('T(curr): initial unbalanced state', 't'+tag)]
grab_data(tests, stem)
print("Processing captured data...")
results = batch_process(tests, stem, freq)
print("Dual Plane Balance Results")
correction = bal.dual_compute_weights(results, influence)
| 28.951807 | 98 | 0.620266 | ["Unlicense"] | kevinpowell/balancer | cheapskate_bal/cheapskate_bal/cli.py | 4,806 | Python |
from __future__ import absolute_import
import os
# test_settings.py works differently from
# dev_settings.py/prod_settings.py; it actually is directly referenced
# by the test suite as DJANGO_SETTINGS_MODULE and imports settings.py
# directly and then hacks up the values that are different for the
# test suite. As will be explained, this is kinda messy and probably
# we'd be better off switching it to work more like dev_settings.py,
# but for now, this is what we have.
#
# An important downside of the test_settings.py approach is that if we
# want to change any settings that settings.py then computes
# additional settings from (e.g. EXTERNAL_HOST), we need to do a hack
# like the below line(s) before we import from settings, for
# transmitting the value of EXTERNAL_HOST to dev_settings.py so that
# it can be set there, at the right place in the settings.py flow.
# Ick.
if os.getenv("EXTERNAL_HOST") is None:
os.environ["EXTERNAL_HOST"] = "testserver"
from .settings import *
# Used to clone DBs in backend tests.
BACKEND_DATABASE_TEMPLATE = 'zulip_test_template'
DATABASES["default"] = {
"NAME": "zulip_test",
"USER": "zulip_test",
"PASSWORD": LOCAL_DATABASE_PASSWORD,
"HOST": "localhost",
"SCHEMA": "zulip",
"ENGINE": "django.db.backends.postgresql_psycopg2",
"TEST_NAME": "django_zulip_tests",
"OPTIONS": {"connection_factory": TimeTrackingConnection},
}
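# These credentials target the dedicated zulip_test database; backend tests
# clone it from BACKEND_DATABASE_TEMPLATE above instead of touching dev data.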
if USING_PGROONGA:
# We need to have "pgroonga" schema before "pg_catalog" schema in
# the PostgreSQL search path, because "pgroonga" schema overrides
# the "@@" operator from "pg_catalog" schema, and "pg_catalog"
# schema is searched first if not specified in the search path.
# See also: http://www.postgresql.org/docs/current/static/runtime-config-client.html
pg_options = '-c search_path=%(SCHEMA)s,zulip,public,pgroonga,pg_catalog' % \
DATABASES['default']
DATABASES['default']['OPTIONS']['options'] = pg_options
if "TORNADO_SERVER" in os.environ:
# This covers the Casper test suite case
TORNADO_SERVER = os.environ["TORNADO_SERVER"]
else:
# This covers the backend test suite case
TORNADO_SERVER = None
CAMO_URI = 'https://external-content.zulipcdn.net/'
CAMO_KEY = 'dummy'
if "CASPER_TESTS" in os.environ:
CASPER_TESTS = True
# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000
# Don't use the real message log for tests
EVENT_LOG_DIR = '/tmp/zulip-test-event-log'
# Print our emails rather than sending them
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# The test suite uses EmailAuthBackend
AUTHENTICATION_BACKENDS += ('zproject.backends.EmailAuthBackend',)
# Configure Google Oauth2
GOOGLE_OAUTH2_CLIENT_ID = "test_client_id"
# Makes testing LDAP backend require less mocking
AUTH_LDAP_ALWAYS_UPDATE_USER = False
TEST_SUITE = True
RATE_LIMITING = False
# Don't use rabbitmq from the test suite -- the user_profile_ids for
# any generated queue elements won't match those being used by the
# real app.
USING_RABBITMQ = False
# Disable the tutorial because it confuses the client tests.
TUTORIAL_ENABLED = False
# Disable use of memcached for caching
CACHES['database'] = {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': 'zulip-database-test-cache',
'TIMEOUT': 3600,
'CONN_MAX_AGE': 600,
'OPTIONS': {
'MAX_ENTRIES': 100000
}
}
# Use production config from Webpack in tests
if CASPER_TESTS:
WEBPACK_FILE = 'webpack-stats-production.json'
else:
WEBPACK_FILE = os.path.join('var', 'webpack-stats-test.json')
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = os.path.join(DEPLOY_ROOT, WEBPACK_FILE)
if CASPER_TESTS:
# Don't auto-restart Tornado server during casper tests
AUTORELOAD = False
REALMS_HAVE_SUBDOMAINS = True
else:
# Use local memory cache for backend tests.
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
LOGGING['loggers']['zulip.requests']['level'] = 'CRITICAL'
LOGGING['loggers']['zulip.management']['level'] = 'CRITICAL'
LOGGING['loggers']['django.request'] = {'level': 'ERROR'}
LOGGING['loggers']['fakeldap'] = {'level': 'ERROR'}
# Enable file:/// hyperlink support by default in tests
ENABLE_FILE_LINKS = True
LOCAL_UPLOADS_DIR = 'var/test_uploads'
S3_KEY = 'test-key'
S3_SECRET_KEY = 'test-secret-key'
S3_AUTH_UPLOADS_BUCKET = 'test-authed-bucket'
# Test Custom TOS template rendering
TERMS_OF_SERVICE = 'corporate/terms.md'
INLINE_URL_EMBED_PREVIEW = False
HOME_NOT_LOGGED_IN = '/login'
LOGIN_URL = '/accounts/login'
# By default will not send emails when login occurs.
# Explicitly set this to True within tests that must have this on.
SEND_LOGIN_EMAILS = False
GOOGLE_OAUTH2_CLIENT_ID = "id"
GOOGLE_OAUTH2_CLIENT_SECRET = "secret"
SOCIAL_AUTH_GITHUB_KEY = "key"
SOCIAL_AUTH_GITHUB_SECRET = "secret"
| 34.213793 | 88 | 0.738359 | ["Apache-2.0"] | JaneCeng/zulip | zproject/test_settings.py | 4,961 | Python |
import json
import os
import signal
import sys
from zipfile import BadZipfile
from zlib import error as zlib_error
from defusedxml.common import DefusedXmlException
import validator
from validator import decorator
from validator.chromemanifest import ChromeManifest
from validator.opensearch import detect_opensearch
from validator.rdf import RDFException, RDFParser
from validator.typedetection import detect_type
from validator.xpi import XPIManager
from constants import (PACKAGE_ANY, PACKAGE_EXTENSION, PACKAGE_SEARCHPROV,
PACKAGE_THEME)
types = {0: 'Unknown',
1: 'Extension/Multi-Extension',
2: 'Full Theme',
3: 'Dictionary',
4: 'Language Pack',
5: 'Search Provider'}
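# File extensions that imply a package type when nothing else determines it.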
assumed_extensions = {'jar': PACKAGE_THEME,
'xml': PACKAGE_SEARCHPROV}
def prepare_package(err, path, expectation=0, for_appversions=None,
timeout=-1):
"""Prepares a file-based package for validation.
timeout is the number of seconds before validation is aborted.
If timeout is -1 then no timeout checking code will run.
"""
package = None
try:
# Test that the package actually exists. I consider this Tier 0
# since we may not even be dealing with a real file.
if not os.path.isfile(path):
err.error(('main', 'prepare_package', 'not_found'),
'The package could not be found')
return
# Pop the package extension.
package_extension = os.path.splitext(path)[1]
package_extension = package_extension.lower()
def timeout_handler(signum, frame):
raise validator.ValidationTimeout(timeout)
if timeout != -1:
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
if package_extension == '.xml':
test_search(err, path, expectation)
elif package_extension not in ('.xpi', '.jar'):
err.error(('main', 'prepare_package', 'unrecognized'),
'The package is not of a recognized type.')
else:
package = open(path, 'rb')
test_package(err, package, path, expectation, for_appversions)
except validator.ValidationTimeout:
err.system_error(
msg_id='validation_timeout',
message='Validation has timed out',
signing_severity='high',
description=('Validation was unable to complete in the allotted '
'time. This is most likely due to the size or '
'complexity of your add-on.',
'This timeout has been logged, but please consider '
'filing an issue report here: http://mzl.la/1DG0sFd'),
exc_info=sys.exc_info())
except Exception:
err.system_error(exc_info=sys.exc_info())
finally:
# Remove timers and signal handlers regardless of whether
# we've completed successfully or the timer has fired.
if timeout != -1:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
if package:
package.close()
decorator.cleanup()
def test_search(err, package, expectation=0):
'Tests the package to see if it is a search provider.'
expected_search_provider = expectation in (PACKAGE_ANY,
PACKAGE_SEARCHPROV)
# If we're not expecting a search provider, warn the user and stop
# testing it like a search provider.
if not expected_search_provider:
return err.warning(('main',
'test_search',
'extension'),
'Unexpected file extension.')
# Is this a search provider?
detect_opensearch(err, package, listed=err.get_resource('listed'))
if expected_search_provider and not err.failed():
err.detected_type = PACKAGE_SEARCHPROV
def test_package(err, file_, name, expectation=PACKAGE_ANY,
for_appversions=None):
'Begins tests for the package.'
# Load up a new instance of an XPI.
try:
package = XPIManager(file_, mode='r', name=name)
has_package_json = 'package.json' in package
has_manifest_json = 'manifest.json' in package
has_install_rdf = 'install.rdf' in package
# install.rdf? | package.json? | manifest.json? | error | use-file
# Yes | No | No | No | install.rdf
# Yes | Yes | No | No | install.rdf
# Yes | No | Yes | No | install.rdf
# No | No | Yes | No | manifest.json
# No | No | No | Yes | install.rdf
# No | Yes | No | No | package.json
# No | No | Yes | Yes | install.rdf
if has_package_json:
_load_package_json(err, package, expectation)
if has_manifest_json:
_load_manifest_json(err, package, expectation)
if has_install_rdf:
_load_install_rdf(err, package, expectation)
except IOError:
# Die on this one because the file won't open.
err.error(('main', 'test_package', 'unopenable'),
'The XPI could not be opened.')
return
except (BadZipfile, zlib_error):
# Die if the zip file is corrupt.
err.error(('submain', '_load_install_rdf', 'badzipfile'),
error='Corrupt ZIP file',
description='We were unable to decompress the zip file.')
return
if package.extension in assumed_extensions:
assumed_type = assumed_extensions[package.extension]
# Is the user expecting a different package type?
if expectation not in (PACKAGE_ANY, assumed_type):
err.error(('main', 'test_package', 'unexpected_type'),
'Unexpected package type (found theme)')
test_inner_package(err, package, for_appversions)
def _load_install_rdf(err, package, expectation):
try:
install_rdf = RDFParser(err, package.read('install.rdf'))
except (RDFException, DefusedXmlException) as ex:
if isinstance(ex, DefusedXmlException):
url = 'https://pypi.python.org/pypi/defusedxml/0.3#attack-vectors'
reason = 'Malicious XML was detected, see {0}.'.format(url)
line = 0
else:
reason = ('Try validating your RDF with the W3 validator: '
'http://www.w3.org/RDF/Validator/.')
line = ex.line()
err.error(
err_id=('main', 'test_package', 'parse_error'),
error='Could not parse `install.rdf`.',
description=('The RDF parser was unable to parse the '
'install.rdf file included with this add-on.',
reason),
filename='install.rdf',
line=line)
return
else:
if install_rdf.rdf is None:
err.error(
err_id=('main', 'test_package', 'cannot_parse_installrdf'),
error='Cannot read `install.rdf`',
description='The install.rdf file could not be parsed.',
filename='install.rdf')
return
else:
err.save_resource('has_install_rdf', True, pushable=True)
err.save_resource('install_rdf', install_rdf, pushable=True)
# Load up the results of the type detection
results = detect_type(err, install_rdf, package)
if results is None:
err.error(
err_id=('main', 'test_package', 'undeterminable_type'),
error='Unable to determine add-on type',
description='The type detection algorithm could not determine '
'the type of the add-on.')
return
else:
err.detected_type = results
# Compare the results of the low-level type detection to
# that of the expectation and the assumption.
if expectation not in (PACKAGE_ANY, results):
err.warning(
err_id=('main', 'test_package', 'extension_type_mismatch'),
warning='Extension Type Mismatch',
description=("We detected that the add-on's type does not match "
'the expected type.',
'Type "%s" expected, found "%s"' %
(types[expectation], types[results])))
def _load_package_json(err, package, expectation):
raw_package_json = package.read('package.json')
try:
package_json = json.loads(raw_package_json)
except ValueError:
err.error(
err_id=('main', 'test_package', 'parse_error'),
error='Could not parse `package.json`.',
description='The JSON parser was unable to parse the '
'package.json file included with this add-on.',
filename='package.json')
else:
err.save_resource('has_package_json', True, pushable=True)
err.save_resource('package_json', package_json, pushable=True)
err.detected_type = PACKAGE_EXTENSION
def _load_manifest_json(err, package, expectation):
raw_manifest_json = package.read('manifest.json')
try:
manifest_json = json.loads(raw_manifest_json)
except ValueError:
err.error(
err_id=('main', 'test_package', 'parse_error'),
error='Could not parse `manifest.json`.',
description='The JSON parser was unable to parse the '
'manifest.json file included with this add-on.',
filename='manifest.json')
else:
err.save_resource('has_manifest_json', True, pushable=True)
err.save_resource('manifest_json', manifest_json, pushable=True)
err.detected_type = PACKAGE_EXTENSION
def populate_chrome_manifest(err, xpi_package):
"Loads the chrome.manifest if it's present"
if 'chrome.manifest' in xpi_package:
chrome_data = xpi_package.read('chrome.manifest')
chrome = ChromeManifest(chrome_data, 'chrome.manifest')
chrome_recursion_buster = set()
# Handle the case of manifests linked from the manifest.
def get_linked_manifest(path, from_path, from_chrome, from_triple):
if path in chrome_recursion_buster:
err.warning(
err_id=('submain', 'populate_chrome_manifest',
'recursion'),
warning='Linked manifest recursion detected.',
description='A chrome registration file links back to '
'itself. This can cause a multitude of '
'issues.',
filename=path)
return
# Make sure the manifest is properly linked
if path not in xpi_package:
err.notice(
err_id=('submain', 'populate_chrome_manifest', 'linkerr'),
notice='Linked manifest could not be found.',
description=('A linked manifest file could not be found '
'in the package.',
'Path: %s' % path),
filename=from_path,
line=from_triple['line'],
context=from_chrome.context)
return
chrome_recursion_buster.add(path)
manifest = ChromeManifest(xpi_package.read(path), path)
for triple in manifest.triples:
yield triple
if triple['subject'] == 'manifest':
subpath = triple['predicate']
# If the path is relative, make it relative to the current
# file.
if not subpath.startswith('/'):
subpath = '%s/%s' % (
'/'.join(path.split('/')[:-1]), subpath)
subpath = subpath.lstrip('/')
for subtriple in get_linked_manifest(
subpath, path, manifest, triple):
yield subtriple
chrome_recursion_buster.discard(path)
chrome_recursion_buster.add('chrome.manifest')
# Search for linked manifests in the base manifest.
for extra_manifest in chrome.get_triples(subject='manifest'):
# When one is found, add its triples to our own.
for triple in get_linked_manifest(extra_manifest['predicate'],
'chrome.manifest', chrome,
extra_manifest):
chrome.triples.append(triple)
chrome_recursion_buster.discard('chrome.manifest')
# Create a reference so we can get the chrome manifest later, but make
# it pushable so we don't run chrome manifests in JAR files.
err.save_resource('chrome.manifest', chrome, pushable=True)
# Create a non-pushable reference for tests that need to access the
# chrome manifest from within JAR files.
err.save_resource('chrome.manifest_nopush', chrome, pushable=False)
def test_inner_package(err, xpi_package, for_appversions=None):
"Tests a package's inner content."
populate_chrome_manifest(err, xpi_package)
# Iterate through each tier.
for tier in sorted(decorator.get_tiers()):
# Let the error bundler know what tier we're on.
err.set_tier(tier)
# Iterate through each test of our detected type.
for test in decorator.get_tests(tier, err.detected_type):
# Test whether the test is app/version specific.
if test['versions'] is not None:
# If the test's version requirements don't apply to the add-on,
# then skip the test.
if not err.supports_version(test['versions']):
continue
# If the user's version requirements don't apply to the test or
# to the add-on, then skip the test.
if (for_appversions and
not (err._compare_version(requirements=for_appversions,
support=test['versions']) and
err.supports_version(for_appversions))):
continue
# Save the version requirements to the error bundler.
err.version_requirements = test['versions']
test_func = test['test']
if test['simple']:
test_func(err)
else:
# Pass in:
# - Error Bundler
# - A copy of the package itself
test_func(err, xpi_package)
# Return any errors at the end of the tier if undetermined.
if err.failed(fail_on_warnings=False) and not err.determined:
err.unfinished = True
err.discard_unused_messages(ending_tier=tier)
return err
# Return the results.
return err
| 40.057292 | 79 | 0.575803 | ["BSD-3-Clause"] | kumar303/amo-validator | validator/submain.py | 15,382 | Python |
from flask import Flask, request, redirect, render_template, url_for, flash
import os
from flask_uploads import UploadSet, IMAGES
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime  # used by the commented-out time column below
app = Flask(__name__,
static_url_path = '',
static_folder = 'static',
template_folder = 'templates')
app.config['SECRET_KEY'] = 'big secrets'
photos = UploadSet('photos', IMAGES)
app.config['UPLOAD_FOLDER'] = 'images_store'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)  # photos are saved here; create it up front
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Database setup
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
# SQL form items
class PostItem(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False, nullable=False)
storeItem = db.Column(db.String(80), unique=False, nullable=False)
    availability = db.Column(db.String(80), unique=False, nullable=False)
location = db.Column(db.String(80), unique=False, nullable=False)
#time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    def __repr__(self):
        return '<PostItem %r %r %r %r>' % (self.name, self.storeItem, self.availability, self.location)
db.create_all()
def get_posts():
query = [i.__dict__ for i in PostItem.query.all()]
for item in query:
del item['_sa_instance_state']
return query
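# Used by render_index(): plain dicts are easier for the Jinja template to iterate.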
# Render webpages
@app.route("/")
def render_index():
return render_template("index.html", posts = get_posts())
@app.route('/about')
def render_about():
return render_template('about.html')
@app.route("/upload/", methods=['GET', 'POST'])
def render_upload():
# Get form data
if request.method == 'POST':
        # Validate each form field, flashing an error and redirecting on failure
item = ""
if '--------' == request.form.get('storeItem'):
redirected = redirect(url_for('render_upload'))
flash('Please select store item.')
return redirected
elif 'Other' == request.form.get('storeItem'):
item = request.form.get('Other')
else:
item = request.form.get('storeItem')
if None is request.form.get('radio'):
redirected = redirect(url_for('render_upload'))
flash('Please select an availability option.')
return redirected
if '' == request.form.get('Name'):
redirected = redirect(url_for('render_upload'))
flash('Please enter a name.')
return redirected
if '' == request.form.get('location'):
redirected = redirect(url_for('render_upload'))
flash('Please enter a location.')
return redirected
if '' == request.form.get('store'):
redirected = redirect(url_for('render_upload'))
flash('Please enter a store.')
return redirected
if 'photo' not in request.files:
redirected = redirect(url_for('render_upload'))
flash('Please upload a photo.')
return redirected
file = request.files['photo']
if '' == file.filename:
redirected = redirect(url_for('render_upload'))
flash('No photo selected')
return redirected
locationStr = request.form.get('location') + '-' + request.form.get('store')
# Save to database
        post = PostItem(name=request.form.get('Name'), storeItem=item, availability=request.form.get('radio'), location=locationStr)
db.session.add(post)
db.session.commit()
# Save the photo in the upload folder
photo = request.files['photo']
path = os.path.join(app.config['UPLOAD_FOLDER'], str(post.id))
photo.save(path)
        # Debug output
        print(post.id, post.storeItem, post.availability)
return redirect(url_for('render_index'))
return render_template('upload.html')
if __name__ == '__main__':
app.run('0.0.0.0', 3000)
| 33.216667 | 139 | 0.62845 | ["MIT"] | iSkytran/supplymedia | app.py | 3,986 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse BSM event auditing files."""
import argparse
import logging
import sys
from dtformats import bsm
from dtformats import output_writers
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from BSM event auditing files.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH',
default=None, help='path of the BSM event auditing file.')
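  # Example invocation (hypothetical path):
  #   python bsm.py --debug /var/audit/audit_log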
options = argument_parser.parse_args()
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}'.format(exception))
print('')
return False
log_file = bsm.BSMEventAuditingFile(
debug=options.debug, output_writer=output_writer)
log_file.Open(options.source)
print('BSM event auditing information:')
print('')
log_file.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| 21.830986 | 77 | 0.683226 | ["Apache-2.0"] | jleaniz/dtformats | scripts/bsm.py | 1,550 | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.masks Contains functions for dealing with two-dimensional masks.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import the relevant PTS classes and modules
from . import regions
# -----------------------------------------------------------------
def annuli_around(region, inner_factor, outer_factor, header, x_size, y_size):
"""
    This function masks everything except the annulus between the region
    expanded by inner_factor and the region expanded by outer_factor
    (used for background estimation around stars).
:param region:
:param inner_factor:
:param outer_factor:
:param header:
:param x_size:
:param y_size:
:return:
"""
# Create new regions for the background estimation around the stars
inner_region = regions.expand(region, inner_factor)
outer_region = regions.expand(region, outer_factor)
# Create inner and outer masks
inner_mask = regions.create_mask(inner_region, header, x_size, y_size)
outer_mask = regions.create_mask(outer_region, header, x_size, y_size)
# Create the mask
mask = inner_mask | np.logical_not(outer_mask)
# Return the mask
return mask
# -----------------------------------------------------------------
def masked_outside(region, header, x_size, y_size, expand_factor=1.0):
"""
    This function masks every pixel outside the (optionally expanded) region.
:param region:
:param header:
:param x_size:
:param y_size:
:param expand_factor:
:return:
"""
# Create a new region ...
region = regions.expand(region, factor=expand_factor)
# Create a mask from the region
mask = np.logical_not(regions.create_mask(region, header, x_size, y_size))
# Return the mask
return mask
# -----------------------------------------------------------------
def create_disk_mask(x_size, y_size, x_center, y_center, radius):
"""
    This function creates a boolean mask that is True inside a disk of the
    given radius centred on (x_center, y_center).
:param x_size:
:param y_size:
:param x_center:
:param y_center:
:param radius:
:return:
"""
# Calculate which pixels should be masked
y,x = np.ogrid[-y_center:y_size-y_center, -x_center:x_size-x_center]
mask = x*x + y*y <= radius*radius
# Return the mask
return mask
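# Example (hypothetical values): a disk of radius 5 px centred in a 100x100 frame:
#   mask = create_disk_mask(100, 100, 50.0, 50.0, 5.0)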
# -----------------------------------------------------------------
#def union(*args): # i wanted to do it this way, but didn't succeed ...
def union(mask_a, mask_b):
"""
    This function returns the union (logical OR) of the two masks.
    :param mask_a:
    :param mask_b:
:return:
"""
return mask_a + mask_b
# -----------------------------------------------------------------
#def intersection(*args): i wanted to do it this way, but didn't succeed ...
def intersection(mask_a, mask_b):
"""
    This function returns the intersection (elementwise product, i.e. logical AND) of the two masks.
    :param mask_a:
    :param mask_b:
:return:
"""
return mask_a * mask_b
# -----------------------------------------------------------------
def overlap(mask_a, mask_b):
"""
    This function checks whether the two masks have any pixel in common.
:param mask_a:
:param mask_b:
:return:
"""
return np.any(intersection(mask_a, mask_b))
# -----------------------------------------------------------------
def split_overlap(base_mask, test_mask, return_segments=False):
"""
This function takes all blobs in the base_mask and checks whether they overlap with the test_mask.
The function returns two new masks, one mask with all the blobs that overlapped, and another with the blobs
that did not overlap.
:param base_mask:
:param test_mask:
:return:
"""
overlapping = np.zeros_like(base_mask, dtype=bool)
not_overlapping = np.copy(base_mask)
from photutils import detect_sources
segments = detect_sources(base_mask.astype('float'), 0.5, 1).data
overlap = intersection(segments, test_mask)
# Check which indices are present in the overlap map
possible = np.array(range(1, np.max(overlap) + 1))
present = np.in1d(possible, overlap)
indices = possible[present]
overlapping_segments = np.zeros_like(base_mask, dtype=int)
not_overlapping_segments = np.copy(segments)
# Remove the galaxies from the segmentation map
for index in indices:
blob = segments == index
overlapping[blob] = True
not_overlapping[blob] = False
overlapping_segments[blob] = index
not_overlapping_segments[blob] = 0
if return_segments: return overlapping, not_overlapping, overlapping_segments, not_overlapping_segments
else: return overlapping, not_overlapping
# -----------------------------------------------------------------
| 27.632184 | 111 | 0.56926 | ["MIT"] | Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py | 4,809 | Python |
import sys, math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import colorize, seeding
# This is simple 4-joints walker robot environment.
#
# There are two versions:
#
# - Normal, with slightly uneven terrain.
#
# - Hardcore with ladders, stumps, pitfalls.
#
# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,
# it gets -100. Applying motor torque costs a small amount of points, more optimal agent
# will get better score.
#
# Heuristic is provided for testing, it's also useful to get demonstrations to
# learn from. To run heuristic:
#
# python gym/envs/box2d/bipedal_walker.py
#
# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,
# position of joints and joints angular speed, legs contact with ground, and 10 lidar
# rangefinder measurements to help to deal with the hardcore version. There's no coordinates
# in the state vector. Lidar is less useful in normal version, but it works.
#
# To solve the game you need to get 300 points in 1600 time steps.
#
# To solve hardcore version you need 300 points in 2000 time steps.
#
# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
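#
# Minimal interaction sketch (mirrors the heuristic loop at the bottom of
# this file):
#
#   env = BipedalWalker()
#   s = env.reset()
#   s, r, done, info = env.step(np.array([0.0, 0.0, 0.0, 0.0]))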
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MOTORS_TORQUE = 80
SPEED_HIP = 4
SPEED_KNEE = 6
LIDAR_RANGE = 160/SCALE
INITIAL_RANDOM = 5
HULL_POLY =[
(-30,+9), (+6,+9), (+34,+1),
(+34,-8), (-30,-8)
]
LEG_DOWN = -8/SCALE
LEG_W, LEG_H = 8/SCALE, 34/SCALE
VIEWPORT_W = 600
VIEWPORT_H = 400
TERRAIN_STEP = 14/SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H/SCALE/4
TERRAIN_GRASS = 10    # how long grass spots are, in steps
TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
HULL_FD = fixtureDef(
shape=polygonShape(vertices=[ (x/SCALE,y/SCALE) for x,y in HULL_POLY ]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
LEG_FD = fixtureDef(
shape=polygonShape(box=(LEG_W/2, LEG_H/2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8*LEG_W/2, LEG_H/2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if self.env.hull==contact.fixtureA.body or self.env.hull==contact.fixtureB.body:
self.env.game_over = True
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = True
def EndContact(self, contact):
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = False
class BipedalWalker(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
hardcore = False
def __init__(self):
self.seed()
self.viewer = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape = polygonShape(vertices=
[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction = FRICTION)
self.fd_edge = fixtureDef(
shape = edgeShape(vertices=
[(0, 0),
(1, 1)]),
friction = FRICTION,
categoryBits=0x0001,
)
self.reset()
high = np.array([np.inf]*24)
self.action_space = spaces.Box(np.array([-1,-1,-1,-1]), np.array([+1,+1,+1,+1]))
self.observation_space = spaces.Box(-high, high)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.terrain: return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
def _generate_terrain(self, hardcore):
GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
state = GRASS
velocity = 0.0
y = TERRAIN_HEIGHT
counter = TERRAIN_STARTPAD
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
for i in range(TERRAIN_LENGTH):
x = i*TERRAIN_STEP
self.terrain_x.append(x)
if state==GRASS and not oneshot:
velocity = 0.8*velocity + 0.01*np.sign(TERRAIN_HEIGHT - y)
if i > TERRAIN_STARTPAD: velocity += self.np_random.uniform(-1, 1)/SCALE #1
y += velocity
elif state==PIT and oneshot:
counter = self.np_random.randint(3, 5)
poly = [
(x, y),
(x+TERRAIN_STEP, y),
(x+TERRAIN_STEP, y-4*TERRAIN_STEP),
(x, y-4*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
self.fd_polygon.shape.vertices=[(p[0]+TERRAIN_STEP*counter,p[1]) for p in poly]
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
counter += 2
original_y = y
elif state==PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4*TERRAIN_STEP
elif state==STUMP and oneshot:
counter = self.np_random.randint(1, 3)
poly = [
(x, y),
(x+counter*TERRAIN_STEP, y),
(x+counter*TERRAIN_STEP, y+counter*TERRAIN_STEP),
(x, y+counter*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
elif state==STAIRS and oneshot:
stair_height = +1 if self.np_random.rand() > 0.5 else -1
stair_width = self.np_random.randint(4, 5)
stair_steps = self.np_random.randint(3, 5)
original_y = y
for s in range(stair_steps):
poly = [
(x+( s*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),
(x+((1+s)*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),
(x+((1+s)*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),
(x+( s*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
counter = stair_steps*stair_width
elif state==STAIRS and not oneshot:
s = stair_steps*stair_width - counter - stair_height
n = s/stair_width
y = original_y + (n*stair_height)*TERRAIN_STEP
oneshot = False
self.terrain_y.append(y)
counter -= 1
if counter==0:
counter = self.np_random.randint(TERRAIN_GRASS/2, TERRAIN_GRASS)
if state==GRASS and hardcore:
state = self.np_random.randint(1, _STATES_)
oneshot = True
else:
state = GRASS
oneshot = True
self.terrain_poly = []
for i in range(TERRAIN_LENGTH-1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i+1], self.terrain_y[i+1])
]
self.fd_edge.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_edge)
color = (0.3, 1.0 if i%2==0 else 0.8, 0.3)
t.color1 = color
t.color2 = color
self.terrain.append(t)
color = (0.4, 0.6, 0.3)
poly += [ (poly[1][0], 0), (poly[0][0], 0) ]
self.terrain_poly.append( (poly, color) )
self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH//20):
x = self.np_random.uniform(0, TERRAIN_LENGTH)*TERRAIN_STEP
y = VIEWPORT_H/SCALE*3/4
poly = [
(x+15*TERRAIN_STEP*math.sin(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP),
y+ 5*TERRAIN_STEP*math.cos(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP) )
for a in range(5) ]
x1 = min( [p[0] for p in poly] )
x2 = max( [p[0] for p in poly] )
self.cloud_poly.append( (poly,x1,x2) )
def reset(self):
self._destroy()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.game_over = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
W = VIEWPORT_W/SCALE
H = VIEWPORT_H/SCALE
self._generate_terrain(self.hardcore)
self._generate_clouds()
init_x = TERRAIN_STEP*TERRAIN_STARTPAD/2
init_y = TERRAIN_HEIGHT+2*LEG_H
self.hull = self.world.CreateDynamicBody(
position = (init_x, init_y),
fixtures = HULL_FD
)
self.hull.color1 = (0.5,0.4,0.9)
self.hull.color2 = (0.3,0.3,0.5)
self.hull.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)
self.legs = []
self.joints = []
for i in [-1,+1]:
leg = self.world.CreateDynamicBody(
position = (init_x, init_y - LEG_H/2 - LEG_DOWN),
angle = (i*0.05),
fixtures = LEG_FD
)
leg.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)
leg.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=leg,
localAnchorA=(0, LEG_DOWN),
localAnchorB=(0, LEG_H/2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed = i,
lowerAngle = -0.8,
upperAngle = 1.1,
)
self.legs.append(leg)
self.joints.append(self.world.CreateJoint(rjd))
lower = self.world.CreateDynamicBody(
position = (init_x, init_y - LEG_H*3/2 - LEG_DOWN),
angle = (i*0.05),
fixtures = LOWER_FD
)
lower.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)
lower.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)
rjd = revoluteJointDef(
bodyA=leg,
bodyB=lower,
localAnchorA=(0, -LEG_H/2),
localAnchorB=(0, LEG_H/2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed = 1,
lowerAngle = -1.6,
upperAngle = -0.1,
)
lower.ground_contact = False
self.legs.append(lower)
self.joints.append(self.world.CreateJoint(rjd))
self.drawlist = self.terrain + self.legs + [self.hull]
class LidarCallback(Box2D.b2.rayCastCallback):
def ReportFixture(self, fixture, point, normal, fraction):
if (fixture.filterData.categoryBits & 1) == 0:
return 1
self.p2 = point
self.fraction = fraction
return 0
self.lidar = [LidarCallback() for _ in range(10)]
return self.step(np.array([0,0,0,0]))[0]
def step(self, action):
#self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
        control_speed = False  # drive joints by target speed instead of torque (arguably easier to learn)
if control_speed:
self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))
self.world.Step(1.0/FPS, 6*30, 2*30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5*i/10.0)*LIDAR_RANGE,
pos[1] - math.cos(1.5*i/10.0)*LIDAR_RANGE)
self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.
2.0*self.hull.angularVelocity/FPS,
0.3*vel.x*(VIEWPORT_W/SCALE)/FPS, # Normalized to get -1..1 range
0.3*vel.y*(VIEWPORT_H/SCALE)/FPS,
            self.joints[0].angle,   # This will give 1.1 on high up, but it's still OK (and there should be spikes on hitting the ground, that's normal too)
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0
]
state += [l.fraction for l in self.lidar]
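        # state[14:24] are the 10 lidar range fractions appended above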
assert len(state)==24
self.scroll = pos.x - VIEWPORT_W/SCALE/5
shaping = 130*pos[0]/SCALE # moving forward is a way to receive reward (normalized to get 300 on completion)
shaping -= 5.0*abs(state[0]) # keep head straight, other than that and falling, any behavior is unpunished
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
if self.game_over or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH-TERRAIN_GRASS)*TERRAIN_STEP:
done = True
return np.array(state), reward, done, {}
def render(self, mode='human'):
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(self.scroll, VIEWPORT_W/SCALE + self.scroll, 0, VIEWPORT_H/SCALE)
self.viewer.draw_polygon( [
(self.scroll, 0),
(self.scroll+VIEWPORT_W/SCALE, 0),
(self.scroll+VIEWPORT_W/SCALE, VIEWPORT_H/SCALE),
(self.scroll, VIEWPORT_H/SCALE),
], color=(0.9, 0.9, 1.0) )
for poly,x1,x2 in self.cloud_poly:
if x2 < self.scroll/2: continue
if x1 > self.scroll/2 + VIEWPORT_W/SCALE: continue
self.viewer.draw_polygon( [(p[0]+self.scroll/2, p[1]) for p in poly], color=(1,1,1))
for poly, color in self.terrain_poly:
if poly[1][0] < self.scroll: continue
if poly[0][0] > self.scroll + VIEWPORT_W/SCALE: continue
self.viewer.draw_polygon(poly, color=color)
self.lidar_render = (self.lidar_render+1) % 100
i = self.lidar_render
if i < 2*len(self.lidar):
l = self.lidar[i] if i < len(self.lidar) else self.lidar[len(self.lidar)-i-1]
self.viewer.draw_polyline( [l.p1, l.p2], color=(1,0,0), linewidth=1 )
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans*f.shape.pos)
self.viewer.draw_circle(f.shape.radius, 30, color=obj.color1).add_attr(t)
self.viewer.draw_circle(f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)
else:
path = [trans*v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
flagy1 = TERRAIN_HEIGHT
flagy2 = flagy1 + 50/SCALE
x = TERRAIN_STEP*3
self.viewer.draw_polyline( [(x, flagy1), (x, flagy2)], color=(0,0,0), linewidth=2 )
f = [(x, flagy2), (x, flagy2-10/SCALE), (x+25/SCALE, flagy2-5/SCALE)]
self.viewer.draw_polygon(f, color=(0.9,0.2,0) )
self.viewer.draw_polyline(f + [f[0]], color=(0,0,0), linewidth=2 )
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class BipedalWalkerHardcore(BipedalWalker):
hardcore = True
if __name__=="__main__":
    # Heuristic: suboptimal, has no notion of balance.
env = BipedalWalker()
env.reset()
steps = 0
total_reward = 0
a = np.array([0.0, 0.0, 0.0, 0.0])
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1,2,3
SPEED = 0.29 # Will fall forward on higher speed
state = STAY_ON_ONE_LEG
moving_leg = 0
supporting_leg = 1 - moving_leg
SUPPORT_KNEE_ANGLE = +0.1
supporting_knee_angle = SUPPORT_KNEE_ANGLE
while True:
s, r, done, info = env.step(a)
total_reward += r
if steps % 20 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
print("hull " + str(["{:+0.2f}".format(x) for x in s[0:4] ]))
print("leg0 " + str(["{:+0.2f}".format(x) for x in s[4:9] ]))
print("leg1 " + str(["{:+0.2f}".format(x) for x in s[9:14]]))
steps += 1
contact0 = s[8]
contact1 = s[13]
moving_s_base = 4 + 5*moving_leg
supporting_s_base = 4 + 5*supporting_leg
hip_targ = [None,None] # -0.8 .. +1.1
knee_targ = [None,None] # -0.6 .. +0.9
hip_todo = [0.0, 0.0]
knee_todo = [0.0, 0.0]
if state==STAY_ON_ONE_LEG:
hip_targ[moving_leg] = 1.1
knee_targ[moving_leg] = -0.6
supporting_knee_angle += 0.03
if s[2] > SPEED: supporting_knee_angle += 0.03
supporting_knee_angle = min( supporting_knee_angle, SUPPORT_KNEE_ANGLE )
knee_targ[supporting_leg] = supporting_knee_angle
if s[supporting_s_base+0] < 0.10: # supporting leg is behind
state = PUT_OTHER_DOWN
if state==PUT_OTHER_DOWN:
hip_targ[moving_leg] = +0.1
knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE
knee_targ[supporting_leg] = supporting_knee_angle
if s[moving_s_base+4]:
state = PUSH_OFF
supporting_knee_angle = min( s[moving_s_base+2], SUPPORT_KNEE_ANGLE )
if state==PUSH_OFF:
knee_targ[moving_leg] = supporting_knee_angle
knee_targ[supporting_leg] = +1.0
if s[supporting_s_base+2] > 0.88 or s[2] > 1.2*SPEED:
state = STAY_ON_ONE_LEG
moving_leg = 1 - moving_leg
supporting_leg = 1 - moving_leg
if hip_targ[0]: hip_todo[0] = 0.9*(hip_targ[0] - s[4]) - 0.25*s[5]
if hip_targ[1]: hip_todo[1] = 0.9*(hip_targ[1] - s[9]) - 0.25*s[10]
if knee_targ[0]: knee_todo[0] = 4.0*(knee_targ[0] - s[6]) - 0.25*s[7]
if knee_targ[1]: knee_todo[1] = 4.0*(knee_targ[1] - s[11]) - 0.25*s[12]
        hip_todo[0] -= 0.9*(0-s[0]) - 1.5*s[1] # PID to keep head straight
hip_todo[1] -= 0.9*(0-s[0]) - 1.5*s[1]
knee_todo[0] -= 15.0*s[3] # vertical speed, to damp oscillations
knee_todo[1] -= 15.0*s[3]
a[0] = hip_todo[0]
a[1] = knee_todo[0]
a[2] = hip_todo[1]
a[3] = knee_todo[1]
a = np.clip(0.5*a, -1.0, 1.0)
env.render()
if done: break
| 39.637457 | 155 | 0.542243 | ["MIT"] | hbutsuak95/iv_rl | mbbl_envs/env/gym_env/box2d/walker.py | 23,069 | Python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import functools
from eventlet import greenthread
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
import cinder.policy
from cinder.openstack.common import timeutils
from cinder import quota
from cinder.db import base
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host where snapshot resides')
FLAGS = flags.FLAGS
FLAGS.register_opt(volume_host_opt)
flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
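# bytes per gigabyte (1024 ** 3)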
GB = 1048576 * 1024
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
super(API, self).__init__(db_driver)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
check_policy(context, 'create')
if snapshot is not None:
if snapshot['status'] != "available":
msg = _("status must be available")
raise exception.InvalidSnapshot(reason=msg)
if not size:
size = snapshot['volume_size']
snapshot_id = snapshot['id']
else:
snapshot_id = None
def as_int(s):
try:
return int(s)
except ValueError:
return s
# tolerate size as stringified int
size = as_int(size)
if not isinstance(size, int) or size <= 0:
msg = (_("Volume size '%s' must be an integer and greater than 0")
% size)
raise exception.InvalidInput(reason=msg)
if quota.allowed_volumes(context, 1, size) < 1:
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
" %(size)sG volume") % locals())
raise exception.QuotaError(code="VolumeSizeTooLarge")
if image_id:
# check image existence
image_meta = self.image_service.show(context, image_id)
image_size_in_gb = int(image_meta['size']) / GB
#check image size is not larger than volume size.
if image_size_in_gb > size:
msg = _('Size of specified image is larger than volume size.')
raise exception.InvalidInput(reason=msg)
if availability_zone is None:
availability_zone = FLAGS.storage_availability_zone
if volume_type is None:
volume_type_id = None
else:
volume_type_id = volume_type.get('id', None)
options = {
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'display_name': name,
'display_description': description,
'volume_type_id': volume_type_id,
'metadata': metadata,
}
volume = self.db.volume_create(context, options)
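        # hand the new volume off to the scheduler, which picks a volume host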
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "create_volume",
"args": {"topic": FLAGS.volume_topic,
"volume_id": volume['id'],
"snapshot_id": volume['snapshot_id'],
"image_id": image_id}})
return volume
def _cast_create_volume(self, context, volume_id, snapshot_id):
# NOTE(Rongze Zhu): It is a simple solution for bug 1008866
        # If snapshot_id is set, make the create-volume call directly to
        # the volume host where the snapshot resides instead of passing it
        # through the scheduler, so the snapshot can be copied to the new volume.
if snapshot_id and FLAGS.snapshot_same_host:
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
src_volume_ref = self.db.volume_get(context,
snapshot_ref['volume_id'])
topic = rpc.queue_get_for(context,
FLAGS.volume_topic,
src_volume_ref['host'])
rpc.cast(context,
topic,
{"method": "create_volume",
"args": {"volume_id": volume_id,
"snapshot_id": snapshot_id}})
else:
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "create_volume",
"args": {"topic": FLAGS.volume_topic,
"volume_id": volume_id,
"snapshot_id": snapshot_id}})
@wrap_check_policy
def delete(self, context, volume):
volume_id = volume['id']
if not volume['host']:
# NOTE(vish): scheduling failed, so delete it
self.db.volume_destroy(context, volume_id)
return
if volume['status'] not in ["available", "error"]:
msg = _("Volume status must be available or error")
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
host = volume['host']
rpc.cast(context,
rpc.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "delete_volume",
"args": {"volume_id": volume_id}})
@wrap_check_policy
def update(self, context, volume, fields):
self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id):
rv = self.db.volume_get(context, volume_id)
volume = dict(rv.iteritems())
check_policy(context, 'get', volume)
return volume
def get_all(self, context, search_opts=None):
check_policy(context, 'get_all')
if search_opts is None:
search_opts = {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
volumes = self.db.volume_get_all(context)
else:
volumes = self.db.volume_get_all_by_project(context,
context.project_id)
if search_opts:
LOG.debug(_("Searching by: %s") % str(search_opts))
def _check_metadata_match(volume, searchdict):
volume_metadata = {}
for i in volume.get('volume_metadata'):
volume_metadata[i['key']] = i['value']
for k, v in searchdict.iteritems():
if (k not in volume_metadata.keys() or
volume_metadata[k] != v):
return False
return True
# search_option to filter_name mapping.
filter_mapping = {'metadata': _check_metadata_match}
result = []
for volume in volumes:
# go over all filters in the list
for opt, values in search_opts.iteritems():
try:
filter_func = filter_mapping[opt]
except KeyError:
# no such filter - ignore it, go to next filter
continue
else:
if filter_func(volume, values):
result.append(volume)
break
volumes = result
return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
return self.db.snapshot_get_all(context)
else:
return self.db.snapshot_get_all_by_project(context,
context.project_id)
@wrap_check_policy
def check_attach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("already detached")
raise exception.InvalidVolume(reason=msg)
def remove_from_compute(self, context, volume, instance_id, host):
"""Remove volume from specified compute host."""
rpc.call(context,
rpc.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "remove_volume_connection",
"args": {'instance_id': instance_id,
'volume_id': volume['id']}})
@wrap_check_policy
def reserve_volume(self, context, volume):
self.update(context, volume, {"status": "attaching"})
@wrap_check_policy
def unreserve_volume(self, context, volume):
if volume['status'] == "attaching":
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, mountpoint):
host = volume['host']
queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
return rpc.call(context, queue,
{"method": "attach_volume",
"args": {"volume_id": volume['id'],
"instance_uuid": instance_uuid,
"mountpoint": mountpoint}})
@wrap_check_policy
def detach(self, context, volume):
host = volume['host']
queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
return rpc.call(context, queue,
{"method": "detach_volume",
"args": {"volume_id": volume['id']}})
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
host = volume['host']
queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
return rpc.call(context, queue,
{"method": "initialize_connection",
"args": {"volume_id": volume['id'],
"connector": connector}})
@wrap_check_policy
def terminate_connection(self, context, volume, connector):
self.unreserve_volume(context, volume)
host = volume['host']
queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
return rpc.call(context, queue,
{"method": "terminate_connection",
"args": {"volume_id": volume['id'],
"connector": connector}})
def _create_snapshot(self, context, volume, name, description,
force=False):
check_policy(context, 'create_snapshot', volume)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
options = {
'volume_id': volume['id'],
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description}
snapshot = self.db.snapshot_create(context, options)
host = volume['host']
rpc.cast(context,
rpc.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "create_snapshot",
"args": {"volume_id": volume['id'],
"snapshot_id": snapshot['id']}})
return snapshot
def create_snapshot(self, context, volume, name, description):
return self._create_snapshot(context, volume, name, description,
False)
def create_snapshot_force(self, context, volume, name, description):
return self._create_snapshot(context, volume, name, description,
True)
@wrap_check_policy
def delete_snapshot(self, context, snapshot):
if snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidVolume(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
host = volume['host']
rpc.cast(context,
rpc.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "delete_snapshot",
"args": {"snapshot_id": snapshot['id']}})
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from an volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
            _metadata = self.get_volume_metadata(context, volume)
_metadata.update(metadata)
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
return _metadata
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
def _check_volume_availability(self, context, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
self._check_volume_availability(context, volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
rpc.cast(context,
rpc.queue_get_for(context,
FLAGS.volume_topic,
volume['host']),
{"method": "copy_volume_to_image",
"args": {"volume_id": volume['id'],
"image_id": recv_metadata['id']}})
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)
}
return response
| 39.509474 | 79 | 0.577929 | [
"Apache-2.0"
] | CiscoSystems/cinder-old | cinder/volume/api.py | 18,767 | Python |
import logging
from redlib.api.misc import Logger
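# Module-level logger shared by the package; started at import time so that
# importing modules can log to stdout immediately.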
log = Logger(name='jekt')
log.start('stdout', logging.DEBUG)
| 14.25 | 34 | 0.745614 | [
"MIT"
] | amol9/jekt | jekt/logger.py | 114 | Python |
"""
See notebook 5 for example use of show_mri_sample()
"""
import glob
import os
import random
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import cv2
import scipy.ndimage as ndimage
def make_bg_transparent(im, bg_th=0.0, set_to_color=None):
# create transparency alpha channel
# convert image to RGBA
if len(im.shape) == 3:
alpha_c = (np.sum(im[:,:,:],axis=2) > bg_th).astype(im.dtype)
c1,c2,c3 = cv2.split(im)
else:
alpha_c = (im[:,:] > bg_th).astype(im.dtype)
c1,c2,c3 = im.copy(), im.copy(), im.copy()
if set_to_color is not None:
zeros = np.zeros_like(c1)
if set_to_color == 'green':
merged = np.stack([zeros,c2,zeros,alpha_c], axis=-1)
elif set_to_color == 'red':
merged = np.stack([c1,zeros,zeros,alpha_c], axis=-1)
elif set_to_color == 'royalblue':
            merged = np.stack([zeros,zeros,c3,alpha_c], axis=-1)
elif set_to_color == 'violet':
merged = np.stack([c1,zeros,c3,alpha_c], axis=-1)
elif set_to_color == 'yellow':
merged = np.stack([c1,c2,zeros,alpha_c], axis=-1)
else:
merged = np.stack([c1,c2,c3,alpha_c], axis=-1)
return merged
def to_3d_points(im, th=1e-6, downsample=5):
xs,ys,ds = [],[],[]
if len(im.shape) == 4:
im3d = np.sum(im,axis=3)
else:
im3d = im
depth,width,height = im3d.shape
step_vol = downsample**3
for x in range(0, width - downsample, downsample):
for y in range(0, height - downsample, downsample):
for d in range(0, depth - downsample, downsample):
if (np.sum(im3d[d:d+downsample, x:x+downsample, y:y+downsample]) / step_vol) > th:
xs.append(x + (downsample//2))
ys.append(y + (downsample//2))
ds.append(d + (downsample//2))
return np.array(xs), np.array(ys), np.array(ds)
def adjust_saturation(img, sat_scale=0.3):
hsv_im = cv2.cvtColor((img * 255).astype(np.uint8), cv2.COLOR_RGB2HSV)
(h, s, v) = cv2.split(hsv_im)
s = s*sat_scale
s = np.clip(s,0,255)
hsv_im = np.stack([h,s,v],axis=2).astype(np.uint8)
return cv2.cvtColor(hsv_im, cv2.COLOR_HSV2RGB) / 255.
def show_mri_sample(sample, pred_mask=None, pred_lbl=None, seg_downsample=None, save_fn=None):
""" Plot sample in three projections """
plt.close('all')
alpha=0.5
image_alpha=1.0
ims = sample['image'].numpy()
means = sample['mean'].numpy()
stds = sample['std'].numpy()
segs = sample['segmentation'].numpy() if 'segmentation' in sample else None
# add batch dims if missing
if ims.ndim == 4:
ims = np.expand_dims(ims, 0)
means = np.expand_dims(means, 0)
stds = np.expand_dims(stds, 0)
if segs is not None:
segs = np.expand_dims(segs, 0)
n_images = len(ims)
n_root = int(np.ceil(np.sqrt(n_images)))
n_cols = n_root * 2
n_rows = n_root * 2
# special case fix to get with correct with small bs
if n_images == 2:
n_rows = 2
fig_scale = 2
f = plt.figure(figsize=(fig_scale*n_cols,fig_scale*n_rows))
# Read additional meta from batch
brats_ids = [sample['BraTSID']] if n_images == 1 else sample['BraTSID']
labels = None
if 'label' in sample:
labels = [sample['label']] if n_images == 1 else sample['label']
def _subplot_index(index, row_off, col_off):
startrow = (index * 2)//n_cols
startcol = (index * 2)%n_cols
return (2*startrow+row_off)*n_cols + (startcol + col_off) + 1
for index in range(n_images):
im = ims[index]
        seg = segs[index] if segs is not None else None
        if seg is not None:
            seg = np.swapaxes(seg, 0, 3)
            # upsample seg back to original size if it has been downsampled
            if seg_downsample is not None:
                seg = seg.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
# Normalize images for visualization
im = np.swapaxes(im, 0,3) # swap depth and chan axes
im = (im * stds[index]) + means[index]
title = f'BraTSID: {brats_ids[index]}'
if labels is not None:
title += f', GT-MGMT:{labels[index]}'
if pred_lbl is not None:
title += f'\nPred-MGMT:{float(pred_lbl[index][0]):.3f}'
d,x,y,c = im.shape
coronal_ax = f.add_subplot(n_rows,n_cols, _subplot_index(index,0,0))
coronal_ax.set_title(title + ' - coronal', fontsize=8)
coronal_ax.imshow(make_bg_transparent(adjust_saturation(im[::-1,x//2,:,:])), alpha=image_alpha)
sagittal_ax = f.add_subplot(n_rows,n_cols,_subplot_index(index,0,1))
sagittal_ax.set_title(title + ' - sagittal', fontsize=8)
sagittal_ax.get_yaxis().set_visible(False)
sagittal_ax.imshow(make_bg_transparent(adjust_saturation(im[::-1,:,y//2,:])), alpha=image_alpha)
axial_ax = f.add_subplot(n_rows,n_cols,_subplot_index(index,1,0))
axial_ax.set_title(title + ' - axial', fontsize=8)
axial_ax.imshow(make_bg_transparent(adjust_saturation(im[d//2,:,:,:])), alpha=image_alpha)
proj_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index,1,1), projection='3d')
proj_ax.scatter(*to_3d_points(im), color='gray', alpha=0.015, s=5, depthshade=False)
proj_ax.set_title(f'Green=GT-tumor, Red=Pred-tumor\n{title}', fontsize=6)
proj_ax.set_xticks([])
proj_ax.set_yticks([])
proj_ax.set_zticks([])
if seg is not None:
for seg_chan, color in zip(range(seg.shape[3]),['green']):
coronal_ax.imshow(make_bg_transparent(seg[::-1,x//2,:,seg_chan], set_to_color=color), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(seg[::-1,:,y//2,seg_chan], set_to_color=color), alpha=alpha)
axial_ax.imshow(make_bg_transparent(seg[d//2,:,:,seg_chan], set_to_color=color), alpha=alpha)
proj_ax.scatter(*to_3d_points(seg[:,:,:,seg_chan]), color=color, s=5, alpha=0.05)
if pred_mask is not None:
pred = np.swapaxes(pred_mask[index].cpu().numpy(), 0,3)
pred = np.clip(pred, 0, 1.)
# upsample seg back to original size if it has been downsampled
if seg_downsample is not None:
pred = pred.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
for seg_chan, color in zip(range(pred.shape[3]),['red']):
coronal_ax.imshow(make_bg_transparent(pred[::-1,x//2,:, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(pred[::-1,:,y//2, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
axial_ax.imshow(make_bg_transparent(pred[d//2,:,:, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
proj_ax.scatter(*to_3d_points(pred[:,:,:,seg_chan], th=0.5), color=color, s=5, alpha=0.05)
# draw axial lines
coronal_ax.plot([0,x-1],[d//2,d//2],'--',color='white', linewidth=1) # coronal horizontal
coronal_ax.plot([x//2,x//2],[0,d-1],'--',color='white', linewidth=1) # coronal vertical
sagittal_ax.plot([0,y-1],[d//2,d//2],'--',color='white', linewidth=1) # sagittal horizontal
sagittal_ax.plot([y//2,y//2],[0,d-1],'--',color='white', linewidth=1) # sagittal vertical
axial_ax.plot([0,y-1],[x//2,x//2],'--',color='white', linewidth=1) # axial horizontal
axial_ax.plot([x//2,x//2],[0,y-1],'--',color='white', linewidth=1) # axial vertical
plt.subplots_adjust(left=0.00,top=1.,right=1.,bottom=0.00, wspace=0.15, hspace=0.15)
bbox = f.get_window_extent().transformed(f.dpi_scale_trans.inverted())
width, height = bbox.width*f.dpi, bbox.height*f.dpi
width *= 1.05
height *= 1.05
#if n_images == 2:
# n_rows = 2
for row in range(0, n_rows,2):
if n_images == 2 and row > 0:
break
for col in range(0, n_cols,2):
different_color = (row//2) % 2 == (col//2) % 2
color = (1,1,1) if different_color else (0.8,0.8,0.8)
f.patches.extend([
plt.Rectangle(
(width * col / n_cols, height * (n_rows - row - 2) / n_rows),
width / max(1,n_cols//2),
height / max(1,n_rows//2),
fill=True,
color=color,
zorder=-1, # below axes
alpha=0.5,
transform=None,
figure=f)
])
if save_fn is not None:
plt.savefig(save_fn, transparent=False)
else:
plt.show() | 43.21256 | 128 | 0.585802 | [
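if __name__ == '__main__':
    # Minimal smoke test on synthetic data. The tensor shapes are assumptions
    # inferred from the plotting code above -- image and segmentation laid out
    # as (channels, x, y, depth), with three image channels so the RGB
    # saturation adjustment works -- not the real BraTS loader output.
    sample = {
        'BraTSID': 0,
        'label': 1,
        'image': torch.rand(3, 64, 64, 32),
        'mean': torch.zeros(3),
        'std': torch.ones(3),
        'segmentation': (torch.rand(1, 64, 64, 32) > 0.95).float(),
    }
    show_mri_sample(sample)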
"MIT"
] | jpjuvo/RSNA-MICCAI-Brain-Tumor-Classification | src/seg_model_utils/visualization.py | 8,945 | Python |
import glob
import pandas as pd
import os
import datetime
class DataMerge:
def __init__(self, directory):
self.directory = directory
self.__data = self.get_data_from(self.directory)
def date_to_int(self, dates):
"""
calculates number of days between 01/01/0001 and each date in dates
date has format '%m/%d/%Y'
:param dates: Pandas Series
:return: list
"""
ret = []
for date in dates:
date0 = datetime.datetime(year=1, month=1, day=1)
datex = datetime.datetime.strptime(date, '%m/%d/%Y')
ret.append((datex - date0).days)
return ret
def get_data_from(self, dir):
files = glob.glob(f'{dir}/*')
if files == []:
            raise FileNotFoundError(f'directory {dir} does not contain any .csv file')
data = None
for file in files:
if file == f'{dir}/merged_data.csv':
continue
if data is None:
data = pd.read_csv(file)
continue
temp_data = pd.read_csv(file)
temp_data = temp_data.dropna(axis=1)
data = data.append(temp_data)
        data = data.drop_duplicates()
data = data.sort_values('Date', ascending=False, key=self.date_to_int)
        data = data[:408]  # keep only the 408 most recent rows
data.to_csv(f"{dir}/merged_data.csv", index=False)
return data
def get_data(self):
return self.__data | 30.1875 | 78 | 0.562457 | [
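# Minimal usage sketch (directory name is hypothetical); expects a folder of
# CSV files that share a 'Date' column in '%m/%d/%Y' format:
#   dm = DataMerge('stock_data')
#   df = dm.get_data()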
"MIT"
] | repeating/stock-analyzer | backend/data_merge.py | 1,449 | Python |
import jax.numpy as np
import matplotlib.pyplot as plt
def plot(vi, X,
target='vanishing',
n=1000, scale=1.5, x_max=1.0, y_max=1.0,
z_func=lambda x_, y_: 0.0,
show=False, splitshow=False):
nvars = X.shape[-1]
if nvars == 2:
_plot2d(vi, X, target=target,
n=n, scale=scale, x_max=x_max, y_max=y_max,
show=show, splitshow=splitshow)
elif nvars == 3:
_plot3d(vi, X, z_func, target=target,
n=n, scale=scale, x_max=x_max, y_max=y_max,
show=show, splitshow=splitshow)
else:
print(f'Cannot plot {nvars}-variate polynomials')
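# Example call (assuming `vi` is a fitted vanishing-ideal object and X is an
# (n_samples, 2) or (n_samples, 3) array):
#   plot(vi, X, target='vanishing', show=True)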
def _plot2d(vi, X, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):
## set plot range
m = np.mean(X, axis=0)
x_max = y_max = np.max(np.abs(X))
# x = np.arange(-scale*x_max, scale*x_max, resolution)
# y = np.arange(-scale*y_max, scale*y_max, resolution)
x = np.linspace(-scale*x_max, scale*x_max, 50)
y = np.linspace(-scale*y_max, scale*y_max, 50)
Z1, Z2 = np.meshgrid(x, y)
## set plot setting
npolys = 0
if target == 'vanishing':
# npolys = sum([Gt.shape[-1] for Gt in vi.basis.vanishings()])
npolys = sum([Bt.n_vanishings() for Bt in vi.basis])
# npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])
elif target == 'nonvanishing':
npolys = sum([Bt.n_nonvanishings() for Bt in vi.basis])
colors = plt.cm.Dark2(np.linspace(0,1,8))
linestyles = ['solid','dashed','dashdot', 'dotted']
nfigs = min(npolys, n)
for i in range(nfigs):
f = lambda x_, y_: vi.evaluate(np.array([[x_,y_]]), target=target)[0,i]
f = np.vectorize(f)
plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])
if splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
if not splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
# plt.savefig('graph_Z.pdf')
if not splitshow and show:
plt.show()
def _plot3d(vi, X, z_func, target='vanishing', n=1000, scale=1.5, x_max=1.0, y_max=1.0, show=False, splitshow=False):
## set plot range
m = np.mean(X, axis=0)
x_max = y_max = np.max(np.abs(X))
x = np.linspace(-scale*x_max, scale*x_max, 50)
y = np.linspace(-scale*y_max, scale*y_max, 50)
Z1, Z2 = np.meshgrid(x, y)
## set plot setting
npolys = 0
if target == 'vanishing':
npolys = sum([np.asarray(Gt).shape[-1] for Gt in vi.basis.vanishings()])
# npolys = sum([len(Gt) for Gt in vi.basis.vanishings()])
elif target == 'nonvanishing':
npolys = sum([np.asarray(Ft).shape[-1] for Ft in vi.basis.nonvanishings()])
else:
print('unknown target: %s' % target)
colors = plt.cm.Dark2(np.linspace(0,1,8))
linestyles = ['solid','dashed','dashdot', 'dotted']
nfigs = min(npolys, n)
for i in range(nfigs):
f = lambda x_, y_: vi.evaluate(np.array([[x_,y_, z_func(x_,y_)]]), target=target)[0,i]
f = np.vectorize(f)
plt.contour(Z1,Z2,f(Z1, Z2), levels=[0], colors=[colors[i%len(colors)]], linewidths=[1.], linestyles=[linestyles[i%4]])
if splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
if not splitshow:
plt.plot(X[:,0], X[:,1], 'o', mfc='none', alpha=0.8)
plt.gca().set_aspect('equal', adjustable='box')
# plt.savefig('graph_Z.pdf')
if not splitshow and show:
plt.show()
| 35.877358 | 127 | 0.576913 | [
"MIT"
] | HiroshiKERA/monomial-agnostic-vanishing-ideal | mavi/jax/util/plot.py | 3,803 | Python |
"""Code generation utilities"""
from .utils import SchemaInfo, is_valid_identifier, indent_docstring, indent_arglist
import textwrap
import re
class CodeSnippet(object):
"""Object whose repr() is a string of code"""
def __init__(self, code):
self.code = code
def __repr__(self):
return self.code
def _get_args(info):
"""Return the list of args & kwds for building the __init__ function"""
# TODO: - set additional properties correctly
# - handle patternProperties etc.
required = set()
kwds = set()
invalid_kwds = set()
# TODO: specialize for anyOf/oneOf?
if info.is_allOf():
# recursively call function on all children
arginfo = [_get_args(child) for child in info.allOf]
nonkeyword = all(args[0] for args in arginfo)
required = set.union(set(), *(args[1] for args in arginfo))
kwds = set.union(set(), *(args[2] for args in arginfo))
kwds -= required
invalid_kwds = set.union(set(), *(args[3] for args in arginfo))
additional = all(args[4] for args in arginfo)
elif info.is_empty() or info.is_compound():
nonkeyword = True
additional = True
elif info.is_value():
nonkeyword = True
        additional = False
elif info.is_object():
invalid_kwds = ({p for p in info.required if not is_valid_identifier(p)} |
{p for p in info.properties if not is_valid_identifier(p)})
required = {p for p in info.required if is_valid_identifier(p)}
kwds = {p for p in info.properties if is_valid_identifier(p)}
kwds -= required
nonkeyword = False
additional = True
#additional = info.additionalProperties or info.patternProperties
else:
raise ValueError("Schema object not understood")
return (nonkeyword, required, kwds, invalid_kwds, additional)
class SchemaGenerator(object):
"""Class that defines methods for generating code from schemas
Parameters
----------
classname : string
The name of the class to generate
schema : dict
The dictionary defining the schema class
rootschema : dict (optional)
The root schema for the class
basename : string (default: "SchemaBase")
The name of the base class to use in the class definition
schemarepr : CodeSnippet or object, optional
An object whose repr will be used in the place of the explicit schema.
This can be useful, for example, when the generated code should reference
a predefined schema object. The user must ensure that the schema within
the evaluated code is identical to the schema used to generate the code.
rootschemarepr : CodeSnippet or object, optional
An object whose repr will be used in the place of the explicit root
schema.
"""
schema_class_template = textwrap.dedent('''
class {classname}({basename}):
"""{docstring}"""
_schema = {schema!r}
_rootschema = {rootschema!r}
{init_code}
''')
init_template = textwrap.dedent("""
def __init__({arglist}):
super({classname}, self).__init__({super_arglist})
""").lstrip()
def _process_description(self, description):
return description
def __init__(self, classname, schema, rootschema=None,
basename='SchemaBase', schemarepr=None, rootschemarepr=None,
nodefault=()):
self.classname = classname
self.schema = schema
self.rootschema = rootschema
self.basename = basename
self.schemarepr = schemarepr
self.rootschemarepr = rootschemarepr
self.nodefault = nodefault
def schema_class(self):
"""Generate code for a schema class"""
rootschema = self.rootschema if self.rootschema is not None else self.schema
schemarepr = self.schemarepr if self.schemarepr is not None else self.schema
rootschemarepr = self.rootschemarepr
if rootschemarepr is None:
if rootschema is self.schema:
rootschemarepr = CodeSnippet('_schema')
else:
rootschemarepr = rootschema
return self.schema_class_template.format(
classname=self.classname,
basename=self.basename,
schema=schemarepr,
rootschema=rootschemarepr,
docstring=self.docstring(indent=4),
init_code=self.init_code(indent=4)
)
def docstring(self, indent=0):
# TODO: add a general description at the top, derived from the schema.
# for example, a non-object definition should list valid type, enum
# values, etc.
# TODO: use _get_args here for more information on allOf objects
info = SchemaInfo(self.schema, self.rootschema)
doc = ["{} schema wrapper".format(self.classname),
'',
info.medium_description]
if info.description:
            # strip the {...} condition block from the description
            doc += self._process_description(
                re.sub(r"\n\{\n(\n|.)*\n\}", '', info.description)).splitlines()
if info.properties:
nonkeyword, required, kwds, invalid_kwds, additional = _get_args(info)
doc += ['',
'Attributes',
'----------',
'']
for prop in sorted(required) + sorted(kwds) + sorted(invalid_kwds):
propinfo = info.properties[prop]
doc += ["{} : {}".format(prop, propinfo.short_description),
" {}".format(self._process_description(propinfo.description))]
if len(doc) > 1:
doc += ['']
return indent_docstring(doc, indent_level=indent, width=100, lstrip=True)
def init_code(self, indent=0):
"""Return code suitablde for the __init__ function of a Schema class"""
info = SchemaInfo(self.schema, rootschema=self.rootschema)
        nonkeyword, required, kwds, invalid_kwds, additional = _get_args(info)
        nodefault = set(self.nodefault)
required -= nodefault
kwds -= nodefault
args = ['self']
super_args = []
if nodefault:
args.extend(sorted(nodefault))
elif nonkeyword:
args.append('*args')
super_args.append('*args')
args.extend('{}=Undefined'.format(p)
for p in sorted(required) + sorted(kwds))
super_args.extend('{0}={0}'.format(p)
for p in sorted(nodefault) + sorted(required) + sorted(kwds))
if additional:
args.append('**kwds')
super_args.append('**kwds')
arg_indent_level = 9 + indent
super_arg_indent_level = 23 + len(self.classname) + indent
initfunc = self.init_template.format(classname=self.classname,
arglist=indent_arglist(args, indent_level=arg_indent_level),
super_arglist=indent_arglist(super_args, indent_level=super_arg_indent_level))
if indent:
initfunc = ('\n' + indent * ' ').join(initfunc.splitlines())
return initfunc
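# Minimal usage sketch (the schema is illustrative):
#   gen = SchemaGenerator('Point',
#                         schema={'type': 'object',
#                                 'properties': {'x': {'type': 'number'},
#                                                'y': {'type': 'number'}}})
#   print(gen.schema_class())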
| 38.343915 | 123 | 0.611425 | [
"BSD-3-Clause"
] | aladdingsw/altair | tools/schemapi/codegen.py | 7,247 | Python |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is largely a wrapper around `jaxlib` that performs version
# checking on import.
import jaxlib
_minimum_jaxlib_version = (0, 1, 38)
try:
from jaxlib import version as jaxlib_version
except ImportError:
# jaxlib is too old to have version number.
msg = 'This version of jax requires jaxlib version >= {}.'
raise ImportError(msg.format('.'.join(map(str, _minimum_jaxlib_version))))
version = tuple(int(x) for x in jaxlib_version.__version__.split('.'))
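# e.g. jaxlib version string '0.1.55' becomes (0, 1, 55)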
# Check the jaxlib version before importing anything else from jaxlib.
def _check_jaxlib_version():
if version < _minimum_jaxlib_version:
msg = 'jaxlib is version {}, but this version of jax requires version {}.'
if version == (0, 1, 23):
msg += ('\n\nA common cause of this error is that you installed jaxlib '
'using pip, but your version of pip is too old to support '
'manylinux2010 wheels. Try running:\n\n'
'pip install --upgrade pip\n'
'pip install --upgrade jax jaxlib\n')
raise ValueError(msg.format('.'.join(map(str, version)),
'.'.join(map(str, _minimum_jaxlib_version))))
_check_jaxlib_version()
try:
from jaxlib import tpu_client # pytype: disable=import-error
except ImportError:
tpu_client = None
from jaxlib import xla_client
from jaxlib import lapack
from jaxlib import pytree
from jaxlib import cusolver
try:
from jaxlib import cuda_prng
except ImportError:
cuda_prng = None
| 34.233333 | 80 | 0.709348 | [
"ECL-2.0",
"Apache-2.0"
] | Circletana/jax | jax/lib/__init__.py | 2,054 | Python |
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import sys
sys.path.append('..')
from nlpcommon import stopwords
if __name__ == '__main__':
print(len(stopwords), stopwords)
| 16.230769 | 36 | 0.668246 | [
"Apache-2.0"
] | shibing624/nlpcommon | examples/base_demo.py | 211 | Python |
import wmi
import speedtest_cli
import threading
import signal
import os
import json
def testSpeed(urls):
speedtest_cli.shutdown_event = threading.Event()
signal.signal(signal.SIGINT, speedtest_cli.ctrl_c)
print "Start to test download speed: "
dlspeed = speedtest_cli.downloadSpeed(urls)
dlspeed = (dlspeed / 1000 / 1000)
print('Download: %0.2f M%s/s' % (dlspeed, 'B'))
return dlspeed
def setGateway(wmiObj, gateway):
ip = '192.168.8.84'
subnetmask = '255.255.255.0'
configurations = wmiObj.Win32_NetworkAdapterConfiguration(Description="Realtek PCIe GBE Family Controller", IPEnabled=True)
if len(configurations) == 0:
print "No service available"
return
configuration = configurations[0]
# ret = configuration.EnableStatic(IPAddress=[ip],SubnetMask=[subnetmask])
ret = configuration.SetGateways(DefaultIPGateway=[gateway])
return ret
def checkGatewayStatus(urls):
if not urls:
urls = ["http://www.dynamsoft.com/assets/images/logo-index-dwt.png", "http://www.dynamsoft.com/assets/images/logo-index-dnt.png", "http://www.dynamsoft.com/assets/images/logo-index-ips.png", "http://www.codepool.biz/wp-content/uploads/2015/06/django_dwt.png", "http://www.codepool.biz/wp-content/uploads/2015/07/drag_element.png"]
# Query current gateway
wmiObj = wmi.WMI()
sql = "select IPAddress,DefaultIPGateway from Win32_NetworkAdapterConfiguration where Description=\"Realtek PCIe GBE Family Controller\" and IPEnabled=TRUE"
configurations = wmiObj.query(sql)
currentGateway = None
for configuration in configurations:
currentGateway = configuration.DefaultIPGateway[0]
print "IPAddress:", configuration.IPAddress[0], "DefaultIPGateway:", currentGateway
dlspeed = testSpeed(urls)
bestChoice = (currentGateway, dlspeed)
print "Init choice: " + str(bestChoice)
gateways = ["192.168.8.1", "192.168.8.2"] # define gateways
settingReturn = 0
gateways.remove(currentGateway)
for gateway in gateways:
settingReturn = setGateway(wmiObj, gateway)
if (settingReturn[0] != 0):
print "Setting failed"
return
print "Set gateway: " + gateway
dlspeed = testSpeed(urls)
option = (gateway, dlspeed)
print "Network option: " + str(option)
if (option[1] > bestChoice[1]):
bestChoice = option
print "Best choice: " + str(bestChoice)
setGateway(wmiObj, bestChoice[0])
try:
input("Press any key to continue: ")
except:
print('Finished')
def readConfigurationFile():
urls = None
config = 'config.json'
if os.path.exists(config):
with open(config) as file:
content = file.read()
try:
config_json = json.loads(content)
urls = config_json['urls']
except:
pass
return urls
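# Expected config.json layout (inferred from the keys read above):
#   {"urls": ["http://example.com/a.png", "http://example.com/b.png"]}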
def main():
urls = readConfigurationFile()
checkGatewayStatus(urls)
if __name__ == '__main__':
main()
| 30.868687 | 338 | 0.66394 | [
"Apache-2.0"
] | yushulx/switch-windows-gateway | network.py | 3,056 | Python |
import pymysql.cursors
from model.group import Group
from model.contact import Contact
class DbFixture:
    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)
    def get_group_list(self):
        list = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (id, name, header, footer) = row
                list.append(Group(id=str(id), name=name, header=header, footer=footer))
        finally:
            cursor.close()
        return list
    def get_contact_list(self):
        list = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (id, firstname, lastname) = row
                list.append(Contact(id=str(id), firstname=firstname, lastname=lastname))
        finally:
            cursor.close()
        return list
    def get_full_contact_list(self):
        list = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname, address, CONCAT(email, email2, email3), CONCAT(home, mobile, work, phone2) from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (id, firstname, lastname, address, full_email, full_phone) = row
                list.append(Contact(id=str(id), firstname=firstname, lastname=lastname, all_emails_from_home_page=full_email, all_phones_from_home_page=full_phone, address=address))
        finally:
            cursor.close()
        return list
    def destroy(self):
        self.connection.close()
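# Minimal usage sketch (connection parameters are hypothetical):
#   db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   groups = db.get_group_list()
#   db.destroy()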
"Apache-2.0"
] | AnastasiiaAndronova/python_training | fixture/db.py | 1,969 | Python |
from __future__ import absolute_import
import os
import json
import redis
ENV = os.getenv('ENV', 'local')
if ENV == 'docker':
rdb = redis.Redis(db=0, host='redis')
else:
rdb = redis.Redis(db=11)
def emit(typ, **kwargs):
kwargs['type'] = typ
rdb.publish('actions', json.dumps(kwargs))
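# Example (event name and payload are hypothetical):
#   emit('GUILD_UPDATE', id=1234)
# publishes {"type": "GUILD_UPDATE", "id": 1234} on the 'actions' channel.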
| 16.105263 | 46 | 0.656863 | [
"MIT"
] | Craftzman7/rowboat | rowboat/redis.py | 306 | Python |
#!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
import cv2
import numpy as np
from keras_retinanet.utils.transform import random_transform_generator
from keras_retinanet.utils.visualization import draw_annotations, draw_boxes, draw_caption
from keras_retinanet.utils.colors import label_color
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin
__package__ = "keras_maskrcnn.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from ..utils.visualization import draw_mask
def create_generator(args):
# create random transform generator for augmenting training data
transform_generator = random_transform_generator(
# min_rotation=-0.1,
# max_rotation=0.1,
# min_translation=(-0.1, -0.1),
# max_translation=(0.1, 0.1),
# min_shear=-0.1,
# max_shear=0.1,
# min_scaling=(0.9, 0.9),
# max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
# flip_y_chance=0.5,
)
if args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from ..preprocessing.coco import CocoGenerator
generator = CocoGenerator(
args.coco_path,
args.coco_set,
transform_generator=transform_generator
)
elif args.dataset_type == 'csv':
from ..preprocessing.csv_generator import CSVGenerator
generator = CSVGenerator(
args.annotations,
args.classes,
transform_generator=transform_generator
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return generator
def parse_args(args):
parser = argparse.ArgumentParser(description='Debug script for a RetinaNet-MaskRCNN network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
coco_parser.add_argument('--coco-set', help='Name of the set to show (defaults to val2017).', default='val2017')
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to a CSV file containing annotations for evaluation.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
parser.add_argument('-l', '--loop', help='Loop forever, even if the dataset is exhausted.', action='store_true')
parser.add_argument('--no-resize', help='Disable image resizing.', dest='resize', action='store_false')
parser.add_argument('--anchors', help='Show positive anchors on the image.', action='store_true')
parser.add_argument('--annotations', help='Show annotations on the image. Green annotations have anchors, red annotations don\'t and therefore don\'t contribute to training.', action='store_true')
parser.add_argument('--masks', help='Show annotated masks on the image.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
return parser.parse_args(args)
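# Example invocations (paths are hypothetical):
#   debug.py coco /data/COCO --coco-set val2017 --annotations --masks
#   debug.py csv annotations.csv classes.csv --random-transform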
def run(generator, args):
# display images, one at a time
for i in range(generator.size()):
# load the data
image = generator.load_image(i)
annotations, masks = generator.load_annotations(i)
# apply random transformations
if args.random_transform:
image, annotations, masks = generator.random_transform_group_entry(image, annotations, masks)
# resize the image and annotations
if args.resize:
image, image_scale = generator.resize_image(image)
annotations[:, :4] *= image_scale
for m in range(len(masks)):
masks[m], _ = generator.resize_image(masks[m])
# draw anchors on the image
if args.anchors:
labels, _, anchors = generator.compute_anchor_targets(image.shape, annotations, generator.num_classes())
draw_boxes(image, anchors[np.max(labels, axis=1) == 1], (255, 255, 0), thickness=1)
# draw annotations on the image
if args.annotations:
# draw annotations in red
draw_annotations(image, annotations, color=(0, 0, 255), label_to_name=generator.label_to_name)
# draw regressed anchors in green to override most red annotations
# result is that annotations without anchors are red, with anchors are green
labels, boxes, _ = generator.compute_anchor_targets(image.shape, annotations, generator.num_classes())
draw_boxes(image, boxes[np.max(labels, axis=1) == 1], (0, 255, 0))
# Draw masks over the image with random colours
if args.masks:
for m in range(len(masks)):
# crop the mask with the related bbox size, and then draw them
box = annotations[m, :4].astype(int)
mask = masks[m][box[1]:box[3], box[0]:box[2]]
draw_mask(image, box, mask, label_color(annotations[m, 4].astype(int)))
# add the label caption
caption = '{}'.format(generator.label_to_name(annotations[m, 4]))
draw_caption(image, box, caption)
cv2.imshow('Image', image)
if cv2.waitKey() == ord('q'):
return False
return True
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create the generator
generator = create_generator(args)
# create the display window
cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
if args.loop:
while run(generator, args):
pass
else:
run(generator, args)
if __name__ == '__main__':
main()
| 39.152941 | 200 | 0.671575 | [
"Apache-2.0"
] | alexFilin/keras-maskrcnn | keras_maskrcnn/bin/debug.py | 6,656 | Python |
"""
DataMeta
DataMeta # noqa: E501
The version of the OpenAPI document: 1.4.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from datameta_client_lib.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class StagedMetaDataSets(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'metadataset_ids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'metadataset_ids': 'metadatasetIds', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, metadataset_ids, *args, **kwargs): # noqa: E501
"""StagedMetaDataSets - a model defined in OpenAPI
Args:
metadataset_ids ([str]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.metadataset_ids = metadataset_ids
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
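# Minimal usage sketch (identifiers are hypothetical):
#   staged = StagedMetaDataSets(metadataset_ids=['mds-1', 'mds-2'])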
| 38.321637 | 110 | 0.584923 | [
"Apache-2.0"
] | ghga-de/datameta-client-lib | datameta_client_lib/model/staged_meta_data_sets.py | 6,553 | Python |
__author__ = 'anushabala'
import sys
sys.path.append('/usr1/home/rjoshi2/negotiation_personality/src/negotiation/bot/cocoa/src/basic/sessions')
| 36 | 106 | 0.819444 | [
"Apache-2.0"
] | kingabzpro/DialoGraph_ICLR21 | src/bot/cocoa/src/basic/sessions/__init__.py | 144 | Python |
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from math import *
from direct.distributed.ClockDelta import *
from toontown.golf import GolfGlobals
from toontown.shtiker.GolfPage import GolfTrophy
class GolfRewardDialog:
notify = directNotify.newCategory('GolfRewardDialog')
def __init__(self, avIdList, trophyList, rankingsList, holeBestList, courseBestList, cupList, localAvId, tieBreakWinner, aimTimesList, endMovieCallback = None):
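        # Parallel lists indexed by player: trophies, final rankings, per-hole
        # and per-course bests, cups won, and tie-break aim times.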
self.avIdList = avIdList
self.trophyList = trophyList
self.rankingsList = rankingsList
self.holeBestList = holeBestList
self.courseBestList = courseBestList
self.cupList = cupList
self.tieBreakWinner = tieBreakWinner
self.movie = None
self.myPlace = 0
self.victory = None
self.endMovieCallback = endMovieCallback
self.aimTimesList = aimTimesList
self.setup(localAvId)
def calcTrophyTextListForOnePlayer(self, avId):
retval = []
av = base.cr.doId2do.get(avId)
if av and avId in self.avIdList:
playerIndex = self.avIdList.index(avId)
name = av.getName()
for trophyIndex in xrange(len(self.trophyList[playerIndex])):
wonTrophy = self.trophyList[playerIndex][trophyIndex]
if wonTrophy:
trophyName = TTLocalizer.GolfTrophyDescriptions[trophyIndex]
text = TTLocalizer.GolfAvReceivesTrophy % {'name': name,
'award': trophyName}
retval.append(text)
return retval
def calcCupTextListForAllPlayers(self, localAvId):
retval = []
for cupPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cupPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
name = ''
if av:
name = av.getName()
cupIndex = 0
for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
if self.cupList[cupPlayerIndex][cupIndex]:
cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
text = TTLocalizer.GolfAvReceivesCup % {'name': name,
'cup': cupName}
retval.append(text)
for cupPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cupPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
name = av.getName()
cupIndex = 0
for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
if self.cupList[cupPlayerIndex][cupIndex]:
cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
text = TTLocalizer.GolfAvReceivesCup % {'name': name,
'cup': cupName}
retval.append(text)
return retval
def calcRankings(self, localAvId):
retval = []
self.notify.debug('aimTimesList=%s' % self.aimTimesList)
for rank in xrange(len(self.rankingsList) + 1):
for avIndex in xrange(len(self.avIdList)):
if self.rankingsList[avIndex] == rank:
name = ' '
av = base.cr.doId2do.get(self.avIdList[avIndex])
if av:
name = av.getName()
text = '%d. ' % rank + ' ' + name
if GolfGlobals.TIME_TIE_BREAKER:
time = self.aimTimesList[avIndex]
minutes = int(time / 60)
time -= minutes * 60
seconds = int(time)
padding = (seconds < 10 and ['0'] or [''])[0]
time -= seconds
fraction = str(time)[2:4]
fraction = fraction + '0' * (2 - len(fraction))
timeStr = "%d'%s%d''%s" % (minutes,
padding,
seconds,
fraction)
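                        # e.g. 83.45 seconds formats as "1'23''45"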
text += ' - ' + timeStr
retval.append(text)
if self.avIdList[avIndex] == localAvId:
self.myPlace = rank
return retval
def calcHoleBestTextListForAllPlayers(self, localAvId):
retval = []
if GolfGlobals.CalcOtherHoleBest:
for hbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[hbPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
                    name = av.getName() if av else ''
for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
if self.holeBestList[hbPlayerIndex][hbIndex]:
hbName = TTLocalizer.GolfHoleNames[hbIndex]
text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
'hole': hbName}
retval.append(text)
for hbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[hbPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
name = av.getName()
for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
if self.holeBestList[hbPlayerIndex][hbIndex]:
hbName = TTLocalizer.GolfHoleNames[hbIndex]
text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
'hole': hbName}
retval.append(text)
return retval
def calcCourseBestTextListForAllPlayers(self, localAvId):
retval = []
if GolfGlobals.CalcOtherCourseBest:
for cbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cbPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
                    name = av.getName() if av else ''
for cbIndex in xrange(len(self.holeBestList[cbPlayerIndex])):
if self.holeBestList[cbPlayerIndex][cbIndex]:
cbName = TTLocalizer.GolfCourseNames[cbIndex]
text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
'course': cbName}
retval.append(text)
for cbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cbPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
name = av.getName()
for cbIndex in xrange(len(self.courseBestList[cbPlayerIndex])):
if self.courseBestList[cbPlayerIndex][cbIndex]:
cbName = TTLocalizer.GolfCourseNames[cbIndex]
text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
'course': cbName}
retval.append(text)
return retval
def createRewardMovie(self, localAvId):
retval = Sequence(name='Reward sequence', autoPause=1)
self.trophy = None
def setTrophyLabelText(text, playerIndex, trophyIndex):
self.rankLabel.hide()
self.rewardLabel.hide()
self.trophy = GolfTrophy(level=self.trophyList[playerIndex][trophyIndex], parent=self.trophyLabel, pos=(1.3, 0, -0.25))
self.trophy.setScale(0.65, 1, 0.65)
self.trophy.show()
self.trophyLabel['text'] = text
def setRewardLabelText(text):
self.rewardLabel.show()
self.rankLabel.hide()
self.trophyLabel.hide()
if self.trophy:
self.trophy.hide()
self.rewardLabel['text'] = text
def setRankLabelText(text):
self.rankLabel.show()
self.rewardLabel.hide()
self.trophyLabel.hide()
if self.trophy:
self.trophy.hide()
self.rankLabel['text'] = text
if len(self.avIdList) > 1:
self.victory = base.loader.loadSfx('phase_6/audio/sfx/KART_Applause_%d.ogg' % self.myPlace)
self.victory.play()
for avId in self.avIdList:
if avId != localAvId:
rewardTextList = self.calcTrophyTextListForOnePlayer(avId)
trophyIndex = 0
for rewardText in rewardTextList:
playerIndex = self.avIdList.index(avId)
var = (rewardText, playerIndex, trophyIndex)
oneTrophyIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
trophyIndex = trophyIndex + 1
retval.append(oneTrophyIval)
rewardTextList = self.calcTrophyTextListForOnePlayer(localAvId)
trophyIndex = 0
playerIndex = self.avIdList.index(localAvId)
        for rewardText in rewardTextList:
            oneRewardIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
            retval.append(oneRewardIval)
rewardCupList = self.calcCupTextListForAllPlayers(localAvId)
if len(rewardCupList) > 0:
for rewardText in rewardCupList:
oneCupIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
retval.append(oneCupIval)
if self.tieBreakWinner:
name = ''
av = base.cr.doId2do.get(self.tieBreakWinner)
if av:
name = av.getName()
if GolfGlobals.TIME_TIE_BREAKER:
rewardText = TTLocalizer.GolfTimeTieBreakWinner % {'name': name}
else:
rewardText = TTLocalizer.GolfTieBreakWinner % {'name': name}
randomWinnerIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 7, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
retval.append(randomWinnerIval)
rankings = self.calcRankings(localAvId)
rankText = TTLocalizer.GolfRanking + '\n'
for rank in xrange(len(rankings)):
rankText = rankText + rankings[rank] + '\n'
oneRankIval = Parallel(Func(setRankLabelText, rankText), LerpColorScaleInterval(self.rankLabel, 8, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneRankIval)
rewardHoleList = self.calcHoleBestTextListForAllPlayers(localAvId)
if len(rewardHoleList) > 0:
for rewardText in rewardHoleList:
oneHoleIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 8, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneHoleIval)
rewardCourseList = self.calcCourseBestTextListForAllPlayers(localAvId)
if len(rewardCourseList) > 0:
for rewardText in rewardCourseList:
oneCourseIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneCourseIval)
if self.endMovieCallback:
retval.append(Func(self.endMovieCallback))
return retval
def setup(self, localAvId):
self.rewardBoard = DirectFrame(parent=aspect2d, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.75, 1, 0.6), pos=(0, 0, -0.6))
self.rewardLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0), text_align=TextNode.ACenter, text='', text_scale=0.05, text_wordwrap=30)
self.rankLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0.17), text_align=TextNode.ACenter, text='', text_scale=0.06)
self.trophyLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0.7, 0, 0.05), text_align=TextNode.ALeft, text='', text_scale=0.06, text_wordwrap=20)
self.movie = self.createRewardMovie(localAvId)
def delete(self):
self.movie.pause()
self.notify.debug('Movie is paused')
self.rewardBoard.destroy()
self.notify.debug('Reward board is destroyed')
self.movie = None
self.notify.debug('Movie is deleted')
def getMovie(self):
return self.movie
| 49.384328 | 225 | 0.583377 | [
"Apache-2.0"
] | AnythingTechPro/Project-Altis | toontown/golf/GolfRewardDialog.py | 13,235 | Python |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Gamma(distribution.Distribution):
"""The `Gamma` distribution with parameter alpha and beta.
The parameters are the shape and inverse scale parameters alpha, beta.
The PDF of this distribution is:
```pdf(x) = (beta^alpha)(x^(alpha-1))e^(-x*beta)/Gamma(alpha), x > 0```
and the CDF of this distribution is:
```cdf(x) = GammaInc(alpha, beta * x) / Gamma(alpha), x > 0```
where GammaInc is the incomplete lower Gamma function.
Examples:
```python
dist = Gamma(alpha=3.0, beta=2.0)
dist2 = Gamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
```
"""
def __init__(self,
alpha,
beta,
validate_args=True,
allow_nan_stats=False,
name="Gamma"):
"""Construct Gamma distributions with parameters `alpha` and `beta`.
The parameters `alpha` and `beta` must be shaped in a way that supports
broadcasting (e.g. `alpha + beta` is a valid operation).
Args:
alpha: Floating point tensor, the shape params of the
distribution(s).
alpha must contain only positive values.
beta: Floating point tensor, the inverse scale params of the
distribution(s).
beta must contain only positive values.
validate_args: Whether to assert that `a > 0, b > 0`, and that `x > 0` in
the methods `prob(x)` and `log_prob(x)`. If `validate_args` is `False`
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: Boolean, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prepend to all ops created by this distribution.
Raises:
TypeError: if `alpha` and `beta` are different dtypes.
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.name_scope(name, values=[alpha, beta]) as scope:
self._name = scope
with ops.control_dependencies([check_ops.assert_positive(
alpha), check_ops.assert_positive(beta)] if validate_args else []):
alpha = array_ops.identity(alpha, name="alpha")
beta = array_ops.identity(beta, name="beta")
contrib_tensor_util.assert_same_float_dtype((alpha, beta))
self._broadcast_tensor = alpha + beta
self._get_batch_shape = self._broadcast_tensor.get_shape()
self._get_event_shape = tensor_shape.TensorShape([])
self._alpha = alpha
self._beta = beta
@property
def allow_nan_stats(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._allow_nan_stats
@property
def validate_args(self):
"""Boolean describing behavior on invalid input."""
return self._validate_args
@property
def name(self):
"""Name to prepend to all ops."""
return self._name
@property
def dtype(self):
"""dtype of samples from this distribution."""
return self._alpha.dtype
@property
def alpha(self):
"""Shape parameter."""
return self._alpha
@property
def beta(self):
"""Inverse scale parameter."""
return self._beta
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._broadcast_tensor]):
return array_ops.shape(self._broadcast_tensor)
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
`TensorShape` object.
"""
return self._get_batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.name_scope(name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
`TensorShape` object.
"""
return self._get_event_shape
def mean(self, name="mean"):
"""Mean of each batch member."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return self._alpha / self._beta
def mode(self, name="mode"):
"""Mode of each batch member.
The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,
and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception
will be raised rather than returning `NaN`.
Args:
name: A name to give this op.
Returns:
The mode for every batch member, a `Tensor` with same `dtype` as self.
"""
alpha = self._alpha
beta = self._beta
with ops.name_scope(self.name):
with ops.name_scope(name, values=[alpha, beta]):
mode_if_defined = (alpha - 1.0) / beta
if self.allow_nan_stats:
alpha_ge_1 = alpha >= 1.0
nan = np.nan * self._ones()
return math_ops.select(alpha_ge_1, mode_if_defined, nan)
else:
one = constant_op.constant(1.0, dtype=self.dtype)
return control_flow_ops.with_dependencies(
[check_ops.assert_less(
one, alpha,
message="mode not defined for components of alpha <= 1"
)], mode_if_defined)
def variance(self, name="variance"):
"""Variance of each batch member."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return self._alpha / math_ops.square(self._beta)
def std(self, name="std"):
"""Standard deviation of this distribution."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return math_ops.sqrt(self._alpha) / self._beta
def log_prob(self, x, name="log_prob"):
"""Log prob of observations in `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
Raises:
TypeError: if `x` and `alpha` are different dtypes.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
alpha = self._alpha
beta = self._beta
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
dtype=self.dtype)
return (alpha * math_ops.log(beta) + (alpha - 1) * math_ops.log(x) -
beta * x - math_ops.lgamma(self._alpha))
def prob(self, x, name="prob"):
"""Pdf of observations in `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the PDFs of `x`
Raises:
TypeError: if `x` and `alpha` are different dtypes.
"""
return super(Gamma, self).prob(x, name)
def log_cdf(self, x, name="log_cdf"):
"""Log CDF of observations `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
dtype=self.dtype)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.log(math_ops.igamma(self._alpha, self._beta * x))
def cdf(self, x, name="cdf"):
"""CDF of observations `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
return math_ops.igamma(self._alpha, self._beta * x)
def entropy(self, name="entropy"):
"""The entropy of Gamma distribution(s).
This is defined to be
```
entropy = alpha - log(beta) + log(Gamma(alpha))
+ (1-alpha)digamma(alpha)
```
where digamma(alpha) is the digamma function.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self.alpha, self._beta]):
alpha = self._alpha
beta = self._beta
return (alpha - math_ops.log(beta) + math_ops.lgamma(alpha) +
(1 - alpha) * math_ops.digamma(alpha))
def sample_n(self, n, seed=None, name="sample_n"):
"""Draws `n` samples from the Gamma distribution(s).
See the doc for tf.random_gamma for further detail.
Args:
n: Python integer, the number of observations to sample from each
distribution.
seed: Python integer, the random seed for this operation.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
with values of type `self.dtype`.
"""
with ops.name_scope(self.name, values=[n, self.alpha, self._beta]):
return random_ops.random_gamma([n],
self.alpha,
beta=self._beta,
dtype=self.dtype,
seed=seed,
name=name)
@property
def is_reparameterized(self):
return False
def _ones(self):
return array_ops.ones_like(self._alpha + self._beta, dtype=self.dtype)
@property
def is_continuous(self):
return True
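# Hedged usage sketch (graph-mode TensorFlow of this era; the `tf.Session`
# calls below are illustrative and not part of this module):
#
#   dist = Gamma(alpha=3.0, beta=2.0)
#   with tf.Session() as sess:
#       print(sess.run(dist.mean()))      # 1.5  == alpha / beta
#       print(sess.run(dist.variance()))  # 0.75 == alpha / beta**2
#       print(sess.run(dist.log_prob(1.0)))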
| 34.059621 | 125 | 0.643539 | [
"Apache-2.0"
] | enrewen1/tf | tensorflow/contrib/distributions/python/ops/gamma.py | 12,568 | Python |
if _use_time:
_end_time = datetime.utcnow().timestamp()
res.update({'_gatewayTime': {'start': _start_time, 'end': _end_time, 'duration': _end_time-_start_time}}) | 56.333333 | 109 | 0.710059 | [
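# Note: this is a template fragment, not a standalone module; `_use_time`,
# `_start_time`, and `res` (and the `datetime` import) are expected to be
# provided by the generated script this snippet is spliced into.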
"Apache-2.0"
] | AlexRogalskiy/bumblebee | packages/api/resources/python-templates/time-end.py | 169 | Python |
from django.conf.urls import url
from .views import (
TableListAPIView,
SalePointTableListAPIView
)
urlpatterns = [
url(r'^$', TableListAPIView.as_view(),
name='api-table-list'),
url(r'^sale-point/(?P<pk>[0-9]+)$',
SalePointTableListAPIView.as_view(),
name='api-sale_point-table'),
]
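# Hedged usage sketch: the URL names above can be reversed elsewhere, e.g.
#   from django.urls import reverse  # `django.core.urlresolvers` on older Django
#   reverse('api-table-list')
#   reverse('api-sale_point-table', kwargs={'pk': 1})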
| 19.529412 | 44 | 0.629518 | [
"BSD-3-Clause"
] | glosoftgroup/restaurant | saleor/api/table/urls.py | 332 | Python |
from recipe_scrapers.purelypope import PurelyPope
from tests import ScraperTest
class TestPurelyPopeScraper(ScraperTest):
scraper_class = PurelyPope
def test_host(self):
self.assertEqual("purelypope.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://purelypope.com/sweet-chili-brussel-sprouts/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Sweet Chili Brussel Sprouts")
def test_yields(self):
self.assertEqual("4 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://purelypope.com/wp-content/uploads/2020/05/IMG_5412-1-150x150.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"2 cups brussel sprouts, stems removed & cut in half",
"2 tbsp coconut aminos",
"1 tbsp sriracha",
"1/2 tbsp maple syrup",
"1 tsp sesame oil",
"Everything bagel seasoning or sesame seeds, to top",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
        self.assertEqual(
"Instructions\n\nBrussel Sprout Time!\n\nPreheat oven to 350 degrees.\nWhisk the sauce (coconut aminos, sriracha, maple syrup & sesame oil) together in a large bowl.\nToss in brussel sprouts and coat mixture evenly over the brussels.\nRoast for 30 minutes.\nTurn oven to broil for 2-3 minutes to crisp (watch carefully to not burn.)\nTop with everything or sesame seeds.",
self.harvester_class.instructions(),
)
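# Hedged usage note: like the project's other scraper tests, this presumably
# runs against a stored HTML fixture loaded by ScraperTest (no live network), e.g.:
#   python -m unittest tests.test_purelypope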
| 37.5625 | 384 | 0.642263 | [
"MIT"
] | AlexRogalskiy/recipe-scrapers | tests/test_purelypope.py | 1,803 | Python |
from types import SimpleNamespace
fields = SimpleNamespace(
id='id',
name='name',
data_query='data_query')
analysis_properties = {
fields.name: {
'description': 'Name of analysis.',
'type': 'string',
},
fields.data_query: {
'description': 'Lucene query string used to retrieve entities '
'to analyze.',
'type': 'string',
'default': '*',
},
}
analysis_spec = {
'type': 'object',
'required': [fields.name, fields.data_query],
'properties': analysis_properties,
}
analysis = {
'type': 'object',
'properties': {
fields.id: {
'type': 'integer',
'description': 'Unique integer identifying the analysis.',
},
**analysis_properties,
},
}
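# Illustrative only (not part of the schema module): a payload that satisfies
# `analysis_spec` above, using the real field names from `fields`. The query
# string value is a made-up Lucene example.
_example_analysis_spec = {
    fields.name: 'High confidence detections',
    fields.data_query: 'type:box',
}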
| 21.540541 | 71 | 0.548306 | [
"MIT"
] | cvisionai/tator | main/schema/components/analysis.py | 797 | Python |
import logging
import sys
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class PaddownException(Exception):
pass
class Paddown(ABC):
@abstractmethod
def has_valid_padding(self, ciphertext: bytes) -> bool:
"""
Override this method and send off the ciphertext to check for valid padding.
:param bytes ciphertext: The ciphertext to check, send this to your padding oracle.
:rtype: True for valid padding, False otherwise.
"""
raise PaddownException("Not implemented")
def __init__(self, ciphertext: bytes, blocksize: int = 16):
if not isinstance(ciphertext, bytes):
raise Exception(f"Ciphertext {type(ciphertext)} not an instance of {bytes}")
self.ciphertext = ciphertext
self.blocksize = blocksize
def find_c_prime_at_index(self, ciphertext: bytearray, index: int):
if not isinstance(ciphertext, bytearray):
raise PaddownException(f"ciphertext not an instance of {bytearray}")
# Replace ciphertext at index with a guessed byte
ciphertext_temp = ciphertext
for c_prime in range(256):
ciphertext_temp[index] = c_prime
if self.has_valid_padding(ciphertext_temp):
return c_prime
raise PaddownException(f"No valid padding found, is .has_valid_padding(...) implemented correctly?")
def decrypt_block(self, c_i):
if not isinstance(c_i, bytearray):
raise PaddownException(f"block c_i not an instance of {bytearray}")
c_previous = bytearray(b"\x00" * self.blocksize)
intermediate = bytearray(b"\x00" * self.blocksize)
for i in range(self.blocksize):
self.progress_bar(i, self.blocksize - 1, "Decrypting ")
for j in range(i):
c_previous[(self.blocksize - 1) - j] = intermediate[(self.blocksize - 1) - j] ^ (i + 1)
c_prime = self.find_c_prime_at_index(c_previous + c_i, (self.blocksize - 1) - i)
intermediate[(self.blocksize - 1) - i] = c_prime ^ (i + 1)
logger.debug(f"intermediate: {[hex(x)[2:] for x in intermediate]}")
return intermediate
def get_intermediate(self, ciphertext) -> bytes:
key = b""
blocks = len(ciphertext) // self.blocksize
# Iterate blocks last to first
for i in range(blocks):
block_start = len(ciphertext) - (i + 1) * self.blocksize
block_end = len(ciphertext) - (i * self.blocksize)
key = self.decrypt_block(ciphertext[block_start:block_end]) + key
return key
def decrypt(self) -> bytes:
logger.debug(f"Ciphertext length: {len(self.ciphertext)}")
logger.debug(f"Blocks to decrypt: {len(self.ciphertext) // self.blocksize}")
# Convert self.ciphertext to mutable bytearray
self.ciphertext = bytearray(self.ciphertext)
key = self.get_intermediate(self.ciphertext)
plaintext = bytearray()
for i in range(len(self.ciphertext) - self.blocksize):
b = self.ciphertext[i] ^ key[i + self.blocksize]
plaintext += (b).to_bytes(1, byteorder="big")
print("\n") # print variable on new line from progress bar
return plaintext
def progress_bar(self, i, total_length, post_text):
n_bar = 100 # size of progress bar
j = i / total_length
sys.stdout.write("\r")
sys.stdout.write(f"[{'#' * int(n_bar * j):{n_bar}s}] {int(100 * j)}% {post_text}")
sys.stdout.flush()
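# Hedged usage sketch: Paddown is abstract, so a caller supplies the oracle.
# The endpoint and field name below are hypothetical; only the Paddown API is real.
class _ExampleHttpPaddown(Paddown):
    def has_valid_padding(self, ciphertext: bytes) -> bool:
        import requests  # third-party; assumed available in the caller's environment
        resp = requests.post('https://example.com/decrypt', data={'c': ciphertext.hex()})
        return resp.status_code == 200  # oracle: HTTP 200 means padding accepted

# plaintext = _ExampleHttpPaddown(some_ciphertext, blocksize=16).decrypt()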
| 38.673913 | 108 | 0.632378 | [
"MIT"
] | MarvinKweyu/PadDown | paddown.py | 3,558 | Python |
from functools import wraps
def multiply_by(multiplier):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
func_name = func.__name__
print(f'Calling "{func_name}({args[0]}, {args[1]})" function')
print(f'"{func_name}" function is multiplied by {multiplier}')
result = func(*args, **kwargs) * multiplier
print(f'Result equals to {result}')
return result
return wrapper
return decorator
@multiply_by(multiplier=3)
def add(a, b):
return a + b
add(2, 3)
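# Expected output of the call above:
#   Calling "add(2, 3)" function
#   "add" function is multiplied by 3
#   Result equals to 15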
| 24.083333 | 74 | 0.588235 | [
"Apache-2.0"
] | vyahello/python-decorators-cheetsheet | materials/decorator_with_args.py | 578 | Python |
"""
WSGI config for CongressionalRecord project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "CongressionalRecord.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CongressionalRecord.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 44.181818 | 79 | 0.807956 | [
"Apache-2.0"
] | kdunn926/eunomia-django | CongressionalRecord/wsgi.py | 1,458 | Python |
"""Basic pyon logging (with or without container)
NOTE: the functionality of this module has moved to ooi.logging.config.
currently this module is maintained for API compatability, but is implemented using the new package.
"""
import logging
from ooi.logging import config
DEFAULT_LOGGING_PATHS = ['res/config/logging.yml', 'res/config/logging.local.yml']
logging_was_configured = False
def configure_logging(logging_conf_paths, logging_config_override=None):
"""
Public call to configure and initialize logging.
@param logging_conf_paths List of paths to logging config YML files (in read order)
@param config_override Dict with config entries overriding files read
"""
global logging_was_configured
logging_was_configured = True
for path in logging_conf_paths:
try:
config.add_configuration(path)
except Exception, e:
print 'WARNING: could not load logging configuration file %s: %s' % (path, e)
if logging_config_override:
try:
config.add_configuration(logging_config_override)
except Exception,e:
            print 'WARNING: failed to apply logging override %r: %s' % (logging_config_override, e)
# direct warnings mechanism to loggers
logging.captureWarnings(True)
def is_logging_configured():
""" allow caller to determine if logging has already been configured in this container """
global logging_was_configured
return logging_was_configured or config.get_configuration()
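# Hedged usage sketch (the override dict shape is whatever
# ooi.logging.config.add_configuration accepts; the keys below are an assumption):
#   configure_logging(DEFAULT_LOGGING_PATHS, {'root': {'level': 'DEBUG'}})
#   assert is_logging_configured()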
| 37.487805 | 109 | 0.725439 | [
"BSD-2-Clause"
] | ooici/pyon | pyon/core/log.py | 1,537 | Python |
def fibo(n):
    a, b = 0, 1
    for i in range(0, n):
        a, b = b, a + b
    return a

option = 'y'
while option == 'y':
    print("Enter the limit of the Fibonacci series")
    num = int(input())
    for c in range(0, num):
        print(fibo(c))
    print("Do you want to continue?(y/n)")
    option = input()
print('Thank you for using this programme')
| 21.05 | 49 | 0.489311 | [
"MIT"
] | DheerajKN/Python-with-pygame | Fibonacci_Iteration.py | 421 | Python |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: boosted_trees_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
_boosted_trees_calculate_best_gains_per_feature_outputs = ["node_ids_list",
"gains_list",
"thresholds_list",
"left_node_contribs_list",
"right_node_contribs_list"]
_BoostedTreesCalculateBestGainsPerFeatureOutput = _collections.namedtuple(
"BoostedTreesCalculateBestGainsPerFeature",
_boosted_trees_calculate_best_gains_per_feature_outputs)
def boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None):
r"""Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The length of output lists are all of the same length, `num_features`.
The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.
Args:
node_id_range: A `Tensor` of type `int32`.
A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`.
A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1: A `Tensor` of type `float32`.
l1 regularization factor on leaf weights, per instance based.
l2: A `Tensor` of type `float32`.
l2 regularization factor on leaf weights, per instance based.
tree_complexity: A `Tensor` of type `float32`.
adjustment to the gain, per leaf based.
min_node_weight: A `Tensor` of type `float32`.
      minimum avg of hessians required in a node before it is considered for splitting.
max_splits: An `int` that is `>= 1`.
the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list).
node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.
gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.
left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(stats_summary_list, (list, tuple)):
raise TypeError(
"Expected list for 'stats_summary_list' argument to "
"'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list)
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, "max_splits")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesCalculateBestGainsPerFeature",
node_id_range=node_id_range, stats_summary_list=stats_summary_list,
l1=l1, l2=l2, tree_complexity=tree_complexity,
min_node_weight=min_node_weight, max_splits=max_splits, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("max_splits", _op.get_attr("max_splits"), "num_features",
_op.get_attr("num_features"))
_execute.record_gradient(
"BoostedTreesCalculateBestGainsPerFeature", _inputs_flat, _attrs, _result, name)
_result = [_result[:_attr_num_features]] + _result[_attr_num_features:]
_result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:]
_result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:]
_result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:]
_result = _result[:4] + [_result[4:]]
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesCalculateBestGainsPerFeature", name,
_ctx._post_execution_callbacks, node_id_range, stats_summary_list, l1,
l2, tree_complexity, min_node_weight, "max_splits", max_splits)
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_calculate_best_gains_per_feature_eager_fallback(
node_id_range, stats_summary_list, l1, l2, tree_complexity,
min_node_weight, max_splits=max_splits, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_calculate_best_gains_per_feature
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(stats_summary_list, (list, tuple)):
raise TypeError(
"Expected list for 'stats_summary_list' argument to "
"'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list)
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, "max_splits")
node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32)
stats_summary_list = _ops.convert_n_to_tensor(stats_summary_list, _dtypes.float32)
l1 = _ops.convert_to_tensor(l1, _dtypes.float32)
l2 = _ops.convert_to_tensor(l2, _dtypes.float32)
tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32)
min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32)
_inputs_flat = [node_id_range] + list(stats_summary_list) + [l1, l2, tree_complexity, min_node_weight]
_attrs = ("max_splits", max_splits, "num_features", _attr_num_features)
_result = _execute.execute(b"BoostedTreesCalculateBestGainsPerFeature",
_attr_num_features + _attr_num_features +
_attr_num_features + _attr_num_features +
_attr_num_features, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BoostedTreesCalculateBestGainsPerFeature", _inputs_flat, _attrs, _result, name)
_result = [_result[:_attr_num_features]] + _result[_attr_num_features:]
_result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:]
_result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:]
_result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:]
_result = _result[:4] + [_result[4:]]
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
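# Hedged call sketch (shapes follow the docstring above; values are illustrative
# and `max_splits`/`num_buckets` are assumed to be defined by the caller):
#   best = boosted_trees_calculate_best_gains_per_feature(
#       node_id_range=[0, 1],
#       stats_summary_list=[tf.zeros([max_splits, num_buckets, 2])],
#       l1=0.0, l2=1.0, tree_complexity=0.0, min_node_weight=0.0,
#       max_splits=max_splits)
#   # best.gains_list, best.thresholds_list, etc. are per-feature lists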
def boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
r"""Creates a tree ensemble model and returns a handle to it.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble resource to be created.
stamp_token: A `Tensor` of type `int64`.
Token to use as the initial value of the resource stamp.
tree_ensemble_serialized: A `Tensor` of type `string`.
Serialized proto of the tree ensemble.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesCreateEnsemble",
tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token,
tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesCreateEnsemble", name, _ctx._post_execution_callbacks,
tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_create_ensemble_eager_fallback(
tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_create_ensemble
"""
_ctx = ctx if ctx else _context.context()
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b"BoostedTreesCreateEnsemble", 0,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_result = None
return _result
def boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
r"""Deserializes a serialized tree ensemble config and replaces current tree
ensemble.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
stamp_token: A `Tensor` of type `int64`.
Token to use as the new value of the resource stamp.
tree_ensemble_serialized: A `Tensor` of type `string`.
Serialized proto of the ensemble.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesDeserializeEnsemble",
tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token,
tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesDeserializeEnsemble", name,
_ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token,
tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_deserialize_ensemble_eager_fallback(
tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_deserialize_ensemble
"""
_ctx = ctx if ctx else _context.context()
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b"BoostedTreesDeserializeEnsemble", 0,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_result = None
return _result
def boosted_trees_ensemble_resource_handle_op(container="", shared_name="", name=None):
r"""Creates a handle to a BoostedTreesEnsembleResource
Args:
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesEnsembleResourceHandleOp", container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"BoostedTreesEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesEnsembleResourceHandleOp", name,
_ctx._post_execution_callbacks, "container", container, "shared_name",
shared_name)
return _result
except _core._FallbackException:
return boosted_trees_ensemble_resource_handle_op_eager_fallback(
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_ensemble_resource_handle_op_eager_fallback(container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_ensemble_resource_handle_op
"""
_ctx = ctx if ctx else _context.context()
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("container", container, "shared_name", shared_name)
_result = _execute.execute(b"BoostedTreesEnsembleResourceHandleOp", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"BoostedTreesEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_boosted_trees_get_ensemble_states_outputs = ["stamp_token", "num_trees",
"num_finalized_trees",
"num_attempted_layers",
"last_layer_nodes_range"]
_BoostedTreesGetEnsembleStatesOutput = _collections.namedtuple(
"BoostedTreesGetEnsembleStates",
_boosted_trees_get_ensemble_states_outputs)
def boosted_trees_get_ensemble_states(tree_ensemble_handle, name=None):
r"""Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range).
stamp_token: A `Tensor` of type `int64`.
num_trees: A `Tensor` of type `int32`.
num_finalized_trees: A `Tensor` of type `int32`.
num_attempted_layers: A `Tensor` of type `int32`.
last_layer_nodes_range: A `Tensor` of type `int32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesGetEnsembleStates",
tree_ensemble_handle=tree_ensemble_handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BoostedTreesGetEnsembleStates", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesGetEnsembleStates", name, _ctx._post_execution_callbacks,
tree_ensemble_handle)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_get_ensemble_states_eager_fallback(
tree_ensemble_handle, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_get_ensemble_states
"""
_ctx = ctx if ctx else _context.context()
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
_inputs_flat = [tree_ensemble_handle]
_attrs = None
_result = _execute.execute(b"BoostedTreesGetEnsembleStates", 5,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"BoostedTreesGetEnsembleStates", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
def boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None):
r"""Makes the summary of accumulated stats for the batch.
The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
Args:
node_ids: A `Tensor` of type `int32`.
int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
gradients: A `Tensor` of type `float32`.
float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
hessians: A `Tensor` of type `float32`.
float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`.
int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
max_splits: An `int` that is `>= 1`.
int; the maximum number of splits possible in the whole tree.
num_buckets: An `int` that is `>= 1`.
int; equals to the maximum possible value of bucketized feature.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(bucketized_features_list, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features_list' argument to "
"'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list)
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, "max_splits")
num_buckets = _execute.make_int(num_buckets, "num_buckets")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesMakeStatsSummary", node_ids=node_ids,
gradients=gradients, hessians=hessians,
bucketized_features_list=bucketized_features_list,
max_splits=max_splits, num_buckets=num_buckets, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("max_splits", _op.get_attr("max_splits"), "num_buckets",
_op.get_attr("num_buckets"), "num_features",
_op.get_attr("num_features"))
_execute.record_gradient(
"BoostedTreesMakeStatsSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesMakeStatsSummary", name, _ctx._post_execution_callbacks,
node_ids, gradients, hessians, bucketized_features_list, "max_splits",
max_splits, "num_buckets", num_buckets)
return _result
except _core._FallbackException:
return boosted_trees_make_stats_summary_eager_fallback(
node_ids, gradients, hessians, bucketized_features_list,
max_splits=max_splits, num_buckets=num_buckets, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_make_stats_summary
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(bucketized_features_list, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features_list' argument to "
"'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list)
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, "max_splits")
num_buckets = _execute.make_int(num_buckets, "num_buckets")
node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32)
gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
bucketized_features_list = _ops.convert_n_to_tensor(bucketized_features_list, _dtypes.int32)
_inputs_flat = [node_ids, gradients, hessians] + list(bucketized_features_list)
_attrs = ("max_splits", max_splits, "num_buckets", num_buckets,
"num_features", _attr_num_features)
_result = _execute.execute(b"BoostedTreesMakeStatsSummary", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"BoostedTreesMakeStatsSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def boosted_trees_predict(tree_ensemble_handle, bucketized_features, logits_dimension, name=None):
r"""Runs multiple additive regression ensemble predictors on input instances and
computes the logits. It is designed to be used during prediction.
It traverses all the trees and calculates the final score for each instance.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.
A list of rank 1 Tensors containing bucket id for each
feature.
logits_dimension: An `int`.
scalar, dimension of the logits, to be used for partial logits
shape.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(bucketized_features, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features' argument to "
"'boosted_trees_predict' Op, not %r." % bucketized_features)
_attr_num_bucketized_features = len(bucketized_features)
logits_dimension = _execute.make_int(logits_dimension, "logits_dimension")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesPredict", tree_ensemble_handle=tree_ensemble_handle,
bucketized_features=bucketized_features,
logits_dimension=logits_dimension, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("num_bucketized_features",
_op.get_attr("num_bucketized_features"), "logits_dimension",
_op.get_attr("logits_dimension"))
_execute.record_gradient(
"BoostedTreesPredict", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesPredict", name, _ctx._post_execution_callbacks,
tree_ensemble_handle, bucketized_features, "logits_dimension",
logits_dimension)
return _result
except _core._FallbackException:
return boosted_trees_predict_eager_fallback(
tree_ensemble_handle, bucketized_features,
logits_dimension=logits_dimension, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_predict_eager_fallback(tree_ensemble_handle, bucketized_features, logits_dimension, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_predict
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(bucketized_features, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features' argument to "
"'boosted_trees_predict' Op, not %r." % bucketized_features)
_attr_num_bucketized_features = len(bucketized_features)
logits_dimension = _execute.make_int(logits_dimension, "logits_dimension")
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
_inputs_flat = [tree_ensemble_handle] + list(bucketized_features)
_attrs = ("num_bucketized_features", _attr_num_bucketized_features,
"logits_dimension", logits_dimension)
_result = _execute.execute(b"BoostedTreesPredict", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BoostedTreesPredict", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_boosted_trees_serialize_ensemble_outputs = ["stamp_token",
"tree_ensemble_serialized"]
_BoostedTreesSerializeEnsembleOutput = _collections.namedtuple(
"BoostedTreesSerializeEnsemble",
_boosted_trees_serialize_ensemble_outputs)
def boosted_trees_serialize_ensemble(tree_ensemble_handle, name=None):
r"""Serializes the tree ensemble to a proto.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized).
stamp_token: A `Tensor` of type `int64`.
tree_ensemble_serialized: A `Tensor` of type `string`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesSerializeEnsemble",
tree_ensemble_handle=tree_ensemble_handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BoostedTreesSerializeEnsemble", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesSerializeEnsembleOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesSerializeEnsemble", name, _ctx._post_execution_callbacks,
tree_ensemble_handle)
_result = _BoostedTreesSerializeEnsembleOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_serialize_ensemble_eager_fallback(
tree_ensemble_handle, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_serialize_ensemble
"""
_ctx = ctx if ctx else _context.context()
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
_inputs_flat = [tree_ensemble_handle]
_attrs = None
_result = _execute.execute(b"BoostedTreesSerializeEnsemble", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"BoostedTreesSerializeEnsemble", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesSerializeEnsembleOutput._make(_result)
return _result
_boosted_trees_training_predict_outputs = ["partial_logits", "tree_ids",
"node_ids"]
_BoostedTreesTrainingPredictOutput = _collections.namedtuple(
"BoostedTreesTrainingPredict", _boosted_trees_training_predict_outputs)
def boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None):
r"""Runs multiple additive regression ensemble predictors on input instances and
computes the update to cached logits. It is designed to be used during training.
It traverses the trees starting from cached tree id and cached node id and
calculates the updates to be pushed to the cache.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
cached_tree_ids: A `Tensor` of type `int32`.
Rank 1 Tensor containing cached tree ids which is the starting
tree of prediction.
cached_node_ids: A `Tensor` of type `int32`.
Rank 1 Tensor containing cached node id which is the starting
node of prediction.
bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.
A list of rank 1 Tensors containing bucket id for each
feature.
logits_dimension: An `int`.
scalar, dimension of the logits, to be used for partial logits
shape.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids).
partial_logits: A `Tensor` of type `float32`.
tree_ids: A `Tensor` of type `int32`.
node_ids: A `Tensor` of type `int32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(bucketized_features, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features' argument to "
"'boosted_trees_training_predict' Op, not %r." % bucketized_features)
_attr_num_bucketized_features = len(bucketized_features)
logits_dimension = _execute.make_int(logits_dimension, "logits_dimension")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesTrainingPredict",
tree_ensemble_handle=tree_ensemble_handle,
cached_tree_ids=cached_tree_ids, cached_node_ids=cached_node_ids,
bucketized_features=bucketized_features,
logits_dimension=logits_dimension, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("num_bucketized_features",
_op.get_attr("num_bucketized_features"), "logits_dimension",
_op.get_attr("logits_dimension"))
_execute.record_gradient(
"BoostedTreesTrainingPredict", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesTrainingPredictOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesTrainingPredict", name, _ctx._post_execution_callbacks,
tree_ensemble_handle, cached_tree_ids, cached_node_ids,
bucketized_features, "logits_dimension", logits_dimension)
_result = _BoostedTreesTrainingPredictOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_training_predict_eager_fallback(
tree_ensemble_handle, cached_tree_ids, cached_node_ids,
bucketized_features, logits_dimension=logits_dimension, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_training_predict_eager_fallback(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_training_predict
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(bucketized_features, (list, tuple)):
raise TypeError(
"Expected list for 'bucketized_features' argument to "
"'boosted_trees_training_predict' Op, not %r." % bucketized_features)
_attr_num_bucketized_features = len(bucketized_features)
logits_dimension = _execute.make_int(logits_dimension, "logits_dimension")
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
cached_tree_ids = _ops.convert_to_tensor(cached_tree_ids, _dtypes.int32)
cached_node_ids = _ops.convert_to_tensor(cached_node_ids, _dtypes.int32)
bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
_inputs_flat = [tree_ensemble_handle, cached_tree_ids, cached_node_ids] + list(bucketized_features)
_attrs = ("num_bucketized_features", _attr_num_bucketized_features,
"logits_dimension", logits_dimension)
_result = _execute.execute(b"BoostedTreesTrainingPredict", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"BoostedTreesTrainingPredict", _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesTrainingPredictOutput._make(_result)
return _result
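# --- Illustrative usage sketch (not part of the generated file). ---
# A hedged example of how the training-time prediction op above might be
# invoked. It assumes `ensemble_handle` references an already created and
# initialized BoostedTreesEnsembleResource, `bucketized_features` is a list
# of rank-1 int32 tensors, and `tf` is the public TensorFlow API.
def _example_boosted_trees_training_predict(ensemble_handle,
                                            bucketized_features):
  import tensorflow as tf
  batch_size = tf.shape(bucketized_features[0])[0]
  # Start traversal at tree 0 / node 0 for every instance in the batch.
  cached_tree_ids = tf.zeros([batch_size], dtype=tf.int32)
  cached_node_ids = tf.zeros([batch_size], dtype=tf.int32)
  # Returns (partial_logits, tree_ids, node_ids) as documented above.
  return boosted_trees_training_predict(
      ensemble_handle, cached_tree_ids, cached_node_ids,
      bucketized_features, logits_dimension=1)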
def boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None):
r"""Updates the tree ensemble by either adding a layer to the last tree being grown
or by starting a new tree.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the ensemble variable.
feature_ids: A `Tensor` of type `int32`.
Rank 1 tensor with ids for each feature. This is the real id of
the feature that will be used in the split.
node_ids: A list of `Tensor` objects with type `int32`.
List of rank 1 tensors representing the nodes for which this feature
has a split.
gains: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 1 tensors representing the gains for each of the feature's
split.
thresholds: A list with the same length as `node_ids` of `Tensor` objects with type `int32`.
List of rank 1 tensors representing the thresholds for each of the
feature's split.
left_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 2 tensors with left leaf contribs for each of
the feature's splits. Will be added to the previous node values to constitute
the values of the left nodes.
right_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 2 tensors with right leaf contribs for each
of the feature's splits. Will be added to the previous node values to constitute
the values of the right nodes.
max_depth: A `Tensor` of type `int32`. Max depth of the tree to build.
learning_rate: A `Tensor` of type `float32`.
shrinkage const for each new tree.
pruning_mode: An `int` that is `>= 0`.
0-No pruning, 1-Pre-pruning, 2-Post-pruning.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(node_ids, (list, tuple)):
raise TypeError(
"Expected list for 'node_ids' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % node_ids)
_attr_num_features = len(node_ids)
if not isinstance(gains, (list, tuple)):
raise TypeError(
"Expected list for 'gains' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % gains)
if len(gains) != _attr_num_features:
raise ValueError(
"List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(gains), _attr_num_features))
if not isinstance(thresholds, (list, tuple)):
raise TypeError(
"Expected list for 'thresholds' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % thresholds)
if len(thresholds) != _attr_num_features:
raise ValueError(
"List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(thresholds), _attr_num_features))
if not isinstance(left_node_contribs, (list, tuple)):
raise TypeError(
"Expected list for 'left_node_contribs' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs)
if len(left_node_contribs) != _attr_num_features:
raise ValueError(
"List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(left_node_contribs), _attr_num_features))
if not isinstance(right_node_contribs, (list, tuple)):
raise TypeError(
"Expected list for 'right_node_contribs' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs)
if len(right_node_contribs) != _attr_num_features:
raise ValueError(
"List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(right_node_contribs), _attr_num_features))
pruning_mode = _execute.make_int(pruning_mode, "pruning_mode")
_, _, _op = _op_def_lib._apply_op_helper(
"BoostedTreesUpdateEnsemble",
tree_ensemble_handle=tree_ensemble_handle, feature_ids=feature_ids,
node_ids=node_ids, gains=gains, thresholds=thresholds,
left_node_contribs=left_node_contribs,
right_node_contribs=right_node_contribs, max_depth=max_depth,
learning_rate=learning_rate, pruning_mode=pruning_mode, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BoostedTreesUpdateEnsemble", name, _ctx._post_execution_callbacks,
tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,
left_node_contribs, right_node_contribs, max_depth, learning_rate,
"pruning_mode", pruning_mode)
return _result
except _core._FallbackException:
return boosted_trees_update_ensemble_eager_fallback(
tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,
left_node_contribs, right_node_contribs, max_depth, learning_rate,
pruning_mode=pruning_mode, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function boosted_trees_update_ensemble
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(node_ids, (list, tuple)):
raise TypeError(
"Expected list for 'node_ids' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % node_ids)
_attr_num_features = len(node_ids)
if not isinstance(gains, (list, tuple)):
raise TypeError(
"Expected list for 'gains' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % gains)
if len(gains) != _attr_num_features:
raise ValueError(
"List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(gains), _attr_num_features))
if not isinstance(thresholds, (list, tuple)):
raise TypeError(
"Expected list for 'thresholds' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % thresholds)
if len(thresholds) != _attr_num_features:
raise ValueError(
"List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(thresholds), _attr_num_features))
if not isinstance(left_node_contribs, (list, tuple)):
raise TypeError(
"Expected list for 'left_node_contribs' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs)
if len(left_node_contribs) != _attr_num_features:
raise ValueError(
"List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(left_node_contribs), _attr_num_features))
if not isinstance(right_node_contribs, (list, tuple)):
raise TypeError(
"Expected list for 'right_node_contribs' argument to "
"'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs)
if len(right_node_contribs) != _attr_num_features:
raise ValueError(
"List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d "
"must match length %d of argument 'node_ids'." %
(len(right_node_contribs), _attr_num_features))
pruning_mode = _execute.make_int(pruning_mode, "pruning_mode")
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int32)
node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32)
gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32)
left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32)
right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32)
max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32)
learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
_inputs_flat = [tree_ensemble_handle, feature_ids] + list(node_ids) + list(gains) + list(thresholds) + list(left_node_contribs) + list(right_node_contribs) + [max_depth, learning_rate]
_attrs = ("pruning_mode", pruning_mode, "num_features", _attr_num_features)
_result = _execute.execute(b"BoostedTreesUpdateEnsemble", 0,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_result = None
return _result
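# --- Illustrative usage sketch (not part of the generated file). ---
# A hedged example of growing the current tree by one layer with a single
# feature split. Every split value below is made up, and `ensemble_handle`
# is assumed to reference an initialized ensemble resource.
def _example_boosted_trees_update_ensemble(ensemble_handle):
  import tensorflow as tf
  return boosted_trees_update_ensemble(
      ensemble_handle,
      feature_ids=tf.constant([0], dtype=tf.int32),
      node_ids=[tf.constant([0], dtype=tf.int32)],
      gains=[tf.constant([0.5], dtype=tf.float32)],
      thresholds=[tf.constant([7], dtype=tf.int32)],
      left_node_contribs=[tf.constant([[0.1]], dtype=tf.float32)],
      right_node_contribs=[tf.constant([[-0.1]], dtype=tf.float32)],
      max_depth=tf.constant(6, dtype=tf.int32),
      learning_rate=tf.constant(0.1, dtype=tf.float32),
      pruning_mode=0)  # 0 = no pruning, per the docstring above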
def is_boosted_trees_ensemble_initialized(tree_ensemble_handle, name=None):
r"""Checks whether a tree ensemble has been initialized.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble resource.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"IsBoostedTreesEnsembleInitialized",
tree_ensemble_handle=tree_ensemble_handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"IsBoostedTreesEnsembleInitialized", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"IsBoostedTreesEnsembleInitialized", name,
_ctx._post_execution_callbacks, tree_ensemble_handle)
return _result
except _core._FallbackException:
return is_boosted_trees_ensemble_initialized_eager_fallback(
tree_ensemble_handle, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function is_boosted_trees_ensemble_initialized
"""
_ctx = ctx if ctx else _context.context()
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
_inputs_flat = [tree_ensemble_handle]
_attrs = None
_result = _execute.execute(b"IsBoostedTreesEnsembleInitialized", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"IsBoostedTreesEnsembleInitialized", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
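# --- Illustrative usage sketch (not part of the generated file). ---
# Hedged example: reading the initialization flag before deciding whether to
# (re)create the ensemble. `ensemble_handle` is assumed to come from the
# BoostedTreesEnsembleResourceHandleOp wrapper defined elsewhere in this
# module.
def _example_check_ensemble(ensemble_handle):
  is_initialized = is_boosted_trees_ensemble_initialized(ensemble_handle)
  # Callers would typically branch on this boolean tensor (e.g. via tf.cond)
  # and create the ensemble with boosted_trees_create_ensemble when False.
  return is_initialized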
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "BoostedTreesCalculateBestGainsPerFeature"
# input_arg {
# name: "node_id_range"
# type: DT_INT32
# }
# input_arg {
# name: "stats_summary_list"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# input_arg {
# name: "l1"
# type: DT_FLOAT
# }
# input_arg {
# name: "l2"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_complexity"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_node_weight"
# type: DT_FLOAT
# }
# output_arg {
# name: "node_ids_list"
# type: DT_INT32
# number_attr: "num_features"
# }
# output_arg {
# name: "gains_list"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# output_arg {
# name: "thresholds_list"
# type: DT_INT32
# number_attr: "num_features"
# }
# output_arg {
# name: "left_node_contribs_list"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# output_arg {
# name: "right_node_contribs_list"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# attr {
# name: "max_splits"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "num_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# }
# op {
# name: "BoostedTreesCreateEnsemble"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "stamp_token"
# type: DT_INT64
# }
# input_arg {
# name: "tree_ensemble_serialized"
# type: DT_STRING
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesDeserializeEnsemble"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "stamp_token"
# type: DT_INT64
# }
# input_arg {
# name: "tree_ensemble_serialized"
# type: DT_STRING
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesEnsembleResourceHandleOp"
# output_arg {
# name: "resource"
# type: DT_RESOURCE
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesGetEnsembleStates"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "stamp_token"
# type: DT_INT64
# }
# output_arg {
# name: "num_trees"
# type: DT_INT32
# }
# output_arg {
# name: "num_finalized_trees"
# type: DT_INT32
# }
# output_arg {
# name: "num_attempted_layers"
# type: DT_INT32
# }
# output_arg {
# name: "last_layer_nodes_range"
# type: DT_INT32
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesMakeStatsSummary"
# input_arg {
# name: "node_ids"
# type: DT_INT32
# }
# input_arg {
# name: "gradients"
# type: DT_FLOAT
# }
# input_arg {
# name: "hessians"
# type: DT_FLOAT
# }
# input_arg {
# name: "bucketized_features_list"
# type: DT_INT32
# number_attr: "num_features"
# }
# output_arg {
# name: "stats_summary"
# type: DT_FLOAT
# }
# attr {
# name: "max_splits"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "num_buckets"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "num_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# }
# op {
# name: "BoostedTreesPredict"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "bucketized_features"
# type: DT_INT32
# number_attr: "num_bucketized_features"
# }
# output_arg {
# name: "logits"
# type: DT_FLOAT
# }
# attr {
# name: "num_bucketized_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "logits_dimension"
# type: "int"
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesSerializeEnsemble"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "stamp_token"
# type: DT_INT64
# }
# output_arg {
# name: "tree_ensemble_serialized"
# type: DT_STRING
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesTrainingPredict"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "cached_tree_ids"
# type: DT_INT32
# }
# input_arg {
# name: "cached_node_ids"
# type: DT_INT32
# }
# input_arg {
# name: "bucketized_features"
# type: DT_INT32
# number_attr: "num_bucketized_features"
# }
# output_arg {
# name: "partial_logits"
# type: DT_FLOAT
# }
# output_arg {
# name: "tree_ids"
# type: DT_INT32
# }
# output_arg {
# name: "node_ids"
# type: DT_INT32
# }
# attr {
# name: "num_bucketized_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "logits_dimension"
# type: "int"
# }
# is_stateful: true
# }
# op {
# name: "BoostedTreesUpdateEnsemble"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "feature_ids"
# type: DT_INT32
# }
# input_arg {
# name: "node_ids"
# type: DT_INT32
# number_attr: "num_features"
# }
# input_arg {
# name: "gains"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# input_arg {
# name: "thresholds"
# type: DT_INT32
# number_attr: "num_features"
# }
# input_arg {
# name: "left_node_contribs"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# input_arg {
# name: "right_node_contribs"
# type: DT_FLOAT
# number_attr: "num_features"
# }
# input_arg {
# name: "max_depth"
# type: DT_INT32
# }
# input_arg {
# name: "learning_rate"
# type: DT_FLOAT
# }
# attr {
# name: "pruning_mode"
# type: "int"
# has_minimum: true
# }
# attr {
# name: "num_features"
# type: "int"
# has_minimum: true
# }
# is_stateful: true
# }
# op {
# name: "IsBoostedTreesEnsembleInitialized"
# input_arg {
# name: "tree_ensemble_handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "is_initialized"
# type: DT_BOOL
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\n\206\003\n(BoostedTreesCalculateBestGainsPerFeature\022\021\n\rnode_id_range\030\003\022$\n\022stats_summary_list\030\001*\014num_features\022\006\n\002l1\030\001\022\006\n\002l2\030\001\022\023\n\017tree_complexity\030\001\022\023\n\017min_node_weight\030\001\032\037\n\rnode_ids_list\030\003*\014num_features\032\034\n\ngains_list\030\001*\014num_features\032!\n\017thresholds_list\030\003*\014num_features\032)\n\027left_node_contribs_list\030\001*\014num_features\032*\n\030right_node_contribs_list\030\001*\014num_features\"\025\n\nmax_splits\022\003int(\0010\001\"\027\n\014num_features\022\003int(\0010\001\nh\n\032BoostedTreesCreateEnsemble\022\030\n\024tree_ensemble_handle\030\024\022\017\n\013stamp_token\030\t\022\034\n\030tree_ensemble_serialized\030\007\210\001\001\nm\n\037BoostedTreesDeserializeEnsemble\022\030\n\024tree_ensemble_handle\030\024\022\017\n\013stamp_token\030\t\022\034\n\030tree_ensemble_serialized\030\007\210\001\001\nk\n$BoostedTreesEnsembleResourceHandleOp\032\014\n\010resource\030\024\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\253\001\n\035BoostedTreesGetEnsembleStates\022\030\n\024tree_ensemble_handle\030\024\032\017\n\013stamp_token\030\t\032\r\n\tnum_trees\030\003\032\027\n\023num_finalized_trees\030\003\032\030\n\024num_attempted_layers\030\003\032\032\n\026last_layer_nodes_range\030\003\210\001\001\n\320\001\n\034BoostedTreesMakeStatsSummary\022\014\n\010node_ids\030\003\022\r\n\tgradients\030\001\022\014\n\010hessians\030\001\022*\n\030bucketized_features_list\030\003*\014num_features\032\021\n\rstats_summary\030\001\"\025\n\nmax_splits\022\003int(\0010\001\"\026\n\013num_buckets\022\003int(\0010\001\"\027\n\014num_features\022\003int(\0010\001\n\255\001\n\023BoostedTreesPredict\022\030\n\024tree_ensemble_handle\030\024\0220\n\023bucketized_features\030\003*\027num_bucketized_features\032\n\n\006logits\030\001\"\"\n\027num_bucketized_features\022\003int(\0010\001\"\027\n\020logits_dimension\022\003int\210\001\001\nk\n\035BoostedTreesSerializeEnsemble\022\030\n\024tree_ensemble_handle\030\024\032\017\n\013stamp_token\030\t\032\034\n\030tree_ensemble_serialized\030\007\210\001\001\n\203\002\n\033BoostedTreesTrainingPredict\022\030\n\024tree_ensemble_handle\030\024\022\023\n\017cached_tree_ids\030\003\022\023\n\017cached_node_ids\030\003\0220\n\023bucketized_features\030\003*\027num_bucketized_features\032\022\n\016partial_logits\030\001\032\014\n\010tree_ids\030\003\032\014\n\010node_ids\030\003\"\"\n\027num_bucketized_features\022\003int(\0010\001\"\027\n\020logits_dimension\022\003int\210\001\001\n\272\002\n\032BoostedTreesUpdateEnsemble\022\030\n\024tree_ensemble_handle\030\024\022\017\n\013feature_ids\030\003\022\032\n\010node_ids\030\003*\014num_features\022\027\n\005gains\030\001*\014num_features\022\034\n\nthresholds\030\003*\014num_features\022$\n\022left_node_contribs\030\001*\014num_features\022%\n\023right_node_contribs\030\001*\014num_features\022\r\n\tmax_depth\030\003\022\021\n\rlearning_rate\030\001\"\025\n\014pruning_mode\022\003int(\001\"\025\n\014num_features\022\003int(\001\210\001\001\nT\n!IsBoostedTreesEnsembleInitialized\022\030\n\024tree_ensemble_handle\030\024\032\022\n\016is_initialized\030\n\210\001\001")
# --- End of file: Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py (repo: Con-Mi/lambda-packs, license: MIT, language: Python) ---
#!/usr/bin/env python
"""JIP module that handles job profiles.
A job profile contains all compute-cluster and execution related meta-data of a
job, such as the number of threads reserved for the job or the time limit.
Profiles can be named and stored in the user configuration.
In addition, hierarchical updates of profiles can be applied. For example, a
default profile can be loaded from the configuration. This profile can then be
refined by a pipeline script or command line options.
This enables you to start with a *hard-coded* profile in your tool
implementation and then gradually modify and change the profile when the
tool is embedded in another pipeline or from the command line at execution
or submission time.
.. note:: Please note that the interpretation of some of the profiles
properties depends on the cluster implementation.
The following properties are supported by a profile and can be maintained
and updated.
General properties
------------------
The following properties are considered *general* and usually always
used and interpreted, independent of where and how you execute the tool
or pipeline:
name
You can assign an arbitrary name to your profiles. This name
will be used either as a job name, if the profile is applied
to a tool, or as a pipeline name if applied to a pipeline.
prefix
A name prefix that is applied to all embedded jobs. This can
be useful if, in a pipeline context, you want to allow your
tools to keep their own names, but you want to prefix all tools
that are part of a single pipeline.
threads
The number of threads or compute slots allocated by the execution.
Although this property and its interpretation also depends on
the cluster or grid implementation, this is considered a general
property that is also considered when you execute a pipeline or
tool outside of a compute grid.
working_dir or dir
The working directory for a job. This is initialized to the
current working directory of the process that creates the profile.
temp
A boolean property that you can use to *mark* a job as temporary.
Temporary jobs are treated specially in a pipeline execution.
You can find more information about temporary jobs in the
:class:`~jip.pipelines.Pipeline` documentation.
env
Dictionary that can be used to extend the jobs shell environment
description
Optional field that describes the profile and can be used to
describe custom profiles in the user configuration
Cluster/Grid specific properties
--------------------------------
The following properties can be set or modified, but their interpretation
depends on the cluster implementation and the capabilities of the cluster:
tasks
Number of tasks assigned to a single job
tasks_per_node
If multiple nodes are reserved by a single job, this is the
number of tasks assigned to each node.
nodes
Number of nodes requested by the job
queue
The *queue* the job is sent to
priority
A priority assigned to a job
environment
The name of the *environment* assigned to a job. This is **not**
the shell environment, but an arbitrary name that is used, for
example, in the *Sun Grid Engine* implementation to identify
the *parallel environment* the job is submitted to.
account
Name of the account for this job
mem
The memory limit for the job. This is stored here as a string
and passed on *as is* to the cluster implementation
time
The time limit for the job. Here, the time limit is specified
as a string and passed on to the cluster implementation *as is*.
out
Path to the ``stdout`` log file for this job
log
Path to the ``stderr`` log file for this job
err
Alias for ``log``; path to the ``stderr`` log file for this job
extra
This is an array that takes additional options that are
used when the submission command is constructed.
.. note:: Most of the cluster/grid specific properties are passed on to
          the cluster implementation *as is*; whether and how they take
          effect depends on the cluster's capabilities.
"""
import collections
import fnmatch
import re
import os
import json
import logging
import jip.utils
from jip.templates import render_template
log = logging.getLogger("jip.profile")
#: global specs
specs = None
class Profile(object):
"""A Profile contains cluster and runtime specific information about
a job.
"""
def __init__(self, name=None, threads=None, nodes=None, tasks=None,
tasks_per_node=None, environment=None, time=None, queue=None,
priority=None, log=None, out=None, account=None, mem=0,
extra=None, profile=None, prefix=None, temp=False, _load=True,
env=None, tool_name=None, working_dir=None, description=None,
specs=None, _name=None, **kwargs):
self._name = name if not _name else _name # render_template(name)
self.environment = render_template(environment)
self.nodes = render_template(nodes)
self.threads = render_template(threads)
self.tasks = render_template(tasks)
self.tasks_per_node = render_template(tasks_per_node)
self.profile = render_template(profile)
self.queue = render_template(queue)
self.time = render_template(time)
self.mem = render_template(mem)
self.priority = render_template(priority)
self.log = log
self.out = out
self.account = render_template(account)
self.prefix = render_template(prefix)
self.description = description
self.env = env
self.temp = temp
self.extra = extra
self.tool_name = tool_name
self.working_dir = working_dir
if self.working_dir is None and kwargs.get('dir', None):
self.working_dir = kwargs['dir']
self.specs = specs if specs else {}
if profile is not None and _load:
self.load(profile)
def apply_to_pipeline(self, pipeline):
"""Apply this profile to the pipeline
:param pipeline: the pipeline
:type pipeline: :class:`jip.pipeline.Pipeline`
"""
for node in pipeline.nodes():
self.apply_to_node(node)
def apply_to_node(self, node):
# check if there is a matching spec for the node
node_profile = self.specs.get(node.name, None)
if not node_profile:
node_profile = self.specs.get(node._name, None)
# check via regexp
for spec_name, spec in self.specs.iteritems():
if fnmatch.fnmatch(node.name, spec_name):
#if re.match(spec_name, node.name):
if not node_profile:
node_profile = spec()
else:
node_profile.update(spec)
if node_profile:
node._job.update(node_profile)
if node._pipeline_profile:
node._pipeline_profile.update(node_profile)
# apply global profile, don't overwrite
node._job.update(self, overwrite=False)
if node._pipeline_profile:
node._pipeline_profile.update(self, overwrite=False)
@property
def err(self):
"""Set the jobs error log file
:getter: access the jobs error log file
:setter: set the jobs error log file
:type: string
"""
return self.log
@err.setter
def err(self, value):
self.log = value
@property
def dir(self):
"""Set the jobs working directory
:getter: access the jobs working directory
:setter: set the jobs working directory
:type: string
"""
return self.working_dir
@dir.setter
def dir(self, value):
self.working_dir = value
@property
def name(self):
"""Set the jobs name
:getter: access the jobs name
:setter: set the jobs name
:type: string
"""
return self._name
@name.setter
def name(self, name):
self._name = name
def load(self, profile_name):
"""Set this profiles values to the values loaded from the profile
stored under the given name. An exception is raised if no profile of
that name could be found.
:param profile_name: the name of the profile that will be loaded
:type profile_name: string
"""
import jip
profiles = jip.config.get('profiles', {})
if profile_name not in profiles:
raise ValueError("Profile %s not found!" % profile_name)
profile = profiles[profile_name]
self.threads = profile.get('threads', self.threads)
self.nodes = profile.get('nodes', self.nodes)
self.tasks = profile.get('tasks', self.tasks)
self.tasks_per_node = profile.get('tasks_per_node',
self.tasks_per_node)
self.environment = profile.get('environment', self.environment)
self.time = profile.get('time', self.time)
self.queue = profile.get('queue', self.queue)
self.priority = profile.get('priority', self.priority)
self.log = profile.get('log', self.log)
self.out = profile.get('out', self.out)
self.account = profile.get('account', self.account)
self.mem = profile.get('mem', self.mem)
self.extra = profile.get('extra', self.extra)
self.env = profile.get('env', self.env)
self.description = profile.get('description', self.description)
def load_args(self, args):
"""Update this profile from the given dictionary of command line
arguments. The argument names must match the profile attributes
"""
for k, v in args.iteritems():
k = re.sub("^-+", "", k)
k = re.sub("-", "_", k)
if v and hasattr(self, k):
# check for multiple values
for single in v.split(" "):
tup = single.split("=")
if len(tup) == 1:
setattr(self, k, single)
else:
# find or create a spec for the given key
spec_profile = self.specs.get(tup[0], Profile())
setattr(spec_profile, k, tup[1])
self.specs[tup[0]] = spec_profile
def _render_job_name(self, job):
ctx = {}
for o in job.tool.options:
ctx[o.name] = o
name = job.name
if not name:
name = self.name
if not name:
name = job.tool.name
return render_template(
"%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
)
def _render(self, job, name):
ctx = {}
for o in job.tool.options:
ctx[o.name] = o
ctx['name'] = self.name
ctx['job'] = self
return render_template(
"%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
)
def apply_overwrite(self, job):
"""Apply the profile and overwrite all settings that are set
in this profile
"""
log.debug("Profiles | Overwriting job profile to %s", job)
if self.name:
job.name = self._render_job_name(job)
if self.threads:
job.threads = int(self.threads)
if self.nodes is not None:
job.nodes = self.nodes
if self.tasks is not None:
job.tasks = self.tasks
if self.tasks_per_node is not None:
job.tasks_per_node = self.tasks_per_node
if self.environment is not None:
job.environment = self.environment
if self.queue is not None:
job.queue = self.queue
if self.priority is not None:
job.priority = self.priority
if self.time is not None:
job.max_time = jip.utils.parse_time(self.time)
if self.mem is not None:
job.max_memory = jip.utils.parse_mem(self.mem)
if self.log is not None:
job.stderr = self._render(job, self.log)
if self.out is not None:
job.stdout = self._render(job, self.out)
if self.account is not None:
job.account = self.account
if self.temp is not None:
job.temp = self.temp
if self.extra is not None:
job.extra = self.extra
if self.working_dir is not None:
job.working_directory = os.path.abspath(self.working_dir)
# make log files absolute
if job.stdout and not job.stdout.startswith("/"):
job.stdout = os.path.join(job.working_directory, job.stdout)
if job.stderr and not job.stderr.startswith("/"):
job.stderr = os.path.join(job.working_directory, job.stderr)
# load environment
if self.env:
current = os.environ.copy()
if job.env:
current.update(job.env)
rendered = {}
for k, v in self.env.iteritems():
rendered[k] = render_template(v, **current)
job.env.update(rendered)
if hasattr(job, 'pipe_to'):
for child in job.pipe_to:
self.apply_overwrite(child)
# check specs
for spec_name, spec in self.specs.iteritems():
if fnmatch.fnmatch(job.name, spec_name):
spec.apply_overwrite(job)
def apply(self, job, pipeline=False, overwrite=False):
"""Apply this profile to the given job."""
log.debug("Profiles | Applying job profile to %s", job)
if overwrite:
self.apply_overwrite(job)
return
# set the job name or the pipeline name
# if this is a job or a pipeline
if not pipeline:
job.name = self._render_job_name(job)
elif self.name is not None:
log.info("Apply pipeline name to job: %s %s", job, self.name)
job.pipeline = self._render(job, self.name)
if self.threads and job.threads is None:
job.threads = int(self.threads)
if self.nodes is not None and job.nodes is None:
job.nodes = self.nodes
if self.tasks is not None and job.tasks is None:
job.tasks = self.tasks
if self.tasks_per_node is not None and job.tasks_per_node is None:
job.tasks_per_node = self.tasks_per_node
if self.environment is not None and job.environment is None:
job.environment = self.environment
if self.queue is not None and job.queue is None:
job.queue = self.queue
if self.priority is not None and job.priority is None:
job.priority = self.priority
if self.time is not None and job.max_time is None:
job.max_time = jip.utils.parse_time(self.time)
if self.mem is not None:
if job.max_memory is None:
job.max_memory = 0
job.max_memory += jip.utils.parse_mem(self.mem)
if self.log is not None and job.stderr is None:
job.stderr = self._render(job, self.log)
if self.out is not None and job.stdout is None:
job.stdout = self._render(job, self.out)
if self.account is not None and job.account is None:
job.account = self.account
if self.temp is not None and job.temp is None:
job.temp = self.temp
if self.extra is not None and job.extra is None:
job.extra = self.extra
if self.working_dir is not None and job.working_directory is None:
job.working_directory = os.path.abspath(self.working_dir)
# make log files absolute
if job.stdout and not job.stdout.startswith("/"):
job.stdout = os.path.join(job.working_directory, job.stdout)
if job.stderr and not job.stderr.startswith("/"):
job.stderr = os.path.join(job.working_directory, job.stderr)
# load environment
if self.env:
current = os.environ.copy()
if job.env:
current.update(job.env)
rendered = {}
for k, v in self.env.iteritems():
rendered[k] = render_template(v, **current)
job.env.update(rendered)
if hasattr(job, 'pipe_to'):
for child in job.pipe_to:
self.apply(child)
def update(self, profile, overwrite=True):
"""Update this profile from a given profile. All values that are
not None in the other profile are applied to this
profile
:param profile: the other profile
:type profile: :class:`Profile`
:param overwrite: if True, value will be set regardless. Otherwise, the
new value will only be applied if the old value
is None
"""
attrs = ["environment", "nodes", "threads",
"tasks", "tasks_per_node", "queue",
"time", "mem", "priority", "log", "out",
"account", "prefix", "env", "temp", "extra", "working_dir"]
for attr in attrs:
other = profile.__getattribute__(attr)
if other is not None and (overwrite or
self.__getattribute__(attr) is None):
setattr(self, attr, other)
def merge(self, master):
"""Merge this profile with the given master profile.
Currently this only merges the working directory of jobs
:param master: the master profile
"""
self.working_dir = master.working_dir if self.working_dir is None\
else self.working_dir
def __call__(self, name=None, threads=None, nodes=None, tasks=None,
tasks_per_node=None, environment=None, time=None, queue=None,
priority=None, log=None, out=None, err=None, account=None,
mem=None, profile=None, prefix=None, temp=None, extra=None,
dir=None, description=None, env=None):
clone = self.__class__(
name=name if name is not None else self._name,
threads=threads if threads is not None else self.threads,
tasks=tasks if tasks is not None else self.tasks,
tasks_per_node=tasks_per_node if tasks_per_node is not None else
self.tasks_per_node,
environment=environment if environment is not None
else self.environment,
env=env if env is not None else self.env,
nodes=nodes if nodes is not None else self.nodes,
profile=profile if profile is not None else self.profile,
queue=queue if queue is not None else self.queue,
time=time if time is not None else self.time,
priority=priority if priority is not None else self.priority,
log=log if log is not None else
(err if err is not None else self.log),
out=out if out is not None else self.out,
account=account if account is not None else self.account,
mem=mem if mem is not None else self.mem,
prefix=prefix if prefix is not None else self.prefix,
temp=temp if temp is not None else self.temp,
extra=extra if extra is not None else self.extra,
working_dir=dir if dir is not None else self.working_dir,
description=description if description is not None
else self.description,
_load=False
)
for name, spec in self.specs.iteritems():
clone.specs[name] = spec()
return clone
def __repr__(self):
return str(vars(self))
@classmethod
def from_job(cls, job):
"""Create a profile based on a given job. All properties
are set according to the given job, except the jobs temp state,
which will be kept unmodified.
:param job: the job
:returns: new profile generated from the job
"""
profile = cls()
profile.threads = job.threads if job.threads > 0 else None
profile.nodes = job.nodes
profile.tasks = job.tasks
profile.tasks_per_node = job.tasks_per_node
profile.environment = job.environment
profile.queue = job.queue
profile.priority = job.priority
profile.time = job.max_time
profile.mem = job.max_memory
profile.log = job.stderr
profile.out = job.stdout
profile.account = job.account
profile.extra = job.extra
profile.working_dir = job.working_directory
profile.env = job.env
return profile
@classmethod
def from_file(cls, file_name):
"""Load a profile from a json file
:param file_name: the name of the input file
"""
with open(file_name) as of:
try:
data = json.load(of)
except ValueError:
log.error("Malformed json file %s", file_name)
raise jip.ValidationError('jip.profiles', "Malformed json file %s" % (file_name))
return cls.from_dict(data)
@classmethod
def from_dict(cls, data):
"""Load a profile from a dictionary"""
profile = cls()
# apply all the params
for k, v in data.iteritems():
if k != 'jobs':
profile.__setattr__(k, v)
if "jobs" in data:
for name, spec in data["jobs"].iteritems():
profile.specs[name] = cls.from_dict(spec)
return profile
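# Illustrative sketch (not part of the original module): building a profile
# with per-job specs from a dictionary. Top-level keys mirror the profile
# attributes; entries under "jobs" become specs that apply_to_node() matches
# against node names as fnmatch patterns. All values below are made up:
#
#     p = Profile.from_dict({
#         "queue": "short",
#         "jobs": {"align*": {"threads": 8, "mem": "16G"}},
#     })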
def get(name='default', tool=None):
"""Load a profile by name. If tool is specified, the specs are
searched to the tool and if found, the specs are applied.
"""
# check the name for specs
s = name.split(' ')
p = Profile()
for ss in s:
tup = ss.split("=")
if len(tup) == 1:
# update global
l = Profile(profile=tup[0])
p.update(l)
else:
# update or create spec
spec = p.specs.get(tup[0], Profile())
spec.update(Profile(profile=tup[1]))
p.specs[tup[0]] = spec
return p
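# Illustrative sketch (not part of the original module) of the spec-string
# syntax parsed above. The profile names are assumed to exist in the user
# configuration:
#
#     p = get("default")              # load the global 'default' profile
#     p = get("default bwa=highmem")  # additionally apply the 'highmem'
#                                     # profile as a spec for 'bwa' jobs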
def get_specs(path=None):
Load specs from default locations and then update from specs in the given
path if specified.
:param path: optional path to an additional spec file
"""
def load_json(jf):
with open(jf) as of:
try:
data = json.load(of)
except ValueError:
log.error("Malformed json file %s", jf)
raise jip.ValidationError('jip.profiles', "Malformed json file %s" % (jf))
return data
global specs
cwd = os.path.join(os.getcwd(), "jip.specs")
home = os.path.join(os.getenv("HOME", ""), ".jip/jip.specs")
specs = {}
if os.path.exists(home):
specs = _update(specs, load_json(home))
if os.path.exists(cwd):
specs = _update(specs, load_json(cwd))
if path and os.path.exists(path):
specs = _update(specs, load_json(path))
return specs
def _update(config, other):
for k, v in other.iteritems():
if isinstance(v, collections.Mapping):
r = _update(config.get(k, {}), v)
config[k] = r
else:
config[k] = other[k]
return config
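# Illustrative sketch (not part of the original module): the hierarchical
# update described in the module docstring. A hard-coded base profile is
# refined by another profile; with overwrite=False only values that are
# still None are filled in. All values are made up.
if __name__ == "__main__":
    base = Profile(threads=1)
    base.update(Profile(threads=8, queue="long"))  # overwrite=True by default
    base.update(Profile(time="01:00"), overwrite=False)  # only fills 'time'
    print(base)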
# --- End of file: jip/profiles.py (repo: VDBWRAIR/pyjip, license: BSD-3-Clause, language: Python) ---
#!/usr/bin/env python
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
repository_tags = """
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/syntribos.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. image:: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
:target: https://docs.openstack.org/syntribos/latest/
.. image:: https://img.shields.io/pypi/v/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/pyversions/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/wheel/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/irc/%23openstack-security.png
:target: https://webchat.freenode.net/?channels=openstack-security
"""
def find_docs():
"""Yields files as per the whitelist."""
loc = "../doc/source/{}.rst"
whitelist = [
"about", "installation",
"configuration", "commands",
"running", "logging",
"test-anatomy", "unittests",
"contributing"]
for fname in whitelist:
fpath = loc.format(fname)
if os.path.isfile(fpath):
yield fpath
def concat_docs():
"""Concatinates files yielded by the generator `find_docs`."""
file_path = os.path.dirname(os.path.realpath(__file__))
head, tail = os.path.split(file_path)
outfile = head + "/README.rst"
if not os.path.isfile(outfile):
print("../README.rst not found, exiting!")
exit(1)
with open(outfile, 'w') as readme_handle:
readme_handle.write(repository_tags)
for doc in find_docs():
with open(doc, 'r') as doc_handle:
for line in doc_handle:
readme_handle.write(line)
readme_handle.write("\n")
if __name__ == '__main__':
"""Generate README.rst from docs."""
concat_docs()
print("\nREADME.rst created!\n")
# --- End of file: scripts/readme.py (repo: abdullahzamanbabar/syntribos, license: Apache-2.0, language: Python) ---
# -*- coding: utf-8 -*-
"""
Benchmark Results
Updated: 18.02.2022 (6618fa3c36b0c9f3a9d7a21bcdb00bf4fd258ee8)
------------------------------------------------------------------------------------------
| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |
------------------------------------------------------------------------------------------
| BarlowTwins | 128 | 200 | 0.835 | 193.4 Min | 2.2 GByte |
| BYOL | 128 | 200 | 0.872 | 217.0 Min | 2.3 GByte |
| DINO | 128 | 200 | 0.868 | 220.7 Min | 2.3 GByte |
| Moco | 128 | 200 | 0.838 | 229.5 Min | 2.3 GByte |
| NNCLR | 128 | 200 | 0.838 | 198.7 Min | 2.2 GByte |
| SimCLR | 128 | 200 | 0.822 | 182.7 Min | 2.2 GByte |
| SimSiam | 128 | 200 | 0.779 | 182.6 Min | 2.3 GByte |
| SwaV | 128 | 200 | 0.806 | 182.4 Min | 2.2 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 200 | 0.827 | 160.7 Min | 7.5 GByte |
| BYOL | 512 | 200 | 0.872 | 188.5 Min | 7.7 GByte |
| DINO | 512 | 200 | 0.862 | 191.1 Min | 7.5 GByte |
| Moco (*) | 512 | 200 | 0.850 | 196.8 Min | 7.8 GByte |
| NNCLR (*) | 512 | 200 | 0.836 | 164.7 Min | 7.6 GByte |
| SimCLR | 512 | 200 | 0.828 | 158.2 Min | 7.5 GByte |
| SimSiam | 512 | 200 | 0.814 | 159.0 Min | 7.6 GByte |
| SwaV | 512 | 200 | 0.833 | 158.4 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
| BarlowTwins | 512 | 800 | 0.857 | 641.5 Min | 7.5 GByte |
| BYOL | 512 | 800 | 0.911 | 754.2 Min | 7.8 GByte |
| DINO | 512 | 800 | 0.884 | 765.5 Min | 7.6 GByte |
| Moco (*) | 512 | 800 | 0.900 | 787.7 Min | 7.8 GByte |
| NNCLR (*) | 512 | 800 | 0.896 | 659.2 Min | 7.6 GByte |
| SimCLR | 512 | 800 | 0.875 | 632.5 Min | 7.5 GByte |
| SimSiam | 512 | 800 | 0.906 | 636.5 Min | 7.6 GByte |
| SwaV | 512 | 800 | 0.881 | 634.9 Min | 7.5 GByte |
------------------------------------------------------------------------------------------
(*): Increased the memory bank size from 4096 to 8192 to keep the memory
bank from changing too quickly due to the larger batch size.
The benchmarks were created on a single NVIDIA RTX A6000.
Note that this benchmark also supports a multi-GPU setup. If you run it on
a system with multiple GPUs make sure that you kill all the processes when
killing the application. Due to the way we setup this benchmark the distributed
processes might continue the benchmark if one of the nodes is killed.
If you know how to fix this don't hesitate to create an issue or PR :)
"""
import copy
import os
import time
import lightly
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision
from lightly.models import modules
from lightly.models.modules import heads
from lightly.models import utils
from lightly.utils import BenchmarkModule
from pytorch_lightning.loggers import TensorBoardLogger
logs_root_dir = os.path.join(os.getcwd(), 'benchmark_logs')
# set max_epochs to 800 for long run (takes around 10h on a single V100)
max_epochs = 1
num_workers = 8
knn_k = 200
knn_t = 0.1
classes = 10
# Set to True to enable Distributed Data Parallel training.
distributed = True
# Set to True to enable Synchronized Batch Norm (requires distributed=True).
# If enabled the batch norm is calculated over all gpus, otherwise the batch
# norm is only calculated from samples on the same gpu.
sync_batchnorm = False
# Set to True to gather features from all gpus before calculating
# the loss (requires distributed=True).
# If enabled then the loss on every gpu is calculated with features from all
# gpus, otherwise only features from the same gpu are used.
gather_distributed = True
# benchmark
n_runs = 1 # optional, increase to create multiple runs and report mean + std
batch_size = 512
lr_factor = batch_size / 128 # scales the learning rate linearly with batch size
# use a GPU if available
#gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
gpus = 4 if torch.cuda.is_available() else 0
print(gpus)
if distributed:
distributed_backend = 'ddp'
# reduce batch size for distributed training
batch_size = batch_size // gpus
else:
distributed_backend = None
# limit to single gpu if not using distributed training
gpus = min(gpus, 1)
# Adapted from our MoCo Tutorial on CIFAR-10
#
# Replace the path with the location of your CIFAR-10 dataset.
# We assume we have a train folder with subfolders
# for each class and .png images inside.
#
# You can download `CIFAR-10 in folders from kaggle
# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.
# The dataset structure should be like this:
# cifar10/train/
# L airplane/
# L 10008_airplane.png
# L ...
# L automobile/
# L bird/
# L cat/
# L deer/
# L dog/
# L frog/
# L horse/
# L ship/
# L truck/
path_to_train = './data/cifar10/train/'
path_to_test = './data/cifar10/test/'
# Use SimCLR augmentations, additionally, disable blur for cifar10
collate_fn = lightly.data.SimCLRCollateFunction(
input_size=32,
gaussian_blur=0.,
)
# Multi crop augmentation for SwAV, additionally, disable blur for cifar10
swav_collate_fn = lightly.data.SwaVCollateFunction(
crop_sizes=[32],
crop_counts=[2], # 2 crops @ 32x32px
crop_min_scales=[0.14],
gaussian_blur=0,
)
# Multi crop augmentation for DINO, additionally, disable blur for cifar10
dino_collate_fn = lightly.data.DINOCollateFunction(
global_crop_size=32,
n_local_views=0,
gaussian_blur=(0, 0, 0),
)
# No additional augmentations for the test set
test_transforms = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=lightly.data.collate.imagenet_normalize['mean'],
std=lightly.data.collate.imagenet_normalize['std'],
)
])
dataset_train_ssl = lightly.data.LightlyDataset(
input_dir=path_to_train
)
# we use test transformations for getting the feature for kNN on train data
dataset_train_kNN = lightly.data.LightlyDataset(
input_dir=path_to_train,
transform=test_transforms
)
dataset_test = lightly.data.LightlyDataset(
input_dir=path_to_test,
transform=test_transforms
)
def get_data_loaders(batch_size: int, model):
"""Helper method to create dataloaders for ssl, kNN train and kNN test
Args:
batch_size: Desired batch size for all dataloaders
model: Model instance (or class) used to select the matching collate function
"""
col_fn = collate_fn
if isinstance(model, SwaVModel):
col_fn = swav_collate_fn
elif isinstance(model, DINOModel):
col_fn = dino_collate_fn
dataloader_train_ssl = torch.utils.data.DataLoader(
dataset_train_ssl,
batch_size=batch_size,
shuffle=True,
collate_fn=col_fn,
drop_last=True,
num_workers=num_workers
)
dataloader_train_kNN = torch.utils.data.DataLoader(
dataset_train_kNN,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers
)
dataloader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers
)
return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
class MocoModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
num_splits = 0 if sync_batchnorm else 8
resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=num_splits)
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
# create a moco model based on ResNet
self.projection_head = heads.MoCoProjectionHead(512, 512, 128)
self.backbone_momentum = copy.deepcopy(self.backbone)
self.projection_head_momentum = copy.deepcopy(self.projection_head)
utils.deactivate_requires_grad(self.backbone_momentum)
utils.deactivate_requires_grad(self.projection_head_momentum)
# create our loss with the optional memory bank
self.criterion = lightly.loss.NTXentLoss(
temperature=0.1,
memory_bank_size=4096,
)
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
return self.projection_head(x)
def training_step(self, batch, batch_idx):
(x0, x1), _, _ = batch
# update momentum
utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)
utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)
def step(x0_, x1_):
x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)
x0_ = self.backbone(x0_).flatten(start_dim=1)
x0_ = self.projection_head(x0_)
x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)
x1_ = self.projection_head_momentum(x1_)
x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)
return x0_, x1_
# We use a symmetric loss (model trains faster at little compute overhead)
# https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
loss_1 = self.criterion(*step(x0, x1))
loss_2 = self.criterion(*step(x1, x0))
loss = 0.5 * (loss_1 + loss_2)
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
params = list(self.backbone.parameters()) + list(self.projection_head.parameters())
optim = torch.optim.SGD(
params,
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
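# Illustrative sketch (not part of the original benchmark): how one model is
# wired up with the dataloaders defined above; a full run would then fit it
# with a pytorch_lightning Trainer. The names below all exist in this file.
#
#   _, dataloader_train_kNN, _ = get_data_loaders(batch_size, MocoModel)
#   model = MocoModel(dataloader_train_kNN, num_classes=classes)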
class SimCLRModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)
self.criterion = lightly.loss.NTXentLoss()
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(x)
return z
def training_step(self, batch, batch_index):
(x0, x1), _, _ = batch
z0 = self.forward(x0)
z1 = self.forward(x1)
loss = self.criterion(z0, z1)
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(
self.parameters(),
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
class SimSiamModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)
# use a 2-layer projection head for cifar10 as described in the paper
self.projection_head = heads.ProjectionHead([
(
512,
2048,
nn.BatchNorm1d(2048),
nn.ReLU(inplace=True)
),
(
2048,
2048,
nn.BatchNorm1d(2048),
None
)
])
self.criterion = lightly.loss.NegativeCosineSimilarity()
def forward(self, x):
f = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(f)
p = self.prediction_head(z)
z = z.detach()
return z, p
def training_step(self, batch, batch_idx):
(x0, x1), _, _ = batch
z0, p0 = self.forward(x0)
z1, p1 = self.forward(x1)
loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(
self.parameters(),
lr=6e-2, # no lr-scaling, results in better training stability
momentum=0.9,
weight_decay=5e-4
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
class BarlowTwinsModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
# use a 2-layer projection head for cifar10 as described in the paper
self.projection_head = heads.ProjectionHead([
(
512,
2048,
nn.BatchNorm1d(2048),
nn.ReLU(inplace=True)
),
(
2048,
2048,
None,
None
)
])
self.criterion = lightly.loss.BarlowTwinsLoss(gather_distributed=gather_distributed)
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(x)
return z
def training_step(self, batch, batch_index):
(x0, x1), _, _ = batch
z0 = self.forward(x0)
z1 = self.forward(x1)
loss = self.criterion(z0, z1)
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(
self.parameters(),
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
class BYOLModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
# create a byol model based on ResNet
self.projection_head = heads.BYOLProjectionHead(512, 1024, 256)
self.prediction_head = heads.BYOLProjectionHead(256, 1024, 256)
self.backbone_momentum = copy.deepcopy(self.backbone)
self.projection_head_momentum = copy.deepcopy(self.projection_head)
utils.deactivate_requires_grad(self.backbone_momentum)
utils.deactivate_requires_grad(self.projection_head_momentum)
self.criterion = lightly.loss.NegativeCosineSimilarity()
def forward(self, x):
y = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(y)
p = self.prediction_head(z)
return p
def forward_momentum(self, x):
y = self.backbone_momentum(x).flatten(start_dim=1)
z = self.projection_head_momentum(y)
z = z.detach()
return z
def training_step(self, batch, batch_idx):
utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)
utils.update_momentum(self.projection_head, self.projection_head_momentum, m=0.99)
(x0, x1), _, _ = batch
p0 = self.forward(x0)
z0 = self.forward_momentum(x0)
p1 = self.forward(x1)
z1 = self.forward_momentum(x1)
loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
params = list(self.backbone.parameters()) \
+ list(self.projection_head.parameters()) \
+ list(self.prediction_head.parameters())
optim = torch.optim.SGD(
params,
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
class SwaVModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
self.projection_head = heads.SwaVProjectionHead(512, 512, 128)
self.prototypes = heads.SwaVPrototypes(128, 512) # use 512 prototypes
self.criterion = lightly.loss.SwaVLoss(sinkhorn_gather_distributed=gather_distributed)
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
x = self.projection_head(x)
x = nn.functional.normalize(x, dim=1, p=2)
return self.prototypes(x)
def training_step(self, batch, batch_idx):
# normalize the prototypes so they are on the unit sphere
self.prototypes.normalize()
# the multi-crop dataloader returns a list of image crops where the
# first two items are the high resolution crops and the rest are low
# resolution crops
multi_crops, _, _ = batch
multi_crop_features = [self.forward(x) for x in multi_crops]
# split list of crop features into high and low resolution
high_resolution_features = multi_crop_features[:2]
low_resolution_features = multi_crop_features[2:]
# calculate the SwaV loss
loss = self.criterion(
high_resolution_features,
low_resolution_features
)
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
optim = torch.optim.Adam(
self.parameters(),
lr=1e-3 * lr_factor,
weight_decay=1e-6,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
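# --- Illustrative sketch (added for clarity; not part of the original benchmark) ---
# prototypes.normalize() above keeps the prototype vectors on the unit sphere.
# Applied to a raw (n_prototypes, dim) weight matrix, the operation amounts to
# the following (helper name hypothetical):
def _normalize_prototypes_sketch(weight):
    with torch.no_grad():
        weight.copy_(nn.functional.normalize(weight, dim=1, p=2))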
class NNCLRModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)
# use only a 2-layer projection head for cifar10
self.projection_head = heads.ProjectionHead([
(
512,
2048,
nn.BatchNorm1d(2048),
nn.ReLU(inplace=True)
),
(
2048,
256,
nn.BatchNorm1d(256),
None
)
])
self.criterion = lightly.loss.NTXentLoss()
self.memory_bank = modules.NNMemoryBankModule(size=4096)
def forward(self, x):
y = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(y)
p = self.prediction_head(z)
z = z.detach()
return z, p
def training_step(self, batch, batch_idx):
(x0, x1), _, _ = batch
z0, p0 = self.forward(x0)
z1, p1 = self.forward(x1)
z0 = self.memory_bank(z0, update=False)
z1 = self.memory_bank(z1, update=True)
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        self.log('train_loss_ssl', loss)
        return loss
def configure_optimizers(self):
optim = torch.optim.SGD(
self.parameters(),
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
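# --- Illustrative sketch (added for clarity; not part of the original benchmark) ---
# NNMemoryBankModule above replaces each embedding by its nearest neighbour in a
# queue of past embeddings. The core cosine-similarity lookup in isolation, with
# `bank` assumed to be an (N, D) tensor of stored embeddings (name hypothetical):
def _nearest_neighbour_sketch(z, bank):
    z_norm = nn.functional.normalize(z, dim=1)
    bank_norm = nn.functional.normalize(bank, dim=1)
    sim = z_norm @ bank_norm.T        # (B, N) cosine similarities
    idx = sim.argmax(dim=1)           # nearest neighbour index per sample
    return bank[idx]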
class DINOModel(BenchmarkModule):
def __init__(self, dataloader_kNN, num_classes):
super().__init__(dataloader_kNN, num_classes)
# create a ResNet backbone and remove the classification head
resnet = lightly.models.ResNetGenerator('resnet-18')
self.backbone = nn.Sequential(
*list(resnet.children())[:-1],
nn.AdaptiveAvgPool2d(1)
)
self.head = self._build_projection_head()
self.teacher_backbone = copy.deepcopy(self.backbone)
self.teacher_head = self._build_projection_head()
utils.deactivate_requires_grad(self.teacher_backbone)
utils.deactivate_requires_grad(self.teacher_head)
self.criterion = lightly.loss.DINOLoss(output_dim=2048)
def _build_projection_head(self):
head = heads.DINOProjectionHead(512, 2048, 256, 2048, batch_norm=True)
# use only 2 layers for cifar10
head.layers = heads.ProjectionHead([
(512, 2048, nn.BatchNorm1d(2048), nn.GELU()),
(2048, 256, None, None),
]).layers
return head
def forward(self, x):
y = self.backbone(x).flatten(start_dim=1)
z = self.head(y)
return z
def forward_teacher(self, x):
y = self.teacher_backbone(x).flatten(start_dim=1)
z = self.teacher_head(y)
return z
def training_step(self, batch, batch_idx):
utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)
utils.update_momentum(self.head, self.teacher_head, m=0.99)
views, _, _ = batch
views = [view.to(self.device) for view in views]
global_views = views[:2]
teacher_out = [self.forward_teacher(view) for view in global_views]
student_out = [self.forward(view) for view in views]
loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)
self.log('train_loss_ssl', loss)
return loss
def configure_optimizers(self):
param = list(self.backbone.parameters()) \
+ list(self.head.parameters())
optim = torch.optim.SGD(
param,
lr=6e-2 * lr_factor,
momentum=0.9,
weight_decay=5e-4,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
return [optim], [scheduler]
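# --- Illustrative sketch (added for clarity; not part of the original benchmark) ---
# DINOLoss above is a cross-entropy between sharpened, centered teacher outputs
# and student outputs. Its core term for one (teacher, student) view pair, with
# assumed temperatures and an externally supplied center (all names hypothetical;
# the real loss also maintains the center and skips same-view pairs):
def _dino_term_sketch(teacher_out, student_out, center, t_temp=0.04, s_temp=0.1):
    t = nn.functional.softmax((teacher_out - center) / t_temp, dim=-1).detach()
    log_s = nn.functional.log_softmax(student_out / s_temp, dim=-1)
    return -(t * log_s).sum(dim=-1).mean()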
models = [
BarlowTwinsModel,
BYOLModel,
DINOModel,
MocoModel,
NNCLRModel,
SimCLRModel,
SimSiamModel,
SwaVModel,
]
bench_results = dict()
experiment_version = None
# loop through configurations and train models
for BenchmarkModel in models:
runs = []
model_name = BenchmarkModel.__name__.replace('Model', '')
for seed in range(n_runs):
pl.seed_everything(seed)
dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(
batch_size=batch_size,
model=BenchmarkModel,
)
benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
# Save logs to: {CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/
        # If multiple runs are specified, a subdirectory is created for each run.
sub_dir = model_name if n_runs <= 1 else f'{model_name}/run{seed}'
logger = TensorBoardLogger(
save_dir=os.path.join(logs_root_dir, 'cifar10'),
name='',
sub_dir=sub_dir,
version=experiment_version,
)
if experiment_version is None:
# Save results of all models under same version directory
experiment_version = logger.version
checkpoint_callback = pl.callbacks.ModelCheckpoint(
dirpath=os.path.join(logger.log_dir, 'checkpoints')
)
trainer = pl.Trainer(
max_epochs=max_epochs,
gpus=gpus,
default_root_dir=logs_root_dir,
strategy=distributed_backend,
sync_batchnorm=sync_batchnorm,
logger=logger,
callbacks=[checkpoint_callback]
)
start = time.time()
trainer.fit(
benchmark_model,
train_dataloaders=dataloader_train_ssl,
val_dataloaders=dataloader_test
)
end = time.time()
run = {
'model': model_name,
'batch_size': batch_size,
'epochs': max_epochs,
'max_accuracy': benchmark_model.max_accuracy,
'runtime': end - start,
'gpu_memory_usage': torch.cuda.max_memory_allocated(),
'seed': seed,
}
runs.append(run)
print(run)
# delete model and trainer + free up cuda memory
del benchmark_model
del trainer
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
bench_results[model_name] = runs
# print results table
header = (
f"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} "
f"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |"
)
print('-' * len(header))
print(header)
print('-' * len(header))
for model, results in bench_results.items():
runtime = np.array([result['runtime'] for result in results])
runtime = runtime.mean() / 60 # convert to min
accuracy = np.array([result['max_accuracy'] for result in results])
gpu_memory_usage = np.array([result['gpu_memory_usage'] for result in results])
gpu_memory_usage = gpu_memory_usage.max() / (1024**3) # convert to gbyte
if len(accuracy) > 1:
accuracy_msg = f"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}"
else:
accuracy_msg = f"{accuracy.mean():>18.3f}"
print(
f"| {model:<13} | {batch_size:>10} | {max_epochs:>6} "
f"| {accuracy_msg} | {runtime:>6.1f} Min "
f"| {gpu_memory_usage:>8.1f} GByte |",
flush=True
)
print('-' * len(header))
| 36.265252 | 122 | 0.602289 | [
"MIT"
] | dczifra/lightly | docs/source/getting_started/benchmarks/cifar10_benchmark.py | 27,350 | Python |
from core.models import Item, Listing, PromoCode, Address, UserProfile
from core.zipcode import zipcodes
from datetime import datetime, timedelta
from decimal import *
from django import forms
from django.core.files.base import ContentFile
from django.core.files.images import get_image_dimensions
from io import BytesIO
from PIL import Image
class ItemListingForm(forms.ModelForm):
title = forms.CharField(widget=forms.TextInput(attrs={'class': 'validate'}), label="Title", max_length=100)
description = forms.CharField(widget=forms.Textarea(attrs={'class': 'materialize-textarea validate'}),
label="Description")
category = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), choices=Item.CATEGORY_CHOICES)
price = forms.DecimalField(widget=forms.NumberInput(attrs={'class': 'validate', 'onchange': 'change()'}),
label='Buy now price')
zipcode = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'validate'}), label='Pickup zipcode')
# For image cropping purposes
crop_x = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'crop-params'}))
crop_y = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'crop-params'}))
crop_height = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'crop-params'}))
crop_width = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'crop-params'}))
    # Make sure the price is at least $5.00
def clean_price(self):
price = self.cleaned_data['price']
if price < 5:
raise forms.ValidationError("The minimum price is $5.00.")
return price
# Make sure a category is chosen
def clean_category(self):
category = self.cleaned_data['category']
        if category == '0':
raise forms.ValidationError("You must choose a category for your item.")
return category
# Make sure shipping zip code is one we deliver to
def clean_zipcode(self):
zip_code = self.cleaned_data['zipcode']
if zip_code not in zipcodes():
raise forms.ValidationError("Unfortunately, Circa is not yet available in that zip code.")
return zip_code
def clean_crop_width(self):
width = int(self.cleaned_data['crop_width'])
height = int(self.cleaned_data['crop_height'])
if width < 450 or height < 450:
raise forms.ValidationError("Your cropped image must be at least 450 by 450.")
if width != height:
raise forms.ValidationError("Width and height must match.")
return width
def __init__(self, *args, **kwargs):
self.seller = kwargs.pop('seller')
super().__init__(*args, **kwargs)
def save(self, commit=True):
item = super().save(commit=False)
self.process_image(item)
listing = Listing.objects.create(
price=self.cleaned_data['price'],
zipcode=self.cleaned_data['zipcode']
)
item.listing = listing
item.seller = self.seller
item.save()
return item
def process_image(self, item):
image = Image.open(item.photo)
left = int(self.cleaned_data['crop_x'])
top = int(self.cleaned_data['crop_y'])
width = int(self.cleaned_data['crop_width'])
height = int(self.cleaned_data['crop_height'])
box = (left, top, left+width, top+height)
image = image.crop(box)
f = BytesIO()
try:
image.save(f, format='jpeg')
s = f.getvalue()
item.photo.save(item.photo.name, ContentFile(s))
finally:
f.close()
class Meta:
model = Item
        fields = ['title', 'description', 'category', 'photo']
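# --- Illustrative note (added for clarity; not part of the original file) ---
# PIL's Image.crop takes a (left, upper, right, lower) box in pixel coordinates,
# so the box built in process_image() selects a crop_width x crop_height window
# anchored at (crop_x, crop_y). For example (the file path is hypothetical):
#
#     box = (10, 20, 10 + 450, 20 + 450)           # a 450x450 square at (10, 20)
#     cropped = Image.open('photo.jpg').crop(box)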
class PromoForm(forms.Form):
code = forms.CharField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user') # Grabs current user
self.listing = kwargs.pop('listing') # Grabs listing
super(PromoForm, self).__init__(*args, **kwargs)
def clean_code(self):
found = False
promo_code = self.cleaned_data['code']
if PromoCode.objects.all().count() == 0:
raise forms.ValidationError("Sorry, that code isn't valid.")
codes = PromoCode.objects.all()
for promotional_code in codes:
if promotional_code.code == promo_code:
if promotional_code.redeemed:
raise forms.ValidationError("Sorry, promo code already used.")
elif promotional_code.user != self.user:
raise forms.ValidationError("Sorry, that's not your code!")
else:
found = True
break
if not found:
raise forms.ValidationError("Sorry, that code is not valid.")
return promo_code
def save(self):
promo = PromoCode.objects.filter(code=self.cleaned_data['code'])[0]
promo.listing = self.listing
promo.save()
self.listing.save()
class AddressForm(forms.Form):
address_line_1 = forms.CharField()
address_line_2 = forms.CharField(required=False)
city = forms.CharField()
# Must be changed when we branch to different states!
state = forms.CharField(widget=forms.HiddenInput())
INITIAL_STATE = 'GA'
zipcode = forms.CharField()
special_instructions = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super().__init__(*args, **kwargs)
def save(self):
if not hasattr(self.user, 'userprofile'):
UserProfile.objects.create(user=self.user)
address = Address.objects.create(
address_line_1=self.cleaned_data['address_line_1'],
address_line_2=self.cleaned_data['address_line_2'],
city=self.cleaned_data['city'],
state=self.cleaned_data['state'],
zipcode=self.cleaned_data['zipcode'],
special_instructions=self.cleaned_data['special_instructions']
)
self.user.userprofile.address = address
self.user.userprofile.save()
class EditListingForm(forms.Form):
# Information for Item
title = forms.CharField(widget=forms.TextInput(attrs={'class': 'validate'}), label="Title", max_length=100)
description = forms.CharField(widget=forms.Textarea(attrs={'class': 'materialize-textarea validate'}),
label="Description")
category = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), choices=Item.CATEGORY_CHOICES)
# Information for Listing
price = forms.DecimalField(widget=forms.NumberInput(attrs={'class': 'validate'}))
zipcode = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'validate'}), label='Pickup zipcode')
def __init__(self, *args, **kwargs):
self.listing = kwargs.pop('listing') # Grabs current listing
super(EditListingForm, self).__init__(*args, **kwargs)
    # Make sure the price is at least $5.00
def clean_price(self):
price = Decimal(self.cleaned_data['price'])
if price < 5:
raise forms.ValidationError("The minimum price is $5.00.")
return price
# Make sure a category is chosen
def clean_category(self):
category = self.cleaned_data['category']
        if category == '0':
raise forms.ValidationError("You must choose a category for your item.")
return category
# make sure shipping zip code is one we deliver to
def clean_zipcode(self):
zip_code = self.cleaned_data['zipcode']
if zip_code not in zipcodes():
raise forms.ValidationError("Unfortunately, Circa is not yet available in that zip code.")
return zip_code
def save(self):
self.listing.item.title = self.cleaned_data['title']
self.listing.item.description = self.cleaned_data['description']
self.listing.item.category = self.cleaned_data['category']
self.listing.price = self.cleaned_data['price']
self.listing.zipcode = self.cleaned_data['zipcode']
self.listing.item.save()
self.listing.save()
# This is a special form used to get a user's email if they did not provide one via Facebook
class EmailRequestForm(forms.Form):
email = forms.EmailField()
| 38.442478 | 118 | 0.626727 | [
"MIT"
] | gnarizzy/circa | circa/core/forms.py | 8,688 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDeprecatedDialogTeacher
from .build import build
from parlai.utils.data import DatatypeHelper
import copy
import os
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'CornellMovie', dt + filtered + '.txt')
class DefaultTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['datafile'] = _path(opt, '')
opt['cands_datafile'] = opt['datafile']
self.fold = DatatypeHelper.fold(opt['datatype'])
super().__init__(opt, shared)
def num_examples(self):
if self.fold == 'train':
return 133125
elif self.fold == 'valid':
return 16759
elif self.fold == 'test':
return 16611
def num_episodes(self):
if self.fold == 'train':
return 66478
elif self.fold == 'valid':
return 8310
elif self.fold == 'test':
return 8309
class DoubleTeacher(DefaultTeacher):
"""
This version creates text-label pairs from the perspective of both speakers.
"""
def num_examples(self):
if self.fold == 'train':
return 176975
elif self.fold == 'valid':
return 22349
elif self.fold == 'test':
return 22013
def num_episodes(self):
if self.fold == 'train':
return 102401
elif self.fold == 'valid':
return 12806
elif self.fold == 'test':
return 12790
def _rebuild(self, entries):
new_list = []
if len(entries) > 0:
# add all ( y_t => x_(t+1) ) pairs
new_list.extend(
[
(entries[i][1][0], [entries[i + 1][0]])
for i in range(len(entries) - 1)
]
)
return new_list
def _is_valid(self, entry):
if entry[0] == '' or entry[1] is None:
return False
return True
def setup_data(self, path):
"""
Adds additional perspectives. For example, in the conversation:
x1 y1
x2 y2
x3
Creates the additional dialog:
y1 x2
y2 x3
"""
# this shows conversations in both directions
alternate = []
for entry, new in super().setup_data(path):
if new:
for i, e in enumerate(self._rebuild(alternate)):
if self._is_valid(e):
yield e, i == 0
alternate.clear()
alternate.append(entry)
if self._is_valid(entry):
yield entry, new
if alternate:
for i, e in enumerate(self._rebuild(alternate)):
if self._is_valid(e):
yield e, i == 0
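# --- Illustrative note (added for clarity; not part of the original file) ---
# _rebuild() turns a conversation [(x1, [y1]), (x2, [y2]), (x3, [y3])] into the
# flipped perspective [(y1, [x2]), (y2, [x3])], which setup_data() then yields
# alongside the original direction. For example:
#
#     entries = [('x1', ['y1']), ('x2', ['y2']), ('x3', ['y3'])]
#     # self._rebuild(entries) -> [('y1', ['x2']), ('y2', ['x3'])]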
| 28.716814 | 81 | 0.522342 | [
"Apache-2.0"
] | GuillaumeLeclerc/cortx | doc/integrations/pytorch/parlai/tasks/cornell_movie/agents.py | 3,245 | Python |
a = int(input())
b = int(input())
c = int(input())
# Heron's formula: area = sqrt(p*(p-a)*(p-b)*(p-c)), where p is the semi-perimeter
# (assumes a, b, c form a valid triangle)
p = (a + b + c) / 2
s = (p * (p - a) * (p - b) * (p - c)) ** 0.5
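# Worked example (added for clarity): sides a=3, b=4, c=5 give p = 6 and
# s = (6 * 3 * 2 * 1) ** 0.5 = 6.0, the area of the 3-4-5 right triangle.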
print(s)
| 16 | 53 | 0.325 | [
"MIT"
] | Cet500/PyLib | PyCourse/test_1-12-1.py | 160 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, assert_raises
from socorrolib.lib import MissingArgumentError
from socorro.external.postgresql.field import Field
from .unittestbase import PostgreSQLTestCase
class IntegrationTestField(PostgreSQLTestCase):
'''Test socorro.external.postgresql.field.Field class. '''
def setUp(self):
super(IntegrationTestField, self).setUp()
cursor = self.connection.cursor()
cursor.execute('''
INSERT INTO data_dictionary
(raw_field, transforms, product)
VALUES
(
'field1',
'{}',
'WaterWolf'
),
(
'field2',
'{"processor": "some notes"}',
'WaterWolf'
);
''')
self.connection.commit()
def tearDown(self):
'''Clean up the database, delete tables and functions. '''
cursor = self.connection.cursor()
cursor.execute('''
TRUNCATE data_dictionary CASCADE
''')
self.connection.commit()
super(IntegrationTestField, self).tearDown()
def test_get(self):
api = Field(config=self.config)
# expect a result
res = api.get(name='field1')
res_expected = {
'name': 'field1',
'transforms': {},
'product': 'WaterWolf'
}
eq_(res, res_expected)
# expect a result
res = api.get(name='field2')
res_expected = {
'name': 'field2',
'transforms': {'processor': 'some notes'},
'product': 'WaterWolf'
}
eq_(res, res_expected)
# expect no result
res = api.get(name='i-do-not-exist')
res_expected = {
'name': None,
'transforms': None,
'product': None
}
eq_(res, res_expected)
# expect a failure
assert_raises(MissingArgumentError, api.get)
| 26.634146 | 69 | 0.547161 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Acidburn0zzz/socorro | socorro/unittest/external/postgresql/test_field.py | 2,184 | Python |
import mock
import unittest
from symstore import command_line
class TestMain(unittest.TestCase):
"""
test main()
"""
Z_ARGV = ["prog", "-z", "store_path", "file"]
@mock.patch("symstore.cab.compression_supported", False)
@mock.patch("sys.stderr")
def test_compression_not_supported(self, stderr):
with mock.patch("sys.argv", self.Z_ARGV):
self.assertRaises(SystemExit, command_line.main)
stderr.write.assert_called_once_with(
"gcab module not available, compression not supported\n")
| 26.47619 | 69 | 0.67446 | [
"MIT"
] | 2js855/symstore | tests/unit/test_command_line.py | 556 | Python |
from .products import Products
| 30 | 30 | 0.866667 | [
"MIT"
] | GG31/openfood-graphql-api | src/products/__init__.py | 30 | Python |
'''
Custom interpolation methods for representing approximations to functions.
It also includes wrapper classes to enforce standard methods across classes.
Each interpolation class must have a distance() method that compares itself to
another instance; this is used in HARK.core's solve() method to check for solution
convergence. The interpolator classes currently in this module inherit their
distance method from HARKobject.
'''
from __future__ import division, print_function
from __future__ import absolute_import
from builtins import range
import numpy as np
from .core import HARKobject
from copy import deepcopy
def _isscalar(x):
'''
    Check whether x is a scalar type or 0-dim.
Parameters
----------
x : anything
An input to be checked for scalar-ness.
Returns
-------
is_scalar : boolean
True if the input is a scalar, False otherwise.
'''
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class HARKinterpolator1D(HARKobject):
'''
A wrapper class for 1D interpolation methods in HARK.
'''
distance_criteria = []
def __call__(self,x):
'''
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
y : np.array or float
The interpolated function evaluated at x: y = f(x), with the same
shape as x.
'''
z = np.asarray(x)
return (self._evaluate(z.flatten())).reshape(z.shape)
def derivative(self,x):
'''
Evaluates the derivative of the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
dydx : np.array or float
The interpolated function's first derivative evaluated at x:
dydx = f'(x), with the same shape as x.
'''
z = np.asarray(x)
return (self._der(z.flatten())).reshape(z.shape)
def eval_with_derivative(self,x):
'''
Evaluates the interpolated function and its derivative at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
y : np.array or float
The interpolated function evaluated at x: y = f(x), with the same
shape as x.
dydx : np.array or float
The interpolated function's first derivative evaluated at x:
dydx = f'(x), with the same shape as x.
'''
z = np.asarray(x)
y, dydx = self._evalAndDer(z.flatten())
return y.reshape(z.shape), dydx.reshape(z.shape)
def _evaluate(self,x):
'''
Interpolated function evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _der(self,x):
'''
Interpolated function derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _evalAndDer(self,x):
'''
Interpolated function and derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
class HARKinterpolator2D(HARKobject):
'''
A wrapper class for 2D interpolation methods in HARK.
'''
distance_criteria = []
def __call__(self,x,y):
'''
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
fxy : np.array or float
The interpolated function evaluated at x,y: fxy = f(x,y), with the
same shape as x and y.
'''
xa = np.asarray(x)
ya = np.asarray(y)
return (self._evaluate(xa.flatten(),ya.flatten())).reshape(xa.shape)
def derivativeX(self,x,y):
'''
Evaluates the partial derivative of interpolated function with respect
to x (the first argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdx : np.array or float
The derivative of the interpolated function with respect to x, eval-
uated at x,y: dfdx = f_x(x,y), with the same shape as x and y.
'''
xa = np.asarray(x)
ya = np.asarray(y)
return (self._derX(xa.flatten(),ya.flatten())).reshape(xa.shape)
def derivativeY(self,x,y):
'''
Evaluates the partial derivative of interpolated function with respect
to y (the second argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdy : np.array or float
The derivative of the interpolated function with respect to y, eval-
uated at x,y: dfdx = f_y(x,y), with the same shape as x and y.
'''
xa = np.asarray(x)
ya = np.asarray(y)
return (self._derY(xa.flatten(),ya.flatten())).reshape(xa.shape)
def _evaluate(self,x,y):
'''
Interpolated function evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derX(self,x,y):
'''
Interpolated function x-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derY(self,x,y):
'''
Interpolated function y-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
class HARKinterpolator3D(HARKobject):
'''
A wrapper class for 3D interpolation methods in HARK.
'''
distance_criteria = []
def __call__(self,x,y,z):
'''
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
fxyz : np.array or float
The interpolated function evaluated at x,y,z: fxyz = f(x,y,z), with
the same shape as x, y, and z.
'''
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._evaluate(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)
def derivativeX(self,x,y,z):
'''
Evaluates the partial derivative of the interpolated function with respect
to x (the first argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdx : np.array or float
The derivative with respect to x of the interpolated function evaluated
at x,y,z: dfdx = f_x(x,y,z), with the same shape as x, y, and z.
'''
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derX(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)
def derivativeY(self,x,y,z):
'''
Evaluates the partial derivative of the interpolated function with respect
to y (the second argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdy : np.array or float
The derivative with respect to y of the interpolated function evaluated
at x,y,z: dfdy = f_y(x,y,z), with the same shape as x, y, and z.
'''
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derY(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)
def derivativeZ(self,x,y,z):
'''
Evaluates the partial derivative of the interpolated function with respect
to z (the third argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdz : np.array or float
The derivative with respect to z of the interpolated function evaluated
at x,y,z: dfdz = f_z(x,y,z), with the same shape as x, y, and z.
'''
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derZ(xa.flatten(),ya.flatten(),za.flatten())).reshape(xa.shape)
def _evaluate(self,x,y,z):
'''
Interpolated function evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derX(self,x,y,z):
'''
Interpolated function x-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derY(self,x,y,z):
'''
Interpolated function y-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derZ(self,x,y,z):
'''
Interpolated function y-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
class HARKinterpolator4D(HARKobject):
'''
A wrapper class for 4D interpolation methods in HARK.
'''
distance_criteria = []
def __call__(self,w,x,y,z):
'''
Evaluates the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
fwxyz : np.array or float
The interpolated function evaluated at w,x,y,z: fwxyz = f(w,x,y,z),
with the same shape as w, x, y, and z.
'''
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._evaluate(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)
def derivativeW(self,w,x,y,z):
'''
Evaluates the partial derivative with respect to w (the first argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdw : np.array or float
The derivative with respect to w of the interpolated function eval-
uated at w,x,y,z: dfdw = f_w(w,x,y,z), with the same shape as inputs.
'''
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derW(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)
def derivativeX(self,w,x,y,z):
'''
Evaluates the partial derivative with respect to x (the second argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdx : np.array or float
The derivative with respect to x of the interpolated function eval-
uated at w,x,y,z: dfdx = f_x(w,x,y,z), with the same shape as inputs.
'''
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derX(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)
def derivativeY(self,w,x,y,z):
'''
Evaluates the partial derivative with respect to y (the third argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdy : np.array or float
The derivative with respect to y of the interpolated function eval-
uated at w,x,y,z: dfdy = f_y(w,x,y,z), with the same shape as inputs.
'''
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derY(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)
def derivativeZ(self,w,x,y,z):
'''
Evaluates the partial derivative with respect to z (the fourth argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdz : np.array or float
The derivative with respect to z of the interpolated function eval-
uated at w,x,y,z: dfdz = f_z(w,x,y,z), with the same shape as inputs.
'''
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return (self._derZ(wa.flatten(),xa.flatten(),ya.flatten(),za.flatten())).reshape(wa.shape)
def _evaluate(self,w,x,y,z):
'''
Interpolated function evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derW(self,w,x,y,z):
'''
Interpolated function w-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derX(self,w,x,y,z):
'''
Interpolated function w-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derY(self,w,x,y,z):
'''
Interpolated function w-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
def _derZ(self,w,x,y,z):
'''
Interpolated function w-derivative evaluator, to be defined in subclasses.
'''
raise NotImplementedError()
class IdentityFunction(HARKobject):
'''
A fairly trivial interpolator that simply returns one of its arguments. Useful for avoiding
numeric error in extreme cases.
'''
distance_criteria = ['i_dim']
def __init__(self,i_dim=0,n_dims=1):
'''
Constructor for a new IdentityFunction.
Parameters
----------
i_dim : int
Index of the dimension on which the identity is defined. f(*x) = x[i]
n_dims : int
Total number of input dimensions for this function.
Returns
-------
None
'''
self.i_dim = i_dim
self.n_dims = n_dims
def __call__(self,*args):
'''
Evaluate the identity function.
'''
return args[self.i_dim]
def derivative(self,*args):
'''
Returns the derivative of the function with respect to the first dimension.
'''
        if self.i_dim == 0:
            return np.ones_like(args[0])
        else:
            return np.zeros_like(args[0])
def derivativeX(self,*args):
'''
Returns the derivative of the function with respect to the X dimension.
This is the first input whenever n_dims < 4 and the second input otherwise.
'''
if self.n_dims >= 4:
j = 1
else:
j = 0
        if self.i_dim == j:
            return np.ones_like(args[0])
        else:
            return np.zeros_like(args[0])
def derivativeY(self,*args):
'''
Returns the derivative of the function with respect to the Y dimension.
This is the second input whenever n_dims < 4 and the third input otherwise.
'''
if self.n_dims >= 4:
j = 2
else:
j = 1
        if self.i_dim == j:
            return np.ones_like(args[0])
        else:
            return np.zeros_like(args[0])
def derivativeZ(self,*args):
'''
Returns the derivative of the function with respect to the Z dimension.
This is the third input whenever n_dims < 4 and the fourth input otherwise.
'''
if self.n_dims >= 4:
j = 3
else:
j = 2
        if self.i_dim == j:
            return np.ones_like(args[0])
        else:
            return np.zeros_like(args[0])
def derivativeW(self,*args):
'''
Returns the derivative of the function with respect to the W dimension.
This should only exist when n_dims >= 4.
'''
if self.n_dims >= 4:
j = 0
else:
            raise ValueError("Derivative with respect to W can't be called when n_dims < 4!")
        if self.i_dim == j:
            return np.ones_like(args[0])
        else:
            return np.zeros_like(args[0])
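# --- Illustrative sketch (added for clarity; not part of the original file) ---
# IdentityFunction simply echoes one of its arguments, e.g.:
#
#     f = IdentityFunction(i_dim=1, n_dims=2)                 # f(x, y) = y
#     f(np.array([1., 2.]), np.array([3., 4.]))               # -> array([3., 4.])
#     f.derivativeY(np.array([1., 2.]), np.array([3., 4.]))   # -> array([1., 1.])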
class ConstantFunction(HARKobject):
'''
A class for representing trivial functions that return the same real output for any input. This
is convenient for models where an object might be a (non-trivial) function, but in some variations
that object is just a constant number. Rather than needing to make a (Bi/Tri/Quad)-
LinearInterpolation with trivial state grids and the same f_value in every entry, ConstantFunction
allows the user to quickly make a constant/trivial function. This comes up, e.g., in models
with endogenous pricing of insurance contracts; a contract's premium might depend on some state
variables of the individual, but in some variations the premium of a contract is just a number.
'''
convergence_criteria = ['value']
def __init__(self,value):
'''
Make a new ConstantFunction object.
Parameters
----------
value : float
The constant value that the function returns.
Returns
-------
None
'''
self.value = float(value)
def __call__(self,*args):
'''
Evaluate the constant function. The first input must exist and should be an array.
Returns an array of identical shape to args[0] (if it exists).
'''
if len(args) > 0: # If there is at least one argument, return appropriately sized array
if _isscalar(args[0]):
return self.value
else:
shape = args[0].shape
return self.value*np.ones(shape)
else: # Otherwise, return a single instance of the constant value
return self.value
def _der(self,*args):
'''
Evaluate the derivative of the function. The first input must exist and should be an array.
Returns an array of identical shape to args[0] (if it exists). This is an array of zeros.
'''
if len(args) > 0:
if _isscalar(args[0]):
return 0.0
else:
shape = args[0].shape
return np.zeros(shape)
else:
return 0.0
# All other derivatives are also zero everywhere, so these methods just point to derivative
derivative = _der
derivativeX = derivative
derivativeY = derivative
derivativeZ = derivative
derivativeW = derivative
    derivativeXX = derivative
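# --- Illustrative sketch (added for clarity; not part of the original file) ---
# ConstantFunction returns its value for any input shape, e.g.:
#
#     f = ConstantFunction(2.5)
#     f(np.zeros(3))              # -> array([2.5, 2.5, 2.5])
#     f.derivative(np.zeros(3))   # -> array([0., 0., 0.])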
class LinearInterp(HARKinterpolator1D):
'''
A "from scratch" 1D linear interpolation class. Allows for linear or decay
extrapolation (approaching a limiting linear function from below).
'''
distance_criteria = ['x_list','y_list']
def __init__(self,x_list,y_list,intercept_limit=None,slope_limit=None,lower_extrap=False):
'''
The interpolation constructor to make a new linear spline interpolation.
Parameters
----------
x_list : np.array
List of x values composing the grid.
y_list : np.array
List of y values, representing f(x) at the points in x_list.
intercept_limit : float
Intercept of limiting linear function.
slope_limit : float
Slope of limiting linear function.
lower_extrap : boolean
Indicator for whether lower extrapolation is allowed. False means
f(x) = NaN for x < min(x_list); True means linear extrapolation.
Returns
-------
new instance of LinearInterp
NOTE: When no input is given for the limiting linear function, linear
extrapolation is used above the highest gridpoint.
'''
# Make the basic linear spline interpolation
self.x_list = np.array(x_list)
self.y_list = np.array(y_list)
self.lower_extrap = lower_extrap
self.x_n = self.x_list.size
# Make a decay extrapolation
if intercept_limit is not None and slope_limit is not None:
slope_at_top = (y_list[-1] - y_list[-2])/(x_list[-1] - x_list[-2])
level_diff = intercept_limit + slope_limit*x_list[-1] - y_list[-1]
slope_diff = slope_limit - slope_at_top
self.decay_extrap_A = level_diff
self.decay_extrap_B = -slope_diff/level_diff
self.intercept_limit = intercept_limit
self.slope_limit = slope_limit
self.decay_extrap = True
else:
self.decay_extrap = False
def _evalOrDer(self,x,_eval,_Der):
'''
Returns the level and/or first derivative of the function at each value in
        x. Only called internally by HARKinterpolator1D.eval_with_derivative (etc).
Parameters
----------
        x : scalar or np.array
            Set of points where we want to evaluate the interpolated function and/or its derivative.
        _eval : boolean
            Indicator for whether to evaluate the level of the interpolated function.
_Der : boolean
Indicator for whether to evaluate the derivative of the interpolated function.
Returns
-------
A list including the level and/or derivative of the interpolated function where requested.
'''
i = np.maximum(np.searchsorted(self.x_list[:-1],x),1)
alpha = (x-self.x_list[i-1])/(self.x_list[i]-self.x_list[i-1])
if _eval:
y = (1.-alpha)*self.y_list[i-1] + alpha*self.y_list[i]
if _Der:
dydx = (self.y_list[i] - self.y_list[i-1])/(self.x_list[i] - self.x_list[i-1])
if not self.lower_extrap:
below_lower_bound = x < self.x_list[0]
if _eval:
y[below_lower_bound] = np.nan
if _Der:
dydx[below_lower_bound] = np.nan
if self.decay_extrap:
above_upper_bound = x > self.x_list[-1]
x_temp = x[above_upper_bound] - self.x_list[-1]
if _eval:
y[above_upper_bound] = self.intercept_limit + \
self.slope_limit*x[above_upper_bound] - \
self.decay_extrap_A*np.exp(-self.decay_extrap_B*x_temp)
if _Der:
dydx[above_upper_bound] = self.slope_limit + \
self.decay_extrap_B*self.decay_extrap_A*\
np.exp(-self.decay_extrap_B*x_temp)
output = []
if _eval:
output += [y,]
if _Der:
output += [dydx,]
return output
    def _evaluate(self,x,return_indices=False):
'''
Returns the level of the interpolated function at each value in x. Only
called internally by HARKinterpolator1D.__call__ (etc).
'''
return self._evalOrDer(x,True,False)[0]
def _der(self,x):
'''
Returns the first derivative of the interpolated function at each value
in x. Only called internally by HARKinterpolator1D.derivative (etc).
'''
return self._evalOrDer(x,False,True)[0]
def _evalAndDer(self,x):
'''
Returns the level and first derivative of the function at each value in
        x. Only called internally by HARKinterpolator1D.eval_with_derivative (etc).
'''
y,dydx = self._evalOrDer(x,True,True)
return y,dydx
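# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A minimal LinearInterp usage example:
#
#     f = LinearInterp(np.array([0., 1., 2.]), np.array([0., 1., 4.]))
#     f(np.array([0.5, 1.5]))             # -> array([0.5, 2.5])
#     f.derivative(np.array([0.5, 1.5]))  # -> array([1., 3.])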
class CubicInterp(HARKinterpolator1D):
'''
An interpolating function using piecewise cubic splines. Matches level and
slope of 1D function at gridpoints, smoothly interpolating in between.
Extrapolation above highest gridpoint approaches a limiting linear function
if desired (linear extrapolation also enabled.)
'''
distance_criteria = ['x_list','y_list','dydx_list']
def __init__(self,x_list,y_list,dydx_list,intercept_limit=None,slope_limit=None,lower_extrap=False):
'''
The interpolation constructor to make a new cubic spline interpolation.
Parameters
----------
x_list : np.array
List of x values composing the grid.
y_list : np.array
List of y values, representing f(x) at the points in x_list.
dydx_list : np.array
List of dydx values, representing f'(x) at the points in x_list
intercept_limit : float
Intercept of limiting linear function.
slope_limit : float
Slope of limiting linear function.
lower_extrap : boolean
Indicator for whether lower extrapolation is allowed. False means
f(x) = NaN for x < min(x_list); True means linear extrapolation.
Returns
-------
new instance of CubicInterp
NOTE: When no input is given for the limiting linear function, linear
extrapolation is used above the highest gridpoint.
'''
self.x_list = np.asarray(x_list)
self.y_list = np.asarray(y_list)
self.dydx_list = np.asarray(dydx_list)
self.n = len(x_list)
# Define lower extrapolation as linear function (or just NaN)
if lower_extrap:
self.coeffs = [[y_list[0],dydx_list[0],0,0]]
else:
self.coeffs = [[np.nan,np.nan,np.nan,np.nan]]
# Calculate interpolation coefficients on segments mapped to [0,1]
for i in range(self.n-1):
x0 = x_list[i]
y0 = y_list[i]
x1 = x_list[i+1]
y1 = y_list[i+1]
Span = x1 - x0
dydx0 = dydx_list[i]*Span
dydx1 = dydx_list[i+1]*Span
            temp = [y0, dydx0, 3*(y1 - y0) - 2*dydx0 - dydx1, 2*(y0 - y1) + dydx0 + dydx1]
self.coeffs.append(temp)
# Calculate extrapolation coefficients as a decay toward limiting function y = mx+b
if slope_limit is None and intercept_limit is None:
slope_limit = dydx_list[-1]
intercept_limit = y_list[-1] - slope_limit*x_list[-1]
        # gap/slope between the limiting line y = slope_limit*x + intercept_limit
        # and the function at the last gridpoint
        gap = slope_limit*x_list[-1] + intercept_limit - y_list[-1]
        slope = slope_limit - dydx_list[self.n-1]
if (gap != 0) and (slope <= 0):
temp = [intercept_limit, slope_limit, gap, slope/gap]
elif slope > 0:
            temp = [intercept_limit, slope_limit, 0, 0]  # decay extrapolation is ill-defined when the limiting slope is above the top-segment slope; fall back to the limiting line
else:
temp = [intercept_limit, slope_limit, gap, 0]
self.coeffs.append(temp)
self.coeffs = np.array(self.coeffs)
def _evaluate(self,x):
'''
Returns the level of the interpolated function at each value in x. Only
called internally by HARKinterpolator1D.__call__ (etc).
'''
if _isscalar(x):
pos = np.searchsorted(self.x_list,x)
if pos == 0:
y = self.coeffs[0,0] + self.coeffs[0,1]*(x - self.x_list[0])
elif (pos < self.n):
alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])
y = self.coeffs[pos,0] + alpha*(self.coeffs[pos,1] + alpha*(self.coeffs[pos,2] + alpha*self.coeffs[pos,3]))
else:
alpha = x - self.x_list[self.n-1]
y = self.coeffs[pos,0] + x*self.coeffs[pos,1] - self.coeffs[pos,2]*np.exp(alpha*self.coeffs[pos,3])
else:
m = len(x)
pos = np.searchsorted(self.x_list,x)
y = np.zeros(m)
if y.size > 0:
out_bot = pos == 0
out_top = pos == self.n
in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
# Do the "in bounds" evaluation points
i = pos[in_bnds]
coeffs_in = self.coeffs[i,:]
alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
y[in_bnds] = coeffs_in[:,0] + alpha*(coeffs_in[:,1] + alpha*(coeffs_in[:,2] + alpha*coeffs_in[:,3]))
# Do the "out of bounds" evaluation points
y[out_bot] = self.coeffs[0,0] + self.coeffs[0,1]*(x[out_bot] - self.x_list[0])
alpha = x[out_top] - self.x_list[self.n-1]
y[out_top] = self.coeffs[self.n,0] + x[out_top]*self.coeffs[self.n,1] - self.coeffs[self.n,2]*np.exp(alpha*self.coeffs[self.n,3])
return y
def _der(self,x):
'''
Returns the first derivative of the interpolated function at each value
in x. Only called internally by HARKinterpolator1D.derivative (etc).
'''
if _isscalar(x):
pos = np.searchsorted(self.x_list,x)
if pos == 0:
dydx = self.coeffs[0,1]
elif (pos < self.n):
alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])
dydx = (self.coeffs[pos,1] + alpha*(2*self.coeffs[pos,2] + alpha*3*self.coeffs[pos,3]))/(self.x_list[pos] - self.x_list[pos-1])
else:
alpha = x - self.x_list[self.n-1]
dydx = self.coeffs[pos,1] - self.coeffs[pos,2]*self.coeffs[pos,3]*np.exp(alpha*self.coeffs[pos,3])
else:
m = len(x)
pos = np.searchsorted(self.x_list,x)
dydx = np.zeros(m)
if dydx.size > 0:
out_bot = pos == 0
out_top = pos == self.n
in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
# Do the "in bounds" evaluation points
i = pos[in_bnds]
coeffs_in = self.coeffs[i,:]
alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
dydx[in_bnds] = (coeffs_in[:,1] + alpha*(2*coeffs_in[:,2] + alpha*3*coeffs_in[:,3]))/(self.x_list[i] - self.x_list[i-1])
# Do the "out of bounds" evaluation points
dydx[out_bot] = self.coeffs[0,1]
alpha = x[out_top] - self.x_list[self.n-1]
dydx[out_top] = self.coeffs[self.n,1] - self.coeffs[self.n,2]*self.coeffs[self.n,3]*np.exp(alpha*self.coeffs[self.n,3])
return dydx
def _evalAndDer(self,x):
'''
Returns the level and first derivative of the function at each value in
        x. Only called internally by HARKinterpolator1D.eval_with_derivative (etc).
'''
if _isscalar(x):
pos = np.searchsorted(self.x_list,x)
if pos == 0:
y = self.coeffs[0,0] + self.coeffs[0,1]*(x - self.x_list[0])
dydx = self.coeffs[0,1]
elif (pos < self.n):
alpha = (x - self.x_list[pos-1])/(self.x_list[pos] - self.x_list[pos-1])
y = self.coeffs[pos,0] + alpha*(self.coeffs[pos,1] + alpha*(self.coeffs[pos,2] + alpha*self.coeffs[pos,3]))
dydx = (self.coeffs[pos,1] + alpha*(2*self.coeffs[pos,2] + alpha*3*self.coeffs[pos,3]))/(self.x_list[pos] - self.x_list[pos-1])
else:
alpha = x - self.x_list[self.n-1]
y = self.coeffs[pos,0] + x*self.coeffs[pos,1] - self.coeffs[pos,2]*np.exp(alpha*self.coeffs[pos,3])
dydx = self.coeffs[pos,1] - self.coeffs[pos,2]*self.coeffs[pos,3]*np.exp(alpha*self.coeffs[pos,3])
else:
m = len(x)
pos = np.searchsorted(self.x_list,x)
y = np.zeros(m)
dydx = np.zeros(m)
if y.size > 0:
out_bot = pos == 0
out_top = pos == self.n
in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
# Do the "in bounds" evaluation points
i = pos[in_bnds]
coeffs_in = self.coeffs[i,:]
alpha = (x[in_bnds] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
y[in_bnds] = coeffs_in[:,0] + alpha*(coeffs_in[:,1] + alpha*(coeffs_in[:,2] + alpha*coeffs_in[:,3]))
dydx[in_bnds] = (coeffs_in[:,1] + alpha*(2*coeffs_in[:,2] + alpha*3*coeffs_in[:,3]))/(self.x_list[i] - self.x_list[i-1])
# Do the "out of bounds" evaluation points
y[out_bot] = self.coeffs[0,0] + self.coeffs[0,1]*(x[out_bot] - self.x_list[0])
dydx[out_bot] = self.coeffs[0,1]
alpha = x[out_top] - self.x_list[self.n-1]
y[out_top] = self.coeffs[self.n,0] + x[out_top]*self.coeffs[self.n,1] - self.coeffs[self.n,2]*np.exp(alpha*self.coeffs[self.n,3])
dydx[out_top] = self.coeffs[self.n,1] - self.coeffs[self.n,2]*self.coeffs[self.n,3]*np.exp(alpha*self.coeffs[self.n,3])
return y, dydx
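# --- Illustrative note (added for clarity; not part of the original file) ---
# Each interior segment of CubicInterp stores coefficients [a, b, c, d] of a
# cubic in the normalized coordinate alpha = (x - x0)/(x1 - x0):
#
#     y(alpha) = a + alpha*(b + alpha*(c + alpha*d))
#
# a Hermite form matching (y0, dydx0) at alpha = 0 and (y1, dydx1) at alpha = 1,
# with the slopes pre-scaled by the segment width x1 - x0.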
class BilinearInterp(HARKinterpolator2D):
'''
Bilinear full (or tensor) grid interpolation of a function f(x,y).
'''
distance_criteria = ['x_list','y_list','f_values']
def __init__(self,f_values,x_list,y_list,xSearchFunc=None,ySearchFunc=None):
'''
Constructor to make a new bilinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (x_n,y_n) such that f_values[i,j] = f(x_list[i],y_list[j])
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
Returns
-------
new instance of BilinearInterp
'''
self.f_values = f_values
self.x_list = x_list
self.y_list = y_list
self.x_n = x_list.size
self.y_n = y_list.size
if xSearchFunc is None:
xSearchFunc = np.searchsorted
if ySearchFunc is None:
ySearchFunc = np.searchsorted
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
def _evaluate(self,x,y):
'''
Returns the level of the interpolated function at each value in x,y.
Only called internally by HARKinterpolator2D.__call__ (etc).
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
f = (
(1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1]
+ (1-alpha)*beta*self.f_values[x_pos-1,y_pos]
+ alpha*(1-beta)*self.f_values[x_pos,y_pos-1]
+ alpha*beta*self.f_values[x_pos,y_pos])
return f
def _derX(self,x,y):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdx = (
((1-beta)*self.f_values[x_pos,y_pos-1]
+ beta*self.f_values[x_pos,y_pos]) -
((1-beta)*self.f_values[x_pos-1,y_pos-1]
+ beta*self.f_values[x_pos-1,y_pos]))/(self.x_list[x_pos] - self.x_list[x_pos-1])
return dfdx
def _derY(self,x,y):
'''
Returns the derivative with respect to y of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
dfdy = (
((1-alpha)*self.f_values[x_pos-1,y_pos]
+ alpha*self.f_values[x_pos,y_pos]) -
((1-alpha)*self.f_values[x_pos-1,y_pos-1]
+ alpha*self.f_values[x_pos,y_pos-1]))/(self.y_list[y_pos] - self.y_list[y_pos-1])
return dfdy
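# --- Illustrative note (added for clarity; not part of the original file) ---
# The weights used in BilinearInterp follow the standard bilinear formula
#
#     f(x, y) ~ (1-a)*(1-b)*f00 + (1-a)*b*f01 + a*(1-b)*f10 + a*b*f11
#
# where a and b are the fractional positions of x and y inside the bracketing
# grid cell, and f00..f11 are the function values at the cell's corners.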
class TrilinearInterp(HARKinterpolator3D):
'''
Trilinear full (or tensor) grid interpolation of a function f(x,y,z).
'''
distance_criteria = ['f_values','x_list','y_list','z_list']
def __init__(self,f_values,x_list,y_list,z_list,xSearchFunc=None,ySearchFunc=None,zSearchFunc=None):
'''
Constructor to make a new trilinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (x_n,y_n,z_n) such that f_values[i,j,k] =
f(x_list[i],y_list[j],z_list[k])
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
z_list : numpy.array
An array of z values, with length designated z_n.
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
zSearchFunc : function
An optional function that returns the reference location for z values:
indices = zSearchFunc(z_list,z). Default is np.searchsorted
Returns
-------
new instance of TrilinearInterp
'''
self.f_values = f_values
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if xSearchFunc is None:
xSearchFunc = np.searchsorted
if ySearchFunc is None:
ySearchFunc = np.searchsorted
if zSearchFunc is None:
zSearchFunc = np.searchsorted
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
def _evaluate(self,x,y,z):
'''
Returns the level of the interpolated function at each value in x,y,z.
Only called internally by HARKinterpolator3D.__call__ (etc).
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
f = (
(1-alpha)*(1-beta)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]
+ (1-alpha)*(1-beta)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]
+ (1-alpha)*beta*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]
+ (1-alpha)*beta*gamma*self.f_values[x_pos-1,y_pos,z_pos]
+ alpha*(1-beta)*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]
+ alpha*(1-beta)*gamma*self.f_values[x_pos,y_pos-1,z_pos]
+ alpha*beta*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]
+ alpha*beta*gamma*self.f_values[x_pos,y_pos,z_pos])
return f
def _derX(self,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdx = (
( (1-beta)*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]
+ (1-beta)*gamma*self.f_values[x_pos,y_pos-1,z_pos]
+ beta*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]
+ beta*gamma*self.f_values[x_pos,y_pos,z_pos]) -
( (1-beta)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]
+ (1-beta)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]
+ beta*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]
+ beta*gamma*self.f_values[x_pos-1,y_pos,z_pos]))/(self.x_list[x_pos] - self.x_list[x_pos-1])
return dfdx
def _derY(self,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdy = (
( (1-alpha)*(1-gamma)*self.f_values[x_pos-1,y_pos,z_pos-1]
+ (1-alpha)*gamma*self.f_values[x_pos-1,y_pos,z_pos]
+ alpha*(1-gamma)*self.f_values[x_pos,y_pos,z_pos-1]
+ alpha*gamma*self.f_values[x_pos,y_pos,z_pos]) -
( (1-alpha)*(1-gamma)*self.f_values[x_pos-1,y_pos-1,z_pos-1]
+ (1-alpha)*gamma*self.f_values[x_pos-1,y_pos-1,z_pos]
+ alpha*(1-gamma)*self.f_values[x_pos,y_pos-1,z_pos-1]
+ alpha*gamma*self.f_values[x_pos,y_pos-1,z_pos]))/(self.y_list[y_pos] - self.y_list[y_pos-1])
return dfdy
def _derZ(self,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
'''
if _isscalar(x):
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdz = (
( (1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1,z_pos]
+ (1-alpha)*beta*self.f_values[x_pos-1,y_pos,z_pos]
+ alpha*(1-beta)*self.f_values[x_pos,y_pos-1,z_pos]
+ alpha*beta*self.f_values[x_pos,y_pos,z_pos]) -
( (1-alpha)*(1-beta)*self.f_values[x_pos-1,y_pos-1,z_pos-1]
+ (1-alpha)*beta*self.f_values[x_pos-1,y_pos,z_pos-1]
+ alpha*(1-beta)*self.f_values[x_pos,y_pos-1,z_pos-1]
+ alpha*beta*self.f_values[x_pos,y_pos,z_pos-1]))/(self.z_list[z_pos] - self.z_list[z_pos-1])
return dfdz
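# Usage sketch for TrilinearInterp: trilinear interpolation reproduces affine
# functions exactly, so f(x,y,z) = x + 2y + 3z on a tensor grid is a convenient
# self-check. Wrapped in a function so importing the module has no side effects;
# the _demo_* name is illustrative only and not part of the public API.
def _demo_TrilinearInterp():
    x_grid = np.linspace(0.0, 1.0, 5)
    y_grid = np.linspace(0.0, 2.0, 6)
    z_grid = np.linspace(0.0, 3.0, 7)
    X, Y, Z = np.meshgrid(x_grid, y_grid, z_grid, indexing='ij')
    f = TrilinearInterp(X + 2.0*Y + 3.0*Z, x_grid, y_grid, z_grid)
    x, y, z = np.array([0.31]), np.array([1.17]), np.array([2.42])
    assert np.isclose(f(x, y, z), 0.31 + 2.0*1.17 + 3.0*2.42)
    assert np.isclose(f.derivativeY(x, y, z), 2.0) # exact for affine f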
class QuadlinearInterp(HARKinterpolator4D):
'''
Quadlinear full (or tensor) grid interpolation of a function f(w,x,y,z).
'''
distance_criteria = ['f_values','w_list','x_list','y_list','z_list']
def __init__(self,f_values,w_list,x_list,y_list,z_list,wSearchFunc=None,xSearchFunc=None,ySearchFunc=None,zSearchFunc=None):
'''
Constructor to make a new quadlinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (w_n,x_n,y_n,z_n) such that f_values[i,j,k,l] =
f(w_list[i],x_list[j],y_list[k],z_list[l])
w_list : numpy.array
            An array of w values, with length designated w_n.
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
z_list : numpy.array
An array of z values, with length designated z_n.
wSearchFunc : function
An optional function that returns the reference location for w values:
indices = wSearchFunc(w_list,w). Default is np.searchsorted
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
zSearchFunc : function
An optional function that returns the reference location for z values:
indices = zSearchFunc(z_list,z). Default is np.searchsorted
Returns
-------
new instance of QuadlinearInterp
'''
self.f_values = f_values
self.w_list = w_list
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.w_n = w_list.size
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if wSearchFunc is None:
wSearchFunc = np.searchsorted
if xSearchFunc is None:
xSearchFunc = np.searchsorted
if ySearchFunc is None:
ySearchFunc = np.searchsorted
if zSearchFunc is None:
zSearchFunc = np.searchsorted
self.wSearchFunc = wSearchFunc
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
def _evaluate(self,w,x,y,z):
'''
        Returns the level of the interpolated function at each value in w,x,y,z.
Only called internally by HARKinterpolator4D.__call__ (etc).
'''
if _isscalar(w):
w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
w_pos = self.wSearchFunc(self.w_list,w)
w_pos[w_pos < 1] = 1
w_pos[w_pos > self.w_n-1] = self.w_n-1
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
i = w_pos # for convenience
j = x_pos
k = y_pos
l = z_pos
alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])
beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])
gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])
delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])
f = (
(1-alpha)*((1-beta)*((1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]
+ (1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]
+ gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]
+ gamma*delta*self.f_values[i-1,j-1,k,l])
+ beta*((1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]
+ (1-gamma)*delta*self.f_values[i-1,j,k-1,l]
+ gamma*(1-delta)*self.f_values[i-1,j,k,l-1]
+ gamma*delta*self.f_values[i-1,j,k,l]))
+ alpha*((1-beta)*((1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]
+ (1-gamma)*delta*self.f_values[i,j-1,k-1,l]
+ gamma*(1-delta)*self.f_values[i,j-1,k,l-1]
+ gamma*delta*self.f_values[i,j-1,k,l])
+ beta*((1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]
+ (1-gamma)*delta*self.f_values[i,j,k-1,l]
+ gamma*(1-delta)*self.f_values[i,j,k,l-1]
+ gamma*delta*self.f_values[i,j,k,l])))
return f
def _derW(self,w,x,y,z):
'''
Returns the derivative with respect to w of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.
'''
if _isscalar(w):
w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
w_pos = self.wSearchFunc(self.w_list,w)
w_pos[w_pos < 1] = 1
w_pos[w_pos > self.w_n-1] = self.w_n-1
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
i = w_pos # for convenience
j = x_pos
k = y_pos
l = z_pos
beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])
gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])
delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])
dfdw = (
( (1-beta)*(1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]
+ (1-beta)*(1-gamma)*delta*self.f_values[i,j-1,k-1,l]
+ (1-beta)*gamma*(1-delta)*self.f_values[i,j-1,k,l-1]
+ (1-beta)*gamma*delta*self.f_values[i,j-1,k,l]
+ beta*(1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]
+ beta*(1-gamma)*delta*self.f_values[i,j,k-1,l]
+ beta*gamma*(1-delta)*self.f_values[i,j,k,l-1]
+ beta*gamma*delta*self.f_values[i,j,k,l] ) -
( (1-beta)*(1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]
+ (1-beta)*(1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]
+ (1-beta)*gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]
+ (1-beta)*gamma*delta*self.f_values[i-1,j-1,k,l]
+ beta*(1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]
+ beta*(1-gamma)*delta*self.f_values[i-1,j,k-1,l]
+ beta*gamma*(1-delta)*self.f_values[i-1,j,k,l-1]
+ beta*gamma*delta*self.f_values[i-1,j,k,l] )
)/(self.w_list[i] - self.w_list[i-1])
return dfdw
def _derX(self,w,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.
'''
if _isscalar(w):
w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
w_pos = self.wSearchFunc(self.w_list,w)
w_pos[w_pos < 1] = 1
w_pos[w_pos > self.w_n-1] = self.w_n-1
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
i = w_pos # for convenience
j = x_pos
k = y_pos
l = z_pos
alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])
gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])
delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])
dfdx = (
( (1-alpha)*(1-gamma)*(1-delta)*self.f_values[i-1,j,k-1,l-1]
+ (1-alpha)*(1-gamma)*delta*self.f_values[i-1,j,k-1,l]
+ (1-alpha)*gamma*(1-delta)*self.f_values[i-1,j,k,l-1]
+ (1-alpha)*gamma*delta*self.f_values[i-1,j,k,l]
+ alpha*(1-gamma)*(1-delta)*self.f_values[i,j,k-1,l-1]
+ alpha*(1-gamma)*delta*self.f_values[i,j,k-1,l]
+ alpha*gamma*(1-delta)*self.f_values[i,j,k,l-1]
+ alpha*gamma*delta*self.f_values[i,j,k,l] ) -
( (1-alpha)*(1-gamma)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]
+ (1-alpha)*(1-gamma)*delta*self.f_values[i-1,j-1,k-1,l]
+ (1-alpha)*gamma*(1-delta)*self.f_values[i-1,j-1,k,l-1]
+ (1-alpha)*gamma*delta*self.f_values[i-1,j-1,k,l]
+ alpha*(1-gamma)*(1-delta)*self.f_values[i,j-1,k-1,l-1]
+ alpha*(1-gamma)*delta*self.f_values[i,j-1,k-1,l]
+ alpha*gamma*(1-delta)*self.f_values[i,j-1,k,l-1]
+ alpha*gamma*delta*self.f_values[i,j-1,k,l] )
)/(self.x_list[j] - self.x_list[j-1])
return dfdx
def _derY(self,w,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeY.
'''
if _isscalar(w):
w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
w_pos = self.wSearchFunc(self.w_list,w)
w_pos[w_pos < 1] = 1
w_pos[w_pos > self.w_n-1] = self.w_n-1
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
i = w_pos # for convenience
j = x_pos
k = y_pos
l = z_pos
alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])
beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])
delta = (z - self.z_list[l-1])/(self.z_list[l] - self.z_list[l-1])
dfdy = (
( (1-alpha)*(1-beta)*(1-delta)*self.f_values[i-1,j-1,k,l-1]
+ (1-alpha)*(1-beta)*delta*self.f_values[i-1,j-1,k,l]
+ (1-alpha)*beta*(1-delta)*self.f_values[i-1,j,k,l-1]
+ (1-alpha)*beta*delta*self.f_values[i-1,j,k,l]
+ alpha*(1-beta)*(1-delta)*self.f_values[i,j-1,k,l-1]
+ alpha*(1-beta)*delta*self.f_values[i,j-1,k,l]
+ alpha*beta*(1-delta)*self.f_values[i,j,k,l-1]
+ alpha*beta*delta*self.f_values[i,j,k,l] ) -
( (1-alpha)*(1-beta)*(1-delta)*self.f_values[i-1,j-1,k-1,l-1]
+ (1-alpha)*(1-beta)*delta*self.f_values[i-1,j-1,k-1,l]
+ (1-alpha)*beta*(1-delta)*self.f_values[i-1,j,k-1,l-1]
+ (1-alpha)*beta*delta*self.f_values[i-1,j,k-1,l]
+ alpha*(1-beta)*(1-delta)*self.f_values[i,j-1,k-1,l-1]
+ alpha*(1-beta)*delta*self.f_values[i,j-1,k-1,l]
+ alpha*beta*(1-delta)*self.f_values[i,j,k-1,l-1]
+ alpha*beta*delta*self.f_values[i,j,k-1,l] )
)/(self.y_list[k] - self.y_list[k-1])
return dfdy
def _derZ(self,w,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeZ.
'''
if _isscalar(w):
w_pos = max(min(self.wSearchFunc(self.w_list,w),self.w_n-1),1)
x_pos = max(min(self.xSearchFunc(self.x_list,x),self.x_n-1),1)
y_pos = max(min(self.ySearchFunc(self.y_list,y),self.y_n-1),1)
z_pos = max(min(self.zSearchFunc(self.z_list,z),self.z_n-1),1)
else:
w_pos = self.wSearchFunc(self.w_list,w)
w_pos[w_pos < 1] = 1
w_pos[w_pos > self.w_n-1] = self.w_n-1
x_pos = self.xSearchFunc(self.x_list,x)
x_pos[x_pos < 1] = 1
x_pos[x_pos > self.x_n-1] = self.x_n-1
y_pos = self.ySearchFunc(self.y_list,y)
y_pos[y_pos < 1] = 1
y_pos[y_pos > self.y_n-1] = self.y_n-1
z_pos = self.zSearchFunc(self.z_list,z)
z_pos[z_pos < 1] = 1
z_pos[z_pos > self.z_n-1] = self.z_n-1
i = w_pos # for convenience
j = x_pos
k = y_pos
l = z_pos
alpha = (w - self.w_list[i-1])/(self.w_list[i] - self.w_list[i-1])
beta = (x - self.x_list[j-1])/(self.x_list[j] - self.x_list[j-1])
gamma = (y - self.y_list[k-1])/(self.y_list[k] - self.y_list[k-1])
dfdz = (
( (1-alpha)*(1-beta)*(1-gamma)*self.f_values[i-1,j-1,k-1,l]
+ (1-alpha)*(1-beta)*gamma*self.f_values[i-1,j-1,k,l]
+ (1-alpha)*beta*(1-gamma)*self.f_values[i-1,j,k-1,l]
+ (1-alpha)*beta*gamma*self.f_values[i-1,j,k,l]
+ alpha*(1-beta)*(1-gamma)*self.f_values[i,j-1,k-1,l]
+ alpha*(1-beta)*gamma*self.f_values[i,j-1,k,l]
+ alpha*beta*(1-gamma)*self.f_values[i,j,k-1,l]
+ alpha*beta*gamma*self.f_values[i,j,k,l] ) -
( (1-alpha)*(1-beta)*(1-gamma)*self.f_values[i-1,j-1,k-1,l-1]
+ (1-alpha)*(1-beta)*gamma*self.f_values[i-1,j-1,k,l-1]
+ (1-alpha)*beta*(1-gamma)*self.f_values[i-1,j,k-1,l-1]
+ (1-alpha)*beta*gamma*self.f_values[i-1,j,k,l-1]
+ alpha*(1-beta)*(1-gamma)*self.f_values[i,j-1,k-1,l-1]
+ alpha*(1-beta)*gamma*self.f_values[i,j-1,k,l-1]
+ alpha*beta*(1-gamma)*self.f_values[i,j,k-1,l-1]
+ alpha*beta*gamma*self.f_values[i,j,k,l-1] )
)/(self.z_list[l] - self.z_list[l-1])
return dfdz
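# Usage sketch for QuadlinearInterp: the same self-check idea in 4D, using the
# affine function f(w,x,y,z) = w + 2x + 3y + 4z, which quadlinear interpolation
# fits exactly. The _demo_* name is illustrative only.
def _demo_QuadlinearInterp():
    grid = np.linspace(0.0, 1.0, 4)
    W, X, Y, Z = np.meshgrid(grid, grid, grid, grid, indexing='ij')
    f = QuadlinearInterp(W + 2.0*X + 3.0*Y + 4.0*Z, grid, grid, grid, grid)
    w, x, y, z = (np.array([v]) for v in (0.3, 0.4, 0.5, 0.6))
    assert np.isclose(f(w, x, y, z), 0.3 + 2.0*0.4 + 3.0*0.5 + 4.0*0.6)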
class LowerEnvelope(HARKinterpolator1D):
'''
The lower envelope of a finite set of 1D functions, each of which can be of
any class that has the methods __call__, derivative, and eval_with_derivative.
Generally: it combines HARKinterpolator1Ds.
'''
distance_criteria = ['functions']
def __init__(self,*functions):
'''
        Constructor to make a new lower envelope interpolation.
Parameters
----------
*functions : function
Any number of real functions; often instances of HARKinterpolator1D
Returns
-------
new instance of LowerEnvelope
'''
self.functions = []
for function in functions:
self.functions.append(function)
self.funcCount = len(self.functions)
def _evaluate(self,x):
'''
Returns the level of the function at each value in x as the minimum among
all of the functions. Only called internally by HARKinterpolator1D.__call__.
'''
if _isscalar(x):
y = np.nanmin([f(x) for f in self.functions])
else:
m = len(x)
fx = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
fx[:,j] = self.functions[j](x)
y = np.nanmin(fx,axis=1)
return y
def _der(self,x):
'''
Returns the first derivative of the function at each value in x. Only
called internally by HARKinterpolator1D.derivative.
'''
y,dydx = self.eval_with_derivative(x)
return dydx # Sadly, this is the fastest / most convenient way...
def _evalAndDer(self,x):
'''
Returns the level and first derivative of the function at each value in
        x. Only called internally by HARKinterpolator1D.eval_with_derivative.
'''
m = len(x)
fx = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
fx[:,j] = self.functions[j](x)
fx[np.isnan(fx)] = np.inf
i = np.argmin(fx,axis=1)
y = fx[np.arange(m),i]
dydx = np.zeros_like(y)
for j in range(self.funcCount):
c = i == j
dydx[c] = self.functions[j].derivative(x[c])
return y,dydx
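# Usage sketch for LowerEnvelope, assuming the LinearInterp class defined
# earlier in this module as the component functions. The envelope of f1(x) = x
# and f2(x) = 2 - x is min(x, 2-x), with a kink at x = 1. Illustrative only.
def _demo_LowerEnvelope():
    grid = np.array([0.0, 2.0])
    f1 = LinearInterp(grid, grid)       # f1(x) = x
    f2 = LinearInterp(grid, 2.0 - grid) # f2(x) = 2 - x
    env = LowerEnvelope(f1, f2)
    x = np.array([0.5, 1.5])
    assert np.allclose(env(x), np.array([0.5, 0.5]))
    assert np.allclose(env.derivative(x), np.array([1.0, -1.0]))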
class UpperEnvelope(HARKinterpolator1D):
'''
The upper envelope of a finite set of 1D functions, each of which can be of
any class that has the methods __call__, derivative, and eval_with_derivative.
Generally: it combines HARKinterpolator1Ds.
'''
distance_criteria = ['functions']
def __init__(self,*functions):
'''
        Constructor to make a new upper envelope interpolation.
Parameters
----------
*functions : function
Any number of real functions; often instances of HARKinterpolator1D
Returns
-------
new instance of UpperEnvelope
'''
self.functions = []
for function in functions:
self.functions.append(function)
self.funcCount = len(self.functions)
def _evaluate(self,x):
'''
Returns the level of the function at each value in x as the maximum among
all of the functions. Only called internally by HARKinterpolator1D.__call__.
'''
if _isscalar(x):
y = np.nanmax([f(x) for f in self.functions])
else:
m = len(x)
fx = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
fx[:,j] = self.functions[j](x)
y = np.nanmax(fx,axis=1)
return y
def _der(self,x):
'''
Returns the first derivative of the function at each value in x. Only
called internally by HARKinterpolator1D.derivative.
'''
y,dydx = self.eval_with_derivative(x)
return dydx # Sadly, this is the fastest / most convenient way...
def _evalAndDer(self,x):
'''
Returns the level and first derivative of the function at each value in
        x. Only called internally by HARKinterpolator1D.eval_with_derivative.
'''
m = len(x)
fx = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
fx[:,j] = self.functions[j](x)
        fx[np.isnan(fx)] = -np.inf # NaNs must lose the argmax below, not win it
i = np.argmax(fx,axis=1)
y = fx[np.arange(m),i]
dydx = np.zeros_like(y)
for j in range(self.funcCount):
c = i == j
dydx[c] = self.functions[j].derivative(x[c])
return y,dydx
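# Usage sketch for UpperEnvelope, mirroring the LowerEnvelope example above:
# the upper envelope of f1(x) = x and f2(x) = 2 - x is max(x, 2-x). Assumes
# LinearInterp from earlier in this module; illustrative only.
def _demo_UpperEnvelope():
    grid = np.array([0.0, 2.0])
    env = UpperEnvelope(LinearInterp(grid, grid), LinearInterp(grid, 2.0 - grid))
    x = np.array([0.5, 1.5])
    assert np.allclose(env(x), np.array([1.5, 1.5]))
    assert np.allclose(env.derivative(x), np.array([-1.0, 1.0]))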
class LowerEnvelope2D(HARKinterpolator2D):
'''
The lower envelope of a finite set of 2D functions, each of which can be of
any class that has the methods __call__, derivativeX, and derivativeY.
Generally: it combines HARKinterpolator2Ds.
'''
distance_criteria = ['functions']
def __init__(self,*functions):
'''
        Constructor to make a new lower envelope interpolation.
Parameters
----------
*functions : function
Any number of real functions; often instances of HARKinterpolator2D
Returns
-------
new instance of LowerEnvelope2D
'''
self.functions = []
for function in functions:
self.functions.append(function)
self.funcCount = len(self.functions)
def _evaluate(self,x,y):
'''
Returns the level of the function at each value in (x,y) as the minimum
among all of the functions. Only called internally by
HARKinterpolator2D.__call__.
'''
if _isscalar(x):
f = np.nanmin([f(x,y) for f in self.functions])
else:
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y)
f = np.nanmin(temp,axis=1)
return f
def _derX(self,x,y):
'''
Returns the first derivative of the function with respect to X at each
        value in (x,y). Only called internally by HARKinterpolator2D.derivativeX.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
dfdx = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdx[c] = self.functions[j].derivativeX(x[c],y[c])
return dfdx
def _derY(self,x,y):
'''
Returns the first derivative of the function with respect to Y at each
        value in (x,y). Only called internally by HARKinterpolator2D.derivativeY.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
dfdy = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdy[c] = self.functions[j].derivativeY(x[c],y[c])
return dfdy
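# Usage sketch for LowerEnvelope2D, assuming the BilinearInterp class defined
# earlier in this module. f1(x,y) = x and f2(x,y) = y give the envelope
# min(x, y). Illustrative only.
def _demo_LowerEnvelope2D():
    g = np.array([0.0, 4.0])
    f1 = BilinearInterp(np.add.outer(g, 0.0*g), g, g) # f1(x,y) = x
    f2 = BilinearInterp(np.add.outer(0.0*g, g), g, g) # f2(x,y) = y
    env = LowerEnvelope2D(f1, f2)
    x, y = np.array([1.0, 3.0]), np.array([2.0, 2.0])
    assert np.allclose(env(x, y), np.array([1.0, 2.0]))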
class LowerEnvelope3D(HARKinterpolator3D):
'''
The lower envelope of a finite set of 3D functions, each of which can be of
any class that has the methods __call__, derivativeX, derivativeY, and
    derivativeZ. Generally: it combines HARKinterpolator3Ds.
'''
distance_criteria = ['functions']
def __init__(self,*functions):
'''
        Constructor to make a new lower envelope interpolation.
Parameters
----------
*functions : function
Any number of real functions; often instances of HARKinterpolator3D
Returns
-------
        new instance of LowerEnvelope3D
'''
self.functions = []
for function in functions:
self.functions.append(function)
self.funcCount = len(self.functions)
def _evaluate(self,x,y,z):
'''
Returns the level of the function at each value in (x,y,z) as the minimum
among all of the functions. Only called internally by
HARKinterpolator3D.__call__.
'''
if _isscalar(x):
f = np.nanmin([f(x,y,z) for f in self.functions])
else:
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
f = np.nanmin(temp,axis=1)
return f
def _derX(self,x,y,z):
'''
Returns the first derivative of the function with respect to X at each
        value in (x,y,z). Only called internally by HARKinterpolator3D.derivativeX.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
dfdx = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdx[c] = self.functions[j].derivativeX(x[c],y[c],z[c])
return dfdx
def _derY(self,x,y,z):
'''
Returns the first derivative of the function with respect to Y at each
        value in (x,y,z). Only called internally by HARKinterpolator3D.derivativeY.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
dfdy = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdy[c] = self.functions[j].derivativeY(x[c],y[c],z[c])
return dfdy
def _derZ(self,x,y,z):
'''
Returns the first derivative of the function with respect to Z at each
        value in (x,y,z). Only called internally by HARKinterpolator3D.derivativeZ.
'''
m = len(x)
temp = np.zeros((m,self.funcCount))
for j in range(self.funcCount):
temp[:,j] = self.functions[j](x,y,z)
temp[np.isnan(temp)] = np.inf
i = np.argmin(temp,axis=1)
dfdz = np.zeros_like(x)
for j in range(self.funcCount):
c = i == j
dfdz[c] = self.functions[j].derivativeZ(x[c],y[c],z[c])
return dfdz
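# Usage sketch for LowerEnvelope3D, using TrilinearInterp components defined
# above: f1(x,y,z) = x and f2(x,y,z) = z, so the envelope is min(x, z).
# Illustrative only.
def _demo_LowerEnvelope3D():
    g = np.array([0.0, 4.0])
    X, Y, Z = np.meshgrid(g, g, g, indexing='ij')
    env = LowerEnvelope3D(TrilinearInterp(X + 0.0*Y, g, g, g),
                          TrilinearInterp(Z + 0.0*Y, g, g, g))
    pts = (np.array([1.0]), np.array([2.0]), np.array([3.0]))
    assert np.isclose(env(*pts), 1.0)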
class VariableLowerBoundFunc2D(HARKobject):
'''
A class for representing a function with two real inputs whose lower bound
in the first input depends on the second input. Useful for managing curved
natural borrowing constraints, as occurs in the persistent shocks model.
'''
distance_criteria = ['func','lowerBound']
def __init__(self,func,lowerBound):
'''
Make a new instance of VariableLowerBoundFunc2D.
Parameters
----------
func : function
A function f: (R_+ x R) --> R representing the function of interest
shifted by its lower bound in the first input.
lowerBound : function
The lower bound in the first input of the function of interest, as
a function of the second input.
Returns
-------
None
'''
self.func = func
self.lowerBound = lowerBound
def __call__(self,x,y):
'''
Evaluate the function at given state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
f_out : np.array
Function evaluated at (x,y), of same shape as inputs.
'''
xShift = self.lowerBound(y)
f_out = self.func(x-xShift,y)
return f_out
def derivativeX(self,x,y):
'''
Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdx_out = self.func.derivativeX(x-xShift,y)
return dfdx_out
def derivativeY(self,x,y):
'''
Evaluate the first derivative with respect to y of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
dfdy_out : np.array
First derivative of function with respect to the second input,
evaluated at (x,y), of same shape as inputs.
'''
xShift,xShiftDer = self.lowerBound.eval_with_derivative(y)
dfdy_out = self.func.derivativeY(x-xShift,y) - xShiftDer*self.func.derivativeX(x-xShift,y)
return dfdy_out
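# Usage sketch for VariableLowerBoundFunc2D, assuming BilinearInterp and
# LinearInterp from earlier in this module. The wrapped function is
# f(x,y) = x + 2y in its shifted coordinate, and the bound is b(y) = -0.5y,
# so the composite evaluates to (x - b(y)) + 2y. Illustrative only.
def _demo_VariableLowerBoundFunc2D():
    g = np.linspace(0.0, 4.0, 9)
    f = BilinearInterp(np.add.outer(g, 2.0*g), g, g)               # f(x,y) = x + 2y
    b = LinearInterp(np.array([0.0, 4.0]), np.array([0.0, -2.0]))  # b(y) = -0.5y
    gfunc = VariableLowerBoundFunc2D(f, b)
    x, y = np.array([1.0]), np.array([2.0])
    assert np.isclose(gfunc(x, y), (1.0 + 1.0) + 2.0*2.0)          # = 6.0
    assert np.isclose(gfunc.derivativeY(x, y), 2.0 - (-0.5)*1.0)   # chain rule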
class VariableLowerBoundFunc3D(HARKobject):
'''
A class for representing a function with three real inputs whose lower bound
in the first input depends on the second input. Useful for managing curved
natural borrowing constraints.
'''
distance_criteria = ['func','lowerBound']
def __init__(self,func,lowerBound):
'''
Make a new instance of VariableLowerBoundFunc3D.
Parameters
----------
func : function
A function f: (R_+ x R^2) --> R representing the function of interest
shifted by its lower bound in the first input.
lowerBound : function
The lower bound in the first input of the function of interest, as
a function of the second input.
Returns
-------
None
'''
self.func = func
self.lowerBound = lowerBound
def __call__(self,x,y,z):
'''
Evaluate the function at given state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
f_out : np.array
Function evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
f_out = self.func(x-xShift,y,z)
return f_out
def derivativeX(self,x,y,z):
'''
Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdx_out = self.func.derivativeX(x-xShift,y,z)
return dfdx_out
def derivativeY(self,x,y,z):
'''
Evaluate the first derivative with respect to y of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdy_out : np.array
First derivative of function with respect to the second input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift,xShiftDer = self.lowerBound.eval_with_derivative(y)
dfdy_out = self.func.derivativeY(x-xShift,y,z) - \
xShiftDer*self.func.derivativeX(x-xShift,y,z)
return dfdy_out
def derivativeZ(self,x,y,z):
'''
Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdz_out = self.func.derivativeZ(x-xShift,y,z)
return dfdz_out
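# Usage sketch for VariableLowerBoundFunc3D, analogous to the 2D version above,
# assuming TrilinearInterp and LinearInterp. f(x,y,z) = x + 2y + 3z with bound
# b(y) = -0.5y. Illustrative only.
def _demo_VariableLowerBoundFunc3D():
    g = np.linspace(0.0, 4.0, 9)
    X, Y, Z = np.meshgrid(g, g, g, indexing='ij')
    f = TrilinearInterp(X + 2.0*Y + 3.0*Z, g, g, g)
    b = LinearInterp(np.array([0.0, 4.0]), np.array([0.0, -2.0]))  # b(y) = -0.5y
    gfunc = VariableLowerBoundFunc3D(f, b)
    x, y, z = np.array([1.0]), np.array([2.0]), np.array([1.0])
    assert np.isclose(gfunc(x, y, z), (1.0 + 1.0) + 4.0 + 3.0)     # = 9.0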
class LinearInterpOnInterp1D(HARKinterpolator2D):
'''
A 2D interpolator that linearly interpolates among a list of 1D interpolators.
'''
distance_criteria = ['xInterpolators','y_list']
def __init__(self,xInterpolators,y_values):
'''
Constructor for the class, generating an approximation to a function of
the form f(x,y) using interpolations over f(x,y_0) for a fixed grid of
y_0 values.
Parameters
----------
xInterpolators : [HARKinterpolator1D]
A list of 1D interpolations over the x variable. The nth element of
xInterpolators represents f(x,y_values[n]).
y_values: numpy.array
An array of y values equal in length to xInterpolators.
Returns
-------
new instance of LinearInterpOnInterp1D
'''
self.xInterpolators = xInterpolators
self.y_list = y_values
self.y_n = y_values.size
def _evaluate(self,x,y):
'''
Returns the level of the interpolated function at each value in x,y.
Only called internally by HARKinterpolator2D.__call__ (etc).
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
f = (1-alpha)*self.xInterpolators[y_pos-1](x) + alpha*self.xInterpolators[y_pos](x)
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
f = np.zeros(m) + np.nan
if y.size > 0:
for i in range(1,self.y_n):
c = y_pos == i
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
f[c] = (1-alpha)*self.xInterpolators[i-1](x[c]) + alpha*self.xInterpolators[i](x[c])
return f
def _derX(self,x,y):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdx = (1-alpha)*self.xInterpolators[y_pos-1]._der(x) + alpha*self.xInterpolators[y_pos]._der(x)
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
dfdx = np.zeros(m) + np.nan
if y.size > 0:
for i in range(1,self.y_n):
c = y_pos == i
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
dfdx[c] = (1-alpha)*self.xInterpolators[i-1]._der(x[c]) + alpha*self.xInterpolators[i]._der(x[c])
return dfdx
def _derY(self,x,y):
'''
Returns the derivative with respect to y of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
dfdy = (self.xInterpolators[y_pos](x) - self.xInterpolators[y_pos-1](x))/(self.y_list[y_pos] - self.y_list[y_pos-1])
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
dfdy = np.zeros(m) + np.nan
if y.size > 0:
for i in range(1,self.y_n):
c = y_pos == i
if np.any(c):
dfdy[c] = (self.xInterpolators[i](x[c]) - self.xInterpolators[i-1](x[c]))/(self.y_list[i] - self.y_list[i-1])
return dfdy
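# Usage sketch for LinearInterpOnInterp1D, assuming LinearInterp from earlier
# in this module. Each layer is f(x, y_n) = x + y_n, so interpolating across
# layers recovers f(x,y) = x + y. Illustrative only.
def _demo_LinearInterpOnInterp1D():
    x_grid = np.array([0.0, 10.0])
    y_grid = np.array([0.0, 1.0, 2.0])
    layers = [LinearInterp(x_grid, x_grid + y0) for y0 in y_grid]
    f = LinearInterpOnInterp1D(layers, y_grid)
    assert np.isclose(f(np.array([3.0]), np.array([1.5])), 4.5)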
class BilinearInterpOnInterp1D(HARKinterpolator3D):
'''
A 3D interpolator that bilinearly interpolates among a list of lists of 1D
interpolators.
'''
distance_criteria = ['xInterpolators','y_list','z_list']
def __init__(self,xInterpolators,y_values,z_values):
'''
Constructor for the class, generating an approximation to a function of
the form f(x,y,z) using interpolations over f(x,y_0,z_0) for a fixed grid
of y_0 and z_0 values.
Parameters
----------
xInterpolators : [[HARKinterpolator1D]]
A list of lists of 1D interpolations over the x variable. The i,j-th
element of xInterpolators represents f(x,y_values[i],z_values[j]).
y_values: numpy.array
An array of y values equal in length to xInterpolators.
z_values: numpy.array
An array of z values equal in length to xInterpolators[0].
Returns
-------
new instance of BilinearInterpOnInterp1D
'''
self.xInterpolators = xInterpolators
self.y_list = y_values
self.y_n = y_values.size
self.z_list = z_values
self.z_n = z_values.size
def _evaluate(self,x,y,z):
'''
Returns the level of the interpolated function at each value in x,y,z.
Only called internally by HARKinterpolator3D.__call__ (etc).
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
f = ((1-alpha)*(1-beta)*self.xInterpolators[y_pos-1][z_pos-1](x)
+ (1-alpha)*beta*self.xInterpolators[y_pos-1][z_pos](x)
+ alpha*(1-beta)*self.xInterpolators[y_pos][z_pos-1](x)
+ alpha*beta*self.xInterpolators[y_pos][z_pos](x))
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
f = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
f[c] = (
(1-alpha)*(1-beta)*self.xInterpolators[i-1][j-1](x[c])
+ (1-alpha)*beta*self.xInterpolators[i-1][j](x[c])
+ alpha*(1-beta)*self.xInterpolators[i][j-1](x[c])
+ alpha*beta*self.xInterpolators[i][j](x[c]))
return f
def _derX(self,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdx = ((1-alpha)*(1-beta)*self.xInterpolators[y_pos-1][z_pos-1]._der(x)
+ (1-alpha)*beta*self.xInterpolators[y_pos-1][z_pos]._der(x)
+ alpha*(1-beta)*self.xInterpolators[y_pos][z_pos-1]._der(x)
+ alpha*beta*self.xInterpolators[y_pos][z_pos]._der(x))
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdx = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
dfdx[c] = (
(1-alpha)*(1-beta)*self.xInterpolators[i-1][j-1]._der(x[c])
+ (1-alpha)*beta*self.xInterpolators[i-1][j]._der(x[c])
+ alpha*(1-beta)*self.xInterpolators[i][j-1]._der(x[c])
+ alpha*beta*self.xInterpolators[i][j]._der(x[c]))
return dfdx
def _derY(self,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdy = (((1-beta)*self.xInterpolators[y_pos][z_pos-1](x) + beta*self.xInterpolators[y_pos][z_pos](x))
- ((1-beta)*self.xInterpolators[y_pos-1][z_pos-1](x) + beta*self.xInterpolators[y_pos-1][z_pos](x)))/(self.y_list[y_pos] - self.y_list[y_pos-1])
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdy = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
dfdy[c] = (((1-beta)*self.xInterpolators[i][j-1](x[c]) + beta*self.xInterpolators[i][j](x[c]))
- ((1-beta)*self.xInterpolators[i-1][j-1](x[c]) + beta*self.xInterpolators[i-1][j](x[c])))/(self.y_list[i] - self.y_list[i-1])
return dfdy
def _derZ(self,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdz = (((1-alpha)*self.xInterpolators[y_pos-1][z_pos](x) + alpha*self.xInterpolators[y_pos][z_pos](x))
- ((1-alpha)*self.xInterpolators[y_pos-1][z_pos-1](x) + alpha*self.xInterpolators[y_pos][z_pos-1](x)))/(self.z_list[z_pos] - self.z_list[z_pos-1])
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdz = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
dfdz[c] = (((1-alpha)*self.xInterpolators[i-1][j](x[c]) + alpha*self.xInterpolators[i][j](x[c]))
- ((1-alpha)*self.xInterpolators[i-1][j-1](x[c]) + alpha*self.xInterpolators[i][j-1](x[c])))/(self.z_list[j] - self.z_list[j-1])
return dfdz
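# Usage sketch for BilinearInterpOnInterp1D: a 2D nest of 1D interpolators with
# f(x, y_i, z_j) = x + y_i + 2*z_j recovers f(x,y,z) = x + y + 2z. Assumes
# LinearInterp from earlier in this module; illustrative only.
def _demo_BilinearInterpOnInterp1D():
    x_grid = np.array([0.0, 10.0])
    g = np.array([0.0, 1.0])
    interps = [[LinearInterp(x_grid, x_grid + y0 + 2.0*z0) for z0 in g] for y0 in g]
    f = BilinearInterpOnInterp1D(interps, g, g)
    out = f(np.array([3.0]), np.array([0.5]), np.array([0.5]))
    assert np.isclose(out, 3.0 + 0.5 + 1.0)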
class TrilinearInterpOnInterp1D(HARKinterpolator4D):
'''
    A 4D interpolator that trilinearly interpolates among a list of lists of lists
    of 1D interpolators.
'''
distance_criteria = ['wInterpolators','x_list','y_list','z_list']
def __init__(self,wInterpolators,x_values,y_values,z_values):
'''
Constructor for the class, generating an approximation to a function of
the form f(w,x,y,z) using interpolations over f(w,x_0,y_0,z_0) for a fixed
        grid of x_0, y_0, and z_0 values.
Parameters
----------
wInterpolators : [[[HARKinterpolator1D]]]
            A list of lists of lists of 1D interpolations over the w variable.
The i,j,k-th element of wInterpolators represents f(w,x_values[i],y_values[j],z_values[k]).
x_values: numpy.array
An array of x values equal in length to wInterpolators.
y_values: numpy.array
An array of y values equal in length to wInterpolators[0].
z_values: numpy.array
            An array of z values equal in length to wInterpolators[0][0].
Returns
-------
new instance of TrilinearInterpOnInterp1D
'''
self.wInterpolators = wInterpolators
self.x_list = x_values
self.x_n = x_values.size
self.y_list = y_values
self.y_n = y_values.size
self.z_list = z_values
self.z_n = z_values.size
def _evaluate(self,w,x,y,z):
'''
Returns the level of the interpolated function at each value in w,x,y,z.
Only called internally by HARKinterpolator4D.__call__ (etc).
'''
if _isscalar(w):
x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
f = (
(1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)
+ (1-alpha)*(1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)
+ (1-alpha)*beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)
+ (1-alpha)*beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)
+ alpha*(1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)
+ alpha*(1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)
+ alpha*beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)
+ alpha*beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w))
else:
m = len(x)
x_pos = np.searchsorted(self.x_list,x)
            x_pos[x_pos > self.x_n-1] = self.x_n-1
            x_pos[x_pos < 1] = 1 # clamp below as well, matching y_pos and z_pos
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
f = np.zeros(m) + np.nan
for i in range(1,self.x_n):
for j in range(1,self.y_n):
for k in range(1,self.z_n):
c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)
if np.any(c):
alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])
gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])
f[c] = (
(1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])
+ (1-alpha)*(1-beta)*gamma*self.wInterpolators[i-1][j-1][k](w[c])
+ (1-alpha)*beta*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])
+ (1-alpha)*beta*gamma*self.wInterpolators[i-1][j][k](w[c])
+ alpha*(1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])
+ alpha*(1-beta)*gamma*self.wInterpolators[i][j-1][k](w[c])
+ alpha*beta*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])
+ alpha*beta*gamma*self.wInterpolators[i][j][k](w[c]))
return f
def _derW(self,w,x,y,z):
'''
Returns the derivative with respect to w of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.
'''
if _isscalar(w):
x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdw = (
(1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1]._der(w)
+ (1-alpha)*(1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos]._der(w)
+ (1-alpha)*beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1]._der(w)
+ (1-alpha)*beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos]._der(w)
+ alpha*(1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1]._der(w)
+ alpha*(1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos]._der(w)
+ alpha*beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1]._der(w)
+ alpha*beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos]._der(w))
else:
m = len(x)
x_pos = np.searchsorted(self.x_list,x)
            x_pos[x_pos > self.x_n-1] = self.x_n-1
            x_pos[x_pos < 1] = 1 # clamp below as well, matching y_pos and z_pos
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdw = np.zeros(m) + np.nan
for i in range(1,self.x_n):
for j in range(1,self.y_n):
for k in range(1,self.z_n):
c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)
if np.any(c):
alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])
gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])
dfdw[c] = (
(1-alpha)*(1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1]._der(w[c])
+ (1-alpha)*(1-beta)*gamma*self.wInterpolators[i-1][j-1][k]._der(w[c])
+ (1-alpha)*beta*(1-gamma)*self.wInterpolators[i-1][j][k-1]._der(w[c])
+ (1-alpha)*beta*gamma*self.wInterpolators[i-1][j][k]._der(w[c])
+ alpha*(1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1]._der(w[c])
+ alpha*(1-beta)*gamma*self.wInterpolators[i][j-1][k]._der(w[c])
+ alpha*beta*(1-gamma)*self.wInterpolators[i][j][k-1]._der(w[c])
+ alpha*beta*gamma*self.wInterpolators[i][j][k]._der(w[c]))
return dfdw
def _derX(self,w,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.
'''
if _isscalar(w):
x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdx = (
((1-beta)*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)
+ (1-beta)*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)
+ beta*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)
+ beta*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w)) -
((1-beta)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)
+ (1-beta)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)
+ beta*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)
+ beta*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)))/(self.x_list[x_pos] - self.x_list[x_pos-1])
else:
m = len(x)
x_pos = np.searchsorted(self.x_list,x)
            x_pos[x_pos > self.x_n-1] = self.x_n-1
            x_pos[x_pos < 1] = 1 # clamp below as well, matching y_pos and z_pos
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdx = np.zeros(m) + np.nan
for i in range(1,self.x_n):
for j in range(1,self.y_n):
for k in range(1,self.z_n):
c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)
if np.any(c):
beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])
gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])
dfdx[c] = (
((1-beta)*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])
+ (1-beta)*gamma*self.wInterpolators[i][j-1][k](w[c])
+ beta*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])
+ beta*gamma*self.wInterpolators[i][j][k](w[c])) -
((1-beta)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])
+ (1-beta)*gamma*self.wInterpolators[i-1][j-1][k](w[c])
+ beta*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])
+ beta*gamma*self.wInterpolators[i-1][j][k](w[c])))/(self.x_list[i] - self.x_list[i-1])
return dfdx
def _derY(self,w,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeY.
'''
if _isscalar(w):
x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
gamma = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdy = (
((1-alpha)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)
+ (1-alpha)*gamma*self.wInterpolators[x_pos-1][y_pos][z_pos](w)
+ alpha*(1-gamma)*self.wInterpolators[x_pos][y_pos][z_pos-1](w)
+ alpha*gamma*self.wInterpolators[x_pos][y_pos][z_pos](w)) -
((1-alpha)*(1-gamma)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)
+ (1-alpha)*gamma*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)
+ alpha*(1-gamma)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)
+ alpha*gamma*self.wInterpolators[x_pos][y_pos-1][z_pos](w)))/(self.y_list[y_pos] - self.y_list[y_pos-1])
else:
m = len(x)
x_pos = np.searchsorted(self.x_list,x)
            x_pos[x_pos > self.x_n-1] = self.x_n-1
            x_pos[x_pos < 1] = 1 # clamp below as well, matching y_pos and z_pos
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdy = np.zeros(m) + np.nan
for i in range(1,self.x_n):
for j in range(1,self.y_n):
for k in range(1,self.z_n):
c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)
if np.any(c):
alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
gamma = (z[c] - self.z_list[k-1])/(self.z_list[k] - self.z_list[k-1])
dfdy[c] = (
((1-alpha)*(1-gamma)*self.wInterpolators[i-1][j][k-1](w[c])
+ (1-alpha)*gamma*self.wInterpolators[i-1][j][k](w[c])
+ alpha*(1-gamma)*self.wInterpolators[i][j][k-1](w[c])
+ alpha*gamma*self.wInterpolators[i][j][k](w[c])) -
((1-alpha)*(1-gamma)*self.wInterpolators[i-1][j-1][k-1](w[c])
+ (1-alpha)*gamma*self.wInterpolators[i-1][j-1][k](w[c])
+ alpha*(1-gamma)*self.wInterpolators[i][j-1][k-1](w[c])
+ alpha*gamma*self.wInterpolators[i][j-1][k](w[c])))/(self.y_list[j] - self.y_list[j-1])
return dfdy
def _derZ(self,w,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeZ.
'''
if _isscalar(w):
x_pos = max(min(np.searchsorted(self.x_list,x),self.x_n-1),1)
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (x - self.x_list[x_pos-1])/(self.x_list[x_pos] - self.x_list[x_pos-1])
beta = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdz = (
((1-alpha)*(1-beta)*self.wInterpolators[x_pos-1][y_pos-1][z_pos](w)
+ (1-alpha)*beta*self.wInterpolators[x_pos-1][y_pos][z_pos](w)
+ alpha*(1-beta)*self.wInterpolators[x_pos][y_pos-1][z_pos](w)
+ alpha*beta*self.wInterpolators[x_pos][y_pos][z_pos](w)) -
((1-alpha)*(1-beta)*self.wInterpolators[x_pos-1][y_pos-1][z_pos-1](w)
+ (1-alpha)*beta*self.wInterpolators[x_pos-1][y_pos][z_pos-1](w)
+ alpha*(1-beta)*self.wInterpolators[x_pos][y_pos-1][z_pos-1](w)
+ alpha*beta*self.wInterpolators[x_pos][y_pos][z_pos-1](w)))/(self.z_list[z_pos] - self.z_list[z_pos-1])
else:
m = len(x)
x_pos = np.searchsorted(self.x_list,x)
            x_pos[x_pos > self.x_n-1] = self.x_n-1
            x_pos[x_pos < 1] = 1 # clamp below as well, matching y_pos and z_pos
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdz = np.zeros(m) + np.nan
for i in range(1,self.x_n):
for j in range(1,self.y_n):
for k in range(1,self.z_n):
c = np.logical_and(np.logical_and(i == x_pos, j == y_pos),k == z_pos)
if np.any(c):
alpha = (x[c] - self.x_list[i-1])/(self.x_list[i] - self.x_list[i-1])
beta = (y[c] - self.y_list[j-1])/(self.y_list[j] - self.y_list[j-1])
dfdz[c] = (
((1-alpha)*(1-beta)*self.wInterpolators[i-1][j-1][k](w[c])
+ (1-alpha)*beta*self.wInterpolators[i-1][j][k](w[c])
+ alpha*(1-beta)*self.wInterpolators[i][j-1][k](w[c])
+ alpha*beta*self.wInterpolators[i][j][k](w[c])) -
((1-alpha)*(1-beta)*self.wInterpolators[i-1][j-1][k-1](w[c])
+ (1-alpha)*beta*self.wInterpolators[i-1][j][k-1](w[c])
+ alpha*(1-beta)*self.wInterpolators[i][j-1][k-1](w[c])
+ alpha*beta*self.wInterpolators[i][j][k-1](w[c])))/(self.z_list[k] - self.z_list[k-1])
return dfdz
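# Usage sketch for TrilinearInterpOnInterp1D: a 3D nest of 1D interpolators with
# f(w, x_i, y_j, z_k) = w + x_i + 2*y_j + 3*z_k recovers the affine function
# f(w,x,y,z) = w + x + 2y + 3z. Assumes LinearInterp; illustrative only.
def _demo_TrilinearInterpOnInterp1D():
    w_grid = np.array([0.0, 10.0])
    g = np.array([0.0, 1.0])
    interps = [[[LinearInterp(w_grid, w_grid + x0 + 2.0*y0 + 3.0*z0)
                 for z0 in g] for y0 in g] for x0 in g]
    f = TrilinearInterpOnInterp1D(interps, g, g, g)
    out = f(np.array([2.0]), np.array([0.5]), np.array([0.5]), np.array([0.5]))
    assert np.isclose(out, 2.0 + 0.5 + 1.0 + 1.5)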
class LinearInterpOnInterp2D(HARKinterpolator3D):
'''
A 3D interpolation method that linearly interpolates between "layers" of
arbitrary 2D interpolations. Useful for models with two endogenous state
variables and one exogenous state variable when solving with the endogenous
    grid method. NOTE: should not be used with an exogenous 3D grid, as it
    will be significantly slower than TrilinearInterp.
'''
distance_criteria = ['xyInterpolators','z_list']
def __init__(self,xyInterpolators,z_values):
'''
Constructor for the class, generating an approximation to a function of
the form f(x,y,z) using interpolations over f(x,y,z_0) for a fixed grid
of z_0 values.
Parameters
----------
xyInterpolators : [HARKinterpolator2D]
A list of 2D interpolations over the x and y variables. The nth
element of xyInterpolators represents f(x,y,z_values[n]).
z_values: numpy.array
An array of z values equal in length to xyInterpolators.
Returns
-------
new instance of LinearInterpOnInterp2D
'''
self.xyInterpolators = xyInterpolators
self.z_list = z_values
self.z_n = z_values.size
def _evaluate(self,x,y,z):
'''
Returns the level of the interpolated function at each value in x,y,z.
Only called internally by HARKinterpolator3D.__call__ (etc).
'''
if _isscalar(x):
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
f = (1-alpha)*self.xyInterpolators[z_pos-1](x,y) + alpha*self.xyInterpolators[z_pos](x,y)
else:
m = len(x)
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
f = np.zeros(m) + np.nan
if x.size > 0:
for i in range(1,self.z_n):
c = z_pos == i
if np.any(c):
alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
f[c] = (1-alpha)*self.xyInterpolators[i-1](x[c],y[c]) + alpha*self.xyInterpolators[i](x[c],y[c])
return f
def _derX(self,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
'''
if _isscalar(x):
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdx = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeX(x,y) + alpha*self.xyInterpolators[z_pos].derivativeX(x,y)
else:
m = len(x)
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdx = np.zeros(m) + np.nan
if x.size > 0:
for i in range(1,self.z_n):
c = z_pos == i
if np.any(c):
alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
dfdx[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeX(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeX(x[c],y[c])
return dfdx
def _derY(self,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
'''
if _isscalar(x):
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdy = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeY(x,y) + alpha*self.xyInterpolators[z_pos].derivativeY(x,y)
else:
m = len(x)
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdy = np.zeros(m) + np.nan
if x.size > 0:
for i in range(1,self.z_n):
c = z_pos == i
if np.any(c):
alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
dfdy[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeY(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeY(x[c],y[c])
return dfdy
def _derZ(self,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
'''
if _isscalar(x):
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            dfdz = (self.xyInterpolators[z_pos](x,y) - self.xyInterpolators[z_pos-1](x,y))/(self.z_list[z_pos] - self.z_list[z_pos-1])
else:
m = len(x)
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdz = np.zeros(m) + np.nan
if x.size > 0:
for i in range(1,self.z_n):
c = z_pos == i
if np.any(c):
dfdz[c] = (self.xyInterpolators[i](x[c],y[c]) - self.xyInterpolators[i-1](x[c],y[c]))/(self.z_list[i] - self.z_list[i-1])
return dfdz
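# Usage sketch for LinearInterpOnInterp2D: "layers" of BilinearInterp over
# (x,y) at fixed z values, here f(x,y,z_n) = x + y + z_n, so the stacked
# interpolant recovers f(x,y,z) = x + y + z. Assumes BilinearInterp from
# earlier in this module; illustrative only.
def _demo_LinearInterpOnInterp2D():
    g = np.array([0.0, 4.0])
    z_grid = np.array([0.0, 1.0])
    layers = [BilinearInterp(np.add.outer(g, g) + z0, g, g) for z0 in z_grid]
    f = LinearInterpOnInterp2D(layers, z_grid)
    out = f(np.array([1.0]), np.array([2.0]), np.array([0.5]))
    assert np.isclose(out, 3.5)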
class BilinearInterpOnInterp2D(HARKinterpolator4D):
'''
A 4D interpolation method that bilinearly interpolates among "layers" of
arbitrary 2D interpolations. Useful for models with two endogenous state
variables and two exogenous state variables when solving with the endogenous
    grid method. NOTE: should not be used with an exogenous 4D grid, as it
    will be significantly slower than QuadlinearInterp.
'''
distance_criteria = ['wxInterpolators','y_list','z_list']
def __init__(self,wxInterpolators,y_values,z_values):
'''
Constructor for the class, generating an approximation to a function of
the form f(w,x,y,z) using interpolations over f(w,x,y_0,z_0) for a fixed
grid of y_0 and z_0 values.
Parameters
----------
wxInterpolators : [[HARKinterpolator2D]]
A list of lists of 2D interpolations over the w and x variables.
The i,j-th element of wxInterpolators represents
f(w,x,y_values[i],z_values[j]).
y_values: numpy.array
An array of y values equal in length to wxInterpolators.
z_values: numpy.array
An array of z values equal in length to wxInterpolators[0].
Returns
-------
new instance of BilinearInterpOnInterp2D
'''
self.wxInterpolators = wxInterpolators
self.y_list = y_values
self.y_n = y_values.size
self.z_list = z_values
self.z_n = z_values.size
def _evaluate(self,w,x,y,z):
'''
        Returns the level of the interpolated function at each value in w,x,y,z.
Only called internally by HARKinterpolator4D.__call__ (etc).
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
f = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1](w,x)
+ (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos](w,x)
+ alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1](w,x)
+ alpha*beta*self.wxInterpolators[y_pos][z_pos](w,x))
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
f = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
f[c] = (
(1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1](w[c],x[c])
+ (1-alpha)*beta*self.wxInterpolators[i-1][j](w[c],x[c])
+ alpha*(1-beta)*self.wxInterpolators[i][j-1](w[c],x[c])
+ alpha*beta*self.wxInterpolators[i][j](w[c],x[c]))
return f
def _derW(self,w,x,y,z):
'''
Returns the derivative with respect to w of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.
'''
# This may look strange, as we call the derivativeX() method to get the
# derivative with respect to w, but that's just a quirk of 4D interpolations
# beginning with w rather than x. The derivative wrt the first dimension
# of an element of wxInterpolators is the w-derivative of the main function.
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdw = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1].derivativeX(w,x)
+ (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos].derivativeX(w,x)
+ alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1].derivativeX(w,x)
+ alpha*beta*self.wxInterpolators[y_pos][z_pos].derivativeX(w,x))
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdw = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
dfdw[c] = (
(1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1].derivativeX(w[c],x[c])
+ (1-alpha)*beta*self.wxInterpolators[i-1][j].derivativeX(w[c],x[c])
+ alpha*(1-beta)*self.wxInterpolators[i][j-1].derivativeX(w[c],x[c])
+ alpha*beta*self.wxInterpolators[i][j].derivativeX(w[c],x[c]))
return dfdw
def _derX(self,w,x,y,z):
'''
Returns the derivative with respect to x of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.
'''
# This may look strange, as we call the derivativeY() method to get the
# derivative with respect to x, but that's just a quirk of 4D interpolations
# beginning with w rather than x. The derivative wrt the second dimension
# of an element of wxInterpolators is the x-derivative of the main function.
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdx = ((1-alpha)*(1-beta)*self.wxInterpolators[y_pos-1][z_pos-1].derivativeY(w,x)
+ (1-alpha)*beta*self.wxInterpolators[y_pos-1][z_pos].derivativeY(w,x)
+ alpha*(1-beta)*self.wxInterpolators[y_pos][z_pos-1].derivativeY(w,x)
+ alpha*beta*self.wxInterpolators[y_pos][z_pos].derivativeY(w,x))
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdx = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
dfdx[c] = (
(1-alpha)*(1-beta)*self.wxInterpolators[i-1][j-1].derivativeY(w[c],x[c])
+ (1-alpha)*beta*self.wxInterpolators[i-1][j].derivativeY(w[c],x[c])
+ alpha*(1-beta)*self.wxInterpolators[i][j-1].derivativeY(w[c],x[c])
+ alpha*beta*self.wxInterpolators[i][j].derivativeY(w[c],x[c]))
return dfdx
def _derY(self,w,x,y,z):
'''
Returns the derivative with respect to y of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeY.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
beta = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
dfdy = (((1-beta)*self.wxInterpolators[y_pos][z_pos-1](w,x) + beta*self.wxInterpolators[y_pos][z_pos](w,x))
- ((1-beta)*self.wxInterpolators[y_pos-1][z_pos-1](w,x) + beta*self.wxInterpolators[y_pos-1][z_pos](w,x)))/(self.y_list[y_pos] - self.y_list[y_pos-1])
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdy = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
beta = (z[c] - self.z_list[j-1])/(self.z_list[j] - self.z_list[j-1])
dfdy[c] = (((1-beta)*self.wxInterpolators[i][j-1](w[c],x[c]) + beta*self.wxInterpolators[i][j](w[c],x[c]))
- ((1-beta)*self.wxInterpolators[i-1][j-1](w[c],x[c]) + beta*self.wxInterpolators[i-1][j](w[c],x[c])))/(self.y_list[i] - self.y_list[i-1])
return dfdy
def _derZ(self,w,x,y,z):
'''
Returns the derivative with respect to z of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeZ.
'''
if _isscalar(x):
y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
dfdz = (((1-alpha)*self.wxInterpolators[y_pos-1][z_pos](w,x) + alpha*self.wxInterpolators[y_pos][z_pos](w,x))
- ((1-alpha)*self.wxInterpolators[y_pos-1][z_pos-1](w,x) + alpha*self.wxInterpolators[y_pos][z_pos-1](w,x)))/(self.z_list[z_pos] - self.z_list[z_pos-1])
else:
m = len(x)
y_pos = np.searchsorted(self.y_list,y)
y_pos[y_pos > self.y_n-1] = self.y_n-1
y_pos[y_pos < 1] = 1
z_pos = np.searchsorted(self.z_list,z)
z_pos[z_pos > self.z_n-1] = self.z_n-1
z_pos[z_pos < 1] = 1
dfdz = np.zeros(m) + np.nan
for i in range(1,self.y_n):
for j in range(1,self.z_n):
c = np.logical_and(i == y_pos, j == z_pos)
if np.any(c):
alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
dfdz[c] = (((1-alpha)*self.wxInterpolators[i-1][j](w[c],x[c]) + alpha*self.wxInterpolators[i][j](w[c],x[c]))
- ((1-alpha)*self.wxInterpolators[i-1][j-1](w[c],x[c]) + alpha*self.wxInterpolators[i][j-1](w[c],x[c])))/(self.z_list[j] - self.z_list[j-1])
return dfdz
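
# The following is a minimal, illustrative sketch (not part of the library API):
# it builds a BilinearInterpOnInterp2D from BilinearInterp "layers" over (w,x)
# for the toy function f(w,x,y,z) = w*x + y*z. The helper name
# _demo_BilinearInterpOnInterp2D is an assumption for illustration only.
def _demo_BilinearInterpOnInterp2D():
    f = lambda w, x, y, z: w*x + y*z
    w_list = np.linspace(0.0, 5.0, 11)
    x_list = np.linspace(0.0, 5.0, 11)
    y_list = np.linspace(0.0, 5.0, 6)
    z_list = np.linspace(0.0, 5.0, 6)
    w_temp, x_temp = np.meshgrid(w_list, x_list, indexing='ij')
    # One 2D interpolation over (w,x) for each (y_0,z_0) grid node
    wxInterpolators = []
    for y0 in y_list:
        row = []
        for z0 in z_list:
            row.append(BilinearInterp(f(w_temp, x_temp, y0, z0), w_list, x_list))
        wxInterpolators.append(row)
    g = BilinearInterpOnInterp2D(wxInterpolators, y_list, z_list)
    return g(2.5, 1.0, 3.0, 4.0)  # approximately f(2.5,1.0,3.0,4.0) = 14.5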
class Curvilinear2DInterp(HARKinterpolator2D):
'''
A 2D interpolation method for curvilinear or "warped grid" interpolation, as
in White (2015). Used for models with two endogenous states that are solved
with the endogenous grid method.
'''
distance_criteria = ['f_values','x_values','y_values']
def __init__(self,f_values,x_values,y_values):
'''
Constructor for 2D curvilinear interpolation for a function f(x,y)
Parameters
----------
f_values: numpy.array
A 2D array of function values such that f_values[i,j] =
f(x_values[i,j],y_values[i,j]).
x_values: numpy.array
A 2D array of x values of the same size as f_values.
y_values: numpy.array
A 2D array of y values of the same size as f_values.
Returns
-------
new instance of Curvilinear2DInterp
'''
self.f_values = f_values
self.x_values = x_values
self.y_values = y_values
my_shape = f_values.shape
self.x_n = my_shape[0]
self.y_n = my_shape[1]
self.updatePolarity()
def updatePolarity(self):
'''
Fills in the polarity attribute of the interpolation, determining whether
the "plus" (True) or "minus" (False) solution of the system of equations
should be used for each sector. Needs to be called in __init__.
Parameters
----------
none
Returns
-------
none
'''
# Grab a point known to be inside each sector: the midway point between
# the lower left and upper right vertex of each sector
x_temp = 0.5*(self.x_values[0:(self.x_n-1),0:(self.y_n-1)] + self.x_values[1:self.x_n,1:self.y_n])
y_temp = 0.5*(self.y_values[0:(self.x_n-1),0:(self.y_n-1)] + self.y_values[1:self.x_n,1:self.y_n])
size = (self.x_n-1)*(self.y_n-1)
x_temp = np.reshape(x_temp,size)
y_temp = np.reshape(y_temp,size)
y_pos = np.tile(np.arange(0,self.y_n-1),self.x_n-1)
x_pos = np.reshape(np.tile(np.arange(0,self.x_n-1),(self.y_n-1,1)).transpose(),size)
# Set the polarity of all sectors to "plus", then test each sector
self.polarity = np.ones((self.x_n-1,self.y_n-1),dtype=bool)
alpha, beta = self.findCoords(x_temp,y_temp,x_pos,y_pos)
polarity = np.logical_and(
np.logical_and(alpha > 0, alpha < 1),
np.logical_and(beta > 0, beta < 1))
# Update polarity: if (alpha,beta) not in the unit square, then that
# sector must use the "minus" solution instead
self.polarity = np.reshape(polarity,(self.x_n-1,self.y_n-1))
def findSector(self,x,y):
'''
Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size.
'''
# Initialize the sector guess
m = x.size
x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)
y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)
# Define a function that checks whether a set of points violates a linear
# boundary defined by (x_bound_1,y_bound_1) and (x_bound_2,y_bound_2),
# where the latter is *COUNTER CLOCKWISE* from the former. Returns
# 1 if the point is outside the boundary and 0 otherwise.
violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (
(y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0
# Identify the correct sector for each point to be evaluated
these = np.ones(m,dtype=bool)
max_loops = self.x_n + self.y_n
loops = 0
while np.any(these) and loops < max_loops:
# Get coordinates for the four vertices: (xA,yA),...,(xD,yD)
x_temp = x[these]
y_temp = y[these]
xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]
xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]
xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]
xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]
yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]
yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]
yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
# Check the "bounding box" for the sector: is this guess plausible?
move_down = (y_temp < np.minimum(yA,yB)) + 0
move_right = (x_temp > np.maximum(xB,xD)) + 0
move_up = (y_temp > np.maximum(yC,yD)) + 0
move_left = (x_temp < np.minimum(xA,xC)) + 0
# Check which boundaries are violated (and thus where to look next)
c = (move_down + move_right + move_up + move_left) == 0
move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])
move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])
move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])
move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])
# Update the sector guess based on the violations
x_pos_next = x_pos_guess[these] - move_left + move_right
x_pos_next[x_pos_next < 0] = 0
x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2
y_pos_next = y_pos_guess[these] - move_down + move_up
y_pos_next[y_pos_next < 0] = 0
y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2
# Check which sectors have not changed, and mark them as complete
no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))
x_pos_guess[these] = x_pos_next
y_pos_guess[these] = y_pos_next
temp = these.nonzero()
these[temp[0][no_move]] = False
# Move to the next iteration of the search
loops += 1
# Return the output
x_pos = x_pos_guess
y_pos = y_pos_guess
return x_pos, y_pos
def findCoords(self,x,y,x_pos,y_pos):
'''
Calculates the relative coordinates (alpha,beta) for each point (x,y),
given the sectors (x_pos,y_pos) in which they reside. Only called as
        a subroutine of _evaluate() and the derivative methods.
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
x_pos : np.array
Sector x-coordinates for each point in (x,y), of the same size.
y_pos : np.array
Sector y-coordinates for each point in (x,y), of the same size.
Returns
-------
alpha : np.array
Relative "horizontal" position of the input in their respective sectors.
beta : np.array
Relative "vertical" position of the input in their respective sectors.
'''
# Calculate relative coordinates in the sector for each point
xA = self.x_values[x_pos,y_pos]
xB = self.x_values[x_pos+1,y_pos]
xC = self.x_values[x_pos,y_pos+1]
xD = self.x_values[x_pos+1,y_pos+1]
yA = self.y_values[x_pos,y_pos]
yB = self.y_values[x_pos+1,y_pos]
yC = self.y_values[x_pos,y_pos+1]
yD = self.y_values[x_pos+1,y_pos+1]
polarity = 2.0*self.polarity[x_pos,y_pos] - 1.0
a = xA
b = (xB-xA)
c = (xC-xA)
d = (xA-xB-xC+xD)
e = yA
f = (yB-yA)
g = (yC-yA)
h = (yA-yB-yC+yD)
denom = (d*g-h*c)
mu = (h*b-d*f)/denom
tau = (h*(a-x) - d*(e-y))/denom
zeta = a - x + c*tau
eta = b + c*mu + d*tau
theta = d*mu
alpha = (-eta + polarity*np.sqrt(eta**2.0 - 4.0*zeta*theta))/(2.0*theta)
beta = mu*alpha + tau
# Alternate method if there are sectors that are "too regular"
z = np.logical_or(np.isnan(alpha),np.isnan(beta)) # These points weren't able to identify coordinates
if np.any(z):
these = np.isclose(f/b,(yD-yC)/(xD-xC)) # iso-beta lines have equal slope
if np.any(these):
kappa = f[these]/b[these]
int_bot = yA[these] - kappa*xA[these]
int_top = yC[these] - kappa*xC[these]
int_these = y[these] - kappa*x[these]
beta_temp = (int_these-int_bot)/(int_top-int_bot)
x_left = beta_temp*xC[these] + (1.0-beta_temp)*xA[these]
x_right = beta_temp*xD[these] + (1.0-beta_temp)*xB[these]
alpha_temp= (x[these]-x_left)/(x_right-x_left)
beta[these] = beta_temp
alpha[these] = alpha_temp
#print(np.sum(np.isclose(g/c,(yD-yB)/(xD-xB))))
return alpha, beta
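    # Derivation note (restating the algebra used in findCoords above): each
    # sector maps the unit square to a quadrilateral via the bilinear map
    #     x = a + b*alpha + c*beta + d*alpha*beta
    #     y = e + f*alpha + g*beta + h*alpha*beta
    # with a..h as defined from the four vertices. Eliminating beta gives
    # beta = mu*alpha + tau, and substituting back yields the quadratic
    #     theta*alpha**2 + eta*alpha + zeta = 0,
    # whose two roots are the "plus"/"minus" solutions; the polarity attribute
    # (set in updatePolarity) records which root lands inside the unit square
    # for each sector.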
def _evaluate(self,x,y):
'''
Returns the level of the interpolated function at each value in x,y.
Only called internally by HARKinterpolator2D.__call__ (etc).
'''
x_pos, y_pos = self.findSector(x,y)
alpha, beta = self.findCoords(x,y,x_pos,y_pos)
# Calculate the function at each point using bilinear interpolation
f = (
(1-alpha)*(1-beta)*self.f_values[x_pos,y_pos]
+ (1-alpha)*beta*self.f_values[x_pos,y_pos+1]
+ alpha*(1-beta)*self.f_values[x_pos+1,y_pos]
+ alpha*beta*self.f_values[x_pos+1,y_pos+1])
return f
def _derX(self,x,y):
'''
Returns the derivative with respect to x of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
'''
x_pos, y_pos = self.findSector(x,y)
alpha, beta = self.findCoords(x,y,x_pos,y_pos)
# Get four corners data for each point
xA = self.x_values[x_pos,y_pos]
xB = self.x_values[x_pos+1,y_pos]
xC = self.x_values[x_pos,y_pos+1]
xD = self.x_values[x_pos+1,y_pos+1]
yA = self.y_values[x_pos,y_pos]
yB = self.y_values[x_pos+1,y_pos]
yC = self.y_values[x_pos,y_pos+1]
yD = self.y_values[x_pos+1,y_pos+1]
fA = self.f_values[x_pos,y_pos]
fB = self.f_values[x_pos+1,y_pos]
fC = self.f_values[x_pos,y_pos+1]
fD = self.f_values[x_pos+1,y_pos+1]
# Calculate components of the alpha,beta --> x,y delta translation matrix
alpha_x = (1-beta)*(xB-xA) + beta*(xD-xC)
alpha_y = (1-beta)*(yB-yA) + beta*(yD-yC)
beta_x = (1-alpha)*(xC-xA) + alpha*(xD-xB)
beta_y = (1-alpha)*(yC-yA) + alpha*(yD-yB)
# Invert the delta translation matrix into x,y --> alpha,beta
det = alpha_x*beta_y - beta_x*alpha_y
x_alpha = beta_y/det
x_beta = -alpha_y/det
# Calculate the derivative of f w.r.t. alpha and beta
dfda = (1-beta)*(fB-fA) + beta*(fD-fC)
dfdb = (1-alpha)*(fC-fA) + alpha*(fD-fB)
# Calculate the derivative with respect to x (and return it)
dfdx = x_alpha*dfda + x_beta*dfdb
return dfdx
def _derY(self,x,y):
'''
Returns the derivative with respect to y of the interpolated function
        at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
'''
x_pos, y_pos = self.findSector(x,y)
alpha, beta = self.findCoords(x,y,x_pos,y_pos)
# Get four corners data for each point
xA = self.x_values[x_pos,y_pos]
xB = self.x_values[x_pos+1,y_pos]
xC = self.x_values[x_pos,y_pos+1]
xD = self.x_values[x_pos+1,y_pos+1]
yA = self.y_values[x_pos,y_pos]
yB = self.y_values[x_pos+1,y_pos]
yC = self.y_values[x_pos,y_pos+1]
yD = self.y_values[x_pos+1,y_pos+1]
fA = self.f_values[x_pos,y_pos]
fB = self.f_values[x_pos+1,y_pos]
fC = self.f_values[x_pos,y_pos+1]
fD = self.f_values[x_pos+1,y_pos+1]
# Calculate components of the alpha,beta --> x,y delta translation matrix
alpha_x = (1-beta)*(xB-xA) + beta*(xD-xC)
alpha_y = (1-beta)*(yB-yA) + beta*(yD-yC)
beta_x = (1-alpha)*(xC-xA) + alpha*(xD-xB)
beta_y = (1-alpha)*(yC-yA) + alpha*(yD-yB)
# Invert the delta translation matrix into x,y --> alpha,beta
det = alpha_x*beta_y - beta_x*alpha_y
y_alpha = -beta_x/det
y_beta = alpha_x/det
# Calculate the derivative of f w.r.t. alpha and beta
dfda = (1-beta)*(fB-fA) + beta*(fD-fC)
dfdb = (1-alpha)*(fC-fA) + alpha*(fD-fB)
        # Calculate the derivative with respect to y (and return it)
dfdy = y_alpha*dfda + y_beta*dfdb
return dfdy
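
# Minimal illustrative sketch (not part of the library API; it mirrors the
# Curvilinear2DInterp block in main() below): interpolate f(x,y) = x**2 + y
# on a slightly warped grid. The helper name _demo_Curvilinear2DInterp is an
# assumption for illustration only.
def _demo_Curvilinear2DInterp():
    f = lambda x, y: x**2.0 + y
    RNG = np.random.RandomState(0)
    x_list = np.linspace(0.0, 5.0, 41)
    y_list = np.linspace(0.0, 5.0, 41)
    x_temp, y_temp = np.meshgrid(x_list, y_list, indexing='ij')
    warp = 0.01  # small perturbation so the grid is curvilinear, not regular
    x_adj = x_temp + warp*(RNG.rand(x_list.size, y_list.size) - 0.5)
    y_adj = y_temp + warp*(RNG.rand(x_list.size, y_list.size) - 0.5)
    g = Curvilinear2DInterp(f(x_adj, y_adj), x_adj, y_adj)
    return g(np.array([2.0]), np.array([3.0]))  # close to f(2,3) = 7.0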
###############################################################################
## Functions used in discrete choice models with T1EV taste shocks ############
###############################################################################
def calcLogSumChoiceProbs(Vals, sigma):
'''
Returns the final optimal value and choice probabilities given the choice
specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
P : [numpy.array]
A numpy.array that holds the discrete choice probabilities
'''
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
Pflat = np.argmax(Vals, axis=0)
V = np.zeros(Vals[0].shape)
Probs = np.zeros(Vals.shape)
for i in range(Vals.shape[0]):
optimalIndices = Pflat == i
V[optimalIndices] = Vals[i][optimalIndices]
Probs[i][optimalIndices] = 1
return V, Probs
# else we have a taste shock
maxV = np.max(Vals, axis=0)
    # calculate maxV + sigma*log(sum_{i=1}^J exp((V[i]-maxV)/sigma))
sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0)
LogSumV = np.log(sumexp)
LogSumV = maxV + sigma*LogSumV
Probs = np.exp((Vals-LogSumV)/sigma)
return LogSumV, Probs
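
# Minimal illustrative sketch (not part of the library API): evaluate the
# T1EV log-sum and choice probabilities on a toy two-choice problem. The
# helper name _demo_calcLogSumChoiceProbs is an assumption for illustration.
def _demo_calcLogSumChoiceProbs():
    Vals = np.array([[1.0, 2.0, 3.0],
                     [3.0, 2.0, 1.0]])  # two choices on a three-point grid
    V, P = calcLogSumChoiceProbs(Vals, sigma=0.5)
    assert np.allclose(P.sum(axis=0), 1.0)  # probabilities sum to one
    return V, P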
def calcChoiceProbs(Vals, sigma):
'''
Returns the choice probabilities given the choice specific value functions
`Vals`. Probabilities are degenerate if sigma == 0.0.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
Probs : [numpy.array]
A numpy.array that holds the discrete choice probabilities
'''
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
Pflat = np.argmax(Vals, axis=0)
Probs = np.zeros(Vals.shape)
for i in range(Vals.shape[0]):
Probs[i][Pflat==i] = 1
return Probs
maxV = np.max(Vals, axis=0)
Probs = np.divide(np.exp((Vals-maxV)/sigma), np.sum(np.exp((Vals-maxV)/sigma), axis=0))
return Probs
def calcLogSum(Vals, sigma):
'''
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
'''
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
V = np.amax(Vals, axis=0)
return V
# else we have a taste shock
maxV = np.max(Vals, axis=0)
    # calculate maxV + sigma*log(sum_{i=1}^J exp((V[i]-maxV)/sigma))
sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0)
LogSumV = np.log(sumexp)
LogSumV = maxV + sigma*LogSumV
return LogSumV
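
# For reference (standard T1EV/logit algebra, restated from the code above):
# with taste shocks of scale sigma, the inclusive value and choice
# probabilities have the closed forms
#     V   = sigma * log( sum_j exp(V_j / sigma) )
#     P_j = exp(V_j / sigma) / sum_k exp(V_k / sigma) = exp((V_j - V)/sigma)
# The functions above subtract max_j V_j before exponentiating, which leaves
# the results unchanged but avoids numerical overflow.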
def main():
print("Sorry, HARK.interpolation doesn't actually do much on its own.")
print("To see some examples of its interpolation methods in action, look at any")
print("of the model modules in /ConsumptionSavingModel. In the future, running")
print("this module will show examples of each interpolation class.")
    # time.clock was removed in Python 3.8; perf_counter is its replacement
    from time import perf_counter as clock
import matplotlib.pyplot as plt
RNG = np.random.RandomState(123)
if False:
x = np.linspace(1,20,39)
y = np.log(x)
dydx = 1.0/x
f = CubicInterp(x,y,dydx)
x_test = np.linspace(0,30,200)
y_test = f(x_test)
plt.plot(x_test,y_test)
plt.show()
if False:
f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0
dfdx = lambda x,y : 6.0*x + y
dfdy = lambda x,y : x + 8.0*y
y_list = np.linspace(0,5,100,dtype=float)
xInterpolators = []
xInterpolators_alt = []
for y in y_list:
this_x_list = np.sort((RNG.rand(100)*5.0))
this_interpolation = LinearInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size)))
that_interpolation = CubicInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size)),dfdx(this_x_list,y*np.ones(this_x_list.size)))
xInterpolators.append(this_interpolation)
xInterpolators_alt.append(that_interpolation)
g = LinearInterpOnInterp1D(xInterpolators,y_list)
h = LinearInterpOnInterp1D(xInterpolators_alt,y_list)
rand_x = RNG.rand(100)*5.0
rand_y = RNG.rand(100)*5.0
z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)
q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)
r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)
#print(z)
#print(q)
#print(r)
        # Repeat the accuracy check for the cubic-spline-based interpolator h,
        # which is constructed above but was never exercised.
        z = (f(rand_x,rand_y) - h(rand_x,rand_y))/f(rand_x,rand_y)
        q = (dfdx(rand_x,rand_y) - h.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)
        r = (dfdy(rand_x,rand_y) - h.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)
        print(z)
        #print(q)
        #print(r)
if False:
f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z
dfdx = lambda x,y,z : 6.0*x + y + 1.5*z
dfdy = lambda x,y,z : x + 8.0*y
dfdz = lambda x,y,z : -10.0*z + 1.5*x
y_list = np.linspace(0,5,51,dtype=float)
z_list = np.linspace(0,5,51,dtype=float)
xInterpolators = []
for y in y_list:
temp = []
for z in z_list:
this_x_list = np.sort((RNG.rand(100)*5.0))
this_interpolation = LinearInterp(this_x_list,f(this_x_list,y*np.ones(this_x_list.size),z*np.ones(this_x_list.size)))
temp.append(this_interpolation)
xInterpolators.append(deepcopy(temp))
g = BilinearInterpOnInterp1D(xInterpolators,y_list,z_list)
rand_x = RNG.rand(1000)*5.0
rand_y = RNG.rand(1000)*5.0
rand_z = RNG.rand(1000)*5.0
z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)
q = (dfdx(rand_x,rand_y,rand_z) - g.derivativeX(rand_x,rand_y,rand_z))/dfdx(rand_x,rand_y,rand_z)
r = (dfdy(rand_x,rand_y,rand_z) - g.derivativeY(rand_x,rand_y,rand_z))/dfdy(rand_x,rand_y,rand_z)
p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)
z.sort()
if False:
f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w
dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0
dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0
dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0
dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7
x_list = np.linspace(0,5,16,dtype=float)
y_list = np.linspace(0,5,16,dtype=float)
z_list = np.linspace(0,5,16,dtype=float)
wInterpolators = []
for x in x_list:
temp = []
for y in y_list:
temptemp = []
for z in z_list:
this_w_list = np.sort((RNG.rand(16)*5.0))
this_interpolation = LinearInterp(this_w_list,f(this_w_list,x*np.ones(this_w_list.size),y*np.ones(this_w_list.size),z*np.ones(this_w_list.size)))
temptemp.append(this_interpolation)
temp.append(deepcopy(temptemp))
wInterpolators.append(deepcopy(temp))
g = TrilinearInterpOnInterp1D(wInterpolators,x_list,y_list,z_list)
N = 20000
rand_w = RNG.rand(N)*5.0
rand_x = RNG.rand(N)*5.0
rand_y = RNG.rand(N)*5.0
rand_z = RNG.rand(N)*5.0
t_start = clock()
z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)
q = (dfdw(rand_w,rand_x,rand_y,rand_z) - g.derivativeW(rand_w,rand_x,rand_y,rand_z))/dfdw(rand_w,rand_x,rand_y,rand_z)
r = (dfdx(rand_w,rand_x,rand_y,rand_z) - g.derivativeX(rand_w,rand_x,rand_y,rand_z))/dfdx(rand_w,rand_x,rand_y,rand_z)
p = (dfdy(rand_w,rand_x,rand_y,rand_z) - g.derivativeY(rand_w,rand_x,rand_y,rand_z))/dfdy(rand_w,rand_x,rand_y,rand_z)
s = (dfdz(rand_w,rand_x,rand_y,rand_z) - g.derivativeZ(rand_w,rand_x,rand_y,rand_z))/dfdz(rand_w,rand_x,rand_y,rand_z)
t_end = clock()
z.sort()
print(z)
print(t_end-t_start)
if False:
f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0
dfdx = lambda x,y : 6.0*x + y
dfdy = lambda x,y : x + 8.0*y
x_list = np.linspace(0,5,101,dtype=float)
y_list = np.linspace(0,5,101,dtype=float)
x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')
g = BilinearInterp(f(x_temp,y_temp),x_list,y_list)
rand_x = RNG.rand(100)*5.0
rand_y = RNG.rand(100)*5.0
z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)
q = (f(x_temp,y_temp) - g(x_temp,y_temp))/f(x_temp,y_temp)
#print(z)
#print(q)
if False:
f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z
dfdx = lambda x,y,z : 6.0*x + y + 1.5*z
dfdy = lambda x,y,z : x + 8.0*y
dfdz = lambda x,y,z : -10.0*z + 1.5*x
x_list = np.linspace(0,5,11,dtype=float)
y_list = np.linspace(0,5,11,dtype=float)
z_list = np.linspace(0,5,101,dtype=float)
x_temp,y_temp,z_temp = np.meshgrid(x_list,y_list,z_list,indexing='ij')
g = TrilinearInterp(f(x_temp,y_temp,z_temp),x_list,y_list,z_list)
rand_x = RNG.rand(1000)*5.0
rand_y = RNG.rand(1000)*5.0
rand_z = RNG.rand(1000)*5.0
z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)
q = (dfdx(rand_x,rand_y,rand_z) - g.derivativeX(rand_x,rand_y,rand_z))/dfdx(rand_x,rand_y,rand_z)
r = (dfdy(rand_x,rand_y,rand_z) - g.derivativeY(rand_x,rand_y,rand_z))/dfdy(rand_x,rand_y,rand_z)
p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)
p.sort()
plt.plot(p)
if False:
f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w
dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0
dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0
dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0
dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7
w_list = np.linspace(0,5,16,dtype=float)
x_list = np.linspace(0,5,16,dtype=float)
y_list = np.linspace(0,5,16,dtype=float)
z_list = np.linspace(0,5,16,dtype=float)
w_temp,x_temp,y_temp,z_temp = np.meshgrid(w_list,x_list,y_list,z_list,indexing='ij')
mySearch = lambda trash,x : np.floor(x/5*32).astype(int)
g = QuadlinearInterp(f(w_temp,x_temp,y_temp,z_temp),w_list,x_list,y_list,z_list)
N = 1000000
rand_w = RNG.rand(N)*5.0
rand_x = RNG.rand(N)*5.0
rand_y = RNG.rand(N)*5.0
rand_z = RNG.rand(N)*5.0
t_start = clock()
z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)
t_end = clock()
#print(z)
print(t_end-t_start)
if False:
f = lambda x,y : 3.0*x**2.0 + x*y + 4.0*y**2.0
dfdx = lambda x,y : 6.0*x + y
dfdy = lambda x,y : x + 8.0*y
warp_factor = 0.01
x_list = np.linspace(0,5,71,dtype=float)
y_list = np.linspace(0,5,51,dtype=float)
x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')
x_adj = x_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)
y_adj = y_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)
g = Curvilinear2DInterp(f(x_adj,y_adj),x_adj,y_adj)
rand_x = RNG.rand(1000)*5.0
rand_y = RNG.rand(1000)*5.0
t_start = clock()
z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y)
q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y)
r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y)
t_end = clock()
z.sort()
q.sort()
r.sort()
#print(z)
print(t_end-t_start)
if False:
f = lambda x,y,z : 3.0*x**2.0 + x*y + 4.0*y**2.0 - 5*z**2.0 + 1.5*x*z
dfdx = lambda x,y,z : 6.0*x + y + 1.5*z
dfdy = lambda x,y,z : x + 8.0*y
dfdz = lambda x,y,z : -10.0*z + 1.5*x
warp_factor = 0.01
x_list = np.linspace(0,5,11,dtype=float)
y_list = np.linspace(0,5,11,dtype=float)
z_list = np.linspace(0,5,101,dtype=float)
x_temp,y_temp = np.meshgrid(x_list,y_list,indexing='ij')
xyInterpolators = []
for j in range(z_list.size):
x_adj = x_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)
y_adj = y_temp + warp_factor*(RNG.rand(x_list.size,y_list.size) - 0.5)
z_temp = z_list[j]*np.ones(x_adj.shape)
thisInterp = Curvilinear2DInterp(f(x_adj,y_adj,z_temp),x_adj,y_adj)
xyInterpolators.append(thisInterp)
g = LinearInterpOnInterp2D(xyInterpolators,z_list)
N = 1000
rand_x = RNG.rand(N)*5.0
rand_y = RNG.rand(N)*5.0
rand_z = RNG.rand(N)*5.0
z = (f(rand_x,rand_y,rand_z) - g(rand_x,rand_y,rand_z))/f(rand_x,rand_y,rand_z)
p = (dfdz(rand_x,rand_y,rand_z) - g.derivativeZ(rand_x,rand_y,rand_z))/dfdz(rand_x,rand_y,rand_z)
p.sort()
plt.plot(p)
if False:
f = lambda w,x,y,z : 4.0*w*z - 2.5*w*x + w*y + 6.0*x*y - 10.0*x*z + 3.0*y*z - 7.0*z + 4.0*x + 2.0*y - 5.0*w
dfdw = lambda w,x,y,z : 4.0*z - 2.5*x + y - 5.0
dfdx = lambda w,x,y,z : -2.5*w + 6.0*y - 10.0*z + 4.0
dfdy = lambda w,x,y,z : w + 6.0*x + 3.0*z + 2.0
dfdz = lambda w,x,y,z : 4.0*w - 10.0*x + 3.0*y - 7
warp_factor = 0.1
w_list = np.linspace(0,5,16,dtype=float)
x_list = np.linspace(0,5,16,dtype=float)
y_list = np.linspace(0,5,16,dtype=float)
z_list = np.linspace(0,5,16,dtype=float)
w_temp,x_temp = np.meshgrid(w_list,x_list,indexing='ij')
wxInterpolators = []
for i in range(y_list.size):
temp = []
for j in range(z_list.size):
w_adj = w_temp + warp_factor*(RNG.rand(w_list.size,x_list.size) - 0.5)
x_adj = x_temp + warp_factor*(RNG.rand(w_list.size,x_list.size) - 0.5)
y_temp = y_list[i]*np.ones(w_adj.shape)
z_temp = z_list[j]*np.ones(w_adj.shape)
thisInterp = Curvilinear2DInterp(f(w_adj,x_adj,y_temp,z_temp),w_adj,x_adj)
temp.append(thisInterp)
wxInterpolators.append(temp)
g = BilinearInterpOnInterp2D(wxInterpolators,y_list,z_list)
N = 1000000
rand_w = RNG.rand(N)*5.0
rand_x = RNG.rand(N)*5.0
rand_y = RNG.rand(N)*5.0
rand_z = RNG.rand(N)*5.0
t_start = clock()
z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z)
t_end = clock()
z.sort()
print(z)
print(t_end-t_start)
if __name__ == '__main__':
main()
| 42.357162 | 173 | 0.561754 | ["Apache-2.0"] | cohenimhuji/HARK | HARK/interpolation.py | 159,390 | Python |
from django import forms
from .models import Reclamacao,Login,Comentario
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CadastraReclamacaoForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(CadastraReclamacaoForm,self).__init__(*args, **kwargs)
self.fields['titulo'].required = True
self.fields['bairro'].required = True
self.fields['rua'].required = True
self.fields['descricao'].required = True
self.fields['foto'].required = False
class Meta:
model = Reclamacao
fields = ('titulo','bairro','rua','descricao', 'foto',)
class LoginUsuarioForm(forms.ModelForm):
class Meta:
model = Login
fields = ('username','password',)
widgets = {
'password': forms.PasswordInput(),
}
class SignUpForm(UserCreationForm):
cpf = forms.CharField(max_length=11, required=True)
bairro = forms.CharField(max_length=30, required=True)
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username', 'cpf', 'bairro', 'email', 'password1', 'password2', )
#class CadastraForum(forms.ModelForm):
# class Meta:
# model = Forum
# fields = ('text',)
class RegistroDeComentarioForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(RegistroDeComentarioForm,self).__init__(*args, **kwargs)
self.fields['text1'].required = True
class Meta:
model = Comentario
        fields = ('text1',)
| 33.040816 | 97 | 0.657196 | ["MIT"] | WesleyVitor/ReclamaCaico | Application/ReclamaCaicoProject/ReclamaCaicoApp/forms.py | 1,619 | Python |
# coding: utf-8
from django.conf.urls import url
from api_v1 import views
urlpatterns = [
url(r'^register/$', views.register),
url(r'^login/$', views.login),
url(r'^images/$', views.images),
url(r'^reccomend/$', views.reccomend),
url(r'^user_post/$', views.get_user_post),
]
# API viewer (for debugging)
from rest_framework import routers
from .views import UserViewSet, TokenViewSet, ImageViewSet, PostViewSet, FavoriteViewSet
router = routers.DefaultRouter()
router.register(r'user', UserViewSet)
router.register(r'token', TokenViewSet)
router.register(r'image', ImageViewSet)
router.register(r'post', PostViewSet)
# router.register(r'favorite', FavoriteViewSet)
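# Note (assumption, not shown in this file): for the browsable API above to
# be reachable, the router's URLs must be included somewhere, e.g.:
#
#     from django.conf.urls import include
#     urlpatterns += [url(r'^debug/', include(router.urls))]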
| 30.909091 | 88 | 0.738235 | ["MIT"] | Utree/TRAGRAM | server/project/api_v1/urls.py | 682 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6320, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ResponseBase(msrest.serialization.Model):
"""ResponseBase.
:param type:
:type type: str
"""
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResponseBase, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
class Identifiable(ResponseBase):
"""Defines the identity of a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Identifiable, self).__init__(**kwargs)
self.id = None
class Response(Identifiable):
"""Defines a response. All schemas that could be returned at the root of a response should inherit from this.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Response, self).__init__(**kwargs)
self.web_search_url = None
class Answer(Response):
"""Answer.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
}
def __init__(
self,
**kwargs
):
super(Answer, self).__init__(**kwargs)
self.follow_up_queries = None
class Thing(Response):
"""Thing.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Thing, self).__init__(**kwargs)
self.name = None
self.url = None
self.image = None
self.description = None
self.bing_id = None
class CreativeWork(Thing):
"""CreativeWork.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CreativeWork, self).__init__(**kwargs)
self.thumbnail_url = None
self.provider = None
self.text = None
class Article(CreativeWork):
"""Article.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
:ivar word_count: The number of words in the text of the Article.
:vartype word_count: int
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'word_count': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'word_count': {'key': 'wordCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Article, self).__init__(**kwargs)
self.word_count = None
class Computation(Answer):
"""Defines an expression and its answer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:param expression: Required. The math or conversion expression. If the query contains a request
to convert units of measure (for example, meters to feet), this field contains the from units
and value contains the to units. If the query contains a mathematical expression such as 2+2,
this field contains the expression and value contains the answer. Note that mathematical
expressions may be normalized. For example, if the query was sqrt(4^2+8^2), the normalized
expression may be sqrt((4^2)+(8^2)). If the user's query is a math question and the
textDecorations query parameter is set to true, the expression string may include formatting
markers. For example, if the user's query is log(2), the normalized expression includes the
subscript markers. For more information, see Hit Highlighting.
:type expression: str
:param value: Required. The expression's answer.
:type value: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'expression': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'expression': {'key': 'expression', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Computation, self).__init__(**kwargs)
self.expression = kwargs['expression']
self.value = kwargs['value']
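
# Illustrative example (the values below are assumptions, not actual service
# output; serialize() is the standard msrest Model method):
#
#     comp = Computation(expression="sqrt((4^2)+(8^2))", value="8.94427191")
#     comp.serialize()  # wire-format dict with keys taken from _attribute_map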
class Error(msrest.serialization.Model):
"""Defines the error that occurred.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code that identifies the category of error. Possible values
include: "None", "ServerError", "InvalidRequest", "RateLimitExceeded", "InvalidAuthorization",
"InsufficientAuthorization". Default value: "None".
:type code: str or ~web_search_client.models.ErrorCode
:ivar sub_code: The error code that further helps to identify the error. Possible values
include: "UnexpectedError", "ResourceError", "NotImplemented", "ParameterMissing",
"ParameterInvalidValue", "HttpNotAllowed", "Blocked", "AuthorizationMissing",
"AuthorizationRedundancy", "AuthorizationDisabled", "AuthorizationExpired".
:vartype sub_code: str or ~web_search_client.models.ErrorSubCode
:param message: Required. A description of the error.
:type message: str
:ivar more_details: A description that provides additional information about the error.
:vartype more_details: str
:ivar parameter: The parameter in the request that caused the error.
:vartype parameter: str
:ivar value: The parameter's value in the request that was not valid.
:vartype value: str
"""
_validation = {
'code': {'required': True},
'sub_code': {'readonly': True},
'message': {'required': True},
'more_details': {'readonly': True},
'parameter': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'sub_code': {'key': 'subCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'more_details': {'key': 'moreDetails', 'type': 'str'},
'parameter': {'key': 'parameter', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = kwargs.get('code', "None")
self.sub_code = None
self.message = kwargs['message']
self.more_details = None
self.parameter = None
self.value = None
class ErrorResponse(Response):
"""The top-level response that represents a failed request.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:param errors: Required. A list of errors that describe the reasons why the request failed.
:type errors: list[~web_search_client.models.Error]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'errors': {'required': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[Error]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.errors = kwargs['errors']
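
# Illustrative example (assumption: msrest's Model.from_dict is available, as
# in standard msrest-generated clients; the payload below is made up):
#
#     err = ErrorResponse.from_dict(
#         {'errors': [{'code': 'InvalidRequest', 'message': 'Parameter missing'}]})
#     err.errors[0].code  # 'InvalidRequest'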
class MediaObject(CreativeWork):
"""MediaObject.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
:ivar content_url: Original URL to retrieve the source (file) for the media object (e.g the
source URL for the image).
:vartype content_url: str
:ivar host_page_url: URL of the page that hosts the media object.
:vartype host_page_url: str
:ivar width: The width of the source media object, in pixels.
:vartype width: int
:ivar height: The height of the source media object, in pixels.
:vartype height: int
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'content_url': {'readonly': True},
'host_page_url': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'host_page_url': {'key': 'hostPageUrl', 'type': 'str'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(MediaObject, self).__init__(**kwargs)
self.content_url = None
self.host_page_url = None
self.width = None
self.height = None
class ImageObject(MediaObject):
"""Defines an image.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
:ivar content_url: Original URL to retrieve the source (file) for the media object (e.g the
source URL for the image).
:vartype content_url: str
:ivar host_page_url: URL of the page that hosts the media object.
:vartype host_page_url: str
:ivar width: The width of the source media object, in pixels.
:vartype width: int
:ivar height: The height of the source media object, in pixels.
:vartype height: int
:ivar thumbnail: The URL to a thumbnail of the image.
:vartype thumbnail: ~web_search_client.models.ImageObject
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'content_url': {'readonly': True},
'host_page_url': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
'thumbnail': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'host_page_url': {'key': 'hostPageUrl', 'type': 'str'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
'thumbnail': {'key': 'thumbnail', 'type': 'ImageObject'},
}
def __init__(
self,
**kwargs
):
super(ImageObject, self).__init__(**kwargs)
self.thumbnail = None
class SearchResultsAnswer(Answer):
"""SearchResultsAnswer.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SearchResultsAnswer, self).__init__(**kwargs)
self.query_context = None
self.total_estimated_matches = None
self.is_family_friendly = None
class Images(SearchResultsAnswer):
"""Defines an image answer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:ivar next_offset:
:vartype next_offset: int
:param value: Required. A list of image objects that are relevant to the query. If there are no
     results, the list is empty.
:type value: list[~web_search_client.models.ImageObject]
:ivar query_expansions:
:vartype query_expansions: list[~web_search_client.models.Query]
:ivar similar_terms:
:vartype similar_terms: list[~web_search_client.models.Query]
:ivar related_searches:
:vartype related_searches: list[~web_search_client.models.Query]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'next_offset': {'readonly': True},
'value': {'required': True},
'query_expansions': {'readonly': True},
'similar_terms': {'readonly': True},
'related_searches': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'next_offset': {'key': 'nextOffset', 'type': 'int'},
'value': {'key': 'value', 'type': '[ImageObject]'},
'query_expansions': {'key': 'queryExpansions', 'type': '[Query]'},
'similar_terms': {'key': 'similarTerms', 'type': '[Query]'},
'related_searches': {'key': 'relatedSearches', 'type': '[Query]'},
}
def __init__(
self,
**kwargs
):
super(Images, self).__init__(**kwargs)
self.next_offset = None
self.value = kwargs['value']
self.query_expansions = None
self.similar_terms = None
self.related_searches = None
class Intangible(Thing):
"""Intangible.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Intangible, self).__init__(**kwargs)
class News(SearchResultsAnswer):
"""Defines a news answer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. An array of NewsArticle objects that contain information about news
articles that are relevant to the query. If there are no results to return for the request, the
array is empty.
:type value: list[~web_search_client.models.Article]
:ivar location:
:vartype location: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
'location': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[Article]'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(News, self).__init__(**kwargs)
self.value = kwargs['value']
self.location = None
class NewsArticle(Article):
"""Defines a news article.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
:ivar word_count: The number of words in the text of the Article.
:vartype word_count: int
"""
_validation = {
        'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'word_count': {'readonly': True},
}
_attribute_map = {
        'type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'word_count': {'key': 'wordCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(NewsArticle, self).__init__(**kwargs)
class Places(SearchResultsAnswer):
"""Defines a local entity answer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of local entities, such as restaurants or hotels.
:type value: list[~web_search_client.models.Thing]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[Thing]'},
}
def __init__(
self,
**kwargs
):
super(Places, self).__init__(**kwargs)
self.value = kwargs['value']
class Query(msrest.serialization.Model):
"""Defines a search query.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param text: Required. The query string. Use this string as the query term in a new search
request.
:type text: str
:ivar display_text: The display version of the query term. This version of the query term may
contain special characters that highlight the search term found in the query string. The string
contains the highlighting characters only if the query enabled hit highlighting.
:vartype display_text: str
:ivar web_search_url: The URL that takes the user to the Bing search results page for the
    query. Only related search results include this field.
:vartype web_search_url: str
:ivar search_link:
:vartype search_link: str
:ivar thumbnail: Defines an image.
:vartype thumbnail: ~web_search_client.models.ImageObject
"""
_validation = {
'text': {'required': True},
'display_text': {'readonly': True},
'web_search_url': {'readonly': True},
'search_link': {'readonly': True},
'thumbnail': {'readonly': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'display_text': {'key': 'displayText', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'search_link': {'key': 'searchLink', 'type': 'str'},
'thumbnail': {'key': 'thumbnail', 'type': 'ImageObject'},
}
def __init__(
self,
**kwargs
):
super(Query, self).__init__(**kwargs)
self.text = kwargs['text']
self.display_text = None
self.web_search_url = None
self.search_link = None
self.thumbnail = None
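# A minimal usage sketch (values invented for illustration): 'text' is the only
# caller-supplied field; the read-only attributes are populated by the service
# during deserialization.
#
#     follow_up = Query(text="sailing downwind")
#     follow_up.display_text  # None until filled in from a service response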
class QueryContext(msrest.serialization.Model):
"""Defines the query context that Bing used for the request.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param original_query: Required. The query string as specified in the request.
:type original_query: str
:ivar altered_query: The query string used by Bing to perform the query. Bing uses the altered
query string if the original query string contained spelling mistakes. For example, if the
query string is "saling downwind", the altered query string will be "sailing downwind". This
field is included only if the original query string contains a spelling mistake.
:vartype altered_query: str
:ivar alteration_override_query: The query string to use to force Bing to use the original
string. For example, if the query string is "saling downwind", the override query string will
be "+saling downwind". Remember to encode the query string which results in
"%2Bsaling+downwind". This field is included only if the original query string contains a
spelling mistake.
:vartype alteration_override_query: str
:ivar adult_intent: A Boolean value that indicates whether the specified query has adult
intent. The value is true if the query has adult intent; otherwise, false.
:vartype adult_intent: bool
:ivar ask_user_for_location: A Boolean value that indicates whether Bing requires the user's
location to provide accurate results. If you specified the user's location by using the
X-MSEdge-ClientIP and X-Search-Location headers, you can ignore this field. For location aware
queries, such as "today's weather" or "restaurants near me" that need the user's location to
provide accurate results, this field is set to true. For location aware queries that include
the location (for example, "Seattle weather"), this field is set to false. This field is also
set to false for queries that are not location aware, such as "best sellers".
:vartype ask_user_for_location: bool
:ivar is_transactional:
:vartype is_transactional: bool
"""
_validation = {
'original_query': {'required': True},
'altered_query': {'readonly': True},
'alteration_override_query': {'readonly': True},
'adult_intent': {'readonly': True},
'ask_user_for_location': {'readonly': True},
'is_transactional': {'readonly': True},
}
_attribute_map = {
'original_query': {'key': 'originalQuery', 'type': 'str'},
'altered_query': {'key': 'alteredQuery', 'type': 'str'},
'alteration_override_query': {'key': 'alterationOverrideQuery', 'type': 'str'},
'adult_intent': {'key': 'adultIntent', 'type': 'bool'},
'ask_user_for_location': {'key': 'askUserForLocation', 'type': 'bool'},
'is_transactional': {'key': 'isTransactional', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(QueryContext, self).__init__(**kwargs)
self.original_query = kwargs['original_query']
self.altered_query = None
self.alteration_override_query = None
self.adult_intent = None
self.ask_user_for_location = None
self.is_transactional = None
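# Illustrative sketch (values invented, not taken from a real response): for the
# misspelled query described in the docstring above, a deserialized context could
# report the altered forms.
#
#     ctx = QueryContext(original_query="saling downwind")
#     # On a real response the service may additionally populate:
#     #   ctx.altered_query             -> "sailing downwind"
#     #   ctx.alteration_override_query -> "+saling downwind"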
class RankingGroup(msrest.serialization.Model):
"""Defines a search results group, such as mainline.
All required parameters must be populated in order to send to Azure.
:param items: Required. A list of search result items to display in the group.
:type items: list[~web_search_client.models.RankingItem]
"""
_validation = {
'items': {'required': True},
}
_attribute_map = {
'items': {'key': 'items', 'type': '[RankingItem]'},
}
def __init__(
self,
**kwargs
):
super(RankingGroup, self).__init__(**kwargs)
self.items = kwargs['items']
class RankingItem(msrest.serialization.Model):
"""Defines a search result item to display.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param answer_type: Required. The answer that contains the item to display. Use the type to
find the answer in the SearchResponse object. The type is the name of a SearchResponse field.
Possible values include: "WebPages", "Images", "SpellSuggestions", "News", "RelatedSearches",
"Videos", "Computation", "TimeZone".
:type answer_type: str or ~web_search_client.models.AnswerType
    :ivar result_index: A zero-based index of the item in the answer. If the item does not include
this field, display all items in the answer. For example, display all news articles in the News
answer.
:vartype result_index: int
:ivar value: The ID that identifies either an answer to display or an item of an answer to
display. If the ID identifies an answer, display all items of the answer.
:vartype value: ~web_search_client.models.Identifiable
:ivar html_index:
:vartype html_index: int
:ivar textual_index:
:vartype textual_index: int
:ivar screenshot_index:
:vartype screenshot_index: int
"""
_validation = {
'answer_type': {'required': True},
'result_index': {'readonly': True},
'value': {'readonly': True},
'html_index': {'readonly': True},
'textual_index': {'readonly': True},
'screenshot_index': {'readonly': True},
}
_attribute_map = {
'answer_type': {'key': 'answerType', 'type': 'str'},
'result_index': {'key': 'resultIndex', 'type': 'int'},
'value': {'key': 'value', 'type': 'Identifiable'},
'html_index': {'key': 'htmlIndex', 'type': 'int'},
'textual_index': {'key': 'textualIndex', 'type': 'int'},
'screenshot_index': {'key': 'screenshotIndex', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RankingItem, self).__init__(**kwargs)
self.answer_type = kwargs['answer_type']
self.result_index = None
self.value = None
self.html_index = None
self.textual_index = None
self.screenshot_index = None
class RankingResponse(msrest.serialization.Model):
"""Defines where on the search results page content should be placed and in what order.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar pole: The search results that should be afforded the most visible treatment (for example,
displayed above the mainline and sidebar).
:vartype pole: ~web_search_client.models.RankingGroup
:ivar mainline: The search results to display in the mainline.
:vartype mainline: ~web_search_client.models.RankingGroup
:ivar sidebar: The search results to display in the sidebar.
:vartype sidebar: ~web_search_client.models.RankingGroup
"""
_validation = {
'pole': {'readonly': True},
'mainline': {'readonly': True},
'sidebar': {'readonly': True},
}
_attribute_map = {
'pole': {'key': 'pole', 'type': 'RankingGroup'},
'mainline': {'key': 'mainline', 'type': 'RankingGroup'},
'sidebar': {'key': 'sidebar', 'type': 'RankingGroup'},
}
def __init__(
self,
**kwargs
):
super(RankingResponse, self).__init__(**kwargs)
self.pole = None
self.mainline = None
self.sidebar = None
class RelatedSearchesRelatedSearchAnswer(SearchResultsAnswer):
"""Defines a list of related queries made by others.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of related queries that were made by others.
:type value: list[~web_search_client.models.Query]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[Query]'},
}
def __init__(
self,
**kwargs
):
super(RelatedSearchesRelatedSearchAnswer, self).__init__(**kwargs)
self.value = kwargs['value']
class SearchResponse(Response):
"""Defines the top-level object that the response includes when the request succeeds.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar query_context: An object that contains the query string that Bing used for the request.
This object contains the query string as entered by the user. It may also contain an altered
query string that Bing used for the query if the query string contained a spelling mistake.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar web_pages: A list of webpages that are relevant to the search query.
:vartype web_pages: ~web_search_client.models.WebAnswer
:ivar images: A list of images that are relevant to the search query.
:vartype images: ~web_search_client.models.Images
:ivar news: A list of news articles that are relevant to the search query.
:vartype news: ~web_search_client.models.News
:ivar related_searches: A list of related queries made by others.
:vartype related_searches: ~web_search_client.models.RelatedSearchesRelatedSearchAnswer
:ivar spell_suggestions: The query string that likely represents the user's intent.
:vartype spell_suggestions: ~web_search_client.models.SpellSuggestions
:ivar time_zone: The date and time of one or more geographic locations.
:vartype time_zone: ~web_search_client.models.TimeZone
:ivar videos: A list of videos that are relevant to the search query.
:vartype videos: ~web_search_client.models.Videos
:ivar computation: The answer to a math expression or units conversion expression.
:vartype computation: ~web_search_client.models.Computation
:ivar ranking_response: The order that Bing suggests that you display the search results in.
:vartype ranking_response: ~web_search_client.models.RankingResponse
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'query_context': {'readonly': True},
'web_pages': {'readonly': True},
'images': {'readonly': True},
'news': {'readonly': True},
'related_searches': {'readonly': True},
'spell_suggestions': {'readonly': True},
'time_zone': {'readonly': True},
'videos': {'readonly': True},
'computation': {'readonly': True},
'ranking_response': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'web_pages': {'key': 'webPages', 'type': 'WebAnswer'},
'images': {'key': 'images', 'type': 'Images'},
'news': {'key': 'news', 'type': 'News'},
'related_searches': {'key': 'relatedSearches', 'type': 'RelatedSearchesRelatedSearchAnswer'},
'spell_suggestions': {'key': 'spellSuggestions', 'type': 'SpellSuggestions'},
'time_zone': {'key': 'timeZone', 'type': 'TimeZone'},
'videos': {'key': 'videos', 'type': 'Videos'},
'computation': {'key': 'computation', 'type': 'Computation'},
'ranking_response': {'key': 'rankingResponse', 'type': 'RankingResponse'},
}
def __init__(
self,
**kwargs
):
super(SearchResponse, self).__init__(**kwargs)
self.query_context = None
self.web_pages = None
self.images = None
self.news = None
self.related_searches = None
self.spell_suggestions = None
self.time_zone = None
self.videos = None
self.computation = None
self.ranking_response = None
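# A hedged sketch of reading a deserialized SearchResponse; 'client' and its
# 'web.search' call are hypothetical stand-ins for however the response is obtained.
#
#     response = client.web.search(query="polar bears")
#     if response.web_pages is not None:
#         top_hit = response.web_pages.value[0]
#     order = response.ranking_response.mainline.items if response.ranking_response else []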
class SpellSuggestions(SearchResultsAnswer):
"""Defines a suggested query string that likely represents the user's intent. The search results include this response if Bing determines that the user may have intended to search for something different. For example, if the user searches for alon brown, Bing may determine that the user likely intended to search for Alton Brown instead (based on past searches by others of Alon Brown).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of suggested query strings that may represent the user's
intention. The list contains only one Query object.
:type value: list[~web_search_client.models.Query]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[Query]'},
}
def __init__(
self,
**kwargs
):
super(SpellSuggestions, self).__init__(**kwargs)
self.value = kwargs['value']
class StructuredValue(Thing):
"""StructuredValue.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StructuredValue, self).__init__(**kwargs)
class TimeZone(SearchResultsAnswer):
"""Defines the data and time of one or more geographic locations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
    :param primary_city_time: Required. The date and time, in UTC, of the geographic location
    specified in the query. If the query specified a specific geographic location (for example, a
    city), this object contains the name of the geographic location and the current date and time
    of the location, in UTC. If the query specified a general geographic location, such as a state
    or country, this object contains the date and time of the primary city or state found in the
    specified state or country. If the location contains additional time zones, the otherCityTimes
    field contains the date and time of cities or states located in the other time zones.
:type primary_city_time: ~web_search_client.models.TimeZoneInformation
:ivar other_city_times: A list of dates and times of nearby time zones.
:vartype other_city_times: list[~web_search_client.models.TimeZoneInformation]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'primary_city_time': {'required': True},
'other_city_times': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'primary_city_time': {'key': 'primaryCityTime', 'type': 'TimeZoneInformation'},
'other_city_times': {'key': 'otherCityTimes', 'type': '[TimeZoneInformation]'},
}
def __init__(
self,
**kwargs
):
super(TimeZone, self).__init__(**kwargs)
self.primary_city_time = kwargs['primary_city_time']
self.other_city_times = None
class TimeZoneInformation(msrest.serialization.Model):
"""Defines a date and time for a geographical location.
All required parameters must be populated in order to send to Azure.
:param location: Required. The name of the geographical location.For example, County; City;
City, State; City, State, Country; or Time Zone.
:type location: str
    :param time: Required. The date and time, specified in the form YYYY-MM-DDThh:mm:ss.ssssssZ.
:type time: str
:param utc_offset: Required. The offset from UTC. For example, UTC-7.
:type utc_offset: str
"""
_validation = {
'location': {'required': True},
'time': {'required': True},
'utc_offset': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'time': {'key': 'time', 'type': 'str'},
'utc_offset': {'key': 'utcOffset', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TimeZoneInformation, self).__init__(**kwargs)
self.location = kwargs['location']
self.time = kwargs['time']
self.utc_offset = kwargs['utc_offset']
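# Illustrative construction (hypothetical values) covering the three required fields:
#
#     tzi = TimeZoneInformation(
#         location="Seattle, WA",
#         time="2018-04-13T05:23:39.0000000Z",
#         utc_offset="UTC-7",
#     )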
class VideoObject(MediaObject):
"""Defines a video object that is relevant to the query.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
    :ivar content_url: Original URL to retrieve the source (file) for the media object (e.g., the
source URL for the image).
:vartype content_url: str
:ivar host_page_url: URL of the page that hosts the media object.
:vartype host_page_url: str
:ivar width: The width of the source media object, in pixels.
:vartype width: int
:ivar height: The height of the source media object, in pixels.
:vartype height: int
:ivar motion_thumbnail_url:
:vartype motion_thumbnail_url: str
:ivar motion_thumbnail_id:
:vartype motion_thumbnail_id: str
:ivar embed_html:
:vartype embed_html: str
:ivar allow_https_embed:
:vartype allow_https_embed: bool
:ivar view_count:
:vartype view_count: int
:ivar thumbnail: Defines an image.
:vartype thumbnail: ~web_search_client.models.ImageObject
:ivar video_id:
:vartype video_id: str
:ivar allow_mobile_embed:
:vartype allow_mobile_embed: bool
:ivar is_superfresh:
:vartype is_superfresh: bool
"""
_validation = {
        'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'content_url': {'readonly': True},
'host_page_url': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
'motion_thumbnail_url': {'readonly': True},
'motion_thumbnail_id': {'readonly': True},
'embed_html': {'readonly': True},
'allow_https_embed': {'readonly': True},
'view_count': {'readonly': True},
'thumbnail': {'readonly': True},
'video_id': {'readonly': True},
'allow_mobile_embed': {'readonly': True},
'is_superfresh': {'readonly': True},
}
_attribute_map = {
        'type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'host_page_url': {'key': 'hostPageUrl', 'type': 'str'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
'motion_thumbnail_url': {'key': 'motionThumbnailUrl', 'type': 'str'},
'motion_thumbnail_id': {'key': 'motionThumbnailId', 'type': 'str'},
'embed_html': {'key': 'embedHtml', 'type': 'str'},
'allow_https_embed': {'key': 'allowHttpsEmbed', 'type': 'bool'},
'view_count': {'key': 'viewCount', 'type': 'int'},
'thumbnail': {'key': 'thumbnail', 'type': 'ImageObject'},
'video_id': {'key': 'videoId', 'type': 'str'},
'allow_mobile_embed': {'key': 'allowMobileEmbed', 'type': 'bool'},
'is_superfresh': {'key': 'isSuperfresh', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VideoObject, self).__init__(**kwargs)
self.motion_thumbnail_url = None
self.motion_thumbnail_id = None
self.embed_html = None
self.allow_https_embed = None
self.view_count = None
self.thumbnail = None
self.video_id = None
self.allow_mobile_embed = None
self.is_superfresh = None
class Videos(SearchResultsAnswer):
"""Defines a video answer.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of video objects that are relevant to the query.
:type value: list[~web_search_client.models.VideoObject]
:ivar next_offset:
:vartype next_offset: int
:ivar query_expansions:
:vartype query_expansions: list[~web_search_client.models.Query]
:ivar related_searches:
:vartype related_searches: list[~web_search_client.models.Query]
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
'next_offset': {'readonly': True},
'query_expansions': {'readonly': True},
'related_searches': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[VideoObject]'},
'next_offset': {'key': 'nextOffset', 'type': 'int'},
'query_expansions': {'key': 'queryExpansions', 'type': '[Query]'},
'related_searches': {'key': 'relatedSearches', 'type': '[Query]'},
}
def __init__(
self,
**kwargs
):
super(Videos, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_offset = None
self.query_expansions = None
self.related_searches = None
class WebAnswer(SearchResultsAnswer):
"""Defines a list of relevant webpage links.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries: list[~web_search_client.models.Query]
:ivar query_context: Defines the query context that Bing used for the request.
:vartype query_context: ~web_search_client.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are relevant to the query.
Use this number along with the count and offset query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: Required. A list of webpages that are relevant to the query.
:type value: list[~web_search_client.models.WebPage]
:ivar some_results_removed: A Boolean value that indicates whether the response excluded some
results from the answer. If Bing excluded some results, the value is true.
:vartype some_results_removed: bool
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
'some_results_removed': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[WebPage]'},
'some_results_removed': {'key': 'someResultsRemoved', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(WebAnswer, self).__init__(**kwargs)
self.value = kwargs['value']
self.some_results_removed = None
class WebGrouping(msrest.serialization.Model):
"""WebGrouping.
    You probably want to use the sub-classes and not this class directly.
All required parameters must be populated in order to send to Azure.
:param web_pages: Required.
:type web_pages: list[~web_search_client.models.WebPage]
:param type: Required. Constant filled by server.
:type type: str
"""
_validation = {
'web_pages': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'web_pages': {'key': 'webPages', 'type': '[WebPage]'},
'type': {'key': '_type', 'type': 'str'},
}
_subtype_map = {
'type': {}
}
def __init__(
self,
**kwargs
):
super(WebGrouping, self).__init__(**kwargs)
self.web_pages = kwargs['web_pages']
self.type = None # type: Optional[str]
class WebMetaTag(msrest.serialization.Model):
"""Defines a webpage's metadata.
Variables are only populated by the server, and will be ignored when sending a request.
    :ivar name: The name of the metadata.
    :vartype name: str
    :ivar content: The metadata value.
    :vartype content: str
"""
_validation = {
'name': {'readonly': True},
'content': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WebMetaTag, self).__init__(**kwargs)
self.name = None
self.content = None
class WebPage(CreativeWork):
"""Defines a webpage that is relevant to the query.
Variables are only populated by the server, and will be ignored when sending a request.
:param type:
:type type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by this object.
:vartype url: str
:ivar image: Defines an image.
:vartype image: ~web_search_client.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider: list[~web_search_client.models.Thing]
:ivar text:
:vartype text: str
:ivar display_url: The display URL of the webpage. The URL is meant for display purposes only
and is not well formed.
:vartype display_url: str
:ivar snippet: A snippet of text from the webpage that describes its contents.
:vartype snippet: str
:ivar deep_links: A list of links to related content that Bing found in the website that
contains this webpage. The Webpage object in this context includes only the name, url,
urlPingSuffix, and snippet fields.
:vartype deep_links: list[~web_search_client.models.WebPage]
:ivar date_last_crawled: The last time that Bing crawled the webpage. The date is in the form,
YYYY-MM-DDTHH:MM:SS. For example, 2015-04-13T05:23:39.
:vartype date_last_crawled: str
:ivar search_tags: A list of search tags that the webpage owner specified on the webpage. The
API returns only indexed search tags. The name field of the MetaTag object contains the indexed
search tag. Search tags begin with search.* (for example, search.assetId). The content field
contains the tag's value.
:vartype search_tags: list[~web_search_client.models.WebMetaTag]
:ivar primary_image_of_page: Defines an image.
:vartype primary_image_of_page: ~web_search_client.models.ImageObject
"""
_validation = {
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'display_url': {'readonly': True},
'snippet': {'readonly': True},
'deep_links': {'readonly': True},
'date_last_crawled': {'readonly': True},
'search_tags': {'readonly': True},
'primary_image_of_page': {'readonly': True},
}
_attribute_map = {
'type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'display_url': {'key': 'displayUrl', 'type': 'str'},
'snippet': {'key': 'snippet', 'type': 'str'},
'deep_links': {'key': 'deepLinks', 'type': '[WebPage]'},
'date_last_crawled': {'key': 'dateLastCrawled', 'type': 'str'},
'search_tags': {'key': 'searchTags', 'type': '[WebMetaTag]'},
'primary_image_of_page': {'key': 'primaryImageOfPage', 'type': 'ImageObject'},
}
def __init__(
self,
**kwargs
):
super(WebPage, self).__init__(**kwargs)
self.display_url = None
self.snippet = None
self.deep_links = None
self.date_last_crawled = None
self.search_tags = None
self.primary_image_of_page = None
| 38.298134 | 391 | 0.623508 | ["MIT"] | EricLiclair/bing-search-sdk-for-python | sdk/WebSearch/web_search_client/models/_models.py | 77,975 | Python |
from cudf._lib.nvtext.edit_distance import edit_distance, edit_distance_matrix
from cudf._lib.nvtext.generate_ngrams import (
generate_character_ngrams,
generate_ngrams,
)
from cudf._lib.nvtext.ngrams_tokenize import ngrams_tokenize
from cudf._lib.nvtext.normalize import normalize_characters, normalize_spaces
from cudf._lib.nvtext.replace import filter_tokens, replace_tokens
from cudf._lib.nvtext.stemmer import (
LetterType,
is_letter,
is_letter_multi,
porter_stemmer_measure,
)
from cudf._lib.nvtext.tokenize import (
_count_tokens_column,
_count_tokens_scalar,
_tokenize_column,
_tokenize_scalar,
character_tokenize,
detokenize,
)
from cudf._lib.strings.attributes import (
code_points,
count_bytes,
count_characters,
)
from cudf._lib.strings.capitalize import capitalize, is_title, title
from cudf._lib.strings.case import swapcase, to_lower, to_upper
from cudf._lib.strings.char_types import (
filter_alphanum,
is_alnum,
is_alpha,
is_decimal,
is_digit,
is_lower,
is_numeric,
is_space,
is_upper,
)
from cudf._lib.strings.combine import (
concatenate,
join,
join_lists_with_column,
join_lists_with_scalar,
)
from cudf._lib.strings.contains import contains_re, count_re, match_re
from cudf._lib.strings.convert.convert_fixed_point import to_decimal
from cudf._lib.strings.convert.convert_floats import is_float
from cudf._lib.strings.convert.convert_integers import is_integer
from cudf._lib.strings.convert.convert_urls import url_decode, url_encode
from cudf._lib.strings.extract import extract
from cudf._lib.strings.find import (
contains,
contains_multiple,
endswith,
endswith_multiple,
find,
rfind,
startswith,
startswith_multiple,
)
from cudf._lib.strings.findall import findall
from cudf._lib.strings.json import get_json_object
from cudf._lib.strings.padding import PadSide, center, ljust, pad, rjust, zfill
from cudf._lib.strings.repeat import repeat_scalar, repeat_sequence
from cudf._lib.strings.replace import (
insert,
replace,
replace_multi,
slice_replace,
)
from cudf._lib.strings.replace_re import (
replace_multi_re,
replace_re,
replace_with_backrefs,
)
from cudf._lib.strings.split.partition import partition, rpartition
from cudf._lib.strings.split.split import (
rsplit,
rsplit_record,
split,
split_record,
)
from cudf._lib.strings.strip import lstrip, rstrip, strip
from cudf._lib.strings.substring import get, slice_from, slice_strings
from cudf._lib.strings.translate import filter_characters, translate
from cudf._lib.strings.wrap import wrap
| 29.898876 | 79 | 0.782413 | ["Apache-2.0"] | HaoYang670/cudf | python/cudf/cudf/_lib/strings/__init__.py | 2,661 | Python |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPytestRunner(PythonPackage):
"""Invoke py.test as distutils command with dependency resolution."""
homepage = "https://github.com/pytest-dev/pytest-runner"
url = "https://pypi.io/packages/source/p/pytest-runner/pytest-runner-5.1.tar.gz"
version('5.1', sha256='25a013c8d84f0ca60bb01bd11913a3bcab420f601f0f236de4423074af656e7a')
version('2.11.1', sha256='983a31eab45e375240e250161a556163bc8d250edaba97960909338c273a89b3')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
| 39.05 | 96 | 0.75032 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | JoshuaSBrown/spack | var/spack/repos/builtin/packages/py-pytest-runner/package.py | 781 | Python |
"""
OpenSpace
Copyright (c) 2014-2018
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This script traverses the file tree of OpenSpace and will check all files' include
guards for correctness. At the moment this includes:
* Correctness (file has a #ifndef. #define, and #endif lines)
* Equality (using the same name for the #ifdef and #define)
* Styling
* no empty line between #ifndef and #define lines
* Empty lines before and after #ifndef #define block
* Files end with an empty line
* Copyright header is correctly indented
* Include guard correctly uses the filename
* Include guard is all upper case
* Correct usage of the name in the final comment of the file
* Correct year of copyright notice
* Naming convention
* OpenSpace include guards start with OPENSPACE, Ghoul with GHOUL,
   module includes have the module name in them
* The correct submodule is used
* Checking for duplicates between all files
* Checking that no file includes glm header directly
 * Checking whether any file starts with the UTF-8 byte-order mark
 * Checking whether a file has empty-only lines
* Checking whether the default assert macros are used anywhere instead of the
ghoul_assert macro
* Checking whether there are TABs in the file
If this script is executed from the base directory of OpenSpace, no arguments need to
be passed; otherwise, the first and only argument has to point to the base directory.
Thus, the default value of the first argument is '.'
"""
import fnmatch
import glob
import os
import re
import sys
current_year = '2018'
is_strict_mode = False
is_silent_mode = False
def get_ifndef_symbol(lines):
index = [i for i,s in enumerate(lines) if '#ifndef ' in s]
if len(index) == 0:
return '', -1
result = re.search('#ifndef (.*)\n', lines[index[0]])
return result.group(1), index[0]
def get_define_symbol(lines):
index = [i for i,s in enumerate(lines) if '#define ' in s]
if len(index) == 0:
return '', -1
result = re.search('#define (.*)\n', lines[index[0]])
return result.group(1), index[0]
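# Both helpers return the guard symbol and its zero-based line index, or ('', -1) when
# the directive is missing. Illustrative example (symbol invented):
#
#     lines = ['#ifndef __OPENSPACE_FOO___BAR___H__\n',
#              '#define __OPENSPACE_FOO___BAR___H__\n']
#     get_ifndef_symbol(lines)  # -> ('__OPENSPACE_FOO___BAR___H__', 0)
#     get_define_symbol(lines)  # -> ('__OPENSPACE_FOO___BAR___H__', 1)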
def check_correctness(lines):
ifndef_symbol, line_number = get_ifndef_symbol(lines)
if line_number == -1:
return 'No #ifndef in file'
define_symbol, line_number = get_define_symbol(lines)
    if line_number == -1:
return 'No #define in file'
index = [i for i,s in enumerate(lines) if '#endif' in s]
if len(index) == 0:
return 'No #endif in file'
return ''
def check_equality(lines):
ifndef, _ = get_ifndef_symbol(lines)
define, _ = get_define_symbol(lines)
if ifndef == define:
return ''
else:
return ifndef + ' ' + define
def check_styling(lines):
ifndef_symbol, ifndef_line = get_ifndef_symbol(lines)
_, define_line = get_define_symbol(lines)
if abs(ifndef_line - define_line) != 1:
return '#ifndef and #define lines are not subsequent'
if lines[ifndef_line - 1].strip() != '':
return 'Preceding line is not empty'
if lines[define_line + 1].strip() != '':
return 'Following line is not empty'
if not lines[-1][-1] in ['\n', '\r']:
return 'Last line must end with a newline'
for l in lines[2:23]:
if l[0] != ' ':
return 'Copyright header must be indented'
if ifndef_symbol != ifndef_symbol.upper():
return 'Include guard is not all upper case'
return ''
def check_styling_filename(lines, filename):
ifndef_symbol, _ = get_ifndef_symbol(lines)
file = os.path.splitext(os.path.basename(filename))[0].upper()
    if not (file in ifndef_symbol or file in ifndef_symbol.replace('_', '')):
        return 'Malformed include guard: ' + ifndef_symbol + ' || ' + file
    # Return the empty string on success, matching the other check_* functions
    return ''
def check_comment(lines):
ifndef_symbol, _ = get_ifndef_symbol(lines)
index = [i for i,s in enumerate(lines) if '#endif' in s]
endif_line = lines[index[-1]].strip()
if endif_line != '#endif // ' + ifndef_symbol:
print(ifndef_symbol)
print(endif_line)
return '#endif line is not correctly formatted'
else:
return ''
def check_copyright(lines):
index = [i for i,s in enumerate(lines[0:23]) if 'Copyright' in s]
if len(index) == 0:
return 'No copyright header found'
    # Both Ghoul (' * Copyright (c) 2012-') and OpenSpace (' * Copyright (c) 2014-')
    # headers share the same prefix length, which is all that matters for extracting
    # the four-digit year that follows
    beginning_string = ' * Copyright (c) 2012-'
    year = lines[index[0]][len(beginning_string) : len(beginning_string) + 4]
if lines[index[0] + 1][0] != ' ':
return 'Copyright header is not correctly indented'
if year != current_year:
return 'Out of date copyright notice ' + year + ' || ' + current_year
return ''
def check_byte_order_mark_character(lines):
    # Files are opened with encoding='utf8', so a leading UTF-8 byte order mark is
    # decoded to the single character U+FEFF; 'ï' is kept as well to catch the raw
    # BOM bytes surviving a mojibake round-trip
    if lines[0][0] in ('\ufeff', 'ï'):
        return 'File contains UTF-8 byte order mark character'
    return ''
def check_naming_convention_component(lines, component):
ifndef_symbol, _ = get_ifndef_symbol(lines)
component_part = ifndef_symbol[2:2 + len(component)]
if component_part != component.upper():
return '#ifndef naming convention broken: ' + ifndef_symbol + ' || ' + component.upper()
else:
return ''
def check_naming_convention_subcomponent(lines, component, file):
ifndef_symbol, _ = get_ifndef_symbol(lines)
if component == "ghoul" or component == "openspace_core":
        return ''  # these components have no submodule naming requirement
subcomponent_part = ifndef_symbol[2 + len(component) + 1 :]
subcomponent_part = subcomponent_part[: subcomponent_part.find('_')]
path_part = file.split('/')[1]
second_path_part = file.split('/')[2]
if (path_part.upper() != subcomponent_part) and (second_path_part.upper() != subcomponent_part):
return 'Subcomponent naming convention broken: ' + ifndef_symbol
else:
return ''
def check_duplicates(lines, previousSymbols):
ifndef_symbol, _ = get_ifndef_symbol(lines)
if ifndef_symbol in previousSymbols:
return False, ifndef_symbol
else:
return True, ifndef_symbol
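# The caller threads a dict of previously seen symbols through check_duplicates; a
# sketch of the expected flow (symbols and filenames invented):
#
#     seen = {}
#     ok, sym = check_duplicates(lines_a, seen)  # -> (True, '__FOO_H__')
#     seen[sym] = 'a.h'
#     ok, sym = check_duplicates(lines_b, seen)  # ok is False if b.h reuses '__FOO_H__'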
def check_glm_header(lines, file):
Allowed_Files = [
'ghoul/glm.h'
]
for f in Allowed_Files:
if f in file.replace('\\', '/'):
return ''
index = [i for i,s in enumerate(lines)
if '#include <glm/glm.hpp>' in s or
'#include "glm/glm.hpp>"' in s]
if len(index) > 0:
return 'File used wrong glm include. Use "#include <ghoul/glm.h>" instead'
else:
return ''
def check_core_dependency(lines, component):
if component != "openspace_core":
return ''
index = [i for i,s in enumerate(lines) if 'OPENSPACE_MODULE_' in s]
if len(index) > 0:
return lines[index[0]][:-1]
else:
return ''
def check_using_namespace(lines):
index = [i for i,s in enumerate(lines) if "using namespace" in s.strip()]
if len(index) > 0:
return lines[index[0]]
else:
return ''
def check_end_of_line(lines):
if lines[-1][-1] != '\n':
return lines[-1][-1]
else:
return ''
def check_empty_only_line(lines):
# Disable this check in non-strict mode
if not is_strict_mode:
return ''
index = [i + 1 for i, s in enumerate(lines) if s.translate({ord(c): None for c in '\n\r'}).isspace()]
if len(index) > 0:
return index
else:
return ''
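# Note the translate() call strips newline characters first, so a line consisting of
# just '\n' yields '' (which is not whitespace) and is deliberately not flagged; only
# lines made of spaces/tabs plus the newline are reported.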
def check_assert_usage(lines):
# _assert checks for both ghoul_assert and static_assert, which are both reasonable
index = [i + 1 for i,s in enumerate(lines) if ('assert(' in s and not '_assert(' in s) and s.strip()[0:2] != '//']
if len(index) > 0:
return index
else:
        return ''
def check_line_length(lines):
# Disable this check in non-strict mode
if not is_strict_mode:
return ''
index = [i + 1 for i, s in enumerate(lines) if len(s) > (90 + 1)]
if len(index) > 0:
return index
else:
return ''
def check_empty_character_at_end(lines):
# Disable this check in non-strict mode
if not is_strict_mode:
return ''
index = [i + 1 for i, s in enumerate(lines) if len(s) > 1 and s[-2] == ' ' and not s.strip() == '']
if len(index) > 0:
return index
else:
return ''
def check_for_tab(lines):
index = [i + 1 for i, s in enumerate(lines) if '\t' in s]
if len(index) > 0:
return index
else:
return ''
previousSymbols = {}
def check_header_file(file, component):
    with open(file, 'r', encoding="utf8") as f:
lines = f.readlines()
correctness = check_correctness(lines)
if correctness:
print(file, '\t', 'Correctness check failed', '\t', correctness)
return
equality = check_equality(lines)
if equality:
print(file, '\t', 'Equality check failed', '\t', equality)
return
styling = check_styling(lines)
if styling:
print(file, '\t', 'Styling check failed', '\t', styling)
return
styling_filename = check_styling_filename(lines, file)
if styling_filename:
print(file, '\t', 'Filename styling check failed', '\t', styling_filename)
return
comment = check_comment(lines)
if comment:
print(file, '\t', 'Comment check failed', '\t', comment)
return
copyright = check_copyright(lines)
if copyright:
print(file, '\t', 'Copyright check failed', '\t', copyright)
return
naming_component = check_naming_convention_component(lines, component)
if naming_component:
print(file, '\t', 'Naming convention broken', '\t', naming_component)
return
naming_subcomponent = check_naming_convention_subcomponent(lines, component, file)
if naming_subcomponent:
print(file, '\t', 'Naming convention broken', '\t', naming_subcomponent)
return
end_of_line = check_end_of_line(lines)
if end_of_line:
print(file, '\t', 'Last line does not contain a newline character: ', end_of_line)
return
duplicates, symbol = check_duplicates(lines, previousSymbols)
if not duplicates:
print(file, '\t', 'Duplicate include guard', symbol, 'first in', previousSymbols[symbol])
return
else:
previousSymbols[symbol] = file
header = check_glm_header(lines, file)
if header:
print(file, '\t', 'Illegal glm header include', header)
return
core_dependency = check_core_dependency(lines, component)
if core_dependency:
print(file, '\t', 'Wrong dependency (core depends on module)', core_dependency)
if (not 'ghoul_gl.h' in file):
# ghoul_gl.h is allowed to use 'using namespace' to pull the gl namespace in
using_namespaces = check_using_namespace(lines)
if using_namespaces:
print(file, '\t', 'Using namespace found in header file')
bom = check_byte_order_mark_character(lines)
if bom:
print(file, '\t', 'Byte order mark failed:', bom)
empty_only_lines = check_empty_only_line(lines)
if empty_only_lines:
print(file, '\t', 'Empty only line: ', empty_only_lines)
line_length = check_line_length(lines)
if line_length:
print(file, '\t', 'Line length exceeded: ', line_length)
empty_character_at_end = check_empty_character_at_end(lines)
if empty_character_at_end:
print(file, '\t', 'Empty character at end: ', empty_character_at_end)
assert_usage = check_assert_usage(lines)
if assert_usage:
print(file, '\t', 'Wrong assert usage: ', assert_usage)
tabs = check_for_tab(lines)
if tabs:
print(file, '\t', 'TABs found: ', tabs)
def check_inline_file(file, component):
with open(file, 'r+', encoding="utf8") as f:
lines = f.readlines()
copyright = check_copyright(lines)
if copyright:
print(file, '\t', 'Copyright check failed', '\t', copyright)
header = check_glm_header(lines, file)
if header:
print(file, '\t', 'Illegal glm header include', header)
core_dependency = check_core_dependency(lines, component)
if core_dependency:
print(file, '\t', 'Wrong dependency (core depends on module)', core_dependency)
end_of_line = check_end_of_line(lines)
if end_of_line:
print(file, '\t', 'Last line does not contain a newline character: ', end_of_line)
return
bom = check_byte_order_mark_character(lines)
if bom:
print(file, '\t', 'Byte order mark failed:', bom)
empty_only_lines = check_empty_only_line(lines)
if empty_only_lines:
print(file, '\t', 'Empty only line: ', empty_only_lines)
line_length = check_line_length(lines)
if line_length:
print(file, '\t', 'Line length exceeded: ', line_length)
if (not '_doc.inl' in file):
# The _doc.inl files are allowed to use using namespace as they are inclued
# from the cpp files and thus don't leak it
using_namespaces = check_using_namespace(lines)
if using_namespaces:
print(file, '\t', 'Using namespace found in inline file')
line_length = check_line_length(lines)
if line_length:
print(file, '\t', 'Line length exceeded: ', line_length)
empty_character_at_end = check_empty_character_at_end(lines)
if empty_character_at_end:
print(file, '\t', 'Empty character at end: ', empty_character_at_end)
assert_usage = check_assert_usage(lines)
if assert_usage:
print(file, '\t', 'Wrong assert usage: ', assert_usage)
tabs = check_for_tab(lines)
if tabs:
print(file, '\t', 'TABs found: ', tabs)
def check_source_file(file, component):
with open(file, 'r+', encoding="utf8") as f:
lines = f.readlines()
header = check_glm_header(lines, file)
if header:
print(file, '\t', 'Illegal glm header include', header)
core_dependency = check_core_dependency(lines, component)
if core_dependency:
print(file, '\t' 'Wrong core dependency', core_dependency)
end_of_line = check_end_of_line(lines)
if end_of_line:
print(file, '\t', 'Last line does not contain a newline character: ', end_of_line)
return
copyright = check_copyright(lines)
if copyright:
print(file, '\t', 'Copyright check failed', '\t', copyright)
bom = check_byte_order_mark_character(lines)
if bom:
print(file, '\t', 'Byte order mark failed:', bom)
empty_only_lines = check_empty_only_line(lines)
if empty_only_lines:
print(file, '\t', 'Empty only line: ', empty_only_lines)
line_length = check_line_length(lines)
if line_length:
print(file, '\t', 'Line length exceeded: ', line_length)
empty_character_at_end = check_empty_character_at_end(lines)
if empty_character_at_end:
print(file, '\t', 'Empty character at end: ', empty_character_at_end)
assert_usage = check_assert_usage(lines)
if assert_usage:
print(file, '\t', 'Wrong assert usage: ', assert_usage)
tabs = check_for_tab(lines)
if tabs:
print(file, '\t', 'TABs found: ', tabs)
def check_files(positiveList, negativeList, component, check_function):
files = []
for p in positiveList:
f = glob.glob(p, recursive=True)
f = [fi.replace('\\', '/') for fi in f]
files.extend(f)
negativeFiles = []
for n in negativeList:
f = glob.glob(n, recursive=True)
f = [fi.replace('\\', '/') for fi in f]
negativeFiles.extend(f)
filtered_files = [f for f in files if f not in negativeFiles]
for file in filtered_files:
check_function(file, component)
basePath = './'
if len(sys.argv) > 1:
if sys.argv[1] != "strict":
basePath = sys.argv[1] + '/'
for a in sys.argv:
if a == "strict":
is_strict_mode = True
if a == "silent":
is_silent_mode = True
# Check header files
if not is_silent_mode:
print("Checking header files")
print("=====================")
check_files(
[basePath + 'include/**/*.h'],
[],
'openspace_core',
check_header_file
)
check_files(
[basePath + 'apps/**/*.h'],
[basePath + 'apps/**/ext/**/*.h'],
'openspace_app',
check_header_file
)
check_files(
[basePath + 'modules/**/*.h'],
[
basePath + 'modules/**/ext/**/*.h',
basePath + 'modules/**/node_modules/**/*.h',
basePath + 'modules/webbrowser/resource.h'
],
'openspace_module',
check_header_file
)
check_files(
[basePath + 'ext/ghoul/include/**/*.h'],
[],
'ghoul',
check_header_file
)
if not is_silent_mode:
print("")
print("Checking inline files")
print("=====================")
check_files(
[basePath + 'include/**/*.inl'],
[],
'openspace_core',
check_inline_file
)
check_files(
[basePath + 'src/**/*.inl'],
[],
'openspace_core',
check_inline_file
)
check_files(
[basePath + 'apps/**/*.inl'],
[basePath + 'apps/**/ext/**/*.inl'],
'openspace_app',
check_inline_file
)
check_files(
[basePath + 'modules/**/*.inl'],
[basePath + 'modules/**/ext/**/*.h'],
'openspace_module',
check_inline_file
)
check_files(
[basePath + 'ext/ghoul/include/**/*.inl'],
[],
'ghoul',
check_inline_file
)
if not is_silent_mode:
print("")
print("Checking source files")
print("=====================")
check_files(
[basePath + 'src/**/*.cpp'],
[],
'openspace_core',
check_source_file
)
check_files(
[basePath + 'apps/**/*.cpp'],
[basePath + 'apps/**/ext/**/*.cpp'],
'openspace_app',
check_source_file
)
check_files(
[basePath + 'modules/**/*.cpp'],
[basePath + 'modules/**/ext/**/*.cpp', basePath + 'modules/**/node_modules/**/*.cpp'],
'openspace_module',
check_source_file
)
check_files(
[basePath + 'ext/ghoul/src/**/*.cpp'],
[],
'ghoul',
check_source_file
)
| 28.542522 | 118 | 0.623857 | [
"MIT"
] | nbartzokas/OpenSpace | support/coding/check_style_guide.py | 19,467 | Python |
"""
Training data and validation accuracy.
"""
# Author: Changyu Liu <[email protected]>
# Last modified: 2018-07-06
# LICENSE: MIT
import os
import numpy as np
import tensorflow as tf
from PIL import Image
import train_test_split
import cnn
N_CLASSES = 2 # dogs and cats
IMG_W = 208 # resize the image, if the input image is too large, training will be very slow
IMG_H = 208
BATCH_SIZE = 16
CAPACITY = 2000
MAX_STEP = 15000
# with current parameters, it is suggested to use learning rate<0.0001
learning_rate = 0.0001
def run_training():
# Set there directories .
train_dir = './data/train/'
logs_train_dir = './logs/train/'
train, train_label = train_test_split.get_files(train_dir)
train_batch, train_label_batch = train_test_split.get_batch(train,
train_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY)
train_logits = cnn.inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = cnn.losses(train_logits, train_label_batch)
train_op = cnn.training(train_loss, learning_rate)
train__acc = cnn.evaluation(train_logits, train_label_batch)
summary_op = tf.summary.merge_all()
sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
for step in np.arange(MAX_STEP):
if coord.should_stop():
break
_, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
if step % 50 == 0:
print(
"Step {}, ".format(step),
"train loss = {:.2f}, ".format(tra_loss),
"train accuracy = {:.2f}%".format(tra_acc * 100.0))
summary_str = sess.run(summary_op)
train_writer.add_summary(summary_str, step)
if step % 2000 == 0 or (step + 1) == MAX_STEP:
checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
print("Done training -- epoch limit reached")
finally:
coord.request_stop()
coord.join(threads)
sess.close()
def get_image(train):
"""
Randomly pick one image from training data
====================
Args:
train: train data
====================
Return:
image
"""
n = len(train)
ind = np.random.randint(0, n)
img_dir = train[ind]
image = Image.open(img_dir)
image = image.resize([208, 208])
image = np.array(image)
return image
def evaluate():
"""
Test one image against the saved models and parameters
"""
# you need to change the directories to yours.
train_dir = './data/train/'
train, train_label = train_test_split.get_files(train_dir)
image_array = get_image(train)
with tf.Graph().as_default():
batch_size = 1
n_classes = 2
image = tf.cast(image_array, tf.float32)
image = tf.image.per_image_standardization(image)
image = tf.reshape(image, [1, 208, 208, 3])
logits = cnn.inference(image, batch_size, n_classes)
logits = tf.nn.softmax(logits)
X = tf.placeholder(tf.float32, shape=[208, 208, 3])
# you need to change the directories to yours.
logs_train_dir = './logs/train/'
saver = tf.train.Saver()
with tf.Session() as sess:
print("Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(logs_train_dir)
if ckpt and ckpt.model_checkpoint_path:
global_step = ckpt.model_checkpoint_path.split(
'/')[-1].split('-')[-1]
saver.restore(sess, ckpt.model_checkpoint_path)
print("Loading success, global_step is %s".format(global_step))
else:
print("No checkpoint file found")
prediction = sess.run(logits, feed_dict={X: image_array})
max_index = np.argmax(prediction)
if max_index == 0:
print("This is a cat with possibility {:.6f}".format(
prediction[:, 0]))
else:
print("This is a dog with possibility {:.6f}".format(
prediction[:, 1]))
| 31.686667 | 92 | 0.568062 | [
"MIT"
] | GPUworkstation/tensorflow-project | cats_dogs/base.py | 4,753 | Python |
import json
#Try with python3
try:
from urllib.request import urlopen, urlretrieve
from urllib.request import urlretrieve
#Else try python2
except:
from urllib2 import urlopen
from urllib import urlretrieve
from os import path
#User home folder
homeFolder = path.expanduser("~")
#Save pictures to a folder
pictureLocation = homeFolder + "/Downloads/"
def main():
########Defining variables#######
#URL in json format for latest wallpaper
url = "http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US"
getHighRes = 1 #Manually change the resolution in the url to 1920x1200. Change to 0 if url breaks.
#Get json response from bing.com
response = urlopen(url)
#Trying python 3
try:
output = response.readall().decode('utf-8')
#Else trying python2
except:
output = response.read()
#Get json output
data = json.loads(output)
#Form image url from json
output_url = "http://www.bing.com/" + data["images"][0]["url"]
#Form 1920x1200 image from above url
output_url_highres = output_url.replace("1080", "1200")
#If higher resolution is preferred(default)
if getHighRes == 1:
#Use try block to catch any failure in getting the high res image
try:
process_url(output_url_highres)
except:
process_url(output_url)
else:
process_url(output_url)
def process_url(image_url):
if not check_url(image_url) == 1:
#Get the filename of the new file from the url
filename = pictureLocation + image_url.split('/')[-1]
#Retrieve the image from the web and save it to desired location
req = urlretrieve(image_url, filename)
#Save the file path + filename to the output variable
bingImage = path.abspath(filename)
print(bingImage)
else:
raise Exception('bad url')
def check_url(image_url):
conn = urlopen(image_url)
if not conn.getcode() == 200:
return 1
main()
| 25.164706 | 103 | 0.624123 | [
"MIT"
] | networkprogrammer/bing-wallpaper-for-mac | Bing Wallpaper/GetWallpaper.py | 2,139 | Python |
import asyncio
from collections import defaultdict
from dataclasses import dataclass
import json
import logging
import os
import time
from typing import Dict, Set
from ray._private.utils import import_attr
from ray.core.generated import runtime_env_agent_pb2
from ray.core.generated import runtime_env_agent_pb2_grpc
from ray.core.generated import agent_manager_pb2
import ray.dashboard.utils as dashboard_utils
import ray.dashboard.modules.runtime_env.runtime_env_consts \
as runtime_env_consts
from ray.experimental.internal_kv import _internal_kv_initialized, \
_initialize_internal_kv
from ray._private.ray_logging import setup_component_logger
from ray._private.runtime_env.conda import CondaManager
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.runtime_env.py_modules import PyModulesManager
from ray._private.runtime_env.working_dir import WorkingDirManager
from ray._private.runtime_env.container import ContainerManager
from ray._private.runtime_env.plugin import decode_plugin_uri
from ray._private.runtime_env.utils import RuntimeEnv
logger = logging.getLogger(__name__)
# TODO(edoakes): this is used for unit tests. We should replace it with a
# better pluggability mechanism once available.
SLEEP_FOR_TESTING_S = os.environ.get("RAY_RUNTIME_ENV_SLEEP_FOR_TESTING_S")
@dataclass
class CreatedEnvResult:
# Whether or not the env was installed correctly.
success: bool
# If success is True, will be a serialized RuntimeEnvContext
# If success is False, will be an error message.
result: str
class RuntimeEnvAgent(dashboard_utils.DashboardAgentModule,
runtime_env_agent_pb2_grpc.RuntimeEnvServiceServicer):
"""An RPC server to create and delete runtime envs.
Attributes:
dashboard_agent: The DashboardAgent object contains global config.
"""
def __init__(self, dashboard_agent):
super().__init__(dashboard_agent)
self._runtime_env_dir = dashboard_agent.runtime_env_dir
self._logging_params = dashboard_agent.logging_params
self._per_job_logger_cache = dict()
# Cache the results of creating envs to avoid repeatedly calling into
# conda and other slow calls.
self._env_cache: Dict[str, CreatedEnvResult] = dict()
# Maps a serialized runtime env to a lock that is used
# to prevent multiple concurrent installs of the same env.
self._env_locks: Dict[str, asyncio.Lock] = dict()
# Keeps track of the URIs contained within each env so we can
# invalidate the env cache when a URI is deleted.
# This is a temporary mechanism until we have per-URI caching.
self._uris_to_envs: Dict[str, Set[str]] = defaultdict(set)
# Initialize internal KV to be used by the working_dir setup code.
_initialize_internal_kv(self._dashboard_agent.gcs_client)
assert _internal_kv_initialized()
self._conda_manager = CondaManager(self._runtime_env_dir)
self._py_modules_manager = PyModulesManager(self._runtime_env_dir)
self._working_dir_manager = WorkingDirManager(self._runtime_env_dir)
self._container_manager = ContainerManager(dashboard_agent.temp_dir)
def get_or_create_logger(self, job_id: bytes):
job_id = job_id.decode()
if job_id not in self._per_job_logger_cache:
params = self._logging_params.copy()
params["filename"] = f"runtime_env_setup-{job_id}.log"
params["logger_name"] = f"runtime_env_{job_id}"
per_job_logger = setup_component_logger(**params)
self._per_job_logger_cache[job_id] = per_job_logger
return self._per_job_logger_cache[job_id]
async def CreateRuntimeEnv(self, request, context):
async def _setup_runtime_env(serialized_runtime_env,
serialized_allocated_resource_instances):
# This function will be ran inside a thread
def run_setup_with_logger():
runtime_env = RuntimeEnv(
serialized_runtime_env=serialized_runtime_env)
allocated_resource: dict = json.loads(
serialized_allocated_resource_instances or "{}")
# Use a separate logger for each job.
per_job_logger = self.get_or_create_logger(request.job_id)
# TODO(chenk008): Add log about allocated_resource to
# avoid lint error. That will be moved to cgroup plugin.
per_job_logger.debug(f"Worker has resource :"
f"{allocated_resource}")
context = RuntimeEnvContext(env_vars=runtime_env.env_vars())
self._conda_manager.setup(
runtime_env, context, logger=per_job_logger)
self._py_modules_manager.setup(
runtime_env, context, logger=per_job_logger)
self._working_dir_manager.setup(
runtime_env, context, logger=per_job_logger)
self._container_manager.setup(
runtime_env, context, logger=per_job_logger)
# Add the mapping of URIs -> the serialized environment to be
# used for cache invalidation.
if runtime_env.working_dir_uri():
uri = runtime_env.working_dir_uri()
self._uris_to_envs[uri].add(serialized_runtime_env)
if runtime_env.py_modules_uris():
for uri in runtime_env.py_modules_uris():
self._uris_to_envs[uri].add(serialized_runtime_env)
if runtime_env.conda_uri():
uri = runtime_env.conda_uri()
self._uris_to_envs[uri].add(serialized_runtime_env)
if runtime_env.plugin_uris():
for uri in runtime_env.plugin_uris():
self._uris_to_envs[uri].add(serialized_runtime_env)
# Run setup function from all the plugins
for plugin_class_path, config in runtime_env.plugins():
logger.debug(
f"Setting up runtime env plugin {plugin_class_path}")
plugin_class = import_attr(plugin_class_path)
# TODO(simon): implement uri support
plugin_class.create("uri not implemented",
json.loads(config), context)
plugin_class.modify_context("uri not implemented",
json.loads(config), context)
return context
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, run_setup_with_logger)
serialized_env = request.serialized_runtime_env
if serialized_env not in self._env_locks:
# async lock to prevent the same env being concurrently installed
self._env_locks[serialized_env] = asyncio.Lock()
async with self._env_locks[serialized_env]:
if serialized_env in self._env_cache:
serialized_context = self._env_cache[serialized_env]
result = self._env_cache[serialized_env]
if result.success:
context = result.result
logger.info("Runtime env already created successfully. "
f"Env: {serialized_env}, context: {context}")
return runtime_env_agent_pb2.CreateRuntimeEnvReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_OK,
serialized_runtime_env_context=context)
else:
error_message = result.result
logger.info("Runtime env already failed. "
f"Env: {serialized_env}, err: {error_message}")
return runtime_env_agent_pb2.CreateRuntimeEnvReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
error_message=error_message)
if SLEEP_FOR_TESTING_S:
logger.info(f"Sleeping for {SLEEP_FOR_TESTING_S}s.")
time.sleep(int(SLEEP_FOR_TESTING_S))
logger.info(f"Creating runtime env: {serialized_env}")
runtime_env_context: RuntimeEnvContext = None
error_message = None
for _ in range(runtime_env_consts.RUNTIME_ENV_RETRY_TIMES):
try:
runtime_env_context = await _setup_runtime_env(
serialized_env,
request.serialized_allocated_resource_instances)
break
except Exception as ex:
logger.exception("Runtime env creation failed.")
error_message = str(ex)
await asyncio.sleep(
runtime_env_consts.RUNTIME_ENV_RETRY_INTERVAL_MS / 1000
)
if error_message:
logger.error(
"Runtime env creation failed for %d times, "
"don't retry any more.",
runtime_env_consts.RUNTIME_ENV_RETRY_TIMES)
self._env_cache[serialized_env] = CreatedEnvResult(
False, error_message)
return runtime_env_agent_pb2.CreateRuntimeEnvReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
error_message=error_message)
serialized_context = runtime_env_context.serialize()
self._env_cache[serialized_env] = CreatedEnvResult(
True, serialized_context)
logger.info(
"Successfully created runtime env: %s, the context: %s",
serialized_env, serialized_context)
return runtime_env_agent_pb2.CreateRuntimeEnvReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_OK,
serialized_runtime_env_context=serialized_context)
async def DeleteURIs(self, request, context):
logger.info(f"Got request to delete URIs: {request.uris}.")
failed_uris = [] # URIs that we failed to delete.
for plugin_uri in request.uris:
plugin, uri = decode_plugin_uri(plugin_uri)
# Invalidate the env cache for any envs that contain this URI.
for env in self._uris_to_envs.get(uri, []):
if env in self._env_cache:
del self._env_cache[env]
if plugin == "working_dir":
if not self._working_dir_manager.delete_uri(uri):
failed_uris.append(uri)
elif plugin == "py_modules":
if not self._py_modules_manager.delete_uri(uri):
failed_uris.append(uri)
elif plugin == "conda":
if not self._conda_manager.delete_uri(uri):
failed_uris.append(uri)
else:
raise ValueError(
"RuntimeEnvAgent received DeleteURI request "
f"for unsupported plugin {plugin}. URI: {uri}")
if failed_uris:
return runtime_env_agent_pb2.DeleteURIsReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
error_message="Local files for URI(s) "
f"{failed_uris} not found.")
else:
return runtime_env_agent_pb2.DeleteURIsReply(
status=agent_manager_pb2.AGENT_RPC_STATUS_OK)
async def run(self, server):
runtime_env_agent_pb2_grpc.add_RuntimeEnvServiceServicer_to_server(
self, server)
| 47.703252 | 79 | 0.633319 | [
"Apache-2.0"
] | 188xuhe/ray | dashboard/modules/runtime_env/runtime_env_agent.py | 11,735 | Python |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
st.text("This text is awesome!")
| 35.555556 | 74 | 0.760938 | [
"Apache-2.0"
] | Aaryanverma/streamlit | e2e/scripts/st_text.py | 640 | Python |
import os
import logging
import argparse
from collections import Counter
import pandas as pd
import inflect
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
_CATEGRORIES = [
'Mini Briefs',
'Advances & Business',
'Concerns & Hype',
'Analysis & Policy',
'Expert Opinions & Discussion within the field',
'Explainers'
]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--template_file', '-tf', type=str, default='digest_template.md')
parser.add_argument('--digest_number', '-n', type=int, required=True)
parser.add_argument('--input_csv', '-i', type=str, required=True)
parser.add_argument('--output_md', '-o', type=str, required=True)
parser.add_argument('--force_overwrite', '-f', action='store_true')
args = parser.parse_args()
n = args.digest_number
p = inflect.engine()
n_english = p.number_to_words(p.ordinal(n))
logging.info('Parsing for the {} digest'.format(n_english))
logging.info('Will save result to {}'.format(args.output_md))
if os.path.isfile(args.output_md):
if not args.force_overwrite:
raise ValueError('Cannot overwrite existing output file!')
logging.info('Loading template from {}'.format(args.template_file))
with open(args.template_file, 'r') as f:
md_template = f.read()
logging.info('Reading {}'.format(args.input_csv))
articles_map = {c : [] for c in _CATEGRORIES}
csv = pd.read_csv(args.input_csv)
for row_num, row in csv.iterrows():
if not row['Type']:
print()
print('To which category does this article belong?')
print()
print(row['Name'])
print()
for i, c in enumerate(_CATEGRORIES):
print('{}) {}'.format(i, c))
while True:
try:
print()
c_idx = int(input('Category Number: '))
c = _CATEGRORIES[c_idx]
break
except:
print('Please enter a valid category!')
print()
else:
c = row['Type']
articles_map[c].append(row)
logging.info('Populating content...')
content = ''
for c in _CATEGRORIES:
items = articles_map[c]
if len(items) > 0:
content += '### {}\n'.format(c)
content += '\n'
for item in items:
if c == 'Mini Briefs':
content += '#### [{}]({})\n'.format(item['Name'], item['URL'])
content += '\n'
content += '<one-two paragraph brief>\n'
else:
content += '* [{}]({}) - {}\n'.format(item['Name'], item['URL'], item['Excerpt'])
content += '\n'
# remove the last two empty lines
content = content[:-2]
md = md_template.replace('$digest_number$', str(n)) \
.replace('$digest_number_english$', n_english) \
.replace('$content$', content)
logging.info('Saving digest markdown...')
with open(args.output_md, 'w') as f:
f.write(md)
logging.info('Done!')
| 31.142857 | 101 | 0.552905 | [
"MIT"
] | jacky-liang/skynet-today | scripts/csv2md.py | 3,270 | Python |
s = "Hey there! what should this string be?"
# Length should be 20
print("Length of s = %d" % len(s[0:20]))
# Index
print("The first occurrence of the letter a = %d" % s.index("!"))
# Count
print("t occurs %d times" % s.count("t"))
# Slicing the string into bits
s1 = "hello world"
print(s1[:1]) # splicing is exclusive
print("|",s1[:s1.index(" ")],"|", sep="") # splicing is exclusive
print("|",s1[s1.index(" "):s1.index(" ")],"|", sep="") # splicing is exclusive
print("|",s1[s1.index(" ") + 1:],"|", sep="") # splicing is exclusive
print("The first five characters are '%s'" % s[:5]) # Start to 5
print("The next five characters are '%s'" % s[5:10]) # 5 to 10
print("The thirteenth character is '%s'" % s[12]) # Just number 12
print("The characters with odd index are '%s'" %s[1::2]) #(0-based indexing)
print("The last five characters are '%s'" % s[-5:]) # 5th-from-last to end
print("Reverse the characteres are '%s'" % s[::-1]) # string reversed
print("Reverse the characteres are '%s'" % s[::-2]) # reversed with odd index
# uppercase
print("String in uppercase: %s" % s.upper())
# Convert everything to lowercase
print("String in lowercase: %s" % s.lower())
# Check how a string starts
print("String starts with 'Str'.!", s.startswith("Str"))
# Check how a string ends
print("String ends with 'ome!'.!", s.endswith("ome!"))
# Split
print("Split the words of the string: %s" % s.split(" "))
# Check ranges
x = 'b'
print('a' <= x <= 'z')
word_squares = ["ball", "area", "able", "lead", "lady"]
step = 1
prefix = ''.join([word[step] for word in word_squares])
print("prefix ", prefix)
| 32.673469 | 78 | 0.630231 | [
"MIT"
] | othonreyes/code_problems | python/python/basics/strings.py | 1,601 | Python |
import os
import pkgutil
from pathlib import Path
import pytest
from click.testing import CliRunner
from slotscheck.cli import root as cli
from .conftest import EXAMPLES_DIR
@pytest.fixture()
def runner():
return CliRunner()
@pytest.fixture(autouse=True)
def set_cwd(request):
os.chdir(EXAMPLES_DIR)
yield
os.chdir(request.config.invocation_dir)
def test_no_inputs(runner: CliRunner):
result = runner.invoke(cli, [])
assert result.exit_code == 0
assert result.output == "No files or modules given. Nothing to do!\n"
def test_module_doesnt_exist(runner: CliRunner):
result = runner.invoke(cli, ["-m", "foo"])
assert result.exit_code == 1
assert result.output == (
"ERROR: Module 'foo' not found.\n\n"
"See slotscheck.rtfd.io/en/latest/discovery.html\n"
"for help resolving common import problems.\n"
)
def test_path_doesnt_exist(runner: CliRunner):
result = runner.invoke(cli, ["doesnt_exist"])
assert result.exit_code == 2
assert (
result.output
== """\
Usage: slotscheck [OPTIONS] [FILES]...
Try 'slotscheck --help' for help.
Error: Invalid value for '[FILES]...': Path 'doesnt_exist' does not exist.
"""
)
def test_everything_ok(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_ok"])
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 6 module(s), 64 class(es).\n"
def test_single_file_module(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "module_singular"], catch_exceptions=False
)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 1 module(s), 5 class(es).\n"
def test_builtins(runner: CliRunner):
result = runner.invoke(cli, ["-m", "builtins"])
assert result.exit_code == 0
def test_extension(runner: CliRunner):
result = runner.invoke(cli, ["-m", "_pickle"])
assert result.exit_code == 0
assert result.output == ("All OK!\nScanned 1 module(s), 5 class(es).\n")
def test_success_verbose(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "module_ok", "-v"], catch_exceptions=False
)
assert result.exit_code == 0
assert (
result.output
== """\
All OK!
stats:
modules: 7
checked: 6
excluded: 1
skipped: 0
classes: 64
has slots: 44
no slots: 20
n/a: 0
"""
)
def test_submodule(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "module_ok.a.b"], catch_exceptions=False
)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 4 module(s), 32 class(es).\n"
def test_namespaced(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "namespaced.module"], catch_exceptions=False
)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 4 module(s), 1 class(es).\n"
def test_multiple_modules(runner: CliRunner):
result = runner.invoke(
cli,
["-m", "module_singular", "-m", "module_ok", "-m", "namespaced"],
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 11 module(s), 70 class(es).\n"
def test_multiple_paths(runner: CliRunner):
result = runner.invoke(
cli,
[
str(EXAMPLES_DIR / "module_singular.py"),
str(EXAMPLES_DIR / "module_ok/a/b/../b"),
str(EXAMPLES_DIR / "namespaced/module/foo.py"),
],
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 8 module(s), 38 class(es).\n"
def test_path_is_module_directory(runner: CliRunner):
# let's define the path indirectly to ensure it works
path = str(EXAMPLES_DIR / "module_ok/a/../")
result = runner.invoke(cli, [path], catch_exceptions=False)
assert result.exit_code == 0
assert result.output == "All OK!\nScanned 6 module(s), 64 class(es).\n"
def test_cannot_pass_both_path_and_module(runner: CliRunner):
result = runner.invoke(cli, ["module_ok", "-m", "click"])
assert result.exit_code == 2
assert (
result.output
== "ERROR: Specify either FILES argument or `-m/--module` "
"option, not both.\n"
)
def test_errors_with_default_settings(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_not_ok"])
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:S' has slots but superclass does not.
ERROR: 'module_not_ok.foo:T' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_require_slots_subclass(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_not_ok", "--require-subclass"])
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:A' has no slots, but it could have.
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:A' has no slots, but it could have.
ERROR: 'module_not_ok.foo:C' has no slots, but it could have.
ERROR: 'module_not_ok.foo:R' has no slots, but it could have.
ERROR: 'module_not_ok.foo:S' has slots but superclass does not.
ERROR: 'module_not_ok.foo:T' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_disallow_nonslot_inherit(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "module_not_ok", "--require-superclass"]
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:S' has slots but superclass does not.
ERROR: 'module_not_ok.foo:T' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_no_require_superclass(runner: CliRunner):
result = runner.invoke(
cli, ["-m", "module_not_ok", "--no-require-superclass"]
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_with_exclude_classes(runner: CliRunner):
result = runner.invoke(
cli,
["-m", "module_not_ok", "--exclude-classes", "(foo:U$|:(W|S))"],
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
ERROR: 'module_not_ok.foo:T' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_with_include_classes(runner: CliRunner):
result = runner.invoke(
cli,
["-m", "module_not_ok", "--include-classes", "(foo:.*a|:(W|S))"],
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.foo:S' has slots but superclass does not.
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_errors_with_include_modules(runner: CliRunner):
result = runner.invoke(
cli,
[
"-m",
"module_not_ok",
"--include-modules",
"(module_not_ok$ | a)",
],
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
Oh no, found some problems!
Scanned 3 module(s), 2 class(es).
"""
)
def test_ingores_given_module_completely(runner: CliRunner):
result = runner.invoke(
cli,
[
"-m",
"module_not_ok",
"--include-modules",
"nomatch",
],
)
assert result.exit_code == 0
assert (
result.output
== "Files or modules given, but filtered out by exclude/include. "
"Nothing to do!\n"
)
def test_module_not_ok_verbose(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_not_ok", "-v"])
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.a.b:U' has slots but superclass does not.
Superclasses without slots:
- 'module_not_ok.a.b:A'
ERROR: 'module_not_ok.foo:S' has slots but superclass does not.
Superclasses without slots:
- 'module_not_ok.foo:R'
ERROR: 'module_not_ok.foo:T' has slots but superclass does not.
Superclasses without slots:
- 'module_not_ok.foo:A'
ERROR: 'module_not_ok.foo:U' has slots but superclass does not.
Superclasses without slots:
- 'module_not_ok.foo:L'
- 'module_not_ok.foo:D'
- 'module_not_ok.foo:C'
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
Slots already defined in superclass:
- 'w' (module_not_ok.foo:Q)
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
Slots already defined in superclass:
- 'w' (module_not_ok.foo:U.Ua)
- 'w' (module_not_ok.foo:Q)
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
Slots already defined in superclass:
- 'p' (module_not_ok.foo:U)
- 'v' (module_not_ok.foo:V)
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
Duplicate slot names:
- 'b'
- 'c'
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Slots already defined in superclass:
- 'b' (module_not_ok.foo:Z)
- 'c' (module_not_ok.foo:Z)
Oh no, found some problems!
stats:
modules: 4
checked: 4
excluded: 0
skipped: 0
classes: 28
has slots: 21
no slots: 7
n/a: 0
"""
)
def test_module_misc(runner: CliRunner):
result = runner.invoke(
cli,
["-m", "module_misc", "--no-strict-imports"],
catch_exceptions=False,
)
assert result.exit_code == 0
assert (
result.output
== """\
NOTE: Failed to import 'module_misc.a.evil'.
All OK!
Scanned 18 module(s), 8 class(es).
"""
)
def test_module_exclude(runner: CliRunner):
result = runner.invoke(
cli,
[
"-m",
"module_misc",
"--exclude-modules",
"evil",
"--no-strict-imports",
],
catch_exceptions=False,
)
assert result.exit_code == 0
assert (
result.output
== """\
NOTE: Failed to import 'module_misc.a.b.__main__'.
All OK!
Scanned 16 module(s), 9 class(es).
"""
)
from module_misc import a # type: ignore
assert not a.evil_was_imported
def test_module_disallow_import_failures(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_misc", "--strict-imports"])
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: Failed to import 'module_misc.a.evil'.
Oh no, found some problems!
Scanned 18 module(s), 8 class(es).
"""
)
def test_module_allow_import_failures(runner: CliRunner):
result = runner.invoke(cli, ["-m", "module_misc", "--no-strict-imports"])
assert result.exit_code == 0
assert (
result.output
== """\
NOTE: Failed to import 'module_misc.a.evil'.
All OK!
Scanned 18 module(s), 8 class(es).
"""
)
def test_finds_config(runner: CliRunner, mocker, tmpdir):
(tmpdir / "myconf.toml").write_binary(
b"""
[tool.slotscheck]
require-superclass = false
"""
)
mocker.patch(
"slotscheck.config.find_config_file",
return_value=Path(tmpdir / "myconf.toml"),
)
result = runner.invoke(cli, ["-m", "module_not_ok"])
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_given_config(runner: CliRunner, tmpdir):
my_config = tmpdir / "myconf.toml"
my_config.write_binary(
b"""
[tool.slotscheck]
require-superclass = false
"""
)
result = runner.invoke(
cli,
["-m", "module_not_ok", "--settings", str(my_config)],
catch_exceptions=False,
)
assert result.exit_code == 1
assert (
result.output
== """\
ERROR: 'module_not_ok.foo:U.Ua' defines overlapping slots.
ERROR: 'module_not_ok.foo:U.Ub' defines overlapping slots.
ERROR: 'module_not_ok.foo:W' defines overlapping slots.
ERROR: 'module_not_ok.foo:Z' has duplicate slots.
ERROR: 'module_not_ok.foo:Za' defines overlapping slots.
Oh no, found some problems!
Scanned 4 module(s), 28 class(es).
"""
)
def test_ambiguous_import(runner: CliRunner):
result = runner.invoke(
cli,
[str(EXAMPLES_DIR / "other/module_misc/a/b/c.py")],
catch_exceptions=False,
)
assert result.exit_code == 1
assert (
result.output
== """\
Cannot check due to import ambiguity.
The given files do not correspond with what would be imported:
'import module_misc.a.b.c' would load from:
{}
instead of:
{}
You may need to define $PYTHONPATH or run as 'python -m slotscheck'
to ensure the correct files can be imported.
See slotscheck.rtfd.io/en/latest/discovery.html
for more information on why this happens and how to resolve it.
""".format(
pkgutil.get_loader(
"module_misc.a.b.c"
).path, # type: ignore[union-attr]
EXAMPLES_DIR / "other/module_misc/a/b/c.py",
)
)
def test_ambiguous_import_excluded(runner: CliRunner):
result = runner.invoke(
cli,
["other/module_misc/a/b/c.py", "--exclude-modules", "module_misc"],
catch_exceptions=False,
)
assert result.exit_code == 0
assert (
result.output
== """\
Files or modules given, but filtered out by exclude/include. Nothing to do!
"""
)
| 28.708333 | 78 | 0.650092 | [
"MIT"
] | ariebovenberg/slotscheck | tests/src/test_cli.py | 15,847 | Python |
from keras.utils import to_categorical
import tensorflow as tf
import pygame
class pytennis:
def __init__(self, fps = 50):
self.net = Network(150,450,100,600)
self.updateRewardA = 0
self.updateRewardB = 0
self.updateIter = 0
self.lossA = 0
self.lossB = 0
# Testing
self.net = Network(150, 450, 100, 600)
self.NetworkA = self.net.network(300, ysource=100, Ynew=600) # Network A
self.NetworkB = self.net.network(200, ysource=600, Ynew=100) # Network B
# NetworkA
# display test plot of network A
#sns.jointplot(NetworkA[0], NetworkA[1])
# display test plot of network B
#sns.jointplot(NetworkB[0], NetworkB[1])
self.out = self.net.DefaultToPosition(250)
self.lastxcoordinate = 350
pygame.init()
self.BLACK = ( 0,0,0)
self.myFontA = pygame.font.SysFont("Times New Roman", 25)
self.myFontB = pygame.font.SysFont("Times New Roman", 25)
self.myFontIter = pygame.font.SysFont('Times New Roman', 25)
self.FPS = fps
self.fpsClock = pygame.time.Clock()
def setWindow(self):
# set up the window
self.DISPLAYSURF = pygame.display.set_mode((600, 700), 0, 32)
pygame.display.set_caption('REINFORCEMENT LEARNING (Discrete Mathematics) - TABLE TENNIS')
# set up the colors
self.BLACK = ( 0,0,0)
self.WHITE = (255, 255, 255)
self.RED= (255,0,0)
self.GREEN = ( 0, 255,0)
self.BLUE = ( 0,0, 255)
return
def display(self):
self.setWindow()
self.DISPLAYSURF.fill(self.WHITE)
pygame.draw.rect(self.DISPLAYSURF, self.GREEN, (150, 100, 300, 500))
pygame.draw.rect(self.DISPLAYSURF, self.RED, (150, 340, 300, 20))
pygame.draw.rect(self.DISPLAYSURF, self.BLACK, (0, 20, 600, 20))
pygame.draw.rect(self.DISPLAYSURF, self.BLACK, (0, 660, 600, 20))
return
def reset(self):
return
def evaluate_state_from_last_coordinate(self, c):
"""
cmax: 450
cmin: 150
c definately will be between 150 and 450.
state0 - (150 - 179)
state1 - (180 - 209)
state2 - (210 - 239)
state3 - (240 - 269)
state4 - (270 - 299)
state5 - (300 - 329)
state6 - (330 - 359)
state7 - (360 - 389)
state8 - (390 - 419)
state9 - (420 - 450)
"""
if c >= 150 and c <=179:
return 0
elif c >= 180 and c <= 209:
return 1
elif c >=210 and c <= 239:
return 2
elif c >=240 and c <= 269:
return 3
elif c>= 270 and c<=299:
return 4
elif c >= 300 and c <= 329:
return 5
elif c >= 330 and c <= 359:
return 6
elif c >= 360 and c <= 389:
return 7
elif c >= 390 and c <= 419:
return 8
elif c >= 420 and c <= 450:
return 9
def evaluate_action(self, action, expectedState):
if action == expectedState:
return True
else:
return False
def randomVal(self, action):
"""
cmax: 450
cmin: 150
c definately will be between 150 and 450.
state0 - (150 - 179)
state1 - (180 - 209)
state2 - (210 - 239)
state3 - (240 - 269)
state4 - (270 - 299)
state5 - (300 - 329)
state6 - (330 - 359)
state7 - (360 - 389)
state8 - (390 - 419)
state9 - (420 - 450)
"""
if action == 0:
val = np.random.choice([i for i in range(150, 180)])
elif action == 1:
val = np.random.choice([i for i in range(180, 210)])
elif action == 2:
val = np.random.choice([i for i in range(210, 240)])
elif action == 3:
val = np.random.choice([i for i in range(240, 270)])
elif action == 4:
val = np.random.choice([i for i in range(270, 300)])
elif action == 5:
val = np.random.choice([i for i in range(300, 330)])
elif action == 6:
val = np.random.choice([i for i in range(330, 360)])
elif action == 7:
val = np.random.choice([i for i in range(360, 390)])
elif action == 8:
val = np.random.choice([i for i in range(390, 420)])
else:
val = np.random.choice([i for i in range(420, 450)])
return val
def stepA(self, action, count = 0):
#playerA should play
if count == 0:
#playerax = lastxcoordinate
self.NetworkA = self.net.network(self.lastxcoordinate, ysource = 100, Ynew = 600) #Network A
self.out = self.net.DefaultToPosition(self.lastxcoordinate)
#update lastxcoordinate
self.bally = self.NetworkA[1][count]
#here
#self.playerax = self.out[count]
self.playerbx = self.randomVal(action)
# soundObj = pygame.mixer.Sound('sound/sound.wav')
# soundObj.play()
# time.sleep(0.4)
# soundObj.stop()
elif count == 49:
self.ballx = self.NetworkA[0][count]
self.bally = self.NetworkA[1][count]
# move playerbx with respect to action
self.playerbx = self.randomVal(action)
else:
self.ballx = self.NetworkA[0][count]
self.bally = self.NetworkA[1][count]
# move playerbx with respect to action
# self.playerbx = self.randomVal(action)
obs = self.evaluate_state_from_last_coordinate(int(self.ballx)) # last state of the ball
reward = self.evaluate_action(action, obs)
done = True
info = ''
return obs, reward, done, info
def stepB(self, action, count):
#playerB can play
if count == 0:
#playerbx = lastxcoordinate
self.NetworkB = self.net.network(self.lastxcoordinate, ysource = 600, Ynew = 100) #Network B
self.out = self.net.DefaultToPosition(self.lastxcoordinate)
#update lastxcoordinate
self.bally = self.NetworkB[1][count]
#self.playerax = self.out[count]
self.playerax = self.randomVal(action)
# soundObj = pygame.mixer.Sound('sound/sound.wav')
# soundObj.play()
# time.sleep(0.4)
# soundObj.stop()
elif count ==49:
self.ballx = self.NetworkA[0][count]
self.bally = self.NetworkA[1][count]
# move playerbx with respect to action
self.playerbx = self.randomVal(action)
else:
self.ballx = self.NetworkB[0][count]
self.bally = self.NetworkB[1][count]
# self.playerbx = self.randomVal(action)
obs = self.evaluate_state_from_last_coordinate(int(self.ballx)) # last state of the ball
reward = self.evaluate_action(action, obs)
done = True
info = ''
return obs, reward, done, info
def computeLossA(self, reward):
if reward == 0:
self.lossA += 1
else:
self.lossA += 0
return
def computeLossB(self, reward):
if reward == 0:
self.lossB += 1
else:
self.lossB += 0
return
def render(self):
# diplay team players
self.PLAYERA = pygame.image.load('images/cap.jpg')
self.PLAYERA = pygame.transform.scale(self.PLAYERA, (50, 50))
self.PLAYERB = pygame.image.load('images/cap.jpg')
self.PLAYERB = pygame.transform.scale(self.PLAYERB, (50, 50))
self.ball = pygame.image.load('images/ball.png')
self.ball = pygame.transform.scale(self.ball, (15, 15))
self.playerax = 150
self.playerbx = 250
self.ballx = 250
self.bally = 300
count = 0
nextplayer = 'A'
#player A starts by playing with state 0
obs, reward, done, info = self.stepA(0)
stateA = obs
stateB = obs
next_state = 0
iterations = 20000
iteration = 0
restart = False
while iteration < iterations:
self.display()
self.randNumLabelA = self.myFontA.render('A (Win): '+str(self.updateRewardA) + ', A(loss): '+str(self.lossA), 1, self.BLACK)
self.randNumLabelB = self.myFontB.render('B (Win): '+str(self.updateRewardB) + ', B(loss): '+ str(self.lossB), 1, self.BLACK)
self.randNumLabelIter = self.myFontIter.render('Iterations: '+str(self.updateIter), 1, self.BLACK)
if nextplayer == 'A':
if count == 0:
# Online DQN evaluates what to do
q_valueA = AgentA.model.predict([stateA])
actionA = AgentA.epsilon_greedy(q_valueA, iteration)
# Online DQN plays
obs, reward, done, info = self.stepA(action = actionA, count = count)
next_stateA = obs
# Let's memorize what just happened
AgentA.replay_memory.append((stateA, actionA, reward, next_stateA, 1.0 - done))
stateA = next_stateA
else:
# Online DQN evaluates what to do
q_valueA = AgentA.model.predict([stateA])
actionA = AgentA.epsilon_greedy(q_valueA, iteration)
# Online DQN plays
obs, reward, done, info = self.stepA(action = actionA, count = count)
next_stateA = obs
# Let's memorize what just happened
# AgentA.replay_memory.append((state, action, reward, next_state, 1.0 - done))
stateA = next_stateA
count += 1
if count == 50:
count = 0
self.updateRewardA += reward
self.computeLossA(reward)
#restart the game if player A fails to get the ball, and let B start the game
if reward == 0:
restart = True
time.sleep(0.5)
nextplayer = 'B'
self.playerbx = self.ballx
else:
restart = False
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (AgentA.sample_memories(AgentA.batch_size))
next_q_values = AgentA.model.predict([X_next_state_val])
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * AgentA.discount_rate * max_next_q_values
# Train the online DQN
AgentA.model.fit(X_state_val,tf.keras.utils.to_categorical(X_next_state_val, num_classes=10), verbose = 0)
nextplayer = 'B'
self.updateIter += 1
#evaluate A
else:
nextplayer = 'A'
else:
if count == 0:
# Online DQN evaluates what to do
q_valueB = AgentB.model.predict([stateB])
actionB = AgentB.epsilon_greedy(q_valueB, iteration)
# Online DQN plays
obs, reward, done, info = self.stepB(action = actionB, count = count)
next_stateB = obs
# Let's memorize what just happened
AgentB.replay_memory.append((stateB, actionB, reward, next_stateB, 1.0 - done))
stateB = next_stateB
else:
# Online DQN evaluates what to do
q_valueB = AgentB.model.predict([stateB])
actionB = AgentB.epsilon_greedy(q_valueB, iteration)
# Online DQN plays
obs, reward, done, info = self.stepB(action = actionB, count = count)
next_stateB = obs
# Let's memorize what just happened
# AgentB.replay_memory.append((state, action, reward, next_state, 1.0 - done))
stateB = next_stateB
count += 1
if count == 50:
count = 0
self.updateRewardB += reward
self.computeLossB(reward)
#restart the game if player A fails to get the ball, and let B start the game
if reward == 0:
restart = True
time.sleep(0.5)
nextplayer = 'A'
self.playerax = self.ballx
else:
restart = False
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (AgentB.sample_memories(AgentB.batch_size))
next_q_values = AgentB.model.predict([X_next_state_val])
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * AgentB.discount_rate * max_next_q_values
# Train the online DQN
AgentB.model.fit(X_state_val,tf.keras.utils.to_categorical(X_next_state_val, num_classes=10), verbose = 0)
nextplayer = 'A'
self.updateIter += 1
#evaluate B
else:
nextplayer = 'B'
count += 1
#CHECK BALL MOVEMENT
self.DISPLAYSURF.blit(self.PLAYERA, (self.playerax, 50))
self.DISPLAYSURF.blit(self.PLAYERB, (self.playerbx, 600))
self.DISPLAYSURF.blit(self.ball, (self.ballx, self.bally))
self.DISPLAYSURF.blit(self.randNumLabelA, (300, 630))
self.DISPLAYSURF.blit(self.randNumLabelB, (300, 40))
self.DISPLAYSURF.blit(self.randNumLabelIter, (50, 40))
#update last coordinate
self.lastxcoordinate = self.ballx
pygame.display.update()
self.fpsClock.tick(self.FPS)
for event in pygame.event.get():
if event.type == QUIT:
AgentA.model.save('AgentA.h5')
AgentB.model.save('AgentB.h5')
pygame.quit()
sys.exit()
| 35.803653 | 137 | 0.491583 | [
"Apache-2.0"
] | elishatofunmi/ReinEnv | pytennis/play.py | 15,682 | Python |
"""
Optuna example that optimizes a classifier configuration for cancer dataset
using XGBoost.
In this example, we optimize the validation accuracy of cancer detection
using XGBoost. We optimize both the choice of booster model and their hyper
parameters.
We have following two ways to execute this example:
(1) Execute this code directly.
$ python xgboost_simple.py
(2) Execute through CLI.
$ STUDY_NAME=`optuna create-study --storage sqlite:///example.db`
$ optuna study optimize xgboost_simple.py objective --n-trials=100 --study $STUDY_NAME \
--storage sqlite:///example.db
"""
from __future__ import division
import numpy as np
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
import xgboost as xgb
import optuna
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial):
(data, target) = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x, label=test_y)
param = {
'silent': 1,
'objective': 'binary:logistic',
'booster': trial.suggest_categorical('booster', ['gbtree', 'gblinear', 'dart']),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)
}
if param['booster'] == 'gbtree' or param['booster'] == 'dart':
param['max_depth'] = trial.suggest_int('max_depth', 1, 9)
param['eta'] = trial.suggest_loguniform('eta', 1e-8, 1.0)
param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)
param['grow_policy'] = trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide'])
if param['booster'] == 'dart':
param['sample_type'] = trial.suggest_categorical('sample_type', ['uniform', 'weighted'])
param['normalize_type'] = trial.suggest_categorical('normalize_type', ['tree', 'forest'])
param['rate_drop'] = trial.suggest_loguniform('rate_drop', 1e-8, 1.0)
param['skip_drop'] = trial.suggest_loguniform('skip_drop', 1e-8, 1.0)
bst = xgb.train(param, dtrain)
preds = bst.predict(dtest)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return 1.0 - accuracy
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(objective, n_trials=100)
print(study.best_trial)
| 36.605634 | 99 | 0.69873 | [
"MIT"
] | AkihiroTajima/optuna | examples/xgboost_simple.py | 2,599 | Python |
import re,sys
class Instruction:
def __init__(self,defn):
m = re.match("^([A-Fa-f0-9\-\,]+)\s+\"(.*?)\"\s+(.*)$",defn)
assert m is not None,"Bad line "+defn
range = m.group(1)
range = range+"-"+range if len(range) == 2 else range
range = range+",1" if len(range) == 5 else range
self.first = int(range[:2],16)
self.last = int(range[3:5],16)
self.step = int(range[-1],16)
self.name = m.group(2).strip()
self.code = m.group(3).strip()
#print(defn,range,self.first,self.last,self.step,self.getOpcodes())
def getOpcodes(self):
return range(self.first,self.last+self.step,self.step)
def getMnemonics(self,opcode):
base = self.name
base = self.process(base,opcode)
return base.lower()
def getCode(self,opcode,type = "C"):
base = self.process(self.code,opcode)
if (opcode & 0xF0) == 0xC0:
base = base + ";$CYCLES++"
isFirst = True
while base.find("$") >= 0:
if isFirst:
mWord = "$DF"
isFirst = False
else:
m = re.search("(\$[A-Za-z]+)",base)
mWord = m.group(1)
if type == "C":
base = base.replace(mWord,mWord[1:].upper())
elif type == "T":
base = base.replace(mWord,"this."+mWord[1:].lower())
else:
raise Exception()
while base.find(";;") >= 0:
base = base.replace(";;",";")
if base[0] == ';':
base = base[1:]
return base
def process(self,s,opc):
s = s.replace("@R","{0:X}".format(opc & 0x0F))
s = s.replace("@P","{0:X}".format(opc & 0x07))
s = s.replace("@E","{0:X}".format((opc & 0x03)+1))
s = s.replace("@BRANCH","$R[$P] = ($R[$P] & 0xFF00) | $T8")
s = s.replace("@LBRANCH","$R[$P] = $T16")
s = s.replace("@FETCH16","$T16=$FETCH();$T16=($T16 << 8)|$FETCH()")
s = s.replace("@LSKIP","$R[$P] = ($R[$P]+2) & 0xFFFF")
if s[:4] == "@ADD":
params = ["("+x+")" for x in s.strip()[5:-1].split(",")]
s = "$T16 = "+("+".join(params))+";$D = $T16 & 0xFF;$DF = ($T16 >> 8) & 1"
#print(s,params)
#sys.exit(0)
return s
src = open("1802.def").readlines()
src = [x if x.find("//") < 0 else x[:x.find("//")] for x in src]
src = [x.replace("\t"," ").strip() for x in src]
src = [x for x in src if x != ""]
instructions = [ None ] * 256
for l in src:
instr = Instruction(l)
for opc in instr.getOpcodes():
assert instructions[opc] is None,"Duplicate opcode : "+l
instructions[opc] = instr
mList = ",".join(['"'+instructions[x].getMnemonics(x)+'"' for x in range(0,256)])
open("_1802_mnemonics.h","w").write("{ "+mList+ " };\n\n")
h = open("_1802_case.h","w")
for i in range(0,256):
h.write("case 0x{0:02x}: /*** {1} ***/\n".format(i,instructions[i].getMnemonics(i)))
h.write(" "+instructions[i].getCode(i,"C")+";break;\n")
h.close()
h = open("_1802_opcodes.ts","w")
h.write("class CPU1802_Opcodes extends CPU1802_Base {\n\n")
h.write("public getOpcodeList():Function[] {\n ")
h.write(",".join("opcode_{0:02x}()".format(n) for n in range(0,256)))
h.write("\n}\n\n")
for i in range(0,256):
h.write("private opcode_{0:02x}(): void {{ /*** {1} ***/\n".format(i,instructions[i].getMnemonics(i)))
h.write(" "+instructions[i].getCode(i,"T")+";\n}\n")
h.write("}\n")
h.close()
h = open("_1802_ports.h","w")
for p in range(1,8):
h.write("#ifndef INPUT{0}\n#define INPUT{0}() (0)\n#endif\n".format(p))
h.write("#ifndef OUTPUT{0}\n#define OUTPUT{0}(x) {{}}\n#endif\n".format(p))
for p in range(1,5):
	h.write("#ifndef EFLAG{0}\n#define EFLAG{0}() (0)\n#endif\n".format(p))
# UPDATEQ is port-independent, so emit its guard once, outside the loop.
h.write("#ifndef UPDATEQ\n#define UPDATEQ(x) {}\n#endif\n")
h.close() | 32.607477 | 103 | 0.580109 | [
"MIT"
] | paulscottrobson/RCA-Cosmac-VIP-III | processor/generate.py | 3,489 | Python |
from __future__ import absolute_import, division, print_function
from glue.core.state_objects import State
from glue.external.echo import CallbackProperty, keep_in_sync
class AladinLiteLayerState(State):
    """State for a single layer in the Aladin Lite viewer, keeping ``color``
    and ``alpha`` in sync with the underlying glue layer's style."""
layer = CallbackProperty()
visible = CallbackProperty(True)
zorder = CallbackProperty(0)
color = CallbackProperty()
alpha = CallbackProperty()
def __init__(self, **kwargs):
super(AladinLiteLayerState, self).__init__(**kwargs)
self._sync_color = None
self._sync_alpha = None
self.add_callback('layer', self._layer_changed)
self._layer_changed()
def _layer_changed(self):
if self._sync_color is not None:
self._sync_color.stop_syncing()
if self._sync_alpha is not None:
self._sync_alpha.stop_syncing()
if self.layer is not None:
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self._sync_color = keep_in_sync(self, 'color', self.layer.style, 'color')
self._sync_alpha = keep_in_sync(self, 'alpha', self.layer.style, 'alpha')
| 28.225 | 85 | 0.674934 | [
"BSD-3-Clause"
] | glue-viz/glue-aladin | glue_aladin/layer_state.py | 1,129 | Python |
"""Writes the given metrics in a csv."""
import numpy as np
import os
import pandas as pd
import sys
models_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(models_dir)
from baseline_constants import CLIENT_ID_KEY, NUM_ROUND_KEY, NUM_SAMPLES_KEY
COLUMN_NAMES = [
CLIENT_ID_KEY, NUM_ROUND_KEY, 'hierarchy', NUM_SAMPLES_KEY]
def print_metrics(
round_number,
client_ids,
metrics,
hierarchies,
num_samples,
path):
"""Prints or appends the given metrics in a csv.
The resulting dataframe is of the form:
client_id, round_number, hierarchy, num_samples, metric1, metric2
twebbstack, 0, , 18, 0.5, 0.89
Args:
round_number: Number of the round the metrics correspond to. If
0, then the file in path is overwritten. If not 0, we append to
that file.
client_ids: Ids of the clients. Not all ids must be in the following
dicts.
metrics: Dict keyed by client id. Each element is a dict of metrics
for that client in the specified round. The dicts for all clients
are expected to have the same set of keys.
hierarchies: Dict keyed by client id. Each element is a list of hierarchies
to which the client belongs.
num_samples: Dict keyed by client id. Each element is the number of test
samples for the client.
"""
columns = COLUMN_NAMES + get_metrics_names(metrics)
client_data = pd.DataFrame(columns=columns)
for i, c_id in enumerate(client_ids):
current_client = {
'client_id': c_id,
'round_number': round_number,
'hierarchy': ','.join(hierarchies.get(c_id, [])),
'num_samples': num_samples.get(c_id, np.nan)
}
current_metrics = metrics.get(c_id, {})
for metric, metric_value in current_metrics.items():
current_client[metric] = metric_value
client_data.loc[len(client_data)] = current_client
mode = 'w' if round_number == 0 else 'a'
print_dataframe(client_data, path, mode)
def print_dataframe(df, path, mode='w'):
"""Writes the given dataframe in path as a csv"""
header = mode == 'w'
df.to_csv(path, mode=mode, header=header, index=False)
def get_metrics_names(metrics):
"""Gets the names of the metrics.
Args:
metrics: Dict keyed by client id. Each element is a dict of metrics
for that client in the specified round. The dicts for all clients
are expected to have the same set of keys."""
if len(metrics) == 0:
return []
metrics_dict = next(iter(metrics.values()))
return list(metrics_dict.keys())
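if __name__ == '__main__':
    # Minimal illustrative sketch (added; assumes this file is run from a
    # checkout where `baseline_constants` is importable, as set up above).
    demo_metrics = {'c1': {'accuracy': 0.9}, 'c2': {'accuracy': 0.8}}
    print_metrics(
        round_number=0,
        client_ids=['c1', 'c2'],
        metrics=demo_metrics,
        hierarchies={},
        num_samples={'c1': 10, 'c2': 12},
        path='metrics.csv')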
| 33.036145 | 83 | 0.651349 | [
"BSD-2-Clause"
] | slowbull/leaf | models/metrics/writer.py | 2,742 | Python |
/home/runner/.cache/pip/pool/3f/10/5e/0da870cfd442c4b93168f62f7eb1f09417d637dc6c7f4acefd6341907e | 96 | 96 | 0.895833 | [
"Apache-2.0"
] | 035NotEnd/Vowel-Chacker | venv/lib/python3.8/site-packages/yapftests/reformatter_python3_test.py | 96 | Python |
from math import log
number = int(input())
base = input()
if base == 'natural':
print(f'{log(number):.2f}')
else:
    print(f'{log(number, int(base)):.2f}')
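# Example (added): inputs "1000" and "10" print 3.00; entering the literal
# string "natural" selects the natural (base-e) logarithm.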
| 15 | 43 | 0.593939 | [
"MIT"
] | borisboychev/SoftUni | Python_Advanced_Softuni/Modules_Lab/venv/logarithm.py | 165 | Python |
import numpy as np
import pandas as pd
import sys # can use sys to take command line arguments
class Recommender():
    '''
    Template for a recommender system. Replace this docstring with a
    description of the approach your recommender takes.
    '''
    def __init__(self):
        '''
        What do we need to start out our recommender system?
        '''
    def fit(self):
        '''
        Fit the recommender to your dataset and also have this save the results
        to pull from when you need to make predictions.
        '''
    def predict_rating(self):
        '''
        Makes predictions of a rating for a user on a movie-user combo.
        '''
    def make_recs(self):
        '''
        Given a user id or a movie that an individual likes,
        make recommendations.
        '''
if __name__ == '__main__':
# test different parts to make sure it works
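    # Illustrative smoke test (added): the class is still a skeleton, so these
    # calls only exercise the empty method stubs.
    rec = Recommender()
    rec.fit()
    rec.predict_rating()
    rec.make_recs()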
| 23.416667 | 79 | 0.601423 | [
"MIT"
] | 43piRcubed/DSND_Term2 | lessons/Recommendations/2_Matrix_Factorization_for_Recommendations/recommender_template.py | 843 | Python |
import tensorflow as tf
import sys
sys.path.insert(0,'..')
import vtrace_popart as vtrace
nest = tf.contrib.framework.nest
from .flags import *
def compute_baseline_loss(advantages):
# Loss for the baseline, summed over the time dimension.
# Multiply by 0.5 to match the standard update rule:
# d(loss) / d(baseline) = advantage
return .5 * tf.reduce_sum(tf.square(advantages))
def compute_entropy_loss(logits):
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
entropy_per_timestep = tf.reduce_sum(-policy * log_policy, axis=-1)
return -tf.reduce_sum(entropy_per_timestep)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=logits)
advantages = tf.stop_gradient(advantages)
policy_gradient_loss_per_timestep = cross_entropy * advantages
return tf.reduce_sum(policy_gradient_loss_per_timestep)
def build_learner(agent, env_outputs, agent_outputs, env_id):
"""Builds the learner loop.
Args:
agent: A snt.RNNCore module outputting `AgentOutput` named tuples, with an
`unroll` call for computing the outputs for a whole trajectory.
    env_outputs: A `StepOutput` namedtuple where each field is of shape
      [T+1, ...].
    agent_outputs: An `AgentOutput` namedtuple where each field is of shape
      [T+1, ...].
    env_id: Tensor of per-environment game ids, used to look up the
      game-specific PopArt normalization statistics.
Returns:
A tuple of (done, infos, and environment frames) where
the environment frames tensor causes an update.
"""
learner_outputs = agent.unroll(agent_outputs.action, env_outputs, env_id)
# Use last baseline value (from the value function) to bootstrap.
bootstrap_value = learner_outputs.un_normalized_vf[-1]
# At this point, the environment outputs at time step `t` are the inputs that
# lead to the learner_outputs at time step `t`. After the following shifting,
# the actions in agent_outputs and learner_outputs at time step `t` is what
# leads to the environment outputs at time step `t`.
agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)
rewards, infos, done, _ = nest.map_structure(
lambda t: t[1:], env_outputs)
learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)
if FLAGS.reward_clipping == 'abs_one':
clipped_rewards = tf.clip_by_value(rewards, -1, 1)
elif FLAGS.reward_clipping == 'soft_asymmetric':
squeezed = tf.tanh(rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
    clipped_rewards = tf.where(rewards < 0, .3 * squeezed, squeezed) * 5.
  else:
    # Keep `clipped_rewards` defined for any other flag value.
    clipped_rewards = rewards
discounts = tf.to_float(~done) * FLAGS.discounting
game_specific_mean = tf.gather(agent._mean, env_id)
game_specific_std = tf.gather(agent._std, env_id)
# Compute V-trace returns and weights.
# Note, this is put on the CPU because it's faster than on GPU. It can be
# improved further with XLA-compilation or with a custom TensorFlow operation.
with tf.device('/cpu'):
vtrace_returns = vtrace.from_logits(
behaviour_policy_logits=agent_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=agent_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
un_normalized_values=learner_outputs.un_normalized_vf,
normalized_values=learner_outputs.normalized_vf,
mean=game_specific_mean,
std=game_specific_std,
bootstrap_value=bootstrap_value)
# First term of equation (7) in (Hessel et al., 2018)
normalized_vtrace = (vtrace_returns.vs - game_specific_mean) / game_specific_std
normalized_vtrace = nest.map_structure(tf.stop_gradient, normalized_vtrace)
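  # PopArt: the baseline regresses against targets normalized with per-game
  # statistics, keeping the regression target scale comparable across games;
  # the raw-scale value is recovered as normalized_value * std + mean.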
# Compute loss as a weighted sum of the baseline loss, the policy gradient
# loss and an entropy regularization term.
total_loss = compute_policy_gradient_loss(
learner_outputs.policy_logits, agent_outputs.action,
vtrace_returns.pg_advantages)
baseline_loss = compute_baseline_loss(
normalized_vtrace - learner_outputs.normalized_vf)
total_loss += FLAGS.baseline_cost * baseline_loss
total_loss += FLAGS.entropy_cost * compute_entropy_loss(
learner_outputs.policy_logits)
# Optimization
num_env_frames = tf.train.get_global_step()
learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, num_env_frames,
FLAGS.total_environment_frames, 0)
optimizer = tf.train.RMSPropOptimizer(learning_rate, FLAGS.decay,
FLAGS.momentum, FLAGS.epsilon)
  # Optionally clip gradients by their global norm.
if FLAGS.gradient_clipping > 0.0:
variables = tf.trainable_variables()
gradients = tf.gradients(total_loss, variables)
gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clipping)
train_op = optimizer.apply_gradients(zip(gradients, variables))
else:
train_op = optimizer.minimize(total_loss)
# Merge updating the network and environment frames into a single tensor.
with tf.control_dependencies([train_op]):
num_env_frames_and_train = num_env_frames.assign_add(
FLAGS.batch_size * FLAGS.unroll_length)
# Adding a few summaries.
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('total_loss', total_loss)
tf.summary.histogram('action', agent_outputs.action)
# I'm not sure if it's really necessary to put this operation on the CPU.
with tf.device('/cpu'):
(mean, mean_squared) = (agent.update_moments(vtrace_returns.vs, env_id))
return (done, infos, num_env_frames_and_train) + (mean, mean_squared) | 41.507353 | 82 | 0.740478 | [
"Apache-2.0"
] | steffenvan/IMPALA-PopArt | popart/build_learner.py | 5,645 | Python |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_optimizer_recommendation_actions
short_description: Perform actions on a Recommendation resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Recommendation resource in Oracle Cloud Infrastructure
- For I(action=bulk_apply), applies the specified recommendations to the resources.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
recommendation_id:
description:
- The unique OCID associated with the recommendation.
type: str
aliases: ["id"]
required: true
resource_action_ids:
description:
- The unique OCIDs of the resource actions that recommendations are applied to.
- This field is deprecated.
type: list
elements: str
actions:
description:
- The unique resource actions that recommendations are applied to.
type: list
elements: dict
suboptions:
resource_action_id:
description:
- The unique OCIDs of the resource actions that recommendations are applied to.
type: str
required: true
status:
description:
- The current status of the recommendation.
type: str
choices:
- "PENDING"
- "DISMISSED"
- "POSTPONED"
- "IMPLEMENTED"
time_status_end:
description:
- The date and time the current status will change. The format is defined by RFC3339.
- "For example, \\"The current `postponed` status of the resource action will end and change to `pending` on this
date and time.\\""
type: str
parameters:
description:
- "Additional parameter key-value pairs defining the resource action.
For example:"
- "`{\\"timeAmount\\": 15, \\"timeUnit\\": \\"seconds\\"}`"
type: dict
strategy_name:
description:
- The name of the strategy.
type: str
status:
description:
- The current status of the recommendation.
type: str
choices:
- "PENDING"
- "DISMISSED"
- "POSTPONED"
- "IMPLEMENTED"
required: true
time_status_end:
description:
- The date and time the current status will change. The format is defined by RFC3339.
- "For example, \\"The current `postponed` status of the resource action will end and change to `pending` on this
date and time.\\""
type: str
action:
description:
- The action to perform on the Recommendation.
type: str
required: true
choices:
- "bulk_apply"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action bulk_apply on recommendation
oci_optimizer_recommendation_actions:
# required
recommendation_id: "ocid1.recommendation.oc1..xxxxxxEXAMPLExxxxxx"
status: PENDING
action: bulk_apply
# optional
resource_action_ids: [ "null" ]
actions:
- # required
resource_action_id: "ocid1.resourceaction.oc1..xxxxxxEXAMPLExxxxxx"
# optional
status: PENDING
time_status_end: 2013-10-20T19:20:30+01:00
parameters: null
strategy_name: strategy_name_example
time_status_end: 2013-10-20T19:20:30+01:00
"""
RETURN = """
recommendation:
description:
- Details of the Recommendation resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The unique OCID associated with the recommendation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of the tenancy. The tenancy is the root compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
category_id:
description:
- The unique OCID associated with the category.
returned: on success
type: str
sample: "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name assigned to the recommendation.
returned: on success
type: str
sample: name_example
description:
description:
- Text describing the recommendation.
returned: on success
type: str
sample: description_example
importance:
description:
- The level of importance assigned to the recommendation.
returned: on success
type: str
sample: CRITICAL
resource_counts:
description:
- An array of `ResourceCount` objects grouped by the status of the resource actions.
returned: on success
type: complex
contains:
status:
description:
- The recommendation status of the resource.
returned: on success
type: str
sample: PENDING
count:
description:
- The count of resources.
returned: on success
type: int
sample: 56
lifecycle_state:
description:
- The recommendation's current state.
returned: on success
type: str
sample: ACTIVE
estimated_cost_saving:
description:
- The estimated cost savings, in dollars, for the recommendation.
returned: on success
type: float
sample: 1.2
status:
description:
- The current status of the recommendation.
returned: on success
type: str
sample: PENDING
time_status_begin:
description:
- The date and time that the recommendation entered its current status. The format is defined by RFC3339.
- "For example, \\"The status of the recommendation changed from `pending` to `current(ignored)` on this date and time.\\""
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_status_end:
description:
- The date and time the current status will change. The format is defined by RFC3339.
- "For example, \\"The current `postponed` status of the recommendation will end and change to `pending` on this
date and time.\\""
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_created:
description:
- The date and time the recommendation details were created, in the format defined by RFC3339.
returned: on success
type: str
sample: "2020-08-25T21:10:29.600Z"
time_updated:
description:
- The date and time the recommendation details were last updated, in the format defined by RFC3339.
returned: on success
type: str
sample: "2020-08-25T21:10:29.600Z"
supported_levels:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- The list of supported levels.
returned: on success
type: complex
contains:
name:
description:
- The name of the profile level.
returned: on success
type: str
sample: name_example
extended_metadata:
description:
- Additional metadata key/value pairs for the recommendation.
- "For example:"
- "`{\\"EstimatedSaving\\": \\"200\\"}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"category_id": "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"description": "description_example",
"importance": "CRITICAL",
"resource_counts": [{
"status": "PENDING",
"count": 56
}],
"lifecycle_state": "ACTIVE",
"estimated_cost_saving": 1.2,
"status": "PENDING",
"time_status_begin": "2013-10-20T19:20:30+01:00",
"time_status_end": "2013-10-20T19:20:30+01:00",
"time_created": "2020-08-25T21:10:29.600Z",
"time_updated": "2020-08-25T21:10:29.600Z",
"supported_levels": {
"items": [{
"name": "name_example"
}]
},
"extended_metadata": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.optimizer import OptimizerClient
from oci.optimizer.models import BulkApplyRecommendationsDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class RecommendationActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
bulk_apply
"""
@staticmethod
def get_module_resource_id_param():
return "recommendation_id"
def get_module_resource_id(self):
return self.module.params.get("recommendation_id")
def get_get_fn(self):
return self.client.get_recommendation
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_recommendation,
recommendation_id=self.module.params.get("recommendation_id"),
)
def bulk_apply(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, BulkApplyRecommendationsDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.bulk_apply_recommendations,
call_fn_args=(),
call_fn_kwargs=dict(
recommendation_id=self.module.params.get("recommendation_id"),
bulk_apply_recommendations_details=action_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
RecommendationActionsHelperCustom = get_custom_class(
"RecommendationActionsHelperCustom"
)
class ResourceHelper(RecommendationActionsHelperCustom, RecommendationActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
recommendation_id=dict(aliases=["id"], type="str", required=True),
resource_action_ids=dict(type="list", elements="str"),
actions=dict(
type="list",
elements="dict",
options=dict(
resource_action_id=dict(type="str", required=True),
status=dict(
type="str",
choices=["PENDING", "DISMISSED", "POSTPONED", "IMPLEMENTED"],
),
time_status_end=dict(type="str"),
parameters=dict(type="dict"),
strategy_name=dict(type="str"),
),
),
status=dict(
type="str",
required=True,
choices=["PENDING", "DISMISSED", "POSTPONED", "IMPLEMENTED"],
),
time_status_end=dict(type="str"),
action=dict(type="str", required=True, choices=["bulk_apply"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="recommendation",
service_client_class=OptimizerClient,
namespace="optimizer",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 34.591687 | 139 | 0.571742 | [
"Apache-2.0"
] | sagar2938/oci-ansible-collection | plugins/modules/oci_optimizer_recommendation_actions.py | 14,148 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
from django_secrets.startup import check
check()
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_secrets.settings'
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 32.185185 | 77 | 0.643268 | [
"MIT"
] | kakulukia/django-secrets | manage.py | 869 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/31 上午10:21
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : __init__.py.py
# @Software: PyCharm
from .huawei import SMSCenter
__author__ = 'blackmatrix'
if __name__ == '__main__':
pass
| 21.666667 | 45 | 0.667692 | [
"Apache-2.0"
] | blackmatrix7/iphone-hunter | sms/__init__.py | 329 | Python |
from __future__ import unicode_literals
import boto
from boto.exception import BotoServerError
from moto import mock_sns
import sure # noqa
@mock_sns
def test_create_platform_application():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application')
@mock_sns
def test_get_platform_application_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
attributes.should.equal({
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
})
@mock_sns
def test_get_missing_platform_application_attributes():
conn = boto.connect_sns()
conn.get_platform_application_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
@mock_sns
def test_set_platform_application_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
conn.set_platform_application_attributes(arn,
{"PlatformPrincipal": "other"}
)
attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
attributes.should.equal({
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "other",
})
@mock_sns
def test_list_platform_applications():
conn = boto.connect_sns()
conn.create_platform_application(
name="application1",
platform="APNS",
)
conn.create_platform_application(
name="application2",
platform="APNS",
)
    applications_response = conn.list_platform_applications()
    applications = applications_response['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(2)
@mock_sns
def test_delete_platform_application():
conn = boto.connect_sns()
conn.create_platform_application(
name="application1",
platform="APNS",
)
conn.create_platform_application(
name="application2",
platform="APNS",
)
    applications_response = conn.list_platform_applications()
    applications = applications_response['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(2)
application_arn = applications[0]['PlatformApplicationArn']
conn.delete_platform_application(application_arn)
    applications_response = conn.list_platform_applications()
    applications = applications_response['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(1)
@mock_sns
def test_create_platform_endpoint():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/")
@mock_sns
def test_get_list_endpoints_by_platform_application():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
endpoint_list = conn.list_endpoints_by_platform_application(
platform_application_arn=application_arn
)['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints']
endpoint_list.should.have.length_of(1)
endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data')
endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn)
@mock_sns
def test_get_endpoint_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
attributes.should.equal({
"Enabled": 'False',
"CustomUserData": "some data",
})
@mock_sns
def test_get_missing_endpoint_attributes():
conn = boto.connect_sns()
conn.get_endpoint_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
@mock_sns
def test_set_endpoint_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
conn.set_endpoint_attributes(endpoint_arn,
{"CustomUserData": "other data"}
)
attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
attributes.should.equal({
"Enabled": 'False',
"CustomUserData": "other data",
})
@mock_sns
def test_delete_endpoint():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
endpoint_list = conn.list_endpoints_by_platform_application(
platform_application_arn=application_arn
)['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints']
endpoint_list.should.have.length_of(1)
conn.delete_endpoint(endpoint_arn)
endpoint_list = conn.list_endpoints_by_platform_application(
platform_application_arn=application_arn
)['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints']
endpoint_list.should.have.length_of(0)
@mock_sns
def test_publish_to_platform_endpoint():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
conn.publish(message="some message", message_structure="json", target_arn=endpoint_arn)
| 36.546429 | 162 | 0.731262 | [
"Apache-2.0"
] | IlyaSukhanov/moto | tests/test_sns/test_application.py | 10,233 | Python |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A('Javelin',_class="brand",_href="/")
response.title = request.application.replace('_',' ').title()
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <[email protected]>'
response.meta.description = 'a cool new app'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, 'http://www.web2py.com', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Layouts'), False, 'http://web2py.com/layouts'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
(T('Semantic'), False, 'http://web2py.com/semantic'),
]),
(T('Documentation'), False, 'http://www.web2py.com/book', [
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Buy this book'), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
(T('Plugins'), False, None, [
('plugin_wiki', False,
'http://web2py.com/examples/default/download'),
(T('Other Plugins'), False,
'http://web2py.com/plugins'),
(T('Layout Plugins'),
False, 'http://web2py.com/layouts'),
])
]
)]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| 44.266187 | 79 | 0.478628 | [
"BSD-3-Clause"
] | jjacobson93/javelin-web2py | applications/javelin/models/menu.py | 6,153 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/videointelligence_v1/proto/video_intelligence.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/videointelligence_v1/proto/video_intelligence.proto",
package="google.cloud.videointelligence.v1",
syntax="proto3",
serialized_options=b"\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xc1\x06\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12Y\n\x17person_detection_config\x18\x0b \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.PersonDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 
\x01(\t"\xa5\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame\x12\x0f\n\x07version\x18\x05 \x01(\t"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"u\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame\x12\x0f\n\x07version\x18\x02 \x01(\t"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"*\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12\x0f\n\x07version\x18\x05 \x01(\t"f\n\x19PersonDetectionAnnotation\x12\x38\n\x06tracks\x18\x01 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x0f\n\x07version\x18\x02 \x01(\t"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x9c\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration:\x02\x18\x01"\xa7\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame:\x02\x18\x01"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xe9\n\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12O\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotationB\x02\x18\x01\x12^\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32:.google.cloud.videointelligence.v1.FaceDetectionAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12\x62\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32<.google.cloud.videointelligence.v1.PersonDetectionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"q\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment\x12\x0f\n\x07version\x18\x03 \x01(\t"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa8\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrame\x12\x0f\n\x07version\x18\x06 \x01(\tB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xf5\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 
videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3',
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
],
)
_FEATURE = _descriptor.EnumDescriptor(
name="Feature",
full_name="google.cloud.videointelligence.v1.Feature",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="FEATURE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="LABEL_DETECTION",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SHOT_CHANGE_DETECTION",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="EXPLICIT_CONTENT_DETECTION",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FACE_DETECTION",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SPEECH_TRANSCRIPTION",
index=5,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="TEXT_DETECTION",
index=6,
number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="OBJECT_TRACKING",
index=7,
number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="LOGO_RECOGNITION",
index=8,
number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="PERSON_DETECTION",
index=9,
number=14,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=8439,
serialized_end=8684,
)
_sym_db.RegisterEnumDescriptor(_FEATURE)
Feature = enum_type_wrapper.EnumTypeWrapper(_FEATURE)
_LABELDETECTIONMODE = _descriptor.EnumDescriptor(
name="LabelDetectionMode",
full_name="google.cloud.videointelligence.v1.LabelDetectionMode",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="LABEL_DETECTION_MODE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SHOT_MODE",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FRAME_MODE",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SHOT_AND_FRAME_MODE",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=8686,
serialized_end=8800,
)
_sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE)
LabelDetectionMode = enum_type_wrapper.EnumTypeWrapper(_LABELDETECTIONMODE)
_LIKELIHOOD = _descriptor.EnumDescriptor(
name="Likelihood",
full_name="google.cloud.videointelligence.v1.Likelihood",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="LIKELIHOOD_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="VERY_UNLIKELY",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="UNLIKELY",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="POSSIBLE",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="LIKELY",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="VERY_LIKELY",
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=8802,
serialized_end=8918,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
Likelihood = enum_type_wrapper.EnumTypeWrapper(_LIKELIHOOD)
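# For convenience, every enum value is also re-exported below as a
# module-level constant, so callers can write e.g. LABEL_DETECTION or
# VERY_LIKELY without going through the wrapper objects.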
FEATURE_UNSPECIFIED = 0
LABEL_DETECTION = 1
SHOT_CHANGE_DETECTION = 2
EXPLICIT_CONTENT_DETECTION = 3
FACE_DETECTION = 4
SPEECH_TRANSCRIPTION = 6
TEXT_DETECTION = 7
OBJECT_TRACKING = 9
LOGO_RECOGNITION = 12
PERSON_DETECTION = 14
LABEL_DETECTION_MODE_UNSPECIFIED = 0
SHOT_MODE = 1
FRAME_MODE = 2
SHOT_AND_FRAME_MODE = 3
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
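# The _descriptor.Descriptor blocks below define the wire schema of each
# message. A quick legend for the numeric codes used throughout (these are
# the FieldDescriptorProto constants from descriptor.proto):
#   type:  2=float, 5=int32, 8=bool, 9=string, 11=message, 12=bytes, 14=enum
#   label: 1=optional, 3=repeated
# serialized_options carries encoded option protos; in this file,
# b"\340A\002" and b"\340A\001" are the google.api.field_behavior
# annotations REQUIRED and OPTIONAL, and b"\030\001" marks a message as
# deprecated. The concrete Python classes are normally built from these
# descriptors further down via _reflection.GeneratedProtocolMessageType.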
_ANNOTATEVIDEOREQUEST = _descriptor.Descriptor(
name="AnnotateVideoRequest",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input_uri",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.input_uri",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_content",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.input_content",
index=1,
number=6,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="features",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.features",
index=2,
number=2,
type=14,
cpp_type=8,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="video_context",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.video_context",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output_uri",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.output_uri",
index=4,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="location_id",
full_name="google.cloud.videointelligence.v1.AnnotateVideoRequest.location_id",
index=5,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=319,
serialized_end=573,
)
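# Editor's illustrative sketch (not protoc output): assuming the
# AnnotateVideoRequest class is built from the descriptor above later in
# this module (protoc normally emits it via
# _reflection.GeneratedProtocolMessageType), a request for two features can
# be populated as below. The gs:// URI is a hypothetical example value.
def _example_annotate_video_request():
    return AnnotateVideoRequest(
        input_uri="gs://my-bucket/my-video.mp4",  # hypothetical input video
        features=[LABEL_DETECTION, SHOT_CHANGE_DETECTION],
    )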
_VIDEOCONTEXT = _descriptor.Descriptor(
name="VideoContext",
full_name="google.cloud.videointelligence.v1.VideoContext",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segments",
full_name="google.cloud.videointelligence.v1.VideoContext.segments",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="label_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.label_detection_config",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shot_change_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.shot_change_detection_config",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="explicit_content_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.explicit_content_detection_config",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="face_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.face_detection_config",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="speech_transcription_config",
full_name="google.cloud.videointelligence.v1.VideoContext.speech_transcription_config",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="text_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.text_detection_config",
index=6,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="person_detection_config",
full_name="google.cloud.videointelligence.v1.VideoContext.person_detection_config",
index=7,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="object_tracking_config",
full_name="google.cloud.videointelligence.v1.VideoContext.object_tracking_config",
index=8,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=576,
serialized_end=1409,
)
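# VideoContext bundles the segments to restrict processing to (field 1) plus
# one optional per-feature config message (field numbers 2-13, with gaps);
# each *_config field corresponds to a value of the Feature enum.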
_LABELDETECTIONCONFIG = _descriptor.Descriptor(
name="LabelDetectionConfig",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="label_detection_mode",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig.label_detection_mode",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="stationary_camera",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig.stationary_camera",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig.model",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frame_confidence_threshold",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig.frame_confidence_threshold",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="video_confidence_threshold",
full_name="google.cloud.videointelligence.v1.LabelDetectionConfig.video_confidence_threshold",
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1412,
serialized_end=1633,
)
_SHOTCHANGEDETECTIONCONFIG = _descriptor.Descriptor(
name="ShotChangeDetectionConfig",
full_name="google.cloud.videointelligence.v1.ShotChangeDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.ShotChangeDetectionConfig.model",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1635,
serialized_end=1677,
)
_OBJECTTRACKINGCONFIG = _descriptor.Descriptor(
name="ObjectTrackingConfig",
full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig.model",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1679,
serialized_end=1716,
)
_FACEDETECTIONCONFIG = _descriptor.Descriptor(
name="FaceDetectionConfig",
full_name="google.cloud.videointelligence.v1.FaceDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.model",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="include_bounding_boxes",
full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.include_bounding_boxes",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="include_attributes",
full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.include_attributes",
index=2,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1718,
serialized_end=1814,
)
_PERSONDETECTIONCONFIG = _descriptor.Descriptor(
name="PersonDetectionConfig",
full_name="google.cloud.videointelligence.v1.PersonDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="include_bounding_boxes",
full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_bounding_boxes",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="include_pose_landmarks",
full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_pose_landmarks",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="include_attributes",
full_name="google.cloud.videointelligence.v1.PersonDetectionConfig.include_attributes",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1816,
serialized_end=1931,
)
_EXPLICITCONTENTDETECTIONCONFIG = _descriptor.Descriptor(
name="ExplicitContentDetectionConfig",
full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig.model",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1933,
serialized_end=1980,
)
_TEXTDETECTIONCONFIG = _descriptor.Descriptor(
name="TextDetectionConfig",
full_name="google.cloud.videointelligence.v1.TextDetectionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="language_hints",
full_name="google.cloud.videointelligence.v1.TextDetectionConfig.language_hints",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.videointelligence.v1.TextDetectionConfig.model",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1982,
serialized_end=2042,
)
_VIDEOSEGMENT = _descriptor.Descriptor(
name="VideoSegment",
full_name="google.cloud.videointelligence.v1.VideoSegment",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time_offset",
full_name="google.cloud.videointelligence.v1.VideoSegment.start_time_offset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time_offset",
full_name="google.cloud.videointelligence.v1.VideoSegment.end_time_offset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2044,
serialized_end=2164,
)
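# VideoSegment bounds a span of the video with two message-typed fields; per
# the public API, start_time_offset and end_time_offset are
# google.protobuf.Duration values relative to the start of the video.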
_LABELSEGMENT = _descriptor.Descriptor(
name="LabelSegment",
full_name="google.cloud.videointelligence.v1.LabelSegment",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.LabelSegment.segment",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.LabelSegment.confidence",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2166,
serialized_end=2266,
)
_LABELFRAME = _descriptor.Descriptor(
name="LabelFrame",
full_name="google.cloud.videointelligence.v1.LabelFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.LabelFrame.time_offset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.LabelFrame.confidence",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2268,
serialized_end=2348,
)
_ENTITY = _descriptor.Descriptor(
name="Entity",
full_name="google.cloud.videointelligence.v1.Entity",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="entity_id",
full_name="google.cloud.videointelligence.v1.Entity.entity_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="description",
full_name="google.cloud.videointelligence.v1.Entity.description",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="language_code",
full_name="google.cloud.videointelligence.v1.Entity.language_code",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2350,
serialized_end=2421,
)
_LABELANNOTATION = _descriptor.Descriptor(
name="LabelAnnotation",
full_name="google.cloud.videointelligence.v1.LabelAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="entity",
full_name="google.cloud.videointelligence.v1.LabelAnnotation.entity",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="category_entities",
full_name="google.cloud.videointelligence.v1.LabelAnnotation.category_entities",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segments",
full_name="google.cloud.videointelligence.v1.LabelAnnotation.segments",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frames",
full_name="google.cloud.videointelligence.v1.LabelAnnotation.frames",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.LabelAnnotation.version",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2424,
serialized_end=2717,
)
_EXPLICITCONTENTFRAME = _descriptor.Descriptor(
name="ExplicitContentFrame",
full_name="google.cloud.videointelligence.v1.ExplicitContentFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.ExplicitContentFrame.time_offset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="pornography_likelihood",
full_name="google.cloud.videointelligence.v1.ExplicitContentFrame.pornography_likelihood",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2720,
serialized_end=2869,
)
_EXPLICITCONTENTANNOTATION = _descriptor.Descriptor(
name="ExplicitContentAnnotation",
full_name="google.cloud.videointelligence.v1.ExplicitContentAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="frames",
full_name="google.cloud.videointelligence.v1.ExplicitContentAnnotation.frames",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.ExplicitContentAnnotation.version",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2871,
serialized_end=2988,
)
_NORMALIZEDBOUNDINGBOX = _descriptor.Descriptor(
name="NormalizedBoundingBox",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingBox",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="left",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingBox.left",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="top",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingBox.top",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="right",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingBox.right",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="bottom",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingBox.bottom",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2990,
serialized_end=3071,
)
_FACEDETECTIONANNOTATION = _descriptor.Descriptor(
name="FaceDetectionAnnotation",
full_name="google.cloud.videointelligence.v1.FaceDetectionAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.FaceDetectionAnnotation.version",
index=0,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3073,
serialized_end=3115,
)
_PERSONDETECTIONANNOTATION = _descriptor.Descriptor(
name="PersonDetectionAnnotation",
full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="tracks",
full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation.tracks",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.PersonDetectionAnnotation.version",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3117,
serialized_end=3219,
)
_FACESEGMENT = _descriptor.Descriptor(
name="FaceSegment",
full_name="google.cloud.videointelligence.v1.FaceSegment",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.FaceSegment.segment",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3221,
serialized_end=3300,
)
_FACEFRAME = _descriptor.Descriptor(
name="FaceFrame",
full_name="google.cloud.videointelligence.v1.FaceFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="normalized_bounding_boxes",
full_name="google.cloud.videointelligence.v1.FaceFrame.normalized_bounding_boxes",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.FaceFrame.time_offset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\030\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3303,
serialized_end=3459,
)
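# serialized_options=b"\030\001" above encodes `option deprecated = true`
# (MessageOptions field 3, varint 1); FaceAnnotation below carries the same
# flag.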
_FACEANNOTATION = _descriptor.Descriptor(
name="FaceAnnotation",
full_name="google.cloud.videointelligence.v1.FaceAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="thumbnail",
full_name="google.cloud.videointelligence.v1.FaceAnnotation.thumbnail",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segments",
full_name="google.cloud.videointelligence.v1.FaceAnnotation.segments",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frames",
full_name="google.cloud.videointelligence.v1.FaceAnnotation.frames",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\030\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3462,
serialized_end=3629,
)
_TIMESTAMPEDOBJECT = _descriptor.Descriptor(
name="TimestampedObject",
full_name="google.cloud.videointelligence.v1.TimestampedObject",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="normalized_bounding_box",
full_name="google.cloud.videointelligence.v1.TimestampedObject.normalized_bounding_box",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.TimestampedObject.time_offset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="attributes",
full_name="google.cloud.videointelligence.v1.TimestampedObject.attributes",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="landmarks",
full_name="google.cloud.videointelligence.v1.TimestampedObject.landmarks",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3632,
serialized_end=3946,
)
_TRACK = _descriptor.Descriptor(
name="Track",
full_name="google.cloud.videointelligence.v1.Track",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.Track.segment",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="timestamped_objects",
full_name="google.cloud.videointelligence.v1.Track.timestamped_objects",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="attributes",
full_name="google.cloud.videointelligence.v1.Track.attributes",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.Track.confidence",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3949,
serialized_end=4209,
)
_DETECTEDATTRIBUTE = _descriptor.Descriptor(
name="DetectedAttribute",
full_name="google.cloud.videointelligence.v1.DetectedAttribute",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.videointelligence.v1.DetectedAttribute.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.DetectedAttribute.confidence",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.videointelligence.v1.DetectedAttribute.value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4211,
serialized_end=4279,
)
_DETECTEDLANDMARK = _descriptor.Descriptor(
name="DetectedLandmark",
full_name="google.cloud.videointelligence.v1.DetectedLandmark",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.videointelligence.v1.DetectedLandmark.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="point",
full_name="google.cloud.videointelligence.v1.DetectedLandmark.point",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.DetectedLandmark.confidence",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4281,
serialized_end=4401,
)
_VIDEOANNOTATIONRESULTS = _descriptor.Descriptor(
name="VideoAnnotationResults",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input_uri",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.input_uri",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.segment",
index=1,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segment_label_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.segment_label_annotations",
index=2,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segment_presence_label_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.segment_presence_label_annotations",
index=3,
number=23,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shot_label_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.shot_label_annotations",
index=4,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shot_presence_label_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.shot_presence_label_annotations",
index=5,
number=24,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frame_label_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.frame_label_annotations",
index=6,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="face_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.face_annotations",
index=7,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\030\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="face_detection_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.face_detection_annotations",
index=8,
number=13,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shot_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.shot_annotations",
index=9,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="explicit_annotation",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.explicit_annotation",
index=10,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="speech_transcriptions",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.speech_transcriptions",
index=11,
number=11,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="text_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.text_annotations",
index=12,
number=12,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="object_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.object_annotations",
index=13,
number=14,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="logo_recognition_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.logo_recognition_annotations",
index=14,
number=19,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="person_detection_annotations",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.person_detection_annotations",
index=15,
number=20,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="error",
full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.error",
index=16,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4404,
serialized_end=5789,
)
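# VideoAnnotationResults is the per-video payload inside
# AnnotateVideoResponse: only the fields matching the requested features
# (plus `error` on failure) are populated.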
_ANNOTATEVIDEORESPONSE = _descriptor.Descriptor(
name="AnnotateVideoResponse",
full_name="google.cloud.videointelligence.v1.AnnotateVideoResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="annotation_results",
full_name="google.cloud.videointelligence.v1.AnnotateVideoResponse.annotation_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=5791,
serialized_end=5901,
)
_VIDEOANNOTATIONPROGRESS = _descriptor.Descriptor(
name="VideoAnnotationProgress",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input_uri",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.input_uri",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_percent",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.progress_percent",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="update_time",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.update_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="feature",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.feature",
index=4,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.segment",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=5904,
serialized_end=6198,
)
_ANNOTATEVIDEOPROGRESS = _descriptor.Descriptor(
name="AnnotateVideoProgress",
full_name="google.cloud.videointelligence.v1.AnnotateVideoProgress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="annotation_progress",
full_name="google.cloud.videointelligence.v1.AnnotateVideoProgress.annotation_progress",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6200,
serialized_end=6312,
)
_SPEECHTRANSCRIPTIONCONFIG = _descriptor.Descriptor(
name="SpeechTranscriptionConfig",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="language_code",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.language_code",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\002",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_alternatives",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.max_alternatives",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="filter_profanity",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.filter_profanity",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="speech_contexts",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.speech_contexts",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="enable_automatic_punctuation",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.enable_automatic_punctuation",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="audio_tracks",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.audio_tracks",
index=5,
number=6,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="enable_speaker_diarization",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.enable_speaker_diarization",
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="diarization_speaker_count",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.diarization_speaker_count",
index=7,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="enable_word_confidence",
full_name="google.cloud.videointelligence.v1.SpeechTranscriptionConfig.enable_word_confidence",
index=8,
number=9,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6315,
serialized_end=6700,
)
_SPEECHCONTEXT = _descriptor.Descriptor(
name="SpeechContext",
full_name="google.cloud.videointelligence.v1.SpeechContext",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="phrases",
full_name="google.cloud.videointelligence.v1.SpeechContext.phrases",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6702,
serialized_end=6739,
)
_SPEECHTRANSCRIPTION = _descriptor.Descriptor(
name="SpeechTranscription",
full_name="google.cloud.videointelligence.v1.SpeechTranscription",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="alternatives",
full_name="google.cloud.videointelligence.v1.SpeechTranscription.alternatives",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="language_code",
full_name="google.cloud.videointelligence.v1.SpeechTranscription.language_code",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6742,
serialized_end=6878,
)
_SPEECHRECOGNITIONALTERNATIVE = _descriptor.Descriptor(
name="SpeechRecognitionAlternative",
full_name="google.cloud.videointelligence.v1.SpeechRecognitionAlternative",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="transcript",
full_name="google.cloud.videointelligence.v1.SpeechRecognitionAlternative.transcript",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.SpeechRecognitionAlternative.confidence",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="words",
full_name="google.cloud.videointelligence.v1.SpeechRecognitionAlternative.words",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6881,
serialized_end=7021,
)
_WORDINFO = _descriptor.Descriptor(
name="WordInfo",
full_name="google.cloud.videointelligence.v1.WordInfo",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.videointelligence.v1.WordInfo.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.cloud.videointelligence.v1.WordInfo.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="word",
full_name="google.cloud.videointelligence.v1.WordInfo.word",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.WordInfo.confidence",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="speaker_tag",
full_name="google.cloud.videointelligence.v1.WordInfo.speaker_tag",
index=4,
number=5,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7024,
serialized_end=7191,
)
_NORMALIZEDVERTEX = _descriptor.Descriptor(
name="NormalizedVertex",
full_name="google.cloud.videointelligence.v1.NormalizedVertex",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="x",
full_name="google.cloud.videointelligence.v1.NormalizedVertex.x",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="y",
full_name="google.cloud.videointelligence.v1.NormalizedVertex.y",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7193,
serialized_end=7233,
)
_NORMALIZEDBOUNDINGPOLY = _descriptor.Descriptor(
name="NormalizedBoundingPoly",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingPoly",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="vertices",
full_name="google.cloud.videointelligence.v1.NormalizedBoundingPoly.vertices",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7235,
serialized_end=7330,
)
_TEXTSEGMENT = _descriptor.Descriptor(
name="TextSegment",
full_name="google.cloud.videointelligence.v1.TextSegment",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.TextSegment.segment",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.TextSegment.confidence",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frames",
full_name="google.cloud.videointelligence.v1.TextSegment.frames",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7333,
serialized_end=7494,
)
_TEXTFRAME = _descriptor.Descriptor(
name="TextFrame",
full_name="google.cloud.videointelligence.v1.TextFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="rotated_bounding_box",
full_name="google.cloud.videointelligence.v1.TextFrame.rotated_bounding_box",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.TextFrame.time_offset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7497,
serialized_end=7645,
)
_TEXTANNOTATION = _descriptor.Descriptor(
name="TextAnnotation",
full_name="google.cloud.videointelligence.v1.TextAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="text",
full_name="google.cloud.videointelligence.v1.TextAnnotation.text",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segments",
full_name="google.cloud.videointelligence.v1.TextAnnotation.segments",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.TextAnnotation.version",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7647,
serialized_end=7760,
)
_OBJECTTRACKINGFRAME = _descriptor.Descriptor(
name="ObjectTrackingFrame",
full_name="google.cloud.videointelligence.v1.ObjectTrackingFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="normalized_bounding_box",
full_name="google.cloud.videointelligence.v1.ObjectTrackingFrame.normalized_bounding_box",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_offset",
full_name="google.cloud.videointelligence.v1.ObjectTrackingFrame.time_offset",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=7763,
serialized_end=7923,
)
_OBJECTTRACKINGANNOTATION = _descriptor.Descriptor(
name="ObjectTrackingAnnotation",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="segment",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.segment",
index=0,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="track_id",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.track_id",
index=1,
number=5,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.entity",
index=2,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="confidence",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.confidence",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="frames",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.frames",
index=4,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="version",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.version",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="track_info",
full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.track_info",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=7926,
serialized_end=8222,
)
_LOGORECOGNITIONANNOTATION = _descriptor.Descriptor(
name="LogoRecognitionAnnotation",
full_name="google.cloud.videointelligence.v1.LogoRecognitionAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="entity",
full_name="google.cloud.videointelligence.v1.LogoRecognitionAnnotation.entity",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="tracks",
full_name="google.cloud.videointelligence.v1.LogoRecognitionAnnotation.tracks",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="segments",
full_name="google.cloud.videointelligence.v1.LogoRecognitionAnnotation.segments",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=8225,
serialized_end=8436,
)
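# The statements below resolve the cross-references that protoc leaves as
# placeholders in the descriptors above: every message-typed or enum-typed
# field gets its ``message_type``/``enum_type`` pointer filled in so the
# reflection machinery can construct nested messages correctly.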
_ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE
_ANNOTATEVIDEOREQUEST.fields_by_name["video_context"].message_type = _VIDEOCONTEXT
_VIDEOCONTEXT.fields_by_name["segments"].message_type = _VIDEOSEGMENT
_VIDEOCONTEXT.fields_by_name[
"label_detection_config"
].message_type = _LABELDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"shot_change_detection_config"
].message_type = _SHOTCHANGEDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"explicit_content_detection_config"
].message_type = _EXPLICITCONTENTDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"face_detection_config"
].message_type = _FACEDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"speech_transcription_config"
].message_type = _SPEECHTRANSCRIPTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"text_detection_config"
].message_type = _TEXTDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"person_detection_config"
].message_type = _PERSONDETECTIONCONFIG
_VIDEOCONTEXT.fields_by_name[
"object_tracking_config"
].message_type = _OBJECTTRACKINGCONFIG
_LABELDETECTIONCONFIG.fields_by_name[
"label_detection_mode"
].enum_type = _LABELDETECTIONMODE
_VIDEOSEGMENT.fields_by_name[
"start_time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_VIDEOSEGMENT.fields_by_name[
"end_time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LABELSEGMENT.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_LABELFRAME.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LABELANNOTATION.fields_by_name["entity"].message_type = _ENTITY
_LABELANNOTATION.fields_by_name["category_entities"].message_type = _ENTITY
_LABELANNOTATION.fields_by_name["segments"].message_type = _LABELSEGMENT
_LABELANNOTATION.fields_by_name["frames"].message_type = _LABELFRAME
_EXPLICITCONTENTFRAME.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_EXPLICITCONTENTFRAME.fields_by_name["pornography_likelihood"].enum_type = _LIKELIHOOD
_EXPLICITCONTENTANNOTATION.fields_by_name["frames"].message_type = _EXPLICITCONTENTFRAME
_PERSONDETECTIONANNOTATION.fields_by_name["tracks"].message_type = _TRACK
_FACESEGMENT.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_FACEFRAME.fields_by_name[
"normalized_bounding_boxes"
].message_type = _NORMALIZEDBOUNDINGBOX
_FACEFRAME.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_FACEANNOTATION.fields_by_name["segments"].message_type = _FACESEGMENT
_FACEANNOTATION.fields_by_name["frames"].message_type = _FACEFRAME
_TIMESTAMPEDOBJECT.fields_by_name[
"normalized_bounding_box"
].message_type = _NORMALIZEDBOUNDINGBOX
_TIMESTAMPEDOBJECT.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_TIMESTAMPEDOBJECT.fields_by_name["attributes"].message_type = _DETECTEDATTRIBUTE
_TIMESTAMPEDOBJECT.fields_by_name["landmarks"].message_type = _DETECTEDLANDMARK
_TRACK.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_TRACK.fields_by_name["timestamped_objects"].message_type = _TIMESTAMPEDOBJECT
_TRACK.fields_by_name["attributes"].message_type = _DETECTEDATTRIBUTE
_DETECTEDLANDMARK.fields_by_name["point"].message_type = _NORMALIZEDVERTEX
_VIDEOANNOTATIONRESULTS.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_VIDEOANNOTATIONRESULTS.fields_by_name[
"segment_label_annotations"
].message_type = _LABELANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"segment_presence_label_annotations"
].message_type = _LABELANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"shot_label_annotations"
].message_type = _LABELANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"shot_presence_label_annotations"
].message_type = _LABELANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"frame_label_annotations"
].message_type = _LABELANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"face_annotations"
].message_type = _FACEANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"face_detection_annotations"
].message_type = _FACEDETECTIONANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name["shot_annotations"].message_type = _VIDEOSEGMENT
_VIDEOANNOTATIONRESULTS.fields_by_name[
"explicit_annotation"
].message_type = _EXPLICITCONTENTANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"speech_transcriptions"
].message_type = _SPEECHTRANSCRIPTION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"text_annotations"
].message_type = _TEXTANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"object_annotations"
].message_type = _OBJECTTRACKINGANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"logo_recognition_annotations"
].message_type = _LOGORECOGNITIONANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"person_detection_annotations"
].message_type = _PERSONDETECTIONANNOTATION
_VIDEOANNOTATIONRESULTS.fields_by_name[
"error"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
_ANNOTATEVIDEORESPONSE.fields_by_name[
"annotation_results"
].message_type = _VIDEOANNOTATIONRESULTS
_VIDEOANNOTATIONPROGRESS.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VIDEOANNOTATIONPROGRESS.fields_by_name[
"update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VIDEOANNOTATIONPROGRESS.fields_by_name["feature"].enum_type = _FEATURE
_VIDEOANNOTATIONPROGRESS.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_ANNOTATEVIDEOPROGRESS.fields_by_name[
"annotation_progress"
].message_type = _VIDEOANNOTATIONPROGRESS
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name[
"speech_contexts"
].message_type = _SPEECHCONTEXT
_SPEECHTRANSCRIPTION.fields_by_name[
"alternatives"
].message_type = _SPEECHRECOGNITIONALTERNATIVE
_SPEECHRECOGNITIONALTERNATIVE.fields_by_name["words"].message_type = _WORDINFO
_WORDINFO.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_WORDINFO.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_NORMALIZEDBOUNDINGPOLY.fields_by_name["vertices"].message_type = _NORMALIZEDVERTEX
_TEXTSEGMENT.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_TEXTSEGMENT.fields_by_name["frames"].message_type = _TEXTFRAME
_TEXTFRAME.fields_by_name["rotated_bounding_box"].message_type = _NORMALIZEDBOUNDINGPOLY
_TEXTFRAME.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_TEXTANNOTATION.fields_by_name["segments"].message_type = _TEXTSEGMENT
_OBJECTTRACKINGFRAME.fields_by_name[
"normalized_bounding_box"
].message_type = _NORMALIZEDBOUNDINGBOX
_OBJECTTRACKINGFRAME.fields_by_name[
"time_offset"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_OBJECTTRACKINGANNOTATION.fields_by_name["segment"].message_type = _VIDEOSEGMENT
_OBJECTTRACKINGANNOTATION.fields_by_name["entity"].message_type = _ENTITY
_OBJECTTRACKINGANNOTATION.fields_by_name["frames"].message_type = _OBJECTTRACKINGFRAME
_OBJECTTRACKINGANNOTATION.oneofs_by_name["track_info"].fields.append(
_OBJECTTRACKINGANNOTATION.fields_by_name["segment"]
)
_OBJECTTRACKINGANNOTATION.fields_by_name[
"segment"
].containing_oneof = _OBJECTTRACKINGANNOTATION.oneofs_by_name["track_info"]
_OBJECTTRACKINGANNOTATION.oneofs_by_name["track_info"].fields.append(
_OBJECTTRACKINGANNOTATION.fields_by_name["track_id"]
)
_OBJECTTRACKINGANNOTATION.fields_by_name[
"track_id"
].containing_oneof = _OBJECTTRACKINGANNOTATION.oneofs_by_name["track_info"]
_LOGORECOGNITIONANNOTATION.fields_by_name["entity"].message_type = _ENTITY
_LOGORECOGNITIONANNOTATION.fields_by_name["tracks"].message_type = _TRACK
_LOGORECOGNITIONANNOTATION.fields_by_name["segments"].message_type = _VIDEOSEGMENT
DESCRIPTOR.message_types_by_name["AnnotateVideoRequest"] = _ANNOTATEVIDEOREQUEST
DESCRIPTOR.message_types_by_name["VideoContext"] = _VIDEOCONTEXT
DESCRIPTOR.message_types_by_name["LabelDetectionConfig"] = _LABELDETECTIONCONFIG
DESCRIPTOR.message_types_by_name[
"ShotChangeDetectionConfig"
] = _SHOTCHANGEDETECTIONCONFIG
DESCRIPTOR.message_types_by_name["ObjectTrackingConfig"] = _OBJECTTRACKINGCONFIG
DESCRIPTOR.message_types_by_name["FaceDetectionConfig"] = _FACEDETECTIONCONFIG
DESCRIPTOR.message_types_by_name["PersonDetectionConfig"] = _PERSONDETECTIONCONFIG
DESCRIPTOR.message_types_by_name[
"ExplicitContentDetectionConfig"
] = _EXPLICITCONTENTDETECTIONCONFIG
DESCRIPTOR.message_types_by_name["TextDetectionConfig"] = _TEXTDETECTIONCONFIG
DESCRIPTOR.message_types_by_name["VideoSegment"] = _VIDEOSEGMENT
DESCRIPTOR.message_types_by_name["LabelSegment"] = _LABELSEGMENT
DESCRIPTOR.message_types_by_name["LabelFrame"] = _LABELFRAME
DESCRIPTOR.message_types_by_name["Entity"] = _ENTITY
DESCRIPTOR.message_types_by_name["LabelAnnotation"] = _LABELANNOTATION
DESCRIPTOR.message_types_by_name["ExplicitContentFrame"] = _EXPLICITCONTENTFRAME
DESCRIPTOR.message_types_by_name[
"ExplicitContentAnnotation"
] = _EXPLICITCONTENTANNOTATION
DESCRIPTOR.message_types_by_name["NormalizedBoundingBox"] = _NORMALIZEDBOUNDINGBOX
DESCRIPTOR.message_types_by_name["FaceDetectionAnnotation"] = _FACEDETECTIONANNOTATION
DESCRIPTOR.message_types_by_name[
"PersonDetectionAnnotation"
] = _PERSONDETECTIONANNOTATION
DESCRIPTOR.message_types_by_name["FaceSegment"] = _FACESEGMENT
DESCRIPTOR.message_types_by_name["FaceFrame"] = _FACEFRAME
DESCRIPTOR.message_types_by_name["FaceAnnotation"] = _FACEANNOTATION
DESCRIPTOR.message_types_by_name["TimestampedObject"] = _TIMESTAMPEDOBJECT
DESCRIPTOR.message_types_by_name["Track"] = _TRACK
DESCRIPTOR.message_types_by_name["DetectedAttribute"] = _DETECTEDATTRIBUTE
DESCRIPTOR.message_types_by_name["DetectedLandmark"] = _DETECTEDLANDMARK
DESCRIPTOR.message_types_by_name["VideoAnnotationResults"] = _VIDEOANNOTATIONRESULTS
DESCRIPTOR.message_types_by_name["AnnotateVideoResponse"] = _ANNOTATEVIDEORESPONSE
DESCRIPTOR.message_types_by_name["VideoAnnotationProgress"] = _VIDEOANNOTATIONPROGRESS
DESCRIPTOR.message_types_by_name["AnnotateVideoProgress"] = _ANNOTATEVIDEOPROGRESS
DESCRIPTOR.message_types_by_name[
"SpeechTranscriptionConfig"
] = _SPEECHTRANSCRIPTIONCONFIG
DESCRIPTOR.message_types_by_name["SpeechContext"] = _SPEECHCONTEXT
DESCRIPTOR.message_types_by_name["SpeechTranscription"] = _SPEECHTRANSCRIPTION
DESCRIPTOR.message_types_by_name[
"SpeechRecognitionAlternative"
] = _SPEECHRECOGNITIONALTERNATIVE
DESCRIPTOR.message_types_by_name["WordInfo"] = _WORDINFO
DESCRIPTOR.message_types_by_name["NormalizedVertex"] = _NORMALIZEDVERTEX
DESCRIPTOR.message_types_by_name["NormalizedBoundingPoly"] = _NORMALIZEDBOUNDINGPOLY
DESCRIPTOR.message_types_by_name["TextSegment"] = _TEXTSEGMENT
DESCRIPTOR.message_types_by_name["TextFrame"] = _TEXTFRAME
DESCRIPTOR.message_types_by_name["TextAnnotation"] = _TEXTANNOTATION
DESCRIPTOR.message_types_by_name["ObjectTrackingFrame"] = _OBJECTTRACKINGFRAME
DESCRIPTOR.message_types_by_name["ObjectTrackingAnnotation"] = _OBJECTTRACKINGANNOTATION
DESCRIPTOR.message_types_by_name[
"LogoRecognitionAnnotation"
] = _LOGORECOGNITIONANNOTATION
DESCRIPTOR.enum_types_by_name["Feature"] = _FEATURE
DESCRIPTOR.enum_types_by_name["LabelDetectionMode"] = _LABELDETECTIONMODE
DESCRIPTOR.enum_types_by_name["Likelihood"] = _LIKELIHOOD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
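# From here on, the concrete message classes are created. Each call to
# GeneratedProtocolMessageType builds a class from its descriptor (metaclass
# machinery supplied by the protobuf runtime), and RegisterMessage adds the
# class to the default symbol database.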
AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType(
"AnnotateVideoRequest",
(_message.Message,),
{
"DESCRIPTOR": _ANNOTATEVIDEOREQUEST,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video annotation request.
Attributes:
input_uri:
Input video location. Currently, only `Cloud Storage
<https://cloud.google.com/storage/>`__ URIs are supported.
URIs must be specified in the following format: ``gs://bucket-
id/object-id`` (other URI formats return [google.rpc.Code.INVA
LID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more
information, see `Request URIs
<https://cloud.google.com/storage/docs/request-endpoints>`__.
To identify multiple videos, a video URI may include wildcards
in the ``object-id``. Supported wildcards: '*' to match 0 or
more characters; '?' to match 1 character. If unset, the input
video should be embedded in the request as ``input_content``.
If set, ``input_content`` must be unset.
input_content:
The video data bytes. If unset, the input video(s) should be
specified via the ``input_uri``. If set, ``input_uri`` must be
unset.
features:
Required. Requested video annotation features.
video_context:
Additional video context and/or feature-specific parameters.
output_uri:
Optional. Location where the output (in JSON format) should be
stored. Currently, only `Cloud Storage
<https://cloud.google.com/storage/>`__ URIs are supported.
These must be specified in the following format:
``gs://bucket-id/object-id`` (other URI formats return [google
.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT])
. For more information, see `Request URIs
<https://cloud.google.com/storage/docs/request-endpoints>`__.
location_id:
Optional. Cloud region where annotation should take place.
Supported cloud regions are: ``us-east1``, ``us-west1``,
``europe-west1``, ``asia-east1``. If no region is specified,
the region will be determined based on video file location.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoRequest)
},
)
_sym_db.RegisterMessage(AnnotateVideoRequest)
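# Illustrative sketch (kept as a comment so this generated module stays
# side-effect free): building a request with the class created above. The
# module-level ``Feature`` enum wrapper generated earlier in this file is
# assumed, and the bucket/object names are placeholders.
#
#   request = AnnotateVideoRequest(
#       input_uri="gs://bucket-id/object-id",          # Cloud Storage URI
#       features=[Feature.Value("LABEL_DETECTION")],   # repeated enum field
#       location_id="us-east1",                        # optional region
#   )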
VideoContext = _reflection.GeneratedProtocolMessageType(
"VideoContext",
(_message.Message,),
{
"DESCRIPTOR": _VIDEOCONTEXT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video context and/or feature-specific parameters.
Attributes:
segments:
Video segments to annotate. The segments may overlap and are
not required to be contiguous or span the whole video. If
unspecified, each video is treated as a single segment.
label_detection_config:
Config for LABEL_DETECTION.
shot_change_detection_config:
Config for SHOT_CHANGE_DETECTION.
explicit_content_detection_config:
Config for EXPLICIT_CONTENT_DETECTION.
face_detection_config:
Config for FACE_DETECTION.
speech_transcription_config:
Config for SPEECH_TRANSCRIPTION.
text_detection_config:
Config for TEXT_DETECTION.
person_detection_config:
Config for PERSON_DETECTION.
object_tracking_config:
Config for OBJECT_TRACKING.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoContext)
},
)
_sym_db.RegisterMessage(VideoContext)
LabelDetectionConfig = _reflection.GeneratedProtocolMessageType(
"LabelDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _LABELDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for LABEL_DETECTION.
Attributes:
label_detection_mode:
What labels should be detected with LABEL_DETECTION, in
addition to video-level labels or segment-level labels. If
unspecified, defaults to ``SHOT_MODE``.
stationary_camera:
Whether the video has been shot from a stationary (i.e., non-
moving) camera. When set to true, this might improve detection
accuracy for moving objects. Should be used with
``SHOT_AND_FRAME_MODE`` enabled.
model:
Model to use for label detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
frame_confidence_threshold:
The confidence threshold used to filter labels from frame-
level detection. If not set, it defaults to 0.4. The valid
range for this threshold is [0.1, 0.9]; any value outside this
range will be clipped. Note: for best results, keep the
default threshold. The default threshold is updated every time
a new model is released.
video_confidence_threshold:
The confidence threshold used to filter labels from video-
level and shot-level detections. If not set, it defaults to
0.3. The valid range for this threshold is [0.1, 0.9]; any
value outside this range will be clipped. Note: for best
results, keep the default threshold. The default threshold is
updated every time a new model is released.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelDetectionConfig)
},
)
_sym_db.RegisterMessage(LabelDetectionConfig)
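# Minimal sketch of the config described in the docstring above (comment
# only; the ``LabelDetectionMode`` enum wrapper from earlier in this file
# is assumed):
#
#   config = LabelDetectionConfig(
#       label_detection_mode=LabelDetectionMode.Value("SHOT_AND_FRAME_MODE"),
#       stationary_camera=True,            # hint for non-moving cameras
#       frame_confidence_threshold=0.4,    # valid range [0.1, 0.9]
#   )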
ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType(
"ShotChangeDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for SHOT_CHANGE_DETECTION.
Attributes:
model:
Model to use for shot change detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ShotChangeDetectionConfig)
},
)
_sym_db.RegisterMessage(ShotChangeDetectionConfig)
ObjectTrackingConfig = _reflection.GeneratedProtocolMessageType(
"ObjectTrackingConfig",
(_message.Message,),
{
"DESCRIPTOR": _OBJECTTRACKINGCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for OBJECT_TRACKING.
Attributes:
model:
Model to use for object tracking. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingConfig)
},
)
_sym_db.RegisterMessage(ObjectTrackingConfig)
FaceDetectionConfig = _reflection.GeneratedProtocolMessageType(
"FaceDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _FACEDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for FACE_DETECTION.
Attributes:
model:
Model to use for face detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
include_bounding_boxes:
Whether bounding boxes are included in the face annotation
output.
include_attributes:
Whether to enable face attributes detection, such as glasses,
dark_glasses, mouth_open etc. Ignored if
‘include_bounding_boxes’ is set to false.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionConfig)
},
)
_sym_db.RegisterMessage(FaceDetectionConfig)
PersonDetectionConfig = _reflection.GeneratedProtocolMessageType(
"PersonDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _PERSONDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for PERSON_DETECTION.
Attributes:
include_bounding_boxes:
Whether bounding boxes are included in the person detection
annotation output.
include_pose_landmarks:
Whether to enable pose landmarks detection. Ignored if
‘include_bounding_boxes’ is set to false.
include_attributes:
Whether to enable person attributes detection, such as cloth
color (black, blue, etc), type (coat, dress, etc), pattern
(plain, floral, etc), hair, etc. Ignored if
‘include_bounding_boxes’ is set to false.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionConfig)
},
)
_sym_db.RegisterMessage(PersonDetectionConfig)
ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType(
"ExplicitContentDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for EXPLICIT_CONTENT_DETECTION.
Attributes:
model:
Model to use for explicit content detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentDetectionConfig)
},
)
_sym_db.RegisterMessage(ExplicitContentDetectionConfig)
TextDetectionConfig = _reflection.GeneratedProtocolMessageType(
"TextDetectionConfig",
(_message.Message,),
{
"DESCRIPTOR": _TEXTDETECTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for TEXT_DETECTION.
Attributes:
language_hints:
Language hint can be specified if the language to be detected
is known a priori. It can increase the accuracy of the
detection. The language hint must be a language code in BCP-47
format. Automatic language detection is performed if no hint
is provided.
model:
Model to use for text detection. Supported values:
“builtin/stable” (the default if unset) and “builtin/latest”.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextDetectionConfig)
},
)
_sym_db.RegisterMessage(TextDetectionConfig)
VideoSegment = _reflection.GeneratedProtocolMessageType(
"VideoSegment",
(_message.Message,),
{
"DESCRIPTOR": _VIDEOSEGMENT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment.
Attributes:
start_time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the start of the segment (inclusive).
end_time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the end of the segment (inclusive).
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoSegment)
},
)
_sym_db.RegisterMessage(VideoSegment)
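# Sketch of a segment covering the first 30 seconds of a video; the offsets
# are google.protobuf.Duration messages (comment only, illustrative):
#
#   from google.protobuf import duration_pb2
#   segment = VideoSegment(
#       start_time_offset=duration_pb2.Duration(seconds=0),
#       end_time_offset=duration_pb2.Duration(seconds=30),
#   )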
LabelSegment = _reflection.GeneratedProtocolMessageType(
"LabelSegment",
(_message.Message,),
{
"DESCRIPTOR": _LABELSEGMENT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for label detection.
Attributes:
segment:
Video segment where a label was detected.
confidence:
Confidence that the label is accurate. Range: [0, 1].
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelSegment)
},
)
_sym_db.RegisterMessage(LabelSegment)
LabelFrame = _reflection.GeneratedProtocolMessageType(
"LabelFrame",
(_message.Message,),
{
"DESCRIPTOR": _LABELFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for label detection.
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the video frame for this location.
confidence:
Confidence that the label is accurate. Range: [0, 1].
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelFrame)
},
)
_sym_db.RegisterMessage(LabelFrame)
Entity = _reflection.GeneratedProtocolMessageType(
"Entity",
(_message.Message,),
{
"DESCRIPTOR": _ENTITY,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Detected entity from video analysis.
Attributes:
entity_id:
Opaque entity ID. Some IDs may be available in `Google
Knowledge Graph Search API
<https://developers.google.com/knowledge-graph/>`__.
description:
Textual description, e.g., ``Fixed-gear bicycle``.
language_code:
Language code for ``description`` in BCP-47 format.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.Entity)
},
)
_sym_db.RegisterMessage(Entity)
LabelAnnotation = _reflection.GeneratedProtocolMessageType(
"LabelAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _LABELANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Label annotation.
Attributes:
entity:
Detected entity.
category_entities:
Common categories for the detected entity. For example, when
the label is ``Terrier``, the category is likely ``dog``. In
some cases there might be more than one category, e.g.,
``Terrier`` could also be a ``pet``.
segments:
All video segments where a label was detected.
frames:
All video frames where a label was detected.
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelAnnotation)
},
)
_sym_db.RegisterMessage(LabelAnnotation)
ExplicitContentFrame = _reflection.GeneratedProtocolMessageType(
"ExplicitContentFrame",
(_message.Message,),
{
"DESCRIPTOR": _EXPLICITCONTENTFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for explicit content.
Attributes:
time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the video frame for this location.
pornography_likelihood:
Likelihood of the pornography content.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentFrame)
},
)
_sym_db.RegisterMessage(ExplicitContentFrame)
ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType(
"ExplicitContentAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _EXPLICITCONTENTANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Explicit content annotation (based on per-frame visual signals only).
If no explicit content has been detected in a frame, no annotations
are present for that frame.
Attributes:
frames:
All video frames where explicit content was detected.
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentAnnotation)
},
)
_sym_db.RegisterMessage(ExplicitContentAnnotation)
NormalizedBoundingBox = _reflection.GeneratedProtocolMessageType(
"NormalizedBoundingBox",
(_message.Message,),
{
"DESCRIPTOR": _NORMALIZEDBOUNDINGBOX,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Normalized bounding box. The normalized vertex coordinates are
relative to the original image. Range: [0, 1].
Attributes:
left:
Left X coordinate.
top:
Top Y coordinate.
right:
Right X coordinate.
bottom:
Bottom Y coordinate.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedBoundingBox)
},
)
_sym_db.RegisterMessage(NormalizedBoundingBox)
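# Since the coordinates above are normalized to [0, 1], converting a box
# back to pixels needs the frame dimensions (illustrative comment;
# frame_width and frame_height are assumed to be known):
#
#   box = NormalizedBoundingBox(left=0.1, top=0.2, right=0.9, bottom=0.8)
#   width_px = (box.right - box.left) * frame_width
#   height_px = (box.bottom - box.top) * frame_height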
FaceDetectionAnnotation = _reflection.GeneratedProtocolMessageType(
"FaceDetectionAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _FACEDETECTIONANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Face detection annotation.
Attributes:
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionAnnotation)
},
)
_sym_db.RegisterMessage(FaceDetectionAnnotation)
PersonDetectionAnnotation = _reflection.GeneratedProtocolMessageType(
"PersonDetectionAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _PERSONDETECTIONANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Person detection annotation per video.
Attributes:
tracks:
The detected tracks of a person.
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.PersonDetectionAnnotation)
},
)
_sym_db.RegisterMessage(PersonDetectionAnnotation)
FaceSegment = _reflection.GeneratedProtocolMessageType(
"FaceSegment",
(_message.Message,),
{
"DESCRIPTOR": _FACESEGMENT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for face detection.
Attributes:
segment:
Video segment where a face was detected.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceSegment)
},
)
_sym_db.RegisterMessage(FaceSegment)
FaceFrame = _reflection.GeneratedProtocolMessageType(
"FaceFrame",
(_message.Message,),
{
"DESCRIPTOR": _FACEFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Deprecated. No effect.
Attributes:
normalized_bounding_boxes:
Normalized bounding boxes in a frame. There can be more than
one box if the same face is detected in multiple locations
within the current frame.
time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the video frame for this location.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceFrame)
},
)
_sym_db.RegisterMessage(FaceFrame)
FaceAnnotation = _reflection.GeneratedProtocolMessageType(
"FaceAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _FACEANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Deprecated. No effect.
Attributes:
thumbnail:
Thumbnail of a representative face view (in JPEG format).
segments:
All video segments where a face was detected.
frames:
All video frames where a face was detected.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceAnnotation)
},
)
_sym_db.RegisterMessage(FaceAnnotation)
TimestampedObject = _reflection.GeneratedProtocolMessageType(
"TimestampedObject",
(_message.Message,),
{
"DESCRIPTOR": _TIMESTAMPEDOBJECT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """For tracking related features. An object at time_offset with
attributes, and located with normalized_bounding_box.
Attributes:
normalized_bounding_box:
Normalized bounding box in a frame where the object is
located.
time_offset:
Time-offset, relative to the beginning of the video,
corresponding to the video frame for this object.
attributes:
Optional. The attributes of the object in the bounding box.
landmarks:
Optional. The detected landmarks.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TimestampedObject)
},
)
_sym_db.RegisterMessage(TimestampedObject)
Track = _reflection.GeneratedProtocolMessageType(
"Track",
(_message.Message,),
{
"DESCRIPTOR": _TRACK,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A track of an object instance.
Attributes:
segment:
Video segment of a track.
timestamped_objects:
The object with timestamp and attributes per frame in the
track.
attributes:
Optional. Attributes in the track level.
confidence:
Optional. The confidence score of the tracked object.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.Track)
},
)
_sym_db.RegisterMessage(Track)
DetectedAttribute = _reflection.GeneratedProtocolMessageType(
"DetectedAttribute",
(_message.Message,),
{
"DESCRIPTOR": _DETECTEDATTRIBUTE,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A generic detected attribute represented by name in string format.
Attributes:
name:
The name of the attribute, for example, glasses, dark_glasses,
mouth_open. A full list of supported type names will be
provided in the document.
confidence:
Detected attribute confidence. Range [0, 1].
value:
Text value of the detection result. For example, the value for
“HairColor” can be “black”, “blonde”, etc.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.DetectedAttribute)
},
)
_sym_db.RegisterMessage(DetectedAttribute)
DetectedLandmark = _reflection.GeneratedProtocolMessageType(
"DetectedLandmark",
(_message.Message,),
{
"DESCRIPTOR": _DETECTEDLANDMARK,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A generic detected landmark represented by name in string format and a
2D location.
Attributes:
name:
The name of this landmark, for example, left_hand,
right_shoulder.
point:
The 2D point of the detected landmark using the normalized
image coordinate system. The normalized coordinates have the
range from 0 to 1.
confidence:
The confidence score of the detected landmark. Range [0, 1].
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.DetectedLandmark)
},
)
_sym_db.RegisterMessage(DetectedLandmark)
VideoAnnotationResults = _reflection.GeneratedProtocolMessageType(
"VideoAnnotationResults",
(_message.Message,),
{
"DESCRIPTOR": _VIDEOANNOTATIONRESULTS,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotation results for a single video.
Attributes:
input_uri:
Video file location in `Cloud Storage
<https://cloud.google.com/storage/>`__.
segment:
Video segment on which the annotation is run.
segment_label_annotations:
Topical label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label.
segment_presence_label_annotations:
Presence label annotations on video level or user-specified
segment level. There is exactly one element for each unique
label. Compared to the existing topical
``segment_label_annotations``, this field presents more fine-
grained, segment-level labels detected in video content and is
made available only when the client sets
``LabelDetectionConfig.model`` to “builtin/latest” in the
request.
shot_label_annotations:
Topical label annotations on shot level. There is exactly one
element for each unique label.
shot_presence_label_annotations:
Presence label annotations on shot level. There is exactly one
element for each unique label. Compared to the existing
topical ``shot_label_annotations``, this field presents more
fine-grained, shot-level labels detected in video content and
is made available only when the client sets
``LabelDetectionConfig.model`` to “builtin/latest” in the
request.
frame_label_annotations:
Label annotations on frame level. There is exactly one element
for each unique label.
face_annotations:
Deprecated. Please use ``face_detection_annotations`` instead.
face_detection_annotations:
Face detection annotations.
shot_annotations:
Shot annotations. Each shot is represented as a video segment.
explicit_annotation:
Explicit content annotation.
speech_transcriptions:
Speech transcription.
text_annotations:
OCR text detection and tracking. Annotations for list of
detected text snippets. Each will have a list of frame
information associated with it.
object_annotations:
Annotations for list of objects detected and tracked in video.
logo_recognition_annotations:
Annotations for list of logos detected, tracked and recognized
in video.
person_detection_annotations:
Person detection annotations.
error:
If set, indicates an error. Note that for a single
``AnnotateVideoRequest`` some videos may succeed and some may
fail.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationResults)
},
)
_sym_db.RegisterMessage(VideoAnnotationResults)
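# Typical read path for the results message above (comment-only sketch;
# ``results`` is assumed to be a populated VideoAnnotationResults):
#
#   for label in results.segment_label_annotations:
#       for seg in label.segments:
#           print(label.entity.description, seg.confidence)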
AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType(
"AnnotateVideoResponse",
(_message.Message,),
{
"DESCRIPTOR": _ANNOTATEVIDEORESPONSE,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video annotation response. Included in the ``response`` field of the
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
Attributes:
annotation_results:
Annotation results for all videos specified in
``AnnotateVideoRequest``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoResponse)
},
)
_sym_db.RegisterMessage(AnnotateVideoResponse)
VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType(
"VideoAnnotationProgress",
(_message.Message,),
{
"DESCRIPTOR": _VIDEOANNOTATIONPROGRESS,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotation progress for a single video.
Attributes:
input_uri:
Video file location in `Cloud Storage
<https://cloud.google.com/storage/>`__.
progress_percent:
Approximate percentage processed thus far. Guaranteed to be
100 when fully processed.
start_time:
Time when the request was received.
update_time:
Time of the most recent update.
feature:
Specifies which feature is being tracked if the request
contains more than one feature.
segment:
Specifies which segment is being tracked if the request
contains more than one segment.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationProgress)
},
)
_sym_db.RegisterMessage(VideoAnnotationProgress)
AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType(
"AnnotateVideoProgress",
(_message.Message,),
{
"DESCRIPTOR": _ANNOTATEVIDEOPROGRESS,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video annotation progress. Included in the ``metadata`` field of the
``Operation`` returned by the ``GetOperation`` call of the
``google::longrunning::Operations`` service.
Attributes:
annotation_progress:
Progress metadata for all videos specified in
``AnnotateVideoRequest``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoProgress)
},
)
_sym_db.RegisterMessage(AnnotateVideoProgress)
SpeechTranscriptionConfig = _reflection.GeneratedProtocolMessageType(
"SpeechTranscriptionConfig",
(_message.Message,),
{
"DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Config for SPEECH_TRANSCRIPTION.
Attributes:
language_code:
Required. The language of the supplied audio as a
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
language tag. Example: “en-US”. See `Language Support
<https://cloud.google.com/speech/docs/languages>`__ for a list
of the currently supported language codes.
max_alternatives:
Optional. Maximum number of recognition hypotheses to be
returned. Specifically, the maximum number of
``SpeechRecognitionAlternative`` messages within each
``SpeechTranscription``. The server may return fewer than
``max_alternatives``. Valid values are ``0``-``30``. A value
of ``0`` or ``1`` will return a maximum of one. If omitted,
will return a maximum of one.
filter_profanity:
Optional. If set to ``true``, the server will attempt to
filter out profanities, replacing all but the initial
character in each filtered word with asterisks, e.g. "f***".
If set to ``false`` or omitted, profanities won’t be filtered
out.
speech_contexts:
Optional. A means to provide context to assist the speech
recognition.
enable_automatic_punctuation:
Optional. If ‘true’, adds punctuation to recognition result
hypotheses. This feature is only available in select
languages. Setting this for requests in other languages has no
effect at all. The default ‘false’ value does not add
punctuation to result hypotheses. NOTE: “This is currently
offered as an experimental service, complimentary to all
users. In the future this may be exclusively available as a
premium feature.”
audio_tracks:
Optional. For file formats, such as MXF or MKV, supporting
multiple audio tracks, specify up to two tracks. Default:
track 0.
enable_speaker_diarization:
Optional. If ‘true’, enables speaker detection for each
recognized word in the top alternative of the recognition
result using a speaker_tag provided in the WordInfo. Note:
When this is true, we send all the words from the beginning of
the audio for the top alternative in every consecutive
response. This is done in order to improve our speaker tags as
our models learn to identify the speakers in the conversation
over time.
diarization_speaker_count:
Optional. If set, specifies the estimated number of speakers
in the conversation. If not set, defaults to ‘2’. Ignored
unless enable_speaker_diarization is set to true.
enable_word_confidence:
Optional. If ``true``, the top result includes a list of words
and the confidence for those words. If ``false``, no word-
level confidence information is returned. The default is
``false``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechTranscriptionConfig)
},
)
_sym_db.RegisterMessage(SpeechTranscriptionConfig)
SpeechContext = _reflection.GeneratedProtocolMessageType(
"SpeechContext",
(_message.Message,),
{
"DESCRIPTOR": _SPEECHCONTEXT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Provides “hints” to the speech recognizer to favor specific words and
phrases in the results.
Attributes:
phrases:
Optional. A list of strings containing words and phrases
“hints” so that the speech recognition is more likely to
recognize them. This can be used to improve the accuracy for
specific words and phrases, for example, if specific commands
are typically spoken by the user. This can also be used to add
additional words to the vocabulary of the recognizer. See
`usage limits
<https://cloud.google.com/speech/limits#content>`__.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechContext)
},
)
_sym_db.RegisterMessage(SpeechContext)
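# Illustrative sketch (editor's addition): a SpeechTranscriptionConfig with
# phrase hints, combining the two messages documented above. All field
# values ("en-US", the phrases, etc.) are hypothetical examples, not
# defaults.
def _example_speech_config():
    return SpeechTranscriptionConfig(
        language_code="en-US",  # required BCP-47 language tag
        max_alternatives=2,  # the server may return fewer
        enable_automatic_punctuation=True,
        speech_contexts=[SpeechContext(phrases=["protobuf", "gRPC"])],
    )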
SpeechTranscription = _reflection.GeneratedProtocolMessageType(
"SpeechTranscription",
(_message.Message,),
{
"DESCRIPTOR": _SPEECHTRANSCRIPTION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """A speech recognition result corresponding to a portion of the audio.
Attributes:
alternatives:
May contain one or more recognition hypotheses (up to the
maximum specified in ``max_alternatives``). These alternatives
are ordered in terms of accuracy, with the top (first)
alternative being the most probable, as ranked by the
recognizer.
language_code:
Output only. The `BCP-47 <https://www.rfc-
editor.org/rfc/bcp/bcp47.txt>`__ language tag of the language
in this result. This language code was detected to have the
most likelihood of being spoken in the audio.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechTranscription)
},
)
_sym_db.RegisterMessage(SpeechTranscription)
SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType(
"SpeechRecognitionAlternative",
(_message.Message,),
{
"DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Alternative hypotheses (a.k.a. n-best list).
Attributes:
transcript:
Transcript text representing the words that the user spoke.
confidence:
Output only. The confidence estimate between 0.0 and 1.0. A
higher number indicates an estimated greater likelihood that
the recognized words are correct. This field is set only for
the top alternative. This field is not guaranteed to be
accurate and users should not rely on it to be always
provided. The default of 0.0 is a sentinel value indicating
``confidence`` was not set.
words:
Output only. A list of word-specific information for each
recognized word. Note: When ``enable_speaker_diarization`` is
set to true, you will see all the words from the beginning of
the audio.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechRecognitionAlternative)
},
)
_sym_db.RegisterMessage(SpeechRecognitionAlternative)
WordInfo = _reflection.GeneratedProtocolMessageType(
"WordInfo",
(_message.Message,),
{
"DESCRIPTOR": _WORDINFO,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Word-specific information for recognized words. Word information is
only included in the response when certain request parameters are set,
such as ``enable_word_time_offsets``.
Attributes:
start_time:
Time offset relative to the beginning of the audio, and
corresponding to the start of the spoken word. This field is
only set if ``enable_word_time_offsets=true`` and only in the
top hypothesis. This is an experimental feature and the
accuracy of the time offset can vary.
end_time:
Time offset relative to the beginning of the audio, and
corresponding to the end of the spoken word. This field is
only set if ``enable_word_time_offsets=true`` and only in the
top hypothesis. This is an experimental feature and the
accuracy of the time offset can vary.
word:
The word corresponding to this set of information.
confidence:
Output only. The confidence estimate between 0.0 and 1.0. A
higher number indicates an estimated greater likelihood that
the recognized words are correct. This field is set only for
the top alternative. This field is not guaranteed to be
accurate and users should not rely on it to be always
provided. The default of 0.0 is a sentinel value indicating
``confidence`` was not set.
speaker_tag:
Output only. A distinct integer value is assigned for every
speaker within the audio. This field specifies which one of
those speakers was detected to have spoken this word. Value
ranges from 1 up to diarization_speaker_count, and is only set
if speaker diarization is enabled.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.WordInfo)
},
)
_sym_db.RegisterMessage(WordInfo)
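# Illustrative sketch (editor's addition): walking the n-best list and the
# per-word diarization tags described in the docstrings above.
# ``transcription`` stands for a hypothetical SpeechTranscription message
# received from the service.
def _example_read_transcription(transcription):
    best = transcription.alternatives[0]  # most probable hypothesis
    for word_info in best.words:
        print(word_info.word, word_info.speaker_tag, word_info.confidence)
    return best.transcript, best.confidence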
NormalizedVertex = _reflection.GeneratedProtocolMessageType(
"NormalizedVertex",
(_message.Message,),
{
"DESCRIPTOR": _NORMALIZEDVERTEX,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """X coordinate.
Attributes:
y:
Y coordinate.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedVertex)
},
)
_sym_db.RegisterMessage(NormalizedVertex)
NormalizedBoundingPoly = _reflection.GeneratedProtocolMessageType(
"NormalizedBoundingPoly",
(_message.Message,),
{
"DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Normalized bounding polygon for text (that might not be aligned with
axis). Contains list of the corner points in clockwise order starting
from top-left corner. For example, for a rectangular bounding box:
When the text is horizontal it might look like:
0----1
|    |
3----2
When it’s clockwise rotated 180 degrees around the top-left corner it
becomes:
2----3
|    |
1----0
and the vertex order will still be (0, 1, 2, 3). Note that values can
be less than 0, or greater than 1 due to trigonometric calculations
for location of the box.
Attributes:
vertices:
Normalized vertices of the bounding polygon.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedBoundingPoly)
},
)
_sym_db.RegisterMessage(NormalizedBoundingPoly)
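# Illustrative sketch (editor's addition): a rectangular bounding polygon
# with corners in the clockwise-from-top-left order the docstring above
# describes. The coordinate values are hypothetical.
def _example_bounding_poly():
    return NormalizedBoundingPoly(
        vertices=[
            NormalizedVertex(x=0.1, y=0.1),  # 0: top-left
            NormalizedVertex(x=0.9, y=0.1),  # 1: top-right
            NormalizedVertex(x=0.9, y=0.3),  # 2: bottom-right
            NormalizedVertex(x=0.1, y=0.3),  # 3: bottom-left
        ]
    )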
TextSegment = _reflection.GeneratedProtocolMessageType(
"TextSegment",
(_message.Message,),
{
"DESCRIPTOR": _TEXTSEGMENT,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video segment level annotation results for text detection.
Attributes:
segment:
Video segment where a text snippet was detected.
confidence:
Confidence for the track of detected text. It is calculated as
the highest over all frames where OCR detected text appears.
frames:
Information related to the frames where OCR detected text
appears.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextSegment)
},
)
_sym_db.RegisterMessage(TextSegment)
TextFrame = _reflection.GeneratedProtocolMessageType(
"TextFrame",
(_message.Message,),
{
"DESCRIPTOR": _TEXTFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotation results for text annotation (OCR).
Contains information regarding timestamp and bounding box locations
for the frames containing detected OCR text snippets.
Attributes:
rotated_bounding_box:
Bounding polygon of the detected text for this frame.
time_offset:
Timestamp of this frame.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextFrame)
},
)
_sym_db.RegisterMessage(TextFrame)
TextAnnotation = _reflection.GeneratedProtocolMessageType(
"TextAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _TEXTANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotations related to one detected OCR text snippet. This will
contain the corresponding text, confidence value, and frame level
information for each detection.
Attributes:
text:
The detected text.
segments:
All video segments where OCR detected text appears.
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextAnnotation)
},
)
_sym_db.RegisterMessage(TextAnnotation)
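# Illustrative sketch (editor's addition): flattening the OCR hierarchy
# (TextAnnotation -> TextSegment -> TextFrame) documented above.
# ``text_annotation`` stands for a hypothetical message from the service.
def _example_read_text_annotation(text_annotation):
    for segment in text_annotation.segments:
        for frame in segment.frames:
            # Each frame carries a timestamp and a rotated bounding box.
            print(
                text_annotation.text,
                segment.confidence,
                frame.time_offset,
                len(frame.rotated_bounding_box.vertices),
            )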
ObjectTrackingFrame = _reflection.GeneratedProtocolMessageType(
"ObjectTrackingFrame",
(_message.Message,),
{
"DESCRIPTOR": _OBJECTTRACKINGFRAME,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Video frame level annotations for object detection and tracking. This
field stores per frame location, time offset, and confidence.
Attributes:
normalized_bounding_box:
The normalized bounding box location of this object track for
the frame.
time_offset:
The timestamp of the frame in microseconds.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingFrame)
},
)
_sym_db.RegisterMessage(ObjectTrackingFrame)
ObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType(
"ObjectTrackingAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _OBJECTTRACKINGANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotations corresponding to one tracked object.
Attributes:
track_info:
Different representation of tracking info in non-streaming
batch and streaming modes.
segment:
Non-streaming batch mode ONLY. Each object track corresponds
to one video segment where it appears.
track_id:
Streaming mode ONLY. In streaming mode, we do not know the end
time of a tracked object before it is completed. Hence, there
is no VideoSegment info returned. Instead, we provide a unique
identifiable integer track_id so that the customers can
correlate the results of the ongoing ObjectTrackAnnotation of
the same track_id over time.
entity:
Entity to specify the object category that this track is
labeled as.
confidence:
Object category’s labeling confidence of this track.
frames:
Information corresponding to all frames where this object
track appears. Non-streaming batch mode: it may be one or
multiple ObjectTrackingFrame messages in frames. Streaming
mode: it can only be one ObjectTrackingFrame message in
frames.
version:
Feature version.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingAnnotation)
},
)
_sym_db.RegisterMessage(ObjectTrackingAnnotation)
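# Illustrative sketch (editor's addition): ``track_info`` is a oneof, so
# non-streaming batch results carry ``segment`` while streaming results
# carry ``track_id``, as the docstring above explains. ``annotation`` is a
# hypothetical message instance.
def _example_track_info(annotation):
    if annotation.WhichOneof("track_info") == "segment":
        return "batch segment: %s" % annotation.segment
    return "streaming track id: %d" % annotation.track_id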
LogoRecognitionAnnotation = _reflection.GeneratedProtocolMessageType(
"LogoRecognitionAnnotation",
(_message.Message,),
{
"DESCRIPTOR": _LOGORECOGNITIONANNOTATION,
"__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2",
"__doc__": """Annotation corresponding to one detected, tracked and recognized logo
class.
Attributes:
entity:
Entity category information to specify the logo class that all
the logo tracks within this LogoRecognitionAnnotation are
recognized as.
tracks:
All logo tracks where the recognized logo appears. Each track
corresponds to one logo instance appearing in consecutive
frames.
segments:
All video segments where the recognized logo appears. There
might be multiple instances of the same logo class appearing
in one VideoSegment.
""",
# @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LogoRecognitionAnnotation)
},
)
_sym_db.RegisterMessage(LogoRecognitionAnnotation)
DESCRIPTOR._options = None
_ANNOTATEVIDEOREQUEST.fields_by_name["features"]._options = None
_ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None
_ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None
_FACEFRAME._options = None
_FACEANNOTATION._options = None
_TIMESTAMPEDOBJECT.fields_by_name["attributes"]._options = None
_TIMESTAMPEDOBJECT.fields_by_name["landmarks"]._options = None
_TRACK.fields_by_name["attributes"]._options = None
_TRACK.fields_by_name["confidence"]._options = None
_VIDEOANNOTATIONRESULTS.fields_by_name["face_annotations"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["language_code"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["max_alternatives"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["filter_profanity"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["speech_contexts"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name[
"enable_automatic_punctuation"
]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["audio_tracks"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["enable_speaker_diarization"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["diarization_speaker_count"]._options = None
_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["enable_word_confidence"]._options = None
_SPEECHCONTEXT.fields_by_name["phrases"]._options = None
_SPEECHTRANSCRIPTION.fields_by_name["language_code"]._options = None
_SPEECHRECOGNITIONALTERNATIVE.fields_by_name["confidence"]._options = None
_SPEECHRECOGNITIONALTERNATIVE.fields_by_name["words"]._options = None
_WORDINFO.fields_by_name["confidence"]._options = None
_WORDINFO.fields_by_name["speaker_tag"]._options = None
_VIDEOINTELLIGENCESERVICE = _descriptor.ServiceDescriptor(
name="VideoIntelligenceService",
full_name="google.cloud.videointelligence.v1.VideoIntelligenceService",
file=DESCRIPTOR,
index=0,
serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform",
create_key=_descriptor._internal_create_key,
serialized_start=8921,
serialized_end=9241,
methods=[
_descriptor.MethodDescriptor(
name="AnnotateVideo",
full_name="google.cloud.videointelligence.v1.VideoIntelligenceService.AnnotateVideo",
index=0,
containing_service=None,
input_type=_ANNOTATEVIDEOREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\030"\023/v1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress',
create_key=_descriptor._internal_create_key,
),
],
)
_sym_db.RegisterServiceDescriptor(_VIDEOINTELLIGENCESERVICE)
DESCRIPTOR.services_by_name["VideoIntelligenceService"] = _VIDEOINTELLIGENCESERVICE
# @@protoc_insertion_point(module_scope)
| 36.169513 | 14,099 | 0.642526 | ["Apache-2.0"] | danoscarmike/python-videointelligence | google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py | 197,079 | Python |
# Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from a10_octavia.db import repositories as repo
from a10_octavia.db import api as db_apis
from oslo_utils import uuidutils
class VThunderDB():
def __init__(self, **kwargs):
self.vthunder_repo = repo.VThunderRepository()
def create_vthunder(self, project_id, device_name, username, password, ip_address, undercloud=None, axapi_version=30):
# Normalize the AXAPI version to its integer form: 2.1 maps to 21 and
# anything else falls back to the default of 30.
if axapi_version == 2.1:
    axapi_version = 21
else:
    axapi_version = 30
amphora_id = uuidutils.generate_uuid()
vthunder_id = uuidutils.generate_uuid()
# Accept string flags ('true'/'True') from the CLI as well as boolean True.
undercloud = str(undercloud).lower() == 'true'
db_session = db_apis.get_session()
vthunder = self.vthunder_repo.create(db_session, vthunder_id=vthunder_id, amphora_id=amphora_id,
project_id=project_id, device_name=device_name,
username=username,
password=password, ip_address=ip_address,
undercloud=undercloud, axapi_version=axapi_version)
db_apis.close_session(db_session)
print("vThunder entry created successfully.")
def update_vthunder(self, id, project_id, device_name, username, password, ip_address, undercloud=None, axapi_version=30):
if axapi_version == 2.1:
axapi_version = 21
else:
axapi_version = 30
undercloud = str(undercloud).lower() == 'true'
db_session = db_apis.get_session()
vthunder = self.vthunder_repo.update(db_session, id, project_id=project_id,
device_name=device_name, username=username,
password=password, ip_address=ip_address,
undercloud=undercloud, axapi_version=axapi_version)
db_apis.close_session(db_session)
print("vThunder entry updated successfully.")
def delete_vthunder(self, vthunderid):
db_session = db_apis.get_session()
vthunder = self.vthunder_repo.delete(db_session, id=vthunderid)
db_apis.close_session(db_session)
print("vThunder entry deleted successfully.")
| 40.36 | 127 | 0.624711 | ["Apache-2.0"] | richuc/a10-octavia | a10_octavia/controller/worker/tasks/a10_vthunder_db.py | 3,027 | Python |
from __future__ import unicode_literals
from nose.tools import assert_raises
import datetime
import boto
import boto3
import sure # noqa
from boto.exception import JSONResponseError
from moto import mock_ec2
from moto.backends import get_model
from moto.core.utils import iso_8601_datetime_with_milliseconds
@mock_ec2
def test_request_spot_instances():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
subnet = conn.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
conn = boto.connect_ec2()
conn.create_security_group('group1', 'description')
conn.create_security_group('group2', 'description')
start = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 1))
end = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 2))
with assert_raises(JSONResponseError) as ex:
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
valid_from=start, valid_until=end, launch_group="the-group",
availability_zone_group='my-group', key_name="test",
security_groups=['group1', 'group2'], user_data=b"some test data",
instance_type='m1.small', placement='us-east-1c',
kernel_id="test-kernel", ramdisk_id="test-ramdisk",
monitoring_enabled=True, subnet_id=subnet_id, dry_run=True
)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
valid_from=start, valid_until=end, launch_group="the-group",
availability_zone_group='my-group', key_name="test",
security_groups=['group1', 'group2'], user_data=b"some test data",
instance_type='m1.small', placement='us-east-1c',
kernel_id="test-kernel", ramdisk_id="test-ramdisk",
monitoring_enabled=True, subnet_id=subnet_id,
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("open")
request.price.should.equal(0.5)
request.launch_specification.image_id.should.equal('ami-abcd1234')
request.type.should.equal('one-time')
request.valid_from.should.equal(start)
request.valid_until.should.equal(end)
request.launch_group.should.equal("the-group")
request.availability_zone_group.should.equal('my-group')
request.launch_specification.key_name.should.equal("test")
security_group_names = [group.name for group in request.launch_specification.groups]
set(security_group_names).should.equal(set(['group1', 'group2']))
request.launch_specification.instance_type.should.equal('m1.small')
request.launch_specification.placement.should.equal('us-east-1c')
request.launch_specification.kernel.should.equal("test-kernel")
request.launch_specification.ramdisk.should.equal("test-ramdisk")
request.launch_specification.subnet_id.should.equal(subnet_id)
@mock_ec2
def test_request_spot_instances_default_arguments():
"""
Test that moto set the correct default arguments
"""
conn = boto.connect_ec2()
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("open")
request.price.should.equal(0.5)
request.launch_specification.image_id.should.equal('ami-abcd1234')
request.type.should.equal('one-time')
request.valid_from.should.equal(None)
request.valid_until.should.equal(None)
request.launch_group.should.equal(None)
request.availability_zone_group.should.equal(None)
request.launch_specification.key_name.should.equal(None)
security_group_names = [group.name for group in request.launch_specification.groups]
security_group_names.should.equal(["default"])
request.launch_specification.instance_type.should.equal('m1.small')
request.launch_specification.placement.should.equal(None)
request.launch_specification.kernel.should.equal(None)
request.launch_specification.ramdisk.should.equal(None)
request.launch_specification.subnet_id.should.equal(None)
@mock_ec2
def test_cancel_spot_instance_request():
conn = boto.connect_ec2()
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
with assert_raises(JSONResponseError) as ex:
conn.cancel_spot_instance_requests([requests[0].id], dry_run=True)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set')
conn.cancel_spot_instance_requests([requests[0].id])
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(0)
@mock_ec2
def test_request_spot_instances_fulfilled():
"""
Test that moto correctly fulfills a spot instance request
"""
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("open")
get_model('SpotInstanceRequest')[0].state = 'active'
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("active")
@mock_ec2
def test_tag_spot_instance_request():
"""
Test that moto correctly tags a spot instance request
"""
conn = boto.connect_ec2()
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request[0].add_tag('tag1', 'value1')
request[0].add_tag('tag2', 'value2')
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
tag_dict = dict(request.tags)
tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'})
@mock_ec2
def test_get_all_spot_instance_requests_filtering():
"""
Test that moto correctly filters spot instance requests
"""
conn = boto.connect_ec2()
request1 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request2 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request1[0].add_tag('tag1', 'value1')
request1[0].add_tag('tag2', 'value2')
request2[0].add_tag('tag1', 'value1')
request2[0].add_tag('tag2', 'wrong')
requests = conn.get_all_spot_instance_requests(filters={'state': 'active'})
requests.should.have.length_of(0)
requests = conn.get_all_spot_instance_requests(filters={'state': 'open'})
requests.should.have.length_of(3)
requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1'})
requests.should.have.length_of(2)
requests = conn.get_all_spot_instance_requests(filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
requests.should.have.length_of(1)
@mock_ec2
def test_request_spot_instances_setting_instance_id():
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234')
req = get_model('SpotInstanceRequest')[0]
req.state = 'active'
req.instance_id = 'i-12345678'
request = conn.get_all_spot_instance_requests()[0]
assert request.state == 'active'
assert request.instance_id == 'i-12345678'
| 35.95614 | 177 | 0.723347 | ["Apache-2.0"] | GoodRx/moto | tests/test_ec2/test_spot_instances.py | 8,198 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import ContainerRegistryManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class ContainerRegistryManagementClient(MultiApiClientMixin, _SDKClient):
"""ContainerRegistryManagementClient.
This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2019-05-01'
_PROFILE_TAG = "azure.mgmt.containerregistry.ContainerRegistryManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'build_steps': '2018-02-01-preview',
'build_tasks': '2018-02-01-preview',
'builds': '2018-02-01-preview',
'runs': '2019-04-01',
'tasks': '2019-04-01',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
api_version: Optional[str] = None,
base_url: Optional[str] = None,
profile: KnownProfiles = KnownProfiles.default,
**kwargs # type: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(ContainerRegistryManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.containerregistry.v2017_03_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.containerregistry.v2017_10_01.models>`
* 2018-02-01-preview: :mod:`v2018_02_01_preview.models<azure.mgmt.containerregistry.v2018_02_01_preview.models>`
* 2018-09-01: :mod:`v2018_09_01.models<azure.mgmt.containerregistry.v2018_09_01.models>`
* 2019-04-01: :mod:`v2019_04_01.models<azure.mgmt.containerregistry.v2019_04_01.models>`
* 2019-05-01: :mod:`v2019_05_01.models<azure.mgmt.containerregistry.v2019_05_01.models>`
* 2019-05-01-preview: :mod:`v2019_05_01_preview.models<azure.mgmt.containerregistry.v2019_05_01_preview.models>`
* 2019-06-01-preview: :mod:`v2019_06_01_preview.models<azure.mgmt.containerregistry.v2019_06_01_preview.models>`
* 2019-12-01-preview: :mod:`v2019_12_01_preview.models<azure.mgmt.containerregistry.v2019_12_01_preview.models>`
* 2020-11-01-preview: :mod:`v2020_11_01_preview.models<azure.mgmt.containerregistry.v2020_11_01_preview.models>`
* 2021-06-01-preview: :mod:`v2021_06_01_preview.models<azure.mgmt.containerregistry.v2021_06_01_preview.models>`
* 2021-08-01-preview: :mod:`v2021_08_01_preview.models<azure.mgmt.containerregistry.v2021_08_01_preview.models>`
"""
if api_version == '2017-03-01':
from ..v2017_03_01 import models
return models
elif api_version == '2017-10-01':
from ..v2017_10_01 import models
return models
elif api_version == '2018-02-01-preview':
from ..v2018_02_01_preview import models
return models
elif api_version == '2018-09-01':
from ..v2018_09_01 import models
return models
elif api_version == '2019-04-01':
from ..v2019_04_01 import models
return models
elif api_version == '2019-05-01':
from ..v2019_05_01 import models
return models
elif api_version == '2019-05-01-preview':
from ..v2019_05_01_preview import models
return models
elif api_version == '2019-06-01-preview':
from ..v2019_06_01_preview import models
return models
elif api_version == '2019-12-01-preview':
from ..v2019_12_01_preview import models
return models
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview import models
return models
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview import models
return models
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def agent_pools(self):
"""Instance depends on the API version:
* 2019-06-01-preview: :class:`AgentPoolsOperations<azure.mgmt.containerregistry.v2019_06_01_preview.aio.operations.AgentPoolsOperations>`
"""
api_version = self._get_api_version('agent_pools')
if api_version == '2019-06-01-preview':
from ..v2019_06_01_preview.aio.operations import AgentPoolsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'agent_pools'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def build_steps(self):
"""Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildStepsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.aio.operations.BuildStepsOperations>`
"""
api_version = self._get_api_version('build_steps')
if api_version == '2018-02-01-preview':
from ..v2018_02_01_preview.aio.operations import BuildStepsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'build_steps'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def build_tasks(self):
"""Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildTasksOperations<azure.mgmt.containerregistry.v2018_02_01_preview.aio.operations.BuildTasksOperations>`
"""
api_version = self._get_api_version('build_tasks')
if api_version == '2018-02-01-preview':
from ..v2018_02_01_preview.aio.operations import BuildTasksOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'build_tasks'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def builds(self):
"""Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.aio.operations.BuildsOperations>`
"""
api_version = self._get_api_version('builds')
if api_version == '2018-02-01-preview':
from ..v2018_02_01_preview.aio.operations import BuildsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'builds'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def connected_registries(self):
"""Instance depends on the API version:
* 2020-11-01-preview: :class:`ConnectedRegistriesOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ConnectedRegistriesOperations>`
* 2021-06-01-preview: :class:`ConnectedRegistriesOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.ConnectedRegistriesOperations>`
* 2021-08-01-preview: :class:`ConnectedRegistriesOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.ConnectedRegistriesOperations>`
"""
api_version = self._get_api_version('connected_registries')
if api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import ConnectedRegistriesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import ConnectedRegistriesOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import ConnectedRegistriesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'connected_registries'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def export_pipelines(self):
"""Instance depends on the API version:
* 2019-12-01-preview: :class:`ExportPipelinesOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ExportPipelinesOperations>`
* 2020-11-01-preview: :class:`ExportPipelinesOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ExportPipelinesOperations>`
* 2021-06-01-preview: :class:`ExportPipelinesOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.ExportPipelinesOperations>`
* 2021-08-01-preview: :class:`ExportPipelinesOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.ExportPipelinesOperations>`
"""
api_version = self._get_api_version('export_pipelines')
if api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import ExportPipelinesOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import ExportPipelinesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import ExportPipelinesOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import ExportPipelinesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'export_pipelines'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def import_pipelines(self):
"""Instance depends on the API version:
* 2019-12-01-preview: :class:`ImportPipelinesOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ImportPipelinesOperations>`
* 2020-11-01-preview: :class:`ImportPipelinesOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ImportPipelinesOperations>`
* 2021-06-01-preview: :class:`ImportPipelinesOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.ImportPipelinesOperations>`
* 2021-08-01-preview: :class:`ImportPipelinesOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.ImportPipelinesOperations>`
"""
api_version = self._get_api_version('import_pipelines')
if api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import ImportPipelinesOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import ImportPipelinesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import ImportPipelinesOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import ImportPipelinesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'import_pipelines'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2017-03-01: :class:`Operations<azure.mgmt.containerregistry.v2017_03_01.aio.operations.Operations>`
* 2017-10-01: :class:`Operations<azure.mgmt.containerregistry.v2017_10_01.aio.operations.Operations>`
* 2019-05-01: :class:`Operations<azure.mgmt.containerregistry.v2019_05_01.aio.operations.Operations>`
* 2019-12-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.Operations>`
* 2020-11-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.Operations>`
* 2021-06-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.Operations>`
* 2021-08-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2017-03-01':
from ..v2017_03_01.aio.operations import Operations as OperationClass
elif api_version == '2017-10-01':
from ..v2017_10_01.aio.operations import Operations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations import Operations as OperationClass
elif api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import Operations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import Operations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import Operations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def pipeline_runs(self):
"""Instance depends on the API version:
* 2019-12-01-preview: :class:`PipelineRunsOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PipelineRunsOperations>`
* 2020-11-01-preview: :class:`PipelineRunsOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PipelineRunsOperations>`
* 2021-06-01-preview: :class:`PipelineRunsOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.PipelineRunsOperations>`
* 2021-08-01-preview: :class:`PipelineRunsOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.PipelineRunsOperations>`
"""
api_version = self._get_api_version('pipeline_runs')
if api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import PipelineRunsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import PipelineRunsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import PipelineRunsOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import PipelineRunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'pipeline_runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2019-12-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`
* 2020-11-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`
* 2021-06-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`
* 2021-08-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def registries(self):
"""Instance depends on the API version:
* 2017-03-01: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2017_03_01.aio.operations.RegistriesOperations>`
* 2017-10-01: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2017_10_01.aio.operations.RegistriesOperations>`
* 2018-02-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2018_02_01_preview.aio.operations.RegistriesOperations>`
* 2018-09-01: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2018_09_01.aio.operations.RegistriesOperations>`
* 2019-04-01: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2019_04_01.aio.operations.RegistriesOperations>`
* 2019-05-01: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2019_05_01.aio.operations.RegistriesOperations>`
* 2019-05-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2019_05_01_preview.aio.operations.RegistriesOperations>`
* 2019-06-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2019_06_01_preview.aio.operations.RegistriesOperations>`
* 2019-12-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.RegistriesOperations>`
* 2020-11-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.RegistriesOperations>`
* 2021-06-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.RegistriesOperations>`
* 2021-08-01-preview: :class:`RegistriesOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.RegistriesOperations>`
"""
api_version = self._get_api_version('registries')
if api_version == '2017-03-01':
from ..v2017_03_01.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2017-10-01':
from ..v2017_10_01.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2018-02-01-preview':
from ..v2018_02_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2018-09-01':
from ..v2018_09_01.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2019-05-01-preview':
from ..v2019_05_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2019-06-01-preview':
from ..v2019_06_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import RegistriesOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import RegistriesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'registries'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def replications(self):
"""Instance depends on the API version:
* 2017-10-01: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2017_10_01.aio.operations.ReplicationsOperations>`
* 2019-05-01: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2019_05_01.aio.operations.ReplicationsOperations>`
* 2019-12-01-preview: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ReplicationsOperations>`
* 2020-11-01-preview: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ReplicationsOperations>`
* 2021-06-01-preview: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.ReplicationsOperations>`
* 2021-08-01-preview: :class:`ReplicationsOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.ReplicationsOperations>`
"""
api_version = self._get_api_version('replications')
if api_version == '2017-10-01':
from ..v2017_10_01.aio.operations import ReplicationsOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations import ReplicationsOperations as OperationClass
elif api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import ReplicationsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import ReplicationsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import ReplicationsOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import ReplicationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'replications'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def runs(self):
"""Instance depends on the API version:
* 2018-09-01: :class:`RunsOperations<azure.mgmt.containerregistry.v2018_09_01.aio.operations.RunsOperations>`
* 2019-04-01: :class:`RunsOperations<azure.mgmt.containerregistry.v2019_04_01.aio.operations.RunsOperations>`
* 2019-06-01-preview: :class:`RunsOperations<azure.mgmt.containerregistry.v2019_06_01_preview.aio.operations.RunsOperations>`
"""
api_version = self._get_api_version('runs')
if api_version == '2018-09-01':
from ..v2018_09_01.aio.operations import RunsOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations import RunsOperations as OperationClass
elif api_version == '2019-06-01-preview':
from ..v2019_06_01_preview.aio.operations import RunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def scope_maps(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`ScopeMapsOperations<azure.mgmt.containerregistry.v2019_05_01_preview.aio.operations.ScopeMapsOperations>`
* 2020-11-01-preview: :class:`ScopeMapsOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.ScopeMapsOperations>`
* 2021-06-01-preview: :class:`ScopeMapsOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.ScopeMapsOperations>`
* 2021-08-01-preview: :class:`ScopeMapsOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.ScopeMapsOperations>`
"""
api_version = self._get_api_version('scope_maps')
if api_version == '2019-05-01-preview':
from ..v2019_05_01_preview.aio.operations import ScopeMapsOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import ScopeMapsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import ScopeMapsOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import ScopeMapsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'scope_maps'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def task_runs(self):
"""Instance depends on the API version:
* 2019-06-01-preview: :class:`TaskRunsOperations<azure.mgmt.containerregistry.v2019_06_01_preview.aio.operations.TaskRunsOperations>`
"""
api_version = self._get_api_version('task_runs')
if api_version == '2019-06-01-preview':
from ..v2019_06_01_preview.aio.operations import TaskRunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'task_runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def tasks(self):
"""Instance depends on the API version:
* 2018-09-01: :class:`TasksOperations<azure.mgmt.containerregistry.v2018_09_01.aio.operations.TasksOperations>`
* 2019-04-01: :class:`TasksOperations<azure.mgmt.containerregistry.v2019_04_01.aio.operations.TasksOperations>`
* 2019-06-01-preview: :class:`TasksOperations<azure.mgmt.containerregistry.v2019_06_01_preview.aio.operations.TasksOperations>`
"""
api_version = self._get_api_version('tasks')
if api_version == '2018-09-01':
from ..v2018_09_01.aio.operations import TasksOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations import TasksOperations as OperationClass
elif api_version == '2019-06-01-preview':
from ..v2019_06_01_preview.aio.operations import TasksOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'tasks'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def tokens(self):
"""Instance depends on the API version:
* 2019-05-01-preview: :class:`TokensOperations<azure.mgmt.containerregistry.v2019_05_01_preview.aio.operations.TokensOperations>`
* 2020-11-01-preview: :class:`TokensOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.TokensOperations>`
* 2021-06-01-preview: :class:`TokensOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.TokensOperations>`
* 2021-08-01-preview: :class:`TokensOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.TokensOperations>`
"""
api_version = self._get_api_version('tokens')
if api_version == '2019-05-01-preview':
from ..v2019_05_01_preview.aio.operations import TokensOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import TokensOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import TokensOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import TokensOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'tokens'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def webhooks(self):
"""Instance depends on the API version:
* 2017-10-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2017_10_01.aio.operations.WebhooksOperations>`
* 2019-05-01: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2019_05_01.aio.operations.WebhooksOperations>`
* 2019-12-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.WebhooksOperations>`
* 2020-11-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2020_11_01_preview.aio.operations.WebhooksOperations>`
* 2021-06-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2021_06_01_preview.aio.operations.WebhooksOperations>`
* 2021-08-01-preview: :class:`WebhooksOperations<azure.mgmt.containerregistry.v2021_08_01_preview.aio.operations.WebhooksOperations>`
"""
api_version = self._get_api_version('webhooks')
if api_version == '2017-10-01':
from ..v2017_10_01.aio.operations import WebhooksOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations import WebhooksOperations as OperationClass
elif api_version == '2019-12-01-preview':
from ..v2019_12_01_preview.aio.operations import WebhooksOperations as OperationClass
elif api_version == '2020-11-01-preview':
from ..v2020_11_01_preview.aio.operations import WebhooksOperations as OperationClass
elif api_version == '2021-06-01-preview':
from ..v2021_06_01_preview.aio.operations import WebhooksOperations as OperationClass
elif api_version == '2021-08-01-preview':
from ..v2021_08_01_preview.aio.operations import WebhooksOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'webhooks'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
async def close(self):
await self._client.close()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details):
await self._client.__aexit__(*exc_details)
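# --- Hedged usage sketch (editorial addition, not part of the generated client) ---
# Demonstrates the async context-manager protocol implemented by
# __aenter__/__aexit__ above. The credential type and subscription id are
# placeholder assumptions; any azure-identity async TokenCredential should work.
async def _example_usage():
    from azure.identity.aio import DefaultAzureCredential
    async with ContainerRegistryManagementClient(
            DefaultAzureCredential(), "<subscription-id>") as client:
        # Attribute access resolves to the operation class for the configured API version.
        return client.scope_maps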
| 64.972325 | 180 | 0.721056 | [
"MIT"
] | AFengKK/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/aio/_container_registry_management_client.py | 35,215 | Python |
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import get_template
from ...settings import STATIC_URL
from ...order.models import DeliveryGroup, Order
INVOICE_TEMPLATE = 'dashboard/order/pdf/invoice.html'
PACKING_SLIP_TEMPLATE = 'dashboard/order/pdf/packing_slip.html'
def get_statics_absolute_url(request):
site = get_current_site(request)
absolute_url = '%(protocol)s://%(domain)s%(static_url)s' % {
'protocol': 'https' if request.is_secure() else 'http',
'domain': site.domain,
'static_url': STATIC_URL,
}
return absolute_url
def _create_pdf(rendered_template, absolute_url):
from weasyprint import HTML
pdf_file = (HTML(string=rendered_template, base_url=absolute_url)
.write_pdf())
return pdf_file
def create_invoice_pdf(order_pk, absolute_url):
order = (Order.objects.prefetch_related(
'user', 'shipping_address',
'billing_address', 'voucher', 'groups').get(
pk=order_pk))
shipping_methods = [
{'name': d.shipping_method_name,
'price': d.shipping_price} for d in order.groups.all()]
ctx = {'order': order, 'shipping_methods': shipping_methods}
rendered_template = get_template(INVOICE_TEMPLATE).render(ctx)
pdf_file = _create_pdf(rendered_template, absolute_url)
return pdf_file, order
def create_packing_slip_pdf(group_pk, absolute_url):
group = (DeliveryGroup.objects.prefetch_related(
'items', 'order', 'order__user', 'order__shipping_address',
'order__billing_address').get(pk=group_pk))
ctx = {'group': group}
rendered_template = get_template(PACKING_SLIP_TEMPLATE).render(ctx)
pdf_file = _create_pdf(rendered_template, absolute_url)
return pdf_file, group
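# --- Hedged usage sketch (editorial addition) ---
# How a dashboard view might wire these helpers together; `request` and
# `order_pk` are assumed to be supplied by the caller.
def _example_invoice_response(request, order_pk):
    from django.http import HttpResponse
    absolute_url = get_statics_absolute_url(request)
    pdf_file, order = create_invoice_pdf(order_pk, absolute_url)
    response = HttpResponse(pdf_file, content_type='application/pdf')
    response['Content-Disposition'] = 'filename=invoice-%s.pdf' % order.pk
    return response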
| 35.078431 | 71 | 0.718278 | [
"BSD-3-Clause"
] | imran1234567/saleor | saleor/dashboard/order/utils.py | 1,789 | Python |
import os
import json
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits import mplot3d
def liver_dump_init(env, name = None):
liver = {'x':[],'Fes':[],'Fis':[],'Ficp':[],'volume':[],'col_p_n':[],'crash':[]}
liver['vtx'] = env.liver.x.copy()
if name is not None:
liver['name'] = name
else:
liver['name'] = f"_dt{env.timestep}_down_gm{env.liver.gamma}"
return liver
def liver_dump_step(liver,env):
liver['x'].append(env.liver.x)
liver['Fes'].append(env.liver.Fes)
liver['Fis'].append(env.liver.Fis)
liver['Ficp'].append(env.liver.Ficp)
liver['volume'].append(np.round(env.liver.volumes6.sum() / env.liver.init_volume6.sum(),3))
liver['col_p_n'].append(len(env.liver.check_tet_aabb_collision(env.sg.x)))
liver['crash'].append(env.liver.crash_flag)
return liver
def liver_dump(liver,ep = None):
liver_save ={}
liver_save['vtx'] = liver['vtx'].tolist()
liver_save['x'] = np.array(liver['x']).tolist()
liver_save['Fes'] = np.array(liver['Fes']).tolist()
liver_save['Fis'] = np.array(liver['Fis']).tolist()
liver_save['Ficp'] = np.array(liver['Ficp']).tolist()
liver_save['volume'] = np.array(liver['volume']).tolist()
liver_save['col_p_n']= np.array(liver['col_p_n']).tolist()
liver_save['crash'] = np.array(liver['crash']).tolist()
if ep is None:
with open(os.path.join('liver_json',f"liver_record{liver['name']}.json"),'w') as f:
json.dump(liver_save,f)
else:
with open(os.path.join('liver_json',f"liver_record_{int(ep)}.json"),'w') as f:
json.dump(liver_save,f)
def liver_dump_load(liver):
vtx = np.array(liver['vtx'])
x = np.array(liver['x'])
Fes = np.array(liver['Fes'])
Fis = np.array(liver['Fis'])
Ficp = np.array(liver['Ficp'])
volume = np.array(liver['volume'])
col_p_n = np.array(liver['col_p_n'])
crash = np.array(liver['crash'])
return vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash
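# --- Hedged usage sketch (editorial addition) ---
# Typical record/replay loop; `env` is assumed to expose `.liver`, `.sg` and
# `.timestep` as the functions above require, and `env.step(action)` is an
# assumed API.
# liver = liver_dump_init(env)
# for _ in range(n_steps):
#     env.step(action)
#     liver = liver_dump_step(liver, env)
# liver_dump(liver)
# vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash = liver_dump_load(liver)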
'''
temporary collision-response implementations:
1. collision_response_cotin
2. collision_response_self
'''
def collision_response_cotin(pair,liver,past_p,current_p):
    # Check barycentric coordinates (bc_co) of all surface tri elements and use
    # dn (ray . normal) to decide which triangles face the incoming ray.
move_v_disp_dict = {}
move_tri_indexs = []
flat_list = [item for sublist in list(pair.values()) for item in sublist]
p_indexs = np.array(flat_list).reshape(-1)
p_n = p_indexs.shape[0]
ray = current_p[p_indexs]-past_p[p_indexs]
ray = ray*(1/np.linalg.norm(ray,axis=-1))[:,None] # p_n x3
    # compute ray and normal vector: d = ray, n = normal_vec
dn = [email protected]_normal_vec.T # p_n x n_tri
ap = liver.x[liver.tri_elements[:,0]][None,:] - past_p[p_indexs][:,None] # p_n x n_tri x 3 #choose first point as a
apn = (ap * liver.tri_normal_vec[None,:]).sum(axis=-1) # p_n x n_tri x 3 -> p_n x n_tri
ts = apn * (1/dn) # p_n x n_tri
int_p = ts[:,:,None]*ray[:,None]+past_p[p_indexs][:,None] # p_n x n_tri x3 <- p_n x n_tri x1 * p_n x1 x3 + p_n x1 x3
# compute barycentric coordinates of intersection points
v1 = liver.x[liver.tri_elements[:,1]]-liver.x[liver.tri_elements[:,0]] # n_tri x3
v2 = liver.x[liver.tri_elements[:,2]]-liver.x[liver.tri_elements[:,0]]
tri_areax2 = np.linalg.norm(np.cross(v1,v2,axis=-1),axis=-1) # n_tri
bc_temp = np.zeros((p_n,liver.n_tri,3,3,3))
bc_temp[:] = np.tile(liver.x[liver.tri_elements], 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x n_tri x 3area x 3ps x 3
for itemp in range(p_n):
bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_p[itemp]
v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x n_tri x 3area x 3xyz
v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x n_tri x 3area
bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
np.newaxis] # p_n x n_tri x 3area<- p_n x n_tri x 3area * 1 x n_tri x 3area
for itemp in range(p_n):
# check bc_co
check1 = np.argwhere(abs(bc_co[itemp].sum(axis=-1) - 1) < 1e-3).flatten() # each p should have at least 1
check2 = np.argwhere(dn[itemp] < 0).flatten()
psb_tri_index = np.intersect1d(check1,check2) # all possible tri_elements satisfies the bc_co and the negative normal vector
if psb_tri_index.size!=0:
psb_ts = ts[itemp,psb_tri_index] # n_psb_tri_index
# if np.any(psb_ts<0):
# raise ValueError("liver shape error")
move_tri_index = psb_tri_index[psb_ts.argmin()] # only 1 the tri_elements should move
move_t = current_p[p_indexs[itemp]] - int_p[itemp,move_tri_index]
move_v_index_p = liver.tri_elements[move_tri_index]
for ividx in move_v_index_p: # same points may move multiple times.
if ividx not in move_v_disp_dict.keys():
move_v_disp_dict[ividx] = move_t # move_t put in for new vindex
else:# compare move_t for old vindex
if np.linalg.norm(np.c_[move_v_disp_dict[ividx],move_t].T,axis=-1).argmax() == 1 : # older move closer than new
move_v_disp_dict[ividx] = move_t
move_tri_indexs.append(move_tri_index.tolist())
print(move_tri_indexs)
return move_v_disp_dict
def collision_response_self(pair, liver, tool):
    # Works poorly when the deformation is larger; reverting to the old fixed
    # version did not help, so the Cotin-style method above is preferred.
new_vtx_delta = None
move_tris = {}
nv_aves = {}
new_vtx_deltas = {}
for key, value in pair.items():
new_vtx_delta = np.zeros(liver.x.shape)
i_tet, p_index = int(key), np.array(value)
p_n = p_index.shape[0]
# find potential collpaision surface tri_element
col_tri_index = np.argwhere(liver.tri_tet[:, 0] == i_tet).flatten()
if col_tri_index.size == 0: raise ValueError(
"Update time step too big, vertices skip the surface tetrahedron elements")
col_tri_n = col_tri_index.shape[0]
col_tri_nv = liver.tri_normal_vec[col_tri_index]
col_tri_p = liver.x[liver.tri_elements[col_tri_index].T[0]] # chose the first points
# compute nv_ave
nv_ave = tool.vtx_normal_vec[p_index].sum(axis=0)
nv_ave = nv_ave / np.linalg.norm(nv_ave)
nv_aves[key] = nv_ave
# compute ts and intersection points
dn = nv_ave.dot(col_tri_nv.T) # col_tri_n
ap = col_tri_p[np.newaxis, :] - tool.x[p_index, np.newaxis] # p_n x col_tri_n x 3
dotn = np.tile(col_tri_nv, p_n).reshape(-1, p_n, 3).transpose(1, 0, 2)
apn = (ap * dotn).sum(axis=-1) # p_n x col_tri_n
ts = apn * (1 / dn) # p_n x col_tri_n
int_col_p = ts[:, :, np.newaxis] * nv_ave[np.newaxis, np.newaxis, :] \
+ tool.vertices[p_index][:, np.newaxis, :] # p_n x col_tri_n x 1 * 1 x 1 x 3 + p_n x 1 x 3
# compute barycentric coordinates of intersection points
tri_vertices = liver.x[liver.tri_elements[col_tri_index]] # n_tri x 3 x 3
v1 = tri_vertices[:, 1] - tri_vertices[:, 0]
v2 = tri_vertices[:, 2] - tri_vertices[:, 0]
tri_areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # n_tri
bc_temp = np.zeros((p_n, col_tri_n, 3, 3, 3))
bc_temp[:] = np.tile(tri_vertices, 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x col_tri_n x 3 x 3 x 3
for itemp in range(p_n):
bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_col_p[itemp]
v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x col_tri_n x 3area x 3xyz
v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x col_tri_n x 3area
bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
np.newaxis] # p_n x col_tri_n x 3area * 1 x col_tri_n x 3area = p_n x col_tri_n x 3area
# Move tri to point with tmax
check1 = np.argwhere(abs(bc_co.sum(axis=-1) - 1) < 1e-3)
check2 = np.argwhere(dn < 0)
inter_tri_index = np.intersect1d(check1[:, 1], check2) # find colliable surface tri_elements index
# no colliable tri_elements
if inter_tri_index.size == 0:
the_best_tri = dn.argmin() # chose one of most collidable tri
move_tri = liver.tri_elements[col_tri_index[the_best_tri]]
tri_nv = liver.tri_normal_vec[col_tri_index[the_best_tri]].flatten()
tri_vtx = liver.x[move_tri].reshape(3, 3)
            v = nv_ave - tri_nv  # find a new direction, less sharp than nv_ave
v = v / np.linalg.norm(v)
dn_t = v.dot(tri_nv) # 1
ap_t = tri_vtx[0] - tool.x[p_index]
t_t = ap_t.dot(tri_nv) / dn_t
move_t = t_t.min()
new_vtx_delta[move_tri] += - move_t * v
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' None ',end='')
else:
# more than 1 colliable tri_elements
if len(inter_tri_index) > 1:
temp_delta = np.zeros((liver.x.shape[0], len(inter_tri_index))) # n_v * n_inter
itemp = 0
for inter_tri_i in inter_tri_index:
part_p_index = check1[ check1[:, 1] == inter_tri_i, 0] # p index of each tri_element that satisfies bc_co condition
move_t = ts[part_p_index, inter_tri_i].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_i]]
temp_delta[move_tri, itemp] = - move_t # collect all possible move_t for all vertices
move_tris.setdefault(key, []).append(move_tri.flatten())
itemp += 1
new_vtx_delta += temp_delta.max(axis=-1)[:, np.newaxis] * nv_ave[np.newaxis,:] # move with the maximal move_t
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
print(' Multi ',end='')
else:
# only 1 colliable tri_elements
move_t = ts[:, inter_tri_index].min()
move_tri = liver.tri_elements[col_tri_index[inter_tri_index]]
new_vtx_delta[move_tri] += -move_t * nv_ave
new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
move_tris.setdefault(key, []).append(move_tri.flatten())
print(' Single ',end='')
return new_vtx_delta, move_tris, nv_aves, new_vtx_deltas
'''
static methods:
1. lame_param
2. tri_mid_vec
3. rotation_matrix
4. flatten_list
'''
def lame_param(E, v):
la = E * v / (1 + v) / (1 - 2 * v)
mu = E / 2 / (1 + v)
return la, mu
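# Example (editorial addition): for soft tissue with E = 3 kPa and v = 0.45,
# la, mu = lame_param(3e3, 0.45)  # -> (~9310.3, ~1034.5) Pa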
def tri_mid_vec(vertices, tri_elements):
tri_vtx = vertices[tri_elements]
tri_mid = tri_vtx.mean(axis=1)
tri_normal_vec = np.cross(tri_vtx[:, 1] - tri_vtx[:, 0], tri_vtx[:, 2] - tri_vtx[:, 0])
tri_normal_vec = tri_normal_vec * (1.0 / np.linalg.norm(tri_normal_vec, axis=1))[:, np.newaxis]
return tri_mid, tri_normal_vec
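# Example (editorial addition): a single right triangle in the z = 0 plane has
# its midpoint at (1/3, 1/3, 0) and unit normal +z.
# mid, nv = tri_mid_vec(np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]),
#                       np.array([[0, 1, 2]]))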
def rotation_matrix(deg,axis='x'):
rad = np.deg2rad(deg)
s,c = np.sin(rad),np.cos(rad)
if axis=='x':
return np.array([ 1, 0, 0,
0, c, -s,
0, s, c]).reshape(-1,3)
elif axis=='y':
return np.array([ c, 0, s,
0, 1, 0,
-s, 0, c]).reshape(-1,3)
elif axis=='z':
return np.array([ c, -s, 0,
s, c, 0,
0, 0, 1]).reshape(-1,3)
else:
return np.ones((3,3))
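# Example (editorial addition): rotating the x unit vector by 90 degrees about
# z maps it onto the y axis.
# np.allclose(rotation_matrix(90, 'z') @ np.array([1., 0., 0.]),
#             np.array([0., 1., 0.]))  # -> True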
# def flatten_list(l):
#     # does not work well (returns the first element instead of accumulating)
# for el in l:
# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
# return flatten_list(el)
# else:
# return el
'''
matplotlibe subplot
1. create_axs
2. draw_liver
3. draw_liver_tool
'''
def create_axs(subplot_n,block=False,return_fig=False):
r = int(np.floor(np.sqrt(subplot_n)))
c = int(subplot_n/r)
fig = plt.figure(figsize=plt.figaspect(0.5))
axs = {}
for i in range(subplot_n):
axs[i] = fig.add_subplot(r, c, i+1, projection='3d')
if return_fig:
return axs,fig
return axs
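# Example (editorial addition): six equal-aspect 3D subplots, e.g. for
# draw_liver_tool below.
# axs = create_axs(6)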
def draw_liver(liver,ax):
ax.cla()
ax = liver.plt_vtx(ax=ax)
ax = liver.plt_x(ax=ax)
plt_equal(ax)
return ax
def draw_liver_F(liver,axs,f_scl = 5e0):
# Fes, Ficp, Fis+ displacement
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_Fes(vec_to_scl=f_scl,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = liver.plt_x(ax=axs[1])
axs[1] = liver.plt_Ficp(vec_to_scl=f_scl,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_vtx(ax=axs[2])
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = liver.plt_Fis(vec_to_scl=f_scl,ax=axs[2])
plt_equal(axs[2])
return axs
def draw_liver_tool(liver,sg,axs,f_scl=5e0):
axs[0].cla()
axs[0] = liver.plt_x(ax=axs[0])
axs[0] = liver.plt_tri_normal_vec(vec_scl=f_scl/2,ax=axs[0])
plt_equal(axs[0])
axs[1].cla()
axs[1] = sg.plt_sg_x(ax=axs[1])
axs[1] = sg._plt_vtx_normal_vec(sg.x,vec_scl=f_scl/2,ax=axs[1])
plt_equal(axs[1])
axs[2].cla()
axs[2] = liver.plt_x(ax=axs[2])
axs[2] = sg.plt_sg_x(ax=axs[2])
plt_equal(axs[2])
    axs_l = [axs[3], axs[4], axs[5]]  # must be an ordered, indexable list (a set cannot be indexed)
    axs_l = draw_liver_F(liver, axs_l, f_scl=f_scl)  # draw_liver_F accepts f_scl; draw_liver does not
    axs[3], axs[4], axs[5] = axs_l[0], axs_l[1], axs_l[2]
    plt.draw()  # plt.show(block=False)
return axs
'''
aabb
1. xyzminmax
2. _plt_AABB
3. plt_aabb_p
'''
def xyzminmax(aabb):
# xmin, ymin, zmin, xmax, ymax, zmax = aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
return aabb[0], aabb[1], aabb[2], aabb[3], aabb[4], aabb[5]
def plt_AABB(aabb, **kwargs):
c_line = '#9467bd'
c_p = '#e377c2'
if 'c' in kwargs.keys():
colors = kwargs['c']
if type(colors) is list:
c_line = colors[0]
c_p = colors[1]
elif type(colors) is str:
c_line = colors
ax = ax3d_handle(**kwargs)
# aabb: 1x6, xmin, ymin, zmin, xmax, ymax, zmax
xmin, ymin, zmin, xmax, ymax, zmax = xyzminmax(aabb)
xyz = np.array([xmin, ymin, zmin, xmax, ymin, zmin, xmax, ymax, zmin, xmin, ymax, zmin,
xmin, ymin, zmax, xmax, ymin, zmax, xmax, ymax, zmax, xmin, ymax, zmax]).reshape(-1, 3)
line_segs = np.array([1, 2, 2, 3, 3, 4, 4, 1,
1, 5, 2, 6, 3, 7, 4, 8,
5, 6, 6, 7, 7, 8, 8, 5]).reshape(-1, 2) - 1
line_vt = np.hstack((xyz[line_segs[:, 0]], xyz[line_segs[:, 1]])).copy()
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors=c_line, linestyles='--')
ax.add_collection(lc)
ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], marker='o', c=c_p)
return ax
def plt_aabb_p(aabb, p, **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(p[0], p[1], p[2], c='#22D8C3')
plt_AABB(aabb, ax=ax)
return ax
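# Example (editorial addition): a unit box with a point at its centre.
# ax = plt_aabb_p(np.array([0., 0., 0., 1., 1., 1.]), np.array([0.5, 0.5, 0.5]))
# plt_show_equal(ax, block=True)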
'''
ax handle
1. 1) plt_equal
2) plt_show_equal
3) set_axes_equal
4) _set_axes_radius
2. ax3d_handle
3. plt_tet
4. plt_tet_ps
5. plt_normal_vecs
6. plt_tri
7. plt_tri_ps
'''
def plt_equal(ax,limits = None):
ax.set_box_aspect((1, 1, 1)) # IMPORTANT - this is the new, key line
set_axes_equal(ax,limits=limits) # IMPORTANT - this is also required
def plt_show_equal(ax,block=False,limits = None):
plt_equal(ax,limits=limits)
plt.show(block=block)
def set_axes_equal(ax: plt.Axes,limits = None):
"""Set 3D plot axes to equal scale.
Make axes of 3D plot have equal scale so that spheres appear as
spheres and cubes as cubes. Required since `ax.axis('equal')`
and `ax.set_aspect('equal')` don't work on 3D.
"""
if limits is None:
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
_set_axes_radius(ax, origin, radius)
def _set_axes_radius(ax, origin, radius):
x, y, z = origin
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius, z + radius])
def ax3d_handle(return_fig=False,**kwargs):
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(projection='3d')
if return_fig:
return ax,fig
return ax
def plt_tet(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#BCB6E3')
if text_opt == "on":
for i in range(4): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#8A7BFB')
ax.add_collection(lc)
return ax
def plt_tet_ps(vs, p, text_opt='off', **kwargs):
p = np.array(p)
ax = ax3d_handle(**kwargs)
ax = plt_tet(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
def plt_normal_vecs(base_ps, vecs, scl=1, **kwargs):
vesc_scl = vecs * scl
ax = ax3d_handle(**kwargs)
ax.scatter(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2], c='#1D1788')
ax.quiver(base_ps[:, 0], base_ps[:, 1], base_ps[:, 2],
vesc_scl[:, 0], vesc_scl[:, 1], vesc_scl[:, 2], color='#7D75FE')
return ax
def plt_tet_ps_vecs(vs, p, vec, scl=1, text_opt = 'off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tet_ps(vs, p, ax=ax, text_opt = text_opt)
if len(p.shape) == 1: p = p.reshape(1, -1)
if len(vec.shape) == 1: vec = vec.reshape(1, -1)
ax = plt_normal_vecs(p, vec, scl=scl, ax=ax)
return ax
def plt_tri(vs, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax.scatter(vs[:, 0], vs[:, 1], vs[:, 2], c='#ff00ff')
if text_opt == "on":
for i in range(3): ax.text(vs[i, 0], vs[i, 1], vs[i, 2], f'{i + 1}')
line_order = np.array([1, 2, 1, 3, 2, 3]).reshape(-1, 2) - 1
line_vt = np.hstack((vs[line_order[:, 0]], vs[line_order[:, 1]]))
lc = Line3DCollection(line_vt.reshape(-1, 2, 3), colors='#9933ff')
ax.add_collection(lc)
return ax
def plt_tri_ps(vs, p, text_opt='off', **kwargs):
ax = ax3d_handle(**kwargs)
ax = plt_tri(vs, text_opt=text_opt, ax=ax)
if len(p.shape) == 1: p = p.reshape(1, -1)
ax.scatter(p[:, 0], p[:, 1], p[:, 2], c='#22D8C3')
return ax
| 40.867647 | 137 | 0.569886 | [
"MIT"
] | Kexin-Wei/spinnup | env_pyrep/utils.py | 19,453 | Python |
from django.db import models
# Create your models here.
class Message(models.Model):
tab_name = models.TextField(max_length=50)
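    # Note (editorial addition): max_length on a TextField is enforced only by
    # form validation, not at the database level; use CharField to enforce it
    # in the schema.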
    text = models.TextField(max_length=300)
| 29.333333 | 46 | 0.761364 | [
"MIT"
] | robocol-rem-u/ros_web_app_2 | Robocol/testapp/Interfaz/models.py | 176 | Python |
from a10sdk.common.A10BaseClass import A10BaseClass
class AuthSamlIdp(A10BaseClass):
""" :param remote_file: {"optional": true, "type": "string", "description": "Profile name for remote url", "format": "url"}
:param use_mgmt_port: {"default": 0, "optional": true, "type": "number", "description": "Use management port as source port", "format": "flag"}
:param verify_xml_signature: {"default": 0, "optional": true, "type": "number", "description": "Verify metadata's XML signature", "format": "flag"}
:param saml_idp_name: {"description": "Metadata name", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param overwrite: {"default": 0, "optional": true, "type": "number", "description": "Overwrite existing file", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
SAML metadata of identity provider.
Class auth-saml-idp supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/import/auth-saml-idp`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "auth-saml-idp"
self.a10_url="/axapi/v3/import/auth-saml-idp"
self.DeviceProxy = ""
self.remote_file = ""
self.use_mgmt_port = ""
self.verify_xml_signature = ""
self.saml_idp_name = ""
self.overwrite = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
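# --- Hedged usage sketch (editorial addition) ---
# Field names follow the docstring above; DeviceProxy/session wiring is an
# assumption based on the A10BaseClass pattern and is omitted here.
# idp = AuthSamlIdp(saml_idp_name="corp-idp",
#                   remote_file="https://idp.example.com/metadata.xml",
#                   overwrite=1)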
| 38.727273 | 151 | 0.642019 | [
"Apache-2.0"
] | a10networks/a10sdk-python | a10sdk/core/A10_import/import_auth_saml_idp.py | 1,704 | Python |
# coding: utf-8
"""
Intersight REST API
    This is the Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EquipmentChassis(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'device_mo_id': 'str',
'dn': 'str',
'rn': 'str',
'model': 'str',
'revision': 'str',
'serial': 'str',
'vendor': 'str',
'blades': 'list[ComputeBladeRef]',
'fanmodules': 'list[EquipmentFanModuleRef]',
'ioms': 'list[EquipmentIoCardRef]',
'oper_state': 'str',
'psus': 'list[EquipmentPsuRef]',
'registered_device': 'AssetDeviceRegistrationRef',
'sasexpanders': 'list[StorageSasExpanderRef]',
'siocs': 'list[EquipmentSystemIoControllerRef]',
'storage_enclosures': 'list[StorageEnclosureRef]'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'device_mo_id': 'DeviceMoId',
'dn': 'Dn',
'rn': 'Rn',
'model': 'Model',
'revision': 'Revision',
'serial': 'Serial',
'vendor': 'Vendor',
'blades': 'Blades',
'fanmodules': 'Fanmodules',
'ioms': 'Ioms',
'oper_state': 'OperState',
'psus': 'Psus',
'registered_device': 'RegisteredDevice',
'sasexpanders': 'Sasexpanders',
'siocs': 'Siocs',
'storage_enclosures': 'StorageEnclosures'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, device_mo_id=None, dn=None, rn=None, model=None, revision=None, serial=None, vendor=None, blades=None, fanmodules=None, ioms=None, oper_state=None, psus=None, registered_device=None, sasexpanders=None, siocs=None, storage_enclosures=None):
"""
EquipmentChassis - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._device_mo_id = None
self._dn = None
self._rn = None
self._model = None
self._revision = None
self._serial = None
self._vendor = None
self._blades = None
self._fanmodules = None
self._ioms = None
self._oper_state = None
self._psus = None
self._registered_device = None
self._sasexpanders = None
self._siocs = None
self._storage_enclosures = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if device_mo_id is not None:
self.device_mo_id = device_mo_id
if dn is not None:
self.dn = dn
if rn is not None:
self.rn = rn
if model is not None:
self.model = model
if revision is not None:
self.revision = revision
if serial is not None:
self.serial = serial
if vendor is not None:
self.vendor = vendor
if blades is not None:
self.blades = blades
if fanmodules is not None:
self.fanmodules = fanmodules
if ioms is not None:
self.ioms = ioms
if oper_state is not None:
self.oper_state = oper_state
if psus is not None:
self.psus = psus
if registered_device is not None:
self.registered_device = registered_device
if sasexpanders is not None:
self.sasexpanders = sasexpanders
if siocs is not None:
self.siocs = siocs
if storage_enclosures is not None:
self.storage_enclosures = storage_enclosures
@property
def account_moid(self):
"""
Gets the account_moid of this EquipmentChassis.
The Account ID for this managed object.
:return: The account_moid of this EquipmentChassis.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this EquipmentChassis.
The Account ID for this managed object.
:param account_moid: The account_moid of this EquipmentChassis.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this EquipmentChassis.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this EquipmentChassis.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this EquipmentChassis.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this EquipmentChassis.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this EquipmentChassis.
The time when this managed object was created.
:return: The create_time of this EquipmentChassis.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this EquipmentChassis.
The time when this managed object was created.
:param create_time: The create_time of this EquipmentChassis.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this EquipmentChassis.
The time when this managed object was last modified.
:return: The mod_time of this EquipmentChassis.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this EquipmentChassis.
The time when this managed object was last modified.
:param mod_time: The mod_time of this EquipmentChassis.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this EquipmentChassis.
A unique identifier of this Managed Object instance.
:return: The moid of this EquipmentChassis.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this EquipmentChassis.
A unique identifier of this Managed Object instance.
:param moid: The moid of this EquipmentChassis.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this EquipmentChassis.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this EquipmentChassis.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this EquipmentChassis.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this EquipmentChassis.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this EquipmentChassis.
An array of owners which represent effective ownership of this object.
:return: The owners of this EquipmentChassis.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this EquipmentChassis.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this EquipmentChassis.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this EquipmentChassis.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this EquipmentChassis.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this EquipmentChassis.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this EquipmentChassis.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this EquipmentChassis.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this EquipmentChassis.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this EquipmentChassis.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this EquipmentChassis.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this EquipmentChassis.
The versioning info for this managed object
:return: The version_context of this EquipmentChassis.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this EquipmentChassis.
The versioning info for this managed object
:param version_context: The version_context of this EquipmentChassis.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def device_mo_id(self):
"""
Gets the device_mo_id of this EquipmentChassis.
:return: The device_mo_id of this EquipmentChassis.
:rtype: str
"""
return self._device_mo_id
@device_mo_id.setter
def device_mo_id(self, device_mo_id):
"""
Sets the device_mo_id of this EquipmentChassis.
:param device_mo_id: The device_mo_id of this EquipmentChassis.
:type: str
"""
self._device_mo_id = device_mo_id
@property
def dn(self):
"""
Gets the dn of this EquipmentChassis.
:return: The dn of this EquipmentChassis.
:rtype: str
"""
return self._dn
@dn.setter
def dn(self, dn):
"""
Sets the dn of this EquipmentChassis.
:param dn: The dn of this EquipmentChassis.
:type: str
"""
self._dn = dn
@property
def rn(self):
"""
Gets the rn of this EquipmentChassis.
:return: The rn of this EquipmentChassis.
:rtype: str
"""
return self._rn
@rn.setter
def rn(self, rn):
"""
Sets the rn of this EquipmentChassis.
:param rn: The rn of this EquipmentChassis.
:type: str
"""
self._rn = rn
@property
def model(self):
"""
Gets the model of this EquipmentChassis.
:return: The model of this EquipmentChassis.
:rtype: str
"""
return self._model
@model.setter
def model(self, model):
"""
Sets the model of this EquipmentChassis.
:param model: The model of this EquipmentChassis.
:type: str
"""
self._model = model
@property
def revision(self):
"""
Gets the revision of this EquipmentChassis.
:return: The revision of this EquipmentChassis.
:rtype: str
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this EquipmentChassis.
:param revision: The revision of this EquipmentChassis.
:type: str
"""
self._revision = revision
@property
def serial(self):
"""
Gets the serial of this EquipmentChassis.
:return: The serial of this EquipmentChassis.
:rtype: str
"""
return self._serial
@serial.setter
def serial(self, serial):
"""
Sets the serial of this EquipmentChassis.
:param serial: The serial of this EquipmentChassis.
:type: str
"""
self._serial = serial
@property
def vendor(self):
"""
Gets the vendor of this EquipmentChassis.
:return: The vendor of this EquipmentChassis.
:rtype: str
"""
return self._vendor
@vendor.setter
def vendor(self, vendor):
"""
Sets the vendor of this EquipmentChassis.
:param vendor: The vendor of this EquipmentChassis.
:type: str
"""
self._vendor = vendor
@property
def blades(self):
"""
Gets the blades of this EquipmentChassis.
:return: The blades of this EquipmentChassis.
:rtype: list[ComputeBladeRef]
"""
return self._blades
@blades.setter
def blades(self, blades):
"""
Sets the blades of this EquipmentChassis.
:param blades: The blades of this EquipmentChassis.
:type: list[ComputeBladeRef]
"""
self._blades = blades
@property
def fanmodules(self):
"""
Gets the fanmodules of this EquipmentChassis.
:return: The fanmodules of this EquipmentChassis.
:rtype: list[EquipmentFanModuleRef]
"""
return self._fanmodules
@fanmodules.setter
def fanmodules(self, fanmodules):
"""
Sets the fanmodules of this EquipmentChassis.
:param fanmodules: The fanmodules of this EquipmentChassis.
:type: list[EquipmentFanModuleRef]
"""
self._fanmodules = fanmodules
@property
def ioms(self):
"""
Gets the ioms of this EquipmentChassis.
:return: The ioms of this EquipmentChassis.
:rtype: list[EquipmentIoCardRef]
"""
return self._ioms
@ioms.setter
def ioms(self, ioms):
"""
Sets the ioms of this EquipmentChassis.
:param ioms: The ioms of this EquipmentChassis.
:type: list[EquipmentIoCardRef]
"""
self._ioms = ioms
@property
def oper_state(self):
"""
Gets the oper_state of this EquipmentChassis.
:return: The oper_state of this EquipmentChassis.
:rtype: str
"""
return self._oper_state
@oper_state.setter
def oper_state(self, oper_state):
"""
Sets the oper_state of this EquipmentChassis.
:param oper_state: The oper_state of this EquipmentChassis.
:type: str
"""
self._oper_state = oper_state
@property
def psus(self):
"""
Gets the psus of this EquipmentChassis.
:return: The psus of this EquipmentChassis.
:rtype: list[EquipmentPsuRef]
"""
return self._psus
@psus.setter
def psus(self, psus):
"""
Sets the psus of this EquipmentChassis.
:param psus: The psus of this EquipmentChassis.
:type: list[EquipmentPsuRef]
"""
self._psus = psus
@property
def registered_device(self):
"""
Gets the registered_device of this EquipmentChassis.
:return: The registered_device of this EquipmentChassis.
:rtype: AssetDeviceRegistrationRef
"""
return self._registered_device
@registered_device.setter
def registered_device(self, registered_device):
"""
Sets the registered_device of this EquipmentChassis.
:param registered_device: The registered_device of this EquipmentChassis.
:type: AssetDeviceRegistrationRef
"""
self._registered_device = registered_device
@property
def sasexpanders(self):
"""
Gets the sasexpanders of this EquipmentChassis.
:return: The sasexpanders of this EquipmentChassis.
:rtype: list[StorageSasExpanderRef]
"""
return self._sasexpanders
@sasexpanders.setter
def sasexpanders(self, sasexpanders):
"""
Sets the sasexpanders of this EquipmentChassis.
:param sasexpanders: The sasexpanders of this EquipmentChassis.
:type: list[StorageSasExpanderRef]
"""
self._sasexpanders = sasexpanders
@property
def siocs(self):
"""
Gets the siocs of this EquipmentChassis.
:return: The siocs of this EquipmentChassis.
:rtype: list[EquipmentSystemIoControllerRef]
"""
return self._siocs
@siocs.setter
def siocs(self, siocs):
"""
Sets the siocs of this EquipmentChassis.
:param siocs: The siocs of this EquipmentChassis.
:type: list[EquipmentSystemIoControllerRef]
"""
self._siocs = siocs
@property
def storage_enclosures(self):
"""
Gets the storage_enclosures of this EquipmentChassis.
:return: The storage_enclosures of this EquipmentChassis.
:rtype: list[StorageEnclosureRef]
"""
return self._storage_enclosures
@storage_enclosures.setter
def storage_enclosures(self, storage_enclosures):
"""
Sets the storage_enclosures of this EquipmentChassis.
:param storage_enclosures: The storage_enclosures of this EquipmentChassis.
:type: list[StorageEnclosureRef]
"""
self._storage_enclosures = storage_enclosures
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, EquipmentChassis):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
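# --- Hedged usage sketch (editorial addition, not generated code) ---
def _example_round_trip():
    # Placeholder values; any subset of the keyword arguments above is accepted.
    chassis = EquipmentChassis(moid="example-moid", model="UCSB-5108-AC2",
                               serial="FOX0000XXXX")
    # __eq__ compares instance dictionaries, so identical kwargs compare equal.
    assert chassis == EquipmentChassis(moid="example-moid", model="UCSB-5108-AC2",
                                       serial="FOX0000XXXX")
    return chassis.to_dict()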
| 26.90806 | 418 | 0.589282 | [
"Apache-2.0"
] | fdemello/intersight-python | intersight/models/equipment_chassis.py | 21,365 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'button_editor.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(493, 380)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.scrollArea = QtWidgets.QScrollArea(Dialog)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 473, 327))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Power Button Editor"))
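# --- Usage sketch (editorial addition): the standard pattern for pyuic5 output ---
# app = QtWidgets.QApplication([])
# dialog = QtWidgets.QDialog()
# ui = Ui_Dialog()
# ui.setupUi(dialog)
# dialog.exec_()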
| 42.175 | 106 | 0.733254 | [
"MIT"
] | BoettigerLab/Hal2 | storm_control/hal4000/illumination/button_editor_ui.py | 1,687 | Python |
#!/usr/bin/env python
# Copyright (c) 2005 Gavin E. Crooks <[email protected]>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import unittest
from corebio import *
from corebio._py3k import StringIO
from corebio.seq import *
from corebio.seq_io import *
from test_corebio import *
class test_table_io(unittest.TestCase):
def test_read(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
self.assertEqual(len(seqs), 10)
self.assertEqual(seqs[2].name, "EC0003")
self.assertEqual(len(seqs[1]), 50)
def test_read_fail(self):
f = StringIO(plain_io.example)
# Wrong alphabet
self.assertRaises(ValueError, table_io.read, f)
def test_write_seq(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
fout = StringIO()
table_io.write(fout, seqs)
fout.seek(0)
seqs2 = table_io.read(fout)
self.assertEqual(seqs, seqs2)
if __name__ == '__main__':
unittest.main()
| 33.507692 | 80 | 0.71258 | [
"MIT"
] | javicorvi/weblogo_edited | test_corebio/test_table_io.py | 2,178 | Python |
#!/usr/bin/env python
# Copyright 2015 Rackspace, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lib import script
class DecomPort(script.TeethScript):
use_ironic = True
use_neutron = True
def __init__(self):
super(DecomPort, self).__init__(
'Utility for temporarily putting a node on the decom network.')
self.add_ironic_node_arguments()
self.add_argument('command',
help='Run command',
choices=['add', 'remove'])
def run(self):
uuid = self.get_argument('node_uuid')
node = self.ironic_client.get_node(uuid)
command = self.get_argument('command')
if command == 'add':
self.neutron_client.add_decom_port(node)
elif command == 'remove':
self.neutron_client.remove_decom_port(node)
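# Example invocation (editorial addition; exact flag/positional order is an
# assumption based on the parser setup above):
#   python decom_port.py <node-uuid> add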
if __name__ == "__main__":
DecomPort().run()
| 30.5625 | 78 | 0.655078 | [
"Apache-2.0"
] | jayofdoom/onmetal-scripts | onmetal_scripts/decom_port.py | 1,467 | Python |
"""
File: weather_master.py
Name: Claire Lin
-----------------------
This program should implement a console program
that asks weather data from user to compute the
average, highest, lowest, cold days among the inputs.
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
EXIT = -100
def main():
"""
To find the highest and lowest temperature, cold days and the average.
"""
print('stanCode \"Weather Master 4.0\"!')
    # Initialize maximum/minimum with extreme sentinel values so any valid input replaces them.
maximum = -100000000
minimum = 100000000
total = 0
count = 0
cold_day = 0
while True:
temperature = int(input('Next Temperature: (or '+str(EXIT) + ' to quit)? '))
        # Exit when the user quits before entering any temperature.
if temperature == EXIT and count == 0:
print('No temperatures were entered.')
break
        # Exclude temperatures outside the plausible range and stop.
        if temperature > 90 or temperature < -100:
            print('>>> The temperature \"'+str(temperature)+'\" is out of range, so we exclude it and stop.')
break
if temperature == EXIT:
break
else:
count += 1 # count the total days.
if temperature < 16:
                cold_day += 1  # count the cold days (temperature below 16).
total += temperature # To plus all temperature.
if temperature > maximum:
maximum = temperature
if temperature < minimum:
minimum = temperature
else:
pass
if count != 0:
avg = total / count
print("")
print('Highest temperature = ' + str(maximum))
print('Lowest temperature = ' + str(minimum))
print('Average = '+str(avg))
print(str(cold_day) + ' cold day(s)')
# For checking
# print(total)
# print(count)
"""
My note:
This was the first try; while debugging I found the calculation logic was wrong.
The first value typed disappears once execution enters the while loop, and the
count of total days would include the EXIT constant.
"""
# if temperature == EXIT:
# print('No temperatures were entered.')
#
# else:
# while True:
# # if temperature < 16:
# # cold_day += 1
#
# temperature = int(input('Next Temperature: (or '+str(EXIT) + ' to quit)? '))
#
# # count the total days.
# count += 1
#
# if temperature == EXIT:
# break
#
# total += temperature
# if temperature > maximum:
# maximum = temperature
# elif temperature < minimum:
# minimum = temperature
# else:
# pass
#
# avg = total / count
# print('Highest temperature = ' + str(maximum))
# print('Lowest temperature = ' + str(minimum))
# print('Average = '+str(avg))
# print(str(cold_day) + ' cold day(s)')
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
| 28.513274 | 103 | 0.543451 | [
"MIT"
] | clairejrlin/stanCode_projects | stanCode_Projects/weather_master/weather_master.py | 3,222 | Python |
"""Module of sample legends for some commonly used geospatial datasets.
"""
import os
import pkg_resources
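# --- Hedged usage sketch (editorial addition) ---
# Each legend below maps "<code> <label>" keys to a color (hex string or RGB
# tuple). In geemap these dictionaries are typically consumed via
# Map.add_legend, e.g.:
# import geemap
# m = geemap.Map()
# m.add_legend(builtin_legend="NLCD")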
# Land Cover datasets in Earth Engine https://developers.google.com/earth-engine/datasets/tags/landcover
builtin_legends = {
# National Land Cover Database 2016 (NLCD2016) Legend https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend
'NLCD': {
'11 Open Water': '466b9f',
'12 Perennial Ice/Snow': 'd1def8',
'21 Developed, Open Space': 'dec5c5',
'22 Developed, Low Intensity': 'd99282',
'23 Developed, Medium Intensity': 'eb0000',
'24 Developed High Intensity': 'ab0000',
'31 Barren Land (Rock/Sand/Clay)': 'b3ac9f',
'41 Deciduous Forest': '68ab5f',
'42 Evergreen Forest': '1c5f2c',
'43 Mixed Forest': 'b5c58f',
'51 Dwarf Scrub': 'af963c',
'52 Shrub/Scrub': 'ccb879',
'71 Grassland/Herbaceous': 'dfdfc2',
'72 Sedge/Herbaceous': 'd1d182',
'73 Lichens': 'a3cc51',
'74 Moss': '82ba9e',
'81 Pasture/Hay': 'dcd939',
'82 Cultivated Crops': 'ab6c28',
'90 Woody Wetlands': 'b8d9eb',
'95 Emergent Herbaceous Wetlands': '6c9fb8'
},
# National Wetlands Inventory Legend: https://www.fws.gov/wetlands/data/Mapper-Wetlands-Legend.html
'NWI': {
'Freshwater- Forested and Shrub wetland': (0, 136, 55),
'Freshwater Emergent wetland': (127, 195, 28),
'Freshwater pond': (104, 140, 192),
'Estuarine and Marine wetland': (102, 194, 165),
'Riverine': (1, 144, 191),
'Lakes': (19, 0, 124),
'Estuarine and Marine Deepwater': (0, 124, 136),
'Other Freshwater wetland': (178, 134, 86)
},
# MCD12Q1.051 Land Cover Type Yearly Global 500m https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
'MODIS/051/MCD12Q1': {
'0 Water': '1c0dff',
'1 Evergreen needleleaf forest': '05450a',
'2 Evergreen broadleaf forest': '086a10',
'3 Deciduous needleleaf forest': '54a708',
'4 Deciduous broadleaf forest': '78d203',
'5 Mixed forest': '009900',
'6 Closed shrublands': 'c6b044',
'7 Open shrublands': 'dcd159',
'8 Woody savannas': 'dade48',
'9 Savannas': 'fbff13',
'10 Grasslands': 'b6ff05',
'11 Permanent wetlands': '27ff87',
'12 Croplands': 'c24f44',
'13 Urban and built-up': 'a5a5a5',
'14 Cropland/natural vegetation mosaic': 'ff6d4c',
'15 Snow and ice': '69fff8',
'16 Barren or sparsely vegetated': 'f9ffa4',
'254 Unclassified': 'ffffff'
},
# GlobCover: Global Land Cover Map https://developers.google.com/earth-engine/datasets/catalog/ESA_GLOBCOVER_L4_200901_200912_V2_3
'GLOBCOVER': {
'11 Post-flooding or irrigated croplands': 'aaefef',
'14 Rainfed croplands': 'ffff63',
'20 Mosaic cropland (50-70%) / vegetation (grassland, shrubland, forest) (20-50%)': 'dcef63',
'30 Mosaic vegetation (grassland, shrubland, forest) (50-70%) / cropland (20-50%)': 'cdcd64',
'40 Closed to open (>15%) broadleaved evergreen and/or semi-deciduous forest (>5m)': '006300',
'50 Closed (>40%) broadleaved deciduous forest (>5m)': '009f00',
'60 Open (15-40%) broadleaved deciduous forest (>5m)': 'aac700',
'70 Closed (>40%) needleleaved evergreen forest (>5m)': '003b00',
'90 Open (15-40%) needleleaved deciduous or evergreen forest (>5m)': '286300',
'100 Closed to open (>15%) mixed broadleaved and needleleaved forest (>5m)': '788300',
'110 Mosaic forest-shrubland (50-70%) / grassland (20-50%)': '8d9f00',
'120 Mosaic grassland (50-70%) / forest-shrubland (20-50%)': 'bd9500',
'130 Closed to open (>15%) shrubland (<5m)': '956300',
'140 Closed to open (>15%) grassland': 'ffb431',
'150 Sparse (>15%) vegetation (woody vegetation, shrubs, grassland)': 'ffebae',
'160 Closed (>40%) broadleaved forest regularly flooded - Fresh water': '00785a',
'170 Closed (>40%) broadleaved semi-deciduous and/or evergreen forest regularly flooded - saline water': '009578',
'180 Closed to open (>15%) vegetation (grassland, shrubland, woody vegetation) on regularly flooded or waterlogged soil - fresh, brackish or saline water': '00dc83',
'190 Artificial surfaces and associated areas (urban areas >50%) GLOBCOVER 2009': 'c31300',
'200 Bare areas': 'fff5d6',
'210 Water bodies': '0046c7',
'220 Permanent snow and ice': 'ffffff',
'230 Unclassified': '743411'
},
# Global PALSAR-2/PALSAR Forest/Non-Forest Map https://developers.google.com/earth-engine/datasets/catalog/JAXA_ALOS_PALSAR_YEARLY_FNF
'JAXA/PALSAR': {
'1 Forest': '006400',
'2 Non-Forest': 'FEFF99',
'3 Water': '0000FF'
},
# MCD12Q1.006 MODIS Land Cover Type Yearly Global 500m https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MCD12Q1
'MODIS/006/MCD12Q1': {
'1 Evergreen Needleleaf Forests: dominated by evergreen conifer trees (canopy >2m). Tree cover >60%.': '05450a',
'2 Evergreen Broadleaf Forests: dominated by evergreen broadleaf and palmate trees (canopy >2m). Tree cover >60%.': '086a10',
'3 Deciduous Needleleaf Forests: dominated by deciduous needleleaf (larch) trees (canopy >2m). Tree cover >60%.': '54a708',
'4 Deciduous Broadleaf Forests: dominated by deciduous broadleaf trees (canopy >2m). Tree cover >60%.': '78d203',
'5 Mixed Forests: dominated by neither deciduous nor evergreen (40-60% of each) tree type (canopy >2m). Tree cover >60%.': '009900',
'6 Closed Shrublands: dominated by woody perennials (1-2m height) >60% cover.': 'c6b044',
'7 Open Shrublands: dominated by woody perennials (1-2m height) 10-60% cover.': 'dcd159',
'8 Woody Savannas: tree cover 30-60% (canopy >2m).': 'dade48',
'9 Savannas: tree cover 10-30% (canopy >2m).': 'fbff13',
'10 Grasslands: dominated by herbaceous annuals (<2m).': 'b6ff05',
'11 Permanent Wetlands: permanently inundated lands with 30-60% water cover and >10% vegetated cover.': '27ff87',
'12 Croplands: at least 60% of area is cultivated cropland.': 'c24f44',
'13 Urban and Built-up Lands: at least 30% impervious surface area including building materials, asphalt and vehicles.': 'a5a5a5',
'14 Cropland/Natural Vegetation Mosaics: mosaics of small-scale cultivation 40-60% with natural tree, shrub, or herbaceous vegetation.': 'ff6d4c',
'15 Permanent Snow and Ice: at least 60% of area is covered by snow and ice for at least 10 months of the year.': '69fff8',
'16 Barren: at least 60% of area is non-vegetated barren (sand, rock, soil) areas with less than 10% vegetation.': 'f9ffa4',
'17 Water Bodies: at least 60% of area is covered by permanent water bodies.': '1c0dff'
},
# Oxford MAP: Malaria Atlas Project Fractional International Geosphere-Biosphere Programme Landcover https://developers.google.com/earth-engine/datasets/catalog/Oxford_MAP_IGBP_Fractional_Landcover_5km_Annual
'Oxford': {
'0 Water': '032f7e',
        '1 Evergreen_Needleleaf_Forest': '02740b',
'2 Evergreen_Broadleaf_Forest': '02740b',
'3 Deciduous_Needleleaf_Forest': '8cf502',
'4 Deciduous_Broadleaf_Forest': '8cf502',
'5 Mixed_Forest': 'a4da01',
'6 Closed_Shrublands': 'ffbd05',
'7 Open_Shrublands': 'ffbd05',
'8 Woody_Savannas': '7a5a02',
'9 Savannas': 'f0ff0f',
'10 Grasslands': '869b36',
'11 Permanent_Wetlands': '6091b4',
'12 Croplands': 'ff4e4e',
'13 Urban_and_Built-up': '999999',
'14 Cropland_Natural_Vegetation_Mosaic': 'ff4e4e',
'15 Snow_and_Ice': 'ffffff',
'16 Barren_Or_Sparsely_Vegetated': 'feffc0',
'17 Unclassified': '020202'
},
# Canada AAFC Annual Crop Inventory https://developers.google.com/earth-engine/datasets/catalog/AAFC_ACI
'AAFC/ACI': {
'10 Cloud': '000000',
'20 Water': '3333ff',
'30 Exposed Land and Barren': '996666',
'34 Urban and Developed': 'cc6699',
'35 Greenhouses': 'e1e1e1',
'50 Shrubland': 'ffff00',
'80 Wetland': '993399',
'110 Grassland': 'cccc00',
'120 Agriculture (undifferentiated)': 'cc6600',
'122 Pasture and Forages': 'ffcc33',
'130 Too Wet to be Seeded': '7899f6',
'131 Fallow': 'ff9900',
'132 Cereals': '660000',
'133 Barley': 'dae31d',
'134 Other Grains': 'd6cc00',
'135 Millet': 'd2db25',
'136 Oats': 'd1d52b',
'137 Rye': 'cace32',
'138 Spelt': 'c3c63a',
'139 Triticale': 'b9bc44',
'140 Wheat': 'a7b34d',
'141 Switchgrass': 'b9c64e',
'142 Sorghum': '999900',
'145 Winter Wheat': '92a55b',
'146 Spring Wheat': '809769',
'147 Corn': 'ffff99',
'148 Tobacco': '98887c',
'149 Ginseng': '799b93',
'150 Oilseeds': '5ea263',
'151 Borage': '52ae77',
'152 Camelina': '41bf7a',
'153 Canola and Rapeseed': 'd6ff70',
'154 Flaxseed': '8c8cff',
'155 Mustard': 'd6cc00',
'156 Safflower': 'ff7f00',
'157 Sunflower': '315491',
'158 Soybeans': 'cc9933',
'160 Pulses': '896e43',
'162 Peas': '8f6c3d',
'167 Beans': '82654a',
'174 Lentils': 'b85900',
'175 Vegetables': 'b74b15',
'176 Tomatoes': 'ff8a8a',
'177 Potatoes': 'ffcccc',
'178 Sugarbeets': '6f55ca',
'179 Other Vegetables': 'ffccff',
'180 Fruits': 'dc5424',
'181 Berries': 'd05a30',
'182 Blueberry': 'd20000',
'183 Cranberry': 'cc0000',
'185 Other Berry': 'dc3200',
'188 Orchards': 'ff6666',
'189 Other Fruits': 'c5453b',
'190 Vineyards': '7442bd',
'191 Hops': 'ffcccc',
'192 Sod': 'b5fb05',
'193 Herbs': 'ccff05',
'194 Nursery': '07f98c',
'195 Buckwheat': '00ffcc',
'196 Canaryseed': 'cc33cc',
'197 Hemp': '8e7672',
'198 Vetch': 'b1954f',
'199 Other Crops': '749a66',
'200 Forest (undifferentiated)': '009900',
'210 Coniferous': '006600',
'220 Broadleaf': '00cc00',
'230 Mixedwood': 'cc9900'
},
# Copernicus CORINE Land Cover https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_CORINE_V20_100m
'COPERNICUS/CORINE/V20/100m': {
'111 Artificial surfaces > Urban fabric > Continuous urban fabric': 'E6004D',
'112 Artificial surfaces > Urban fabric > Discontinuous urban fabric': 'FF0000',
'121 Artificial surfaces > Industrial, commercial, and transport units > Industrial or commercial units': 'CC4DF2',
'122 Artificial surfaces > Industrial, commercial, and transport units > Road and rail networks and associated land': 'CC0000',
'123 Artificial surfaces > Industrial, commercial, and transport units > Port areas': 'E6CCCC',
'124 Artificial surfaces > Industrial, commercial, and transport units > Airports': 'E6CCE6',
'131 Artificial surfaces > Mine, dump, and construction sites > Mineral extraction sites': 'A600CC',
'132 Artificial surfaces > Mine, dump, and construction sites > Dump sites': 'A64DCC',
'133 Artificial surfaces > Mine, dump, and construction sites > Construction sites': 'FF4DFF',
'141 Artificial surfaces > Artificial, non-agricultural vegetated areas > Green urban areas': 'FFA6FF',
'142 Artificial surfaces > Artificial, non-agricultural vegetated areas > Sport and leisure facilities': 'FFE6FF',
'211 Agricultural areas > Arable land > Non-irrigated arable land': 'FFFFA8',
'212 Agricultural areas > Arable land > Permanently irrigated land': 'FFFF00',
'213 Agricultural areas > Arable land > Rice fields': 'E6E600',
'221 Agricultural areas > Permanent crops > Vineyards': 'E68000',
'222 Agricultural areas > Permanent crops > Fruit trees and berry plantations': 'F2A64D',
'223 Agricultural areas > Permanent crops > Olive groves': 'E6A600',
'231 Agricultural areas > Pastures > Pastures': 'E6E64D',
'241 Agricultural areas > Heterogeneous agricultural areas > Annual crops associated with permanent crops': 'FFE6A6',
'242 Agricultural areas > Heterogeneous agricultural areas > Complex cultivation patterns': 'FFE64D',
'243 Agricultural areas > Heterogeneous agricultural areas > Land principally occupied by agriculture, with significant areas of natural vegetation': 'E6CC4D',
'244 Agricultural areas > Heterogeneous agricultural areas > Agro-forestry areas': 'F2CCA6',
'311 Forest and semi natural areas > Forests > Broad-leaved forest': '80FF00',
'312 Forest and semi natural areas > Forests > Coniferous forest': '00A600',
'313 Forest and semi natural areas > Forests > Mixed forest': '4DFF00',
'321 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Natural grasslands': 'CCF24D',
'322 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Moors and heathland': 'A6FF80',
'323 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Sclerophyllous vegetation': 'A6E64D',
'324 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Transitional woodland-shrub': 'A6F200',
'331 Forest and semi natural areas > Open spaces with little or no vegetation > Beaches, dunes, sands': 'E6E6E6',
'332 Forest and semi natural areas > Open spaces with little or no vegetation > Bare rocks': 'CCCCCC',
'333 Forest and semi natural areas > Open spaces with little or no vegetation > Sparsely vegetated areas': 'CCFFCC',
'334 Forest and semi natural areas > Open spaces with little or no vegetation > Burnt areas': '000000',
'335 Forest and semi natural areas > Open spaces with little or no vegetation > Glaciers and perpetual snow': 'A6E6CC',
'411 Wetlands > Inland wetlands > Inland marshes': 'A6A6FF',
'412 Wetlands > Inland wetlands > Peat bogs': '4D4DFF',
'421 Wetlands > Maritime wetlands > Salt marshes': 'CCCCFF',
'422 Wetlands > Maritime wetlands > Salines': 'E6E6FF',
'423 Wetlands > Maritime wetlands > Intertidal flats': 'A6A6E6',
'511 Water bodies > Inland waters > Water courses': '00CCF2',
'512 Water bodies > Inland waters > Water bodies': '80F2E6',
'521 Water bodies > Marine waters > Coastal lagoons': '00FFA6',
'522 Water bodies > Marine waters > Estuaries': 'A6FFE6',
'523 Water bodies > Marine waters > Sea and ocean': 'E6F2FF'
},
# Copernicus Global Land Cover Layers: CGLS-LC100 collection 2 https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_Landcover_100m_Proba-V_Global
'COPERNICUS/Landcover/100m/Proba-V/Global': {
'0 Unknown': '282828',
'20 Shrubs. Woody perennial plants with persistent and woody stems and without any defined main stem being less than 5 m tall. The shrub foliage can be either evergreen or deciduous.': 'FFBB22',
'30 Herbaceous vegetation. Plants without persistent stem or shoots above ground and lacking definite firm structure. Tree and shrub cover is less than 10 %.': 'FFFF4C',
'40 Cultivated and managed vegetation / agriculture. Lands covered with temporary crops followed by harvest and a bare soil period (e.g., single and multiple cropping systems). Note that perennial woody crops will be classified as the appropriate forest or shrub land cover type.': 'F096FF',
'50 Urban / built up. Land covered by buildings and other man-made structures.': 'FA0000',
'60 Bare / sparse vegetation. Lands with exposed soil, sand, or rocks and never has more than 10 % vegetated cover during any time of the year.': 'B4B4B4',
'70 Snow and ice. Lands under snow or ice cover throughout the year.': 'F0F0F0',
'80 Permanent water bodies. Lakes, reservoirs, and rivers. Can be either fresh or salt-water bodies.': '0032C8',
'90 Herbaceous wetland. Lands with a permanent mixture of water and herbaceous or woody vegetation. The vegetation can be present in either salt, brackish, or fresh water.': '0096A0',
'100 Moss and lichen.': 'FAE6A0',
'111 Closed forest, evergreen needle leaf. Tree canopy >70 %, almost all needle leaf trees remain green all year. Canopy is never without green foliage.': '58481F',
'112 Closed forest, evergreen broad leaf. Tree canopy >70 %, almost all broadleaf trees remain green year round. Canopy is never without green foliage.': '009900',
'113 Closed forest, deciduous needle leaf. Tree canopy >70 %, consists of seasonal needle leaf tree communities with an annual cycle of leaf-on and leaf-off periods.': '70663E',
'114 Closed forest, deciduous broad leaf. Tree canopy >70 %, consists of seasonal broadleaf tree communities with an annual cycle of leaf-on and leaf-off periods.': '00CC00',
'115 Closed forest, mixed.': '4E751F',
'116 Closed forest, not matching any of the other definitions.': '007800',
'121 Open forest, evergreen needle leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, almost all needle leaf trees remain green all year. Canopy is never without green foliage.': '666000',
'122 Open forest, evergreen broad leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, almost all broadleaf trees remain green year round. Canopy is never without green foliage.': '8DB400',
'123 Open forest, deciduous needle leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, consists of seasonal needle leaf tree communities with an annual cycle of leaf-on and leaf-off periods.': '8D7400',
'124 Open forest, deciduous broad leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, consists of seasonal broadleaf tree communities with an annual cycle of leaf-on and leaf-off periods.': 'A0DC00',
'125 Open forest, mixed.': '929900',
'126 Open forest, not matching any of the other definitions.': '648C00',
'200 Oceans, seas. Can be either fresh or salt-water bodies.': '000080'
},
# USDA NASS Cropland Data Layers https://developers.google.com/earth-engine/datasets/catalog/USDA_NASS_CDL
'USDA/NASS/CDL': {
'1 Corn': 'ffd300',
'2 Cotton': 'ff2626',
'3 Rice': '00a8e2',
'4 Sorghum': 'ff9e0a',
'5 Soybeans': '267000',
'6 Sunflower': 'ffff00',
'10 Peanuts': '70a500',
'11 Tobacco': '00af49',
'12 Sweet Corn': 'dda50a',
'13 Pop or Orn Corn': 'dda50a',
'14 Mint': '7cd3ff',
'21 Barley': 'e2007c',
'22 Durum Wheat': '896054',
'23 Spring Wheat': 'd8b56b',
'24 Winter Wheat': 'a57000',
'25 Other Small Grains': 'd69ebc',
'26 Dbl Crop WinWht/Soybeans': '707000',
'27 Rye': 'aa007c',
'28 Oats': 'a05989',
'29 Millet': '700049',
'30 Speltz': 'd69ebc',
'31 Canola': 'd1ff00',
'32 Flaxseed': '7c99ff',
'33 Safflower': 'd6d600',
'34 Rape Seed': 'd1ff00',
'35 Mustard': '00af49',
'36 Alfalfa': 'ffa5e2',
'37 Other Hay/Non Alfalfa': 'a5f28c',
'38 Camelina': '00af49',
'39 Buckwheat': 'd69ebc',
'41 Sugarbeets': 'a800e2',
'42 Dry Beans': 'a50000',
'43 Potatoes': '702600',
'44 Other Crops': '00af49',
'45 Sugarcane': 'af7cff',
'46 Sweet Potatoes': '702600',
'47 Misc Vegs & Fruits': 'ff6666',
'48 Watermelons': 'ff6666',
'49 Onions': 'ffcc66',
'50 Cucumbers': 'ff6666',
'51 Chick Peas': '00af49',
'52 Lentils': '00ddaf',
'53 Peas': '54ff00',
'54 Tomatoes': 'f2a377',
'55 Caneberries': 'ff6666',
'56 Hops': '00af49',
'57 Herbs': '7cd3ff',
'58 Clover/Wildflowers': 'e8bfff',
'59 Sod/Grass Seed': 'afffdd',
'60 Switchgrass': '00af49',
'61 Fallow/Idle Cropland': 'bfbf77',
'63 Forest': '93cc93',
'64 Shrubland': 'c6d69e',
'65 Barren': 'ccbfa3',
'66 Cherries': 'ff00ff',
'67 Peaches': 'ff8eaa',
'68 Apples': 'ba004f',
'69 Grapes': '704489',
'70 Christmas Trees': '007777',
'71 Other Tree Crops': 'af9970',
'72 Citrus': 'ffff7c',
'74 Pecans': 'b5705b',
'75 Almonds': '00a582',
'76 Walnuts': 'e8d6af',
'77 Pears': 'af9970',
'81 Clouds/No Data': 'f2f2f2',
'82 Developed': '999999',
'83 Water': '4970a3',
'87 Wetlands': '7cafaf',
'88 Nonag/Undefined': 'e8ffbf',
'92 Aquaculture': '00ffff',
'111 Open Water': '4970a3',
'112 Perennial Ice/Snow': 'd3e2f9',
'121 Developed/Open Space': '999999',
'122 Developed/Low Intensity': '999999',
'123 Developed/Med Intensity': '999999',
'124 Developed/High Intensity': '999999',
'131 Barren': 'ccbfa3',
'141 Deciduous Forest': '93cc93',
'142 Evergreen Forest': '93cc93',
'143 Mixed Forest': '93cc93',
'152 Shrubland': 'c6d69e',
'176 Grassland/Pasture': 'e8ffbf',
'190 Woody Wetlands': '7cafaf',
'195 Herbaceous Wetlands': '7cafaf',
'204 Pistachios': '00ff8c',
'205 Triticale': 'd69ebc',
'206 Carrots': 'ff6666',
'207 Asparagus': 'ff6666',
'208 Garlic': 'ff6666',
'209 Cantaloupes': 'ff6666',
'210 Prunes': 'ff8eaa',
'211 Olives': '334933',
'212 Oranges': 'e27026',
'213 Honeydew Melons': 'ff6666',
'214 Broccoli': 'ff6666',
'216 Peppers': 'ff6666',
'217 Pomegranates': 'af9970',
'218 Nectarines': 'ff8eaa',
'219 Greens': 'ff6666',
'220 Plums': 'ff8eaa',
'221 Strawberries': 'ff6666',
'222 Squash': 'ff6666',
'223 Apricots': 'ff8eaa',
'224 Vetch': '00af49',
'225 Dbl Crop WinWht/Corn': 'ffd300',
'226 Dbl Crop Oats/Corn': 'ffd300',
'227 Lettuce': 'ff6666',
'229 Pumpkins': 'ff6666',
'230 Dbl Crop Lettuce/Durum Wht': '896054',
'231 Dbl Crop Lettuce/Cantaloupe': 'ff6666',
'232 Dbl Crop Lettuce/Cotton': 'ff2626',
'233 Dbl Crop Lettuce/Barley': 'e2007c',
'234 Dbl Crop Durum Wht/Sorghum': 'ff9e0a',
'235 Dbl Crop Barley/Sorghum': 'ff9e0a',
'236 Dbl Crop WinWht/Sorghum': 'a57000',
'237 Dbl Crop Barley/Corn': 'ffd300',
'238 Dbl Crop WinWht/Cotton': 'a57000',
'239 Dbl Crop Soybeans/Cotton': '267000',
'240 Dbl Crop Soybeans/Oats': '267000',
'241 Dbl Crop Corn/Soybeans': 'ffd300',
'242 Blueberries': '000099',
'243 Cabbage': 'ff6666',
'244 Cauliflower': 'ff6666',
'245 Celery': 'ff6666',
'246 Radishes': 'ff6666',
'247 Turnips': 'ff6666',
'248 Eggplants': 'ff6666',
'249 Gourds': 'ff6666',
'250 Cranberries': 'ff6666',
'254 Dbl Crop Barley/Soybeans': '267000'
}
}
def ee_table_to_legend(in_table, out_file):
"""Converts an Earth Engine color table to a legend dictionary.
Args:
in_table (str): The input file path (*.txt) to the Earth Engine color table.
out_file (str): The output file path (*.txt) for the legend dictionary.
"""
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
# Template legend table shipped with geemap (kept here for reference)
ee_legend_table = os.path.join(pkg_dir, 'data/template/ee_legend_table.txt')
if not os.path.exists(in_table):
print('The class table does not exist.')
return
out_file = os.path.abspath(out_file)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
legend_dict = {}
with open(in_table) as f:
lines = f.readlines()
# Skip the header row; each remaining row is tab-separated as
# value <tab> color <tab> description
for index, line in enumerate(lines):
if index > 0:
items = line.split("\t")
items = [item.strip() for item in items]
color = items[1]
key = items[0] + " " + items[2]
legend_dict[key] = color
out_lines = []
out_lines.append('{\n')
for key in legend_dict.keys():
line = "\t'{}': '{}',\n".format(key, legend_dict[key])
out_lines.append(line)
# Strip the trailing comma from the final entry
out_lines[-1] = out_lines[-1].rstrip()[:-1] + '\n'
out_lines.append('}\n')
with open(out_file, 'w') as f:
f.writelines(out_lines)
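# Example usage (hypothetical file paths; the input is a tab-separated class
# table copied from an Earth Engine dataset page, with columns
# value / color / description):
#
#     ee_table_to_legend('nlcd_class_table.txt', 'out/nlcd_legend.txt')
#
# The output file then contains a Python-dict-style legend mapping
# '<value> <description>' keys to hex colour strings, in the same format as
# the builtin legends above.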
| 53.958874 | 299 | 0.625216 | [
"MIT"
] | GSRS/geemap | geemap/legends.py | 24,929 | Python |
"""
naivefit.py
A NaiveFit follows the approach described in Crundall et al. (2019).
NaiveFit begins with an initial guess, provided by the user, of an N component fit.
If no guess is provided, all provided stars are assumed to be members of one
component.
NaiveFit will perform an Expectation Maximisation on this N component fit until
converged.
Then NaiveFit will test increasing the component count to N+1. This is done by,
for each of the N existing components, substituting it with two similar
components with slight age offsets and running an EM fit. The result
is N separate "N+1 component" fits. The best one will be compared to the
"N component" fit using the Bayesian Information Criterion (BIC). If the
BIC has improved, this "N+1 component fit" will be taken as the best fit so far.
This process iterates until adding a component fails to yield a better fit.
"""
import numpy as np
import os
import sys
import logging
from distutils.dir_util import mkpath
import random
import uuid
#~ from emcee.utils import MPIPool
from multiprocessing import Pool
from multiprocessing import cpu_count
sys.path.insert(0, os.path.abspath('..'))
from . import expectmax
from . import readparam
from . import tabletool
from . import component
from . import traceorbit
# python3 throws FileNotFoundError that is essentially the same as IOError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def dummy_trace_orbit_func(loc, times=None):
"""
Purely for testing purposes
Dummy trace orbit func to skip irrelevant computation
A little constraint on age (since otherwise it's a free floating
parameter)
"""
if times is not None:
if np.all(times > 1.):
return loc + 1000.
return loc
def log_message(msg, symbol='.', surround=False):
"""Little formatting helper"""
res = '{}{:^40}{}'.format(5 * symbol, msg, 5 * symbol)
if surround:
res = '\n{}\n{}\n{}'.format(50 * symbol, res, 50 * symbol)
logging.info(res)
class NaiveFit(object):
"""
Many arguments are taken straight from the fit_pars dictionary,
so they are not listed as explicit arguments here.
Description of parameters can be found in README.md along with their
default values and whether they are required.
"""
# Internal filestems that Chronostar uses to store results throughout a fit
# Should not be changed, otherwise Chronostar may struggle to retrieve progress
# from previous fits.
final_comps_file = 'final_comps.npy'
final_med_and_spans_file = 'final_med_and_spans.npy'
final_memb_probs_file = 'final_membership.npy'
# For detailed description of parameters, see the main README.md file
# in parent directory.
DEFAULT_FIT_PARS = {
'results_dir':'',
# Output from dataprep, XYZUVW data, plus background overlaps
# Can be a filename pointing to an astropy table, or an actual table
'data_table':None,
# Whether to look for dX, .. c_XY or X_error, .. corr_X_Y in
# the column names
'historical_colnames':False,
# Column name for stellar IDs. This is used at the end when generating
# final fits table with IDs and membership probabilities.
# This is optional.
'stellar_id_colname': None,
# File name that points to a stored list of components, typically from
# a previous fit. Some example filenames could be:
# - 'some/prev/fit/final_comps.npy
# - 'some/prev/fit/2/A/final_comps.npy
# Alternatively, if you already have the list of components, just
# provide them to `init_comps`. Don't do both.
# 'init_comps_file':None, # TODO: Is this redundant with 'init_comps'
'init_comps':None,
# One of these two are required if initialising a run with ncomps != 1
# One can also initialise a Chronostar run with memberships.
# Array is [nstars, ncomps] float array
# Each row should sum to 1.
# Same as in 'final_membership.npy'
# TODO: implement this in a way that info can be passed in from text file
# e.g. a path to a file name
# for now, can only be used from within a script, i.e. given a numpy
# array object
'init_memb_probs':None,
# Provide a string name that corresponds to a ComponentClass
# An actual Component class will be inserted into the parameter
# dictionary to be passed into expectmax
'component':'sphere',
'max_comp_count':20,
'max_em_iterations':200,
'nthreads':1, # TODO: NOT IMPLEMENTED
'use_background':True,
'overwrite_prev_run':False,
'burnin':500,
'sampling_steps':1000,
'store_burnin_chains':False,
'ignore_stable_comps':True,
# If loading parameters from text file, can provide strings:
# - 'epicyclic' for epicyclic
# - 'dummy_trace_orbit_func' for a trace orbit function that doesn't do anything (for testing)
# Alternatively, if building up the parameter dictionary in a script, can
# provide actual function.
'trace_orbit_func':traceorbit.trace_cartesian_orbit,
# MZ
# Specify what optimisation method in the maximisation step of
# the EM algorithm to use. Default: emcee. Also available:
# In principle any method from scipy.optimise.minimise, but
# here we recommend Nelder-Mead (because the initialisation
# with any additional arguments, e.g. Jacobian etc. is not
# implemented in Chronostar).
# 'emcee' | 'Nelder-Mead'
'optimisation_method': 'emcee',
# Optimise components in parallel in expectmax.maximise.
'nprocess_ncomp': False,
# Overwrite final results in a fits file
'overwrite_fits': False,
# How to split group: in age or in space?
'split_group': 'age',
'par_log_file':'fit_pars.log',
}
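# A minimal fit_pars dictionary might look like the following (paths are
# placeholders; every omitted key falls back to DEFAULT_FIT_PARS):
#
#     my_fit_pars = {
#         'results_dir': 'results/my_assoc/',
#         'data_table': 'data/my_assoc_xyzuvw.fits',
#         'component': 'sphere',
#     }
#     NaiveFit(my_fit_pars).run_fit()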
def __init__(self, fit_pars):
"""
Parameters
----------
fit_pars : str -or- dictionary
If a string, `fit_pars` should be a path to a parameter file which
can be parsed by readparam.readParam, to construct a dictionary.
Alternatively, an actual dictionary can be passed in. See README.md
for a description of parameters.
"""
# Parse parameter file if required
if type(fit_pars) is str:
fit_pars = readparam.readParam(fit_pars, default_pars=self.DEFAULT_FIT_PARS)
# Make a new dictionary, with priority given to contents of fit_pars
self.fit_pars = dict(self.DEFAULT_FIT_PARS)
self.fit_pars.update(fit_pars)
assert type(self.fit_pars) is dict
# MZ: Make sure 'par_log_file' is written into the results folder
self.fit_pars['par_log_file'] = os.path.join(self.fit_pars['results_dir'], self.fit_pars['par_log_file'])
# Data prep should already have been completed, so we simply build
# the dictionary of arrays from the astropy table
self.data_dict = tabletool.build_data_dict_from_table(self.fit_pars['data_table'],
historical=self.fit_pars['historical_colnames'])
# The NaiveFit approach is to assume starting with 1 component
self.ncomps = 1
# Import suitable component class
if self.fit_pars['component'] == 'sphere':
self.Component = component.SphereComponent
self.fit_pars['Component'] = component.SphereComponent
elif self.fit_pars['component'] == 'ellip':
self.Component = component.EllipComponent
self.fit_pars['Component'] = component.EllipComponent
else:
raise UserWarning('Unknown (or missing) component parametrisation')
# Check results directory is valid
# If path exists, make a new results_directory with a random int
if os.path.exists(self.fit_pars['results_dir']) and \
not self.fit_pars['overwrite_prev_run']:
rdir = '{}_{}'.format(self.fit_pars['results_dir'].rstrip('/'),
random.randint(0, 1000))
else:
rdir = self.fit_pars['results_dir']
self.rdir = rdir.rstrip('/') + '/'
mkpath(self.rdir)
assert os.access(self.rdir, os.W_OK)
# Log fit parameters,
readparam.log_used_pars(self.fit_pars, default_pars=self.DEFAULT_FIT_PARS)
# Now that results directory is set up, can set up log file
logging.basicConfig(filename=self.rdir + 'log.log', level=logging.INFO)
# Make some logs about how many iterations (+ other stuff) code can run for
log_message(msg='Component count cap set to {}'.format(
self.fit_pars['max_comp_count']),
symbol='+', surround=True)
log_message(msg='Iteration count cap set to {}'.format(
self.fit_pars['max_em_iterations']),
symbol='+', surround=True)
# Check nthreads does not exceed hardware
if self.fit_pars['nthreads'] > cpu_count() - 1:
raise UserWarning('Provided nthreads exceeds cpu count on this machine. '
'Remember to leave one cpu free for the master thread!')
# MZ: If nthreads > 1, create a multiprocessing Pool and pass it through
# fit_pars so the maximisation step can parallelise over components
if self.fit_pars['nthreads'] > 1:
#self.pool = MPIPool()
log_message('pool = Pool(nthreads) = pool(%d)'%self.fit_pars['nthreads'])
self.pool = Pool(self.fit_pars['nthreads'])
self.fit_pars['pool'] = self.pool
else:
self.pool = None
# ------------------------------------------------------------
# ----- SETTING UP RUN CUSTOMISATIONS ----------------------
# ------------------------------------------------------------
# Set up trace_orbit_func
if self.fit_pars['trace_orbit_func'] == 'dummy_trace_orbit_func':
self.fit_pars['trace_orbit_func'] = dummy_trace_orbit_func
elif self.fit_pars['trace_orbit_func'] == 'epicyclic':
log_message('trace_orbit: epicyclic')
self.fit_pars['trace_orbit_func'] = traceorbit.trace_epicyclic_orbit
else:
self.fit_pars['trace_orbit_func'] = traceorbit.trace_cartesian_orbit
if type(self.fit_pars['init_comps']) is str:
self.fit_pars['init_comps'] = self.Component.load_raw_components(
self.fit_pars['init_comps'])
self.ncomps = len(self.fit_pars['init_comps'])
print('Managed to load in init_comps from file')
elif self.fit_pars['init_comps'] is not None:
# init_comps was provided directly as a list of Component objects
self.ncomps = len(self.fit_pars['init_comps'])
else:
print("'init_comps' is initialised as None")
# TODO: If initialising with membership probabilities, adjust self.ncomps
def build_comps_from_chains(self, run_dir):
"""
Build Component objects from stored emcee chains and corresponding
lnprobs.
Parameters
----------
run_dir: str
Directory of an EM fit, which in the context of NaiveFit will be
e.g. 'myfit/1', or 'myfit/2/A'
Returns
-------
comps: [Component]
A list of components that correspond to the best fit from the
run in question.
"""
logging.info('Component class has been modified, reconstructing '
'from chain')
comps = self.ncomps * [None]
for i in range(self.ncomps):
final_cdir = run_dir + 'final/comp{}/'.format(i)
chain = np.load(final_cdir + 'final_chain.npy')
lnprob = np.load(final_cdir + 'final_lnprob.npy')
npars = len(self.Component.PARAMETER_FORMAT)
best_ix = np.argmax(lnprob)
best_pars = chain.reshape(-1, npars)[best_ix]
comps[i] = self.Component(emcee_pars=best_pars)
self.Component.store_raw_components(
str(run_dir + 'final/' + self.final_comps_file),
comps)
return comps
def log_score_comparison(self, prev, new):
"""
Purely a logging helper function.
Log BIC comparisons.
Parameters
----------
prev: dict
A dictionary of scores from the previous run with the following entries
- bic: the Bayesian Information Criterion
- lnlike : the log likelihood
- lnpost : the log posterior
new: dict
A dictionary of scores from the new run, with identical entries as
`prev`
Result
------
None
"""
if new['bic'] < prev['bic']:
logging.info("Extra component has improved BIC...")
logging.info(
"New BIC: {} < Old BIC: {}".format(new['bic'], prev['bic']))
else:
logging.info("Extra component has worsened BIC...")
logging.info(
"New BIC: {} > Old BIC: {}".format(new['bic'], prev['bic']))
logging.info("lnlike: {} | {}".format(new['lnlike'], prev['lnlike']))
logging.info("lnpost: {} | {}".format(new['lnpost'], prev['lnpost']))
def build_init_comps(self, prev_comps, split_comp_ix, prev_med_and_spans,
memb_probs):
"""
Given a list of converged components from a N component fit, generate
a list of N+1 components with which to initialise an EM run.
This is done by taking the target component, `prev_comps[split_comp_ix]`,
and replacing it in the list of comps with two components
of slightly lower and higher age.
Parameters
----------
prev_comps : [N] list of Component objects
List of components from the N component fit
split_comp_ix : int
The index of component which is to be split into two
prev_med_and_spans : [ncomps,npars,3] np.array
The median and spans of each component's parameters from the previous fit
Return
------
init_comps: [N+1] list of Component objects
Side effects
------------
Updates self.fit_pars['init_comps'] with a [N+1] list of Component
objects
"""
target_comp = prev_comps[split_comp_ix]
assert isinstance(target_comp, self.Component)
# Decompose and replace the ith component with two new components
# by using the 16th and 84th percentile ages from previous run
if self.fit_pars['split_group']=='age':
if self.fit_pars['optimisation_method']=='emcee':
split_comps = target_comp.split_group_age(
lo_age=prev_med_and_spans[split_comp_ix, -1, 1],
hi_age=prev_med_and_spans[split_comp_ix, -1, 2])
elif self.fit_pars['optimisation_method']=='Nelder-Mead':
age = target_comp.get_age()
split_comps = target_comp.split_group_age( # TODO: Maybe even smaller change
lo_age=0.8*age,
hi_age=1.2*age)
elif self.fit_pars['split_group']=='spatial':
split_comps = target_comp.split_group_spatial(self.data_dict,
memb_probs[:,split_comp_ix])
else:
raise UserWarning('Unknown split_group: {}'.format(
self.fit_pars['split_group']))
init_comps = list(prev_comps)
init_comps.pop(split_comp_ix)
init_comps.insert(split_comp_ix, split_comps[1])
init_comps.insert(split_comp_ix, split_comps[0])
return init_comps
def run_em_unless_loadable(self, run_dir):
"""
Run an EM fit, but only if results are not loadable from a previous run
"""
try:
# This fails when gradient descent is used and med_and_spans are not meaningful.
try:
med_and_spans = np.load(os.path.join(run_dir, 'final/', self.final_med_and_spans_file))
except ValueError:
logging.info('med_and_spans not read. Presumably you are using the gradient descent optimisation procedure?')
med_and_spans = [None]
memb_probs = np.load(os.path.join(
run_dir, 'final/', self.final_memb_probs_file))
comps = self.Component.load_raw_components(
str(os.path.join(run_dir, 'final/', self.final_comps_file)))
logging.info('Loaded from previous run')
# Handle case where Component class has been modified and can't
# load the raw components
except AttributeError:
# TODO: check that the final chains looked for are guaranteed to be saved
comps = self.build_comps_from_chains(run_dir)
# Handle the case where files are missing, which means we must
# perform the fit.
#~ except (IOError, FileNotFoundError) as e:
except IOError:
comps, med_and_spans, memb_probs = \
expectmax.fit_many_comps(data=self.data_dict,
ncomps=self.ncomps, rdir=run_dir,
**self.fit_pars)
# Since init_comps and init_memb_probs are only meant for one-time use,
# we clear them to avoid any future usage
self.fit_pars['init_comps'] = None
self.fit_pars['init_memb_probs'] = None
return {'comps':comps, 'med_and_spans':med_and_spans, 'memb_probs':memb_probs}
def iter_end_log(self, best_split_ix, prev_result, new_result):
logging.info("Selected {} as best decomposition".format(
chr(ord('A') + best_split_ix)))
logging.info(
"Turned\n{}".format(prev_result['comps'][best_split_ix].get_pars()))
logging.info('with {} members'.format(
prev_result['memb_probs'].sum(axis=0)[best_split_ix]))
logging.info("into\n{}\n&\n{}".format(
new_result['comps'][best_split_ix].get_pars(),
new_result['comps'][best_split_ix + 1].get_pars(),
))
logging.info('with {} and {} members'.format(
new_result['memb_probs'].sum(axis=0)[best_split_ix],
new_result['memb_probs'].sum(axis=0)[best_split_ix + 1],
))
logging.info("for an overall membership breakdown\n{}".format(
new_result['memb_probs'].sum(axis=0)
))
def log_final_log(self, prev_result, prev_score):
logging.info('Final best fits:')
[logging.info(c.get_pars()) for c in prev_result['comps']]
logging.info('Final age med and span:')
if self.fit_pars['optimisation_method']=='emcee':
[logging.info(row[-1]) for row in prev_result['med_and_spans']]
logging.info('Membership distribution: {}'.format(
prev_result['memb_probs'].sum(axis=0)))
logging.info('Final membership:')
logging.info('\n{}'.format(np.round(prev_result['memb_probs'] * 100)))
logging.info('Final lnlikelihood: {}'.format(prev_score['lnlike']))
logging.info('Final lnposterior: {}'.format(prev_score['lnpost']))
logging.info('Final BIC: {}'.format(prev_score['bic']))
logging.info('#########################')
logging.info('### END #################')
logging.info('#########################')
def calc_score(self, comps, memb_probs):
"""
Calculate a global score of the fit, for comparison with future fits
with different component counts
Parameters
----------
comps : [ncomps] list of Component objects
memb_probs : [nstars, ncomps] float array of membership probabilities
Returns
-------
dict with entries 'bic', 'lnlike' and 'lnpost'
TODO: Establish relevance of bg_ln_ols
"""
lnlike = expectmax.get_overall_lnlikelihood(self.data_dict,
comps,
old_memb_probs=memb_probs,
# bg_ln_ols=bg_ln_ols,
)
lnpost = expectmax.get_overall_lnlikelihood(self.data_dict,
comps,
# bg_ln_ols=bg_ln_ols,
old_memb_probs=memb_probs,
inc_posterior=True)
bic = expectmax.calc_bic(self.data_dict, self.ncomps, lnlike,
memb_probs=memb_probs,
Component=self.Component)
return {'bic':bic, 'lnlike':lnlike, 'lnpost':lnpost}
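# For reference, the BIC minimised throughout has the usual form
# BIC = k * ln(n) - 2 * ln(L), with k free model parameters and n stars,
# so a lower value is better and an extra component must improve ln(L)
# enough to offset its additional parameters, e.g. (illustrative only):
#
#     bic = npars * np.log(nstars) - 2 * lnlike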
def run_fit(self):
"""
Perform a fit (as described in Paper I) to a set of prepared data.
Results are returned as two dictionaries
results = {'comps':best_fit, (list of components)
'med_and_spans':median and spans of model parameters,
'memb_probs': membership probability array (the standard one)}
scores = {'bic': the bic,
'lnlike': log likelihood of that run,
'lnpost': log posterior of that run}
"""
log_message('Beginning Chronostar run',
symbol='_', surround=True)
# ------------------------------------------------------------
# ----- EXECUTE RUN ----------------------------------------
# ------------------------------------------------------------
if self.fit_pars['store_burnin_chains']:
log_message(msg='Storing burnin chains', symbol='-')
# ------------------------------------------------------------
# ----- STAGE 1: ESTABLISHING INITIAL FIT -----------
# ------------------------------------------------------------
# Handle special case of very first run
# Either by fitting one component (default) or by using `init_comps`
# to initialise the EM fit.
# Check if not provided with init comps or membs
if (self.fit_pars['init_comps'] is None) and (self.fit_pars['init_memb_probs'] is None):
# NaiveFit doesn't know how to blindly initialise runs with ncomps > 1
assert self.ncomps == 1, 'If no initialisation set, can only accept ncomp==1'
# If no init conditions provided, assume all stars are members and begin
# the fit with 1 component
init_memb_probs = np.zeros((len(self.data_dict['means']),
self.ncomps + self.fit_pars[
'use_background']))
init_memb_probs[:, 0] = 1.
# Pass through fit_pars so the first EM run starts with every star
# assigned to the single component
self.fit_pars['init_memb_probs'] = init_memb_probs
# Otherwise, we must have been given an init_comps or an init_memb_probs
# to start things with
else:
log_message(msg='Initialising with init_comps or init_memb_probs with '
'%i components' % self.ncomps, symbol='*', surround=True)
log_message(msg='FITTING {} COMPONENT'.format(self.ncomps),
symbol='*', surround=True)
run_dir = self.rdir + '{}/'.format(self.ncomps)
prev_result = self.run_em_unless_loadable(run_dir)
prev_score = self.calc_score(prev_result['comps'], prev_result['memb_probs'])
self.ncomps += 1
# ------------------------------------------------------------
# ----- STAGE 2: EXPLORE EXTRA COMPONENT BY DECOMPOSITION --
# ------------------------------------------------------------
# Calculate global score of fit for comparison with future fits with different
# component counts
# Begin iterative loop, each time trialing the incorporation of a new component
while self.ncomps <= self.fit_pars['max_comp_count']:
log_message(msg='FITTING {} COMPONENT'.format(self.ncomps),
symbol='*', surround=True)
all_results = []
all_scores = []
# Iteratively try subdividing each previous component
# target_comp is the component we will split into two.
# This will make a total of ncomps components (the target comp split
# into 2, plus the remaining components from prev_result['comps'])
for i, target_comp in enumerate(prev_result['comps']):
div_label = chr(ord('A') + i)
run_dir = self.rdir + '{}/{}/'.format(self.ncomps, div_label)
log_message(msg='Subdividing stage {}'.format(div_label),
symbol='+', surround=True)
mkpath(run_dir)
self.fit_pars['init_comps'] = self.build_init_comps(
prev_result['comps'], split_comp_ix=i,
prev_med_and_spans=prev_result['med_and_spans'],
memb_probs = prev_result['memb_probs'])
result = self.run_em_unless_loadable(run_dir)
all_results.append(result)
score = self.calc_score(result['comps'], result['memb_probs'])
all_scores.append(score)
logging.info(
'Decomposition {} finished with \nBIC: {}\nlnlike: {}\n'
'lnpost: {}'.format(
div_label, all_scores[-1]['bic'],
all_scores[-1]['lnlike'], all_scores[-1]['lnpost'],
))
# identify the best performing decomposition
all_bics = [score['bic'] for score in all_scores]
best_split_ix = np.nanargmin(all_bics)
new_result = all_results[best_split_ix]
new_score = all_scores[best_split_ix]
self.iter_end_log(best_split_ix, prev_result=prev_result, new_result=new_result)
# Check if the fit has improved
self.log_score_comparison(new=new_score,
prev=prev_score)
if new_score['bic'] < prev_score['bic']:
prev_score = new_score
prev_result = new_result
self.ncomps += 1
log_message(msg="Commencing {} component fit on {}{}".format(
self.ncomps, self.ncomps - 1,
chr(ord('A') + best_split_ix)), symbol='+'
)
else:
# WRITING THE FINAL RESULTS INTO FILES
logging.info("... saving previous fit as best fit to data")
self.Component.store_raw_components(self.rdir + self.final_comps_file,
prev_result['comps'])
np.save(self.rdir + self.final_med_and_spans_file, prev_result['med_and_spans'])
np.save(self.rdir + self.final_memb_probs_file, prev_result['memb_probs'])
np.save(self.rdir + 'final_likelihood_post_and_bic',
prev_score)
# Save components in fits file
tabcomps = self.Component.convert_components_array_into_astropy_table(
prev_result['comps'])
if self.fit_pars['overwrite_fits']:
tabcomps.write(
os.path.join(self.rdir, 'final_comps_%d.fits' % len(prev_result['comps'])),
overwrite=self.fit_pars['overwrite_fits'])
else:
filename_comps_fits_random = os.path.join(
self.rdir, 'final_comps_%d_%s.fits' % (len(prev_result['comps']), str(uuid.uuid4().hex)))
tabcomps.write(filename_comps_fits_random,
overwrite=self.fit_pars['overwrite_fits'])
# Save membership fits file
try:
if self.fit_pars['overwrite_fits']:
tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(
self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'],
os.path.join(self.rdir, 'final_memberships_%d.fits' % len(prev_result['comps'])),
get_background_overlaps=True,
stellar_id_colname=self.fit_pars['stellar_id_colname'],
overwrite_fits=self.fit_pars['overwrite_fits'])
else:
filename_memb_probs_fits_random = os.path.join(
self.rdir, 'final_memberships_%d_%s.fits' % (len(prev_result['comps']), str(uuid.uuid4().hex)))
tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(
self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'],
filename_memb_probs_fits_random, get_background_overlaps=True,
stellar_id_colname=self.fit_pars['stellar_id_colname'],
overwrite_fits=self.fit_pars['overwrite_fits'])
except Exception:
logging.info("[WARNING] Couldn't write the membership fits file. "
"Check the stellar id column.")
self.log_final_log(prev_result, prev_score)
break
logging.info("Best fit:\n{}".format(
[group.get_pars() for group in prev_result['comps']]))
if self.ncomps >= self.fit_pars['max_comp_count']:
log_message(msg='REACHED MAX COMP LIMIT', symbol='+',
surround=True)
return prev_result, prev_score
| 43.391111 | 399 | 0.583086 | [
"MIT"
] | mikeireland/chronostar | chronostar/naivefit-bak.py | 29,289 | Python |
from conans import ConanFile
class libxbitset_conan(ConanFile):
name = "libxbitset"
version = "0.0.1"
license = "Apache License Version 2.0"
author = "Khalil Estell"
url = "https://github.com/SJSU-Dev2/libxbitset"
description = "Extension of std::bitset that includes multi-bit insertion and extraction and more"
topics = ("bit manipulation", "bits", "hardware", "registers")
exports_sources = "CMakeLists.txt", "include/*"
no_copy_source = True
def package(self):
self.copy("*.hpp")
def package_id(self):
self.info.header_only()
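# Typical local usage of this recipe (standard Conan v1 workflow; the
# user/channel below is illustrative):
#
#     conan create . demo/testing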
| 29.7 | 102 | 0.66835 | [
"Apache-2.0"
] | SJSU-Dev2/libxbitset | conanfile.py | 594 | Python |
"""The token kinds currently recognized."""
from shivyc.tokens import TokenKind
keyword_kinds = []
symbol_kinds = []
# Until function definition is ready, we define `main` as a hardcoded keyword
main = TokenKind("main", keyword_kinds)
bool_kw = TokenKind("_Bool", keyword_kinds)
char_kw = TokenKind("char", keyword_kinds)
short_kw = TokenKind("short", keyword_kinds)
int_kw = TokenKind("int", keyword_kinds)
long_kw = TokenKind("long", keyword_kinds)
signed_kw = TokenKind("signed", keyword_kinds)
unsigned_kw = TokenKind("unsigned", keyword_kinds)
void_kw = TokenKind("void", keyword_kinds)
return_kw = TokenKind("return", keyword_kinds)
if_kw = TokenKind("if", keyword_kinds)
else_kw = TokenKind("else", keyword_kinds)
while_kw = TokenKind("while", keyword_kinds)
for_kw = TokenKind("for", keyword_kinds)
break_kw = TokenKind("break", keyword_kinds)
continue_kw = TokenKind("continue", keyword_kinds)
auto_kw = TokenKind("auto", keyword_kinds)
static_kw = TokenKind("static", keyword_kinds)
extern_kw = TokenKind("extern", keyword_kinds)
struct_kw = TokenKind("struct", keyword_kinds)
const_kw = TokenKind("const", keyword_kinds)
plus = TokenKind("+", symbol_kinds)
minus = TokenKind("-", symbol_kinds)
star = TokenKind("*", symbol_kinds)
slash = TokenKind("/", symbol_kinds)
mod = TokenKind("%", symbol_kinds)
incr = TokenKind("++", symbol_kinds)
decr = TokenKind("--", symbol_kinds)
equals = TokenKind("=", symbol_kinds)
plusequals = TokenKind("+=", symbol_kinds)
minusequals = TokenKind("-=", symbol_kinds)
starequals = TokenKind("*=", symbol_kinds)
divequals = TokenKind("/=", symbol_kinds)
modequals = TokenKind("%=", symbol_kinds)
twoequals = TokenKind("==", symbol_kinds)
notequal = TokenKind("!=", symbol_kinds)
bool_and = TokenKind("&&", symbol_kinds)
bool_or = TokenKind("||", symbol_kinds)
bool_not = TokenKind("!", symbol_kinds)
lt = TokenKind("<", symbol_kinds)
gt = TokenKind(">", symbol_kinds)
ltoe = TokenKind("<=", symbol_kinds)
gtoe = TokenKind(">=", symbol_kinds)
amp = TokenKind("&", symbol_kinds)
pound = TokenKind("#", symbol_kinds)
dquote = TokenKind('"', symbol_kinds)
squote = TokenKind("'", symbol_kinds)
open_paren = TokenKind("(", symbol_kinds)
close_paren = TokenKind(")", symbol_kinds)
open_brack = TokenKind("{", symbol_kinds)
close_brack = TokenKind("}", symbol_kinds)
open_sq_brack = TokenKind("[", symbol_kinds)
close_sq_brack = TokenKind("]", symbol_kinds)
comma = TokenKind(",", symbol_kinds)
semicolon = TokenKind(";", symbol_kinds)
dot = TokenKind(".", symbol_kinds)
arrow = TokenKind("->", symbol_kinds)
identifier = TokenKind()
number = TokenKind()
string = TokenKind()
char_string = TokenKind()
include_file = TokenKind()
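# Illustrative note: a lexer should try symbol kinds longest-first so that
# e.g. '++' wins over '+'. A sketch, assuming TokenKind exposes its
# constructor string as `text_repr` (the real matching lives in the lexer):
#
#     for kind in sorted(symbol_kinds, key=lambda k: -len(k.text_repr)):
#         if code.startswith(kind.text_repr):
#             ...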
| 33.873418 | 77 | 0.7358 | [
"MIT"
] | TBladen/ShivyC | shivyc/token_kinds.py | 2,676 | Python |
# coding: utf-8
def get_dict_output_dir_to_parameters_ini_dump_filename():
import os
dir_ = '.'
output_dir_list = sorted([output_dir for output_dir in os.listdir(dir_) if output_dir.startswith('output')])
ret = {}
for output_dir in output_dir_list:
with open(os.path.join(output_dir, 'parameters_ini_filename')) as f:
parameters_ini_filename = list(f)[0].rstrip()
ret[output_dir] = parameters_ini_filename + '.dump'
return ret
dict_output_dir_to_parameters_ini_dump = get_dict_output_dir_to_parameters_ini_dump_filename()
import finess.util
import finess.params.util
import finess.dim2
import generate_iniparams
# q(:, :, i - 1):
# * i = 1: mass
# * i = 2: momentum-1
# * i = 3: momentum-2
# * i = 4: momentum-3
# * i = 5: energy
# * i = 6: B1
# * i = 7: B2
# * i = 8: B3
import finess.viz.dim2
def L1_error_list(output_dir_list):
global debug_B1_abs_error
global debug_B2_abs_error
global debug_B_perp, debug_B3, debug_u_perp, debug_u3
global debug_B_perp_rel_error, debug_B_perp_abs_error, debug_B_perp_exact
global debug_u_perp_rel_error, debug_u_perp_abs_error, debug_u_perp_exact
global debug_B3_rel_error, debug_B3_abs_error, debug_B3_exact
global debug_u3_rel_error, debug_u3_abs_error, debug_u3_exact
global debug_B3_rel_error_100, debug_u3_rel_error_100
global debug_tfinal
global debug_B_plane_perp
global debug_B_plane_perp_abs_error
import finess.viz.dim2
error_list = []
for output_dir in output_dir_list:
parameters_ini_dump_filename = dict_output_dir_to_parameters_ini_dump[output_dir]
import os.path
params = finess.params.util.read_params(os.path.join(output_dir, parameters_ini_dump_filename), generate_iniparams.parameter_list)
xlow = params['grid', 'xlow']
xhigh = params['grid', 'xhigh']
ylow = params['grid', 'ylow']
yhigh = params['grid', 'yhigh']
mx = params['grid', 'mx']
my = params['grid', 'my']
dx = (xhigh - xlow) / float(mx)
dy = (yhigh - ylow) / float(my)
nout = params['finess', 'nout']
tfinal, q, aux = finess.dim2.read_qa(params, nout)
debug_tfinal = tfinal
print "tfinal: ", tfinal
from numpy import sin, cos, sum, abs, pi, max
angle = params['initial', 'angle']
X, Y = finess.viz.dim2.meshgrid(params)
u3_exact = 0.1 * cos(2*pi * (X*cos(angle) + Y*sin(angle) + tfinal))
B3_exact = u3_exact
u_perp_exact = 0.1 * sin(2*pi * (X * cos(angle) + Y * sin(angle) + tfinal) )
B_perp_exact = u_perp_exact
rho_exact = 1.0
u1_exact = -u_perp_exact * sin(angle)
u2_exact = u_perp_exact * cos(angle)
B1_exact = 1.0 * cos(angle) - B_perp_exact * sin(angle)
B2_exact = 1.0 * sin(angle) + B_perp_exact * cos(angle)
rho = q[:, :, 1 - 1]
u1 = q[:, :, 2 - 1] / q[:, :, 1 - 1]
u2 = q[:, :, 3 - 1] / q[:, :, 1 - 1]
u3 = q[:, :, 4 - 1] / q[:, :, 1 - 1]
B1 = q[:, :, 6 - 1]
B2 = q[:, :, 7 - 1]
B3 = q[:, :, 8 - 1]
u_perp = -u1 * sin(angle) + u2 * cos(angle)
B_perp = -B1 * sin(angle) + B2 * cos(angle)
L1_error_u_perp = sum(abs(u_perp - u_perp_exact))
L1_u_perp_exact = sum(abs(u_perp_exact))
# print "u_perp error: ", L1_error_u_perp / L1_u_perp_exact
L1_error_u1 = sum(abs(u1 - u1_exact))
L1_u1_exact = sum(abs(u1_exact))
L1_error_u2 = sum(abs(u2 - u2_exact))
L1_u2_exact = sum(abs(u2_exact))
L1_error_u3 = sum(abs(u3 - u3_exact))
L1_u3_exact = sum(abs(u3_exact))
# print "u3 error: ", L1_error_u3 / L1_u3_exact
L1_error_B_perp = sum(abs(B_perp - B_perp_exact))
L1_B_perp_exact = sum(abs(B_perp_exact))
# print "B_perp error: ", L1_error_B_perp / L1_B_perp_exact
debug_B1_abs_error = abs(B1 - B1_exact)
debug_B2_abs_error = abs(B2 - B2_exact)
debug_B_perp_exact = B_perp_exact
debug_B_perp_abs_error = abs(B_perp - B_perp_exact)
debug_B_perp_rel_error = debug_B_perp_abs_error / abs(B_perp_exact)
debug_u_perp_exact = u_perp_exact
debug_u_perp_abs_error = abs(u_perp - u_perp_exact)
debug_u_perp_rel_error = debug_u_perp_abs_error / abs(u_perp_exact)
debug_B3_exact = B3_exact
debug_B3_abs_error = abs(B3 - B3_exact)
debug_B3_rel_error = debug_B3_abs_error / abs(B3_exact)
debug_B3_rel_error_100 = debug_B3_rel_error * 100
debug_u3_exact = u3_exact
debug_u3_abs_error = abs(u3 - u3_exact)
debug_u3_rel_error = debug_u3_abs_error / abs(u3_exact)
debug_u3_rel_error_100 = 100 * debug_u3_rel_error
debug_B3 = B3
debug_B_perp = B_perp
debug_B_plane_perp = ((B3 / 0.1)**2 + (B_perp / 0.1)**2) * 0.1
debug_B_plane_perp_abs_error = abs(debug_B_plane_perp - 0.1)
L1_error_B3 = sum(abs(B3 - B3_exact))
L1_B3_exact = sum(abs(B3_exact))
# print "B3 error: ", L1_error_B3 / L1_B3_exact
# delta = 0.25 * (L1_error_u_perp / L1_u_perp_exact + L1_error_u3 / L1_u3_exact + L1_error_B_perp / L1_B_perp_exact + L1_error_B3 / L1_B3_exact)
# delta = 0.5 * (L1_error_B_perp / L1_B_perp_exact + L1_error_B3 / L1_B3_exact)
# delta = 0.5 * (L1_error_u_perp / L1_u_perp_exact + L1_error_u3 / L1_u3_exact)
#delta = max(abs(u3 - u3_exact))
#delta = max(abs(u1 - u1_exact))
#delta = max(abs(u2 - u2_exact))
#delta = max(abs(u3 - u3_exact))
#delta = max(abs(B1 - B1_exact))
#delta = max(abs(B2 - B2_exact))
#delta = max(abs(B3 - B3_exact))
delta = max(abs(rho - rho_exact))
#delta = L1_error_u1 / L1_u1_exact
error_list.append(delta)
return error_list
def log2_adjacent_ratio(error_list):
order_list = []
from numpy import log2
for i in range(len(error_list) - 1):
order_list.append(log2(error_list[i] / error_list[i+1]))
return order_list
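# For a scheme of order p, halving the grid spacing should shrink the error
# by a factor of roughly 2**p, so each log2 ratio above estimates p, e.g.
#
#     log2_adjacent_ratio([1.0e-2, 2.5e-3, 6.25e-4])  # -> [2.0, 2.0], 2nd order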
def L1_A_error_list(output_dir_list):
from numpy import max
global debug_A_abs_error
import finess.viz.dim2
error_list = []
for output_dir in output_dir_list:
parameters_ini_dump_filename = dict_output_dir_to_parameters_ini_dump[output_dir]
import os.path
params = finess.params.util.read_params(os.path.join(output_dir, parameters_ini_dump_filename), generate_iniparams.parameter_list)
xlow = params['grid', 'xlow']
xhigh = params['grid', 'xhigh']
ylow = params['grid', 'ylow']
yhigh = params['grid', 'yhigh']
mx = params['grid', 'mx']
my = params['grid', 'my']
dx = (xhigh - xlow) / float(mx)
dy = (yhigh - ylow) / float(my)
nout = params['finess', 'nout']
tfinal, q, aux = finess.dim2.read_qa(params, nout)
A = aux[:, :, 1 - 1]
from numpy import allclose, sin, cos, sum, abs, pi
angle = params['initial', 'angle']
X, Y = finess.viz.dim2.meshgrid(params)
A_exact = -X * sin(angle) + Y * cos(angle) + 0.1 / (2 * pi) * cos(2*pi * (X*cos(angle) + Y*sin(angle) + tfinal))
debug_A_abs_error = abs(A - A_exact)
L1_A_exact = sum(abs(A_exact))
L1_A_error = sum(abs(A - A_exact))
#delta = L1_A_error / L1_A_exact
delta = max(abs(A - A_exact))
error_list.append(delta)
return error_list
#output_dir_list = ['output_1deg_%(i)02d' % {'i': i} for i in range(6)]
#error_list = L1_error_list(output_dir_list)
#order_list = log2_adjacent_ratio(error_list)
#print order_list
#
#
#output_dir_list = ['output_30deg_%(i)02d' % {'i': i} for i in range(6)]
#error_list = L1_error_list(output_dir_list)
#order_list = log2_adjacent_ratio(error_list)
#print order_list
#
#
## In[140]:
output_dir_list = ['output_30deg_%(i)02d' % {'i': i} for i in [0, 1, 2, 3, 4]]
error_list = L1_error_list(output_dir_list)
order_list = log2_adjacent_ratio(error_list)
print 'rho'
print order_list
print error_list
A_error_list = L1_A_error_list(output_dir_list)
A_order_list = log2_adjacent_ratio(A_error_list)
print 'A:'
print A_order_list
print A_error_list
| 34.271255 | 151 | 0.623036 | [
"BSD-3-Clause"
] | dcseal/finess | apps/2d/mhd/rotated_alfven/convergence/convergence_study.py | 8,465 | Python |
import logging
import torch.nn as nn
from . import arch as archs
logger = logging.getLogger()
def build_model(cfg_model):
if cfg_model.get('pretrained', False):
info = "=> building pre-trained model {}".format(cfg_model['arch'])
model = archs.__dict__[cfg_model.arch](pretrained=True)
in_features = model.fc.in_features
model.fc = nn.Linear(in_features, cfg_model.num_classes)
else:
info = "=> building model {}".format(cfg_model.arch)
model = archs.__dict__[cfg_model.arch](num_classes=cfg_model.num_classes)
logger.info(info)
return model
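# Example usage (hypothetical config values; vedacls configs behave like
# attribute-accessible dicts, e.g. addict.Dict):
#
#     from addict import Dict
#     model = build_model(Dict(arch='resnet18', num_classes=10, pretrained=True))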
| 29.047619 | 81 | 0.685246 | [
"Apache-2.0"
] | ChaseMonsterAway/vedacls | vedacls/models/builder.py | 610 | Python |
from __future__ import print_function
import click
from openelex.base.cache import StateCache
from .utils import default_state_options, print_files
@click.command(name="cache.files", help="List files in state cache diretory")
@default_state_options
def files(state, datefilter=''):
"""List files in state cache diretory
State is required. Optionally provide a date
filter to limit results.
NOTE: Cache must be populated in order to load data.
"""
cache = StateCache(state)
files = cache.list_dir(datefilter)
if files:
print_files(files)
else:
msg = "No files found"
if datefilter:
msg += " using date filter: %s" % datefilter
print(msg)
@click.command(name='cache.clear', help="Delete files in state cache directory")
@default_state_options
def clear(state, datefilter=''):
"""Delete files in state cache diretory
State is required. Optionally provide a date
filter to limit results.
"""
cache = StateCache(state)
cache.clear(datefilter)
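# Example invocations (assuming the standard `openelex` CLI entry point;
# the date filter may be a year, year-month, or full date):
#
#     openelex cache.files --state md --datefilter 2012
#     openelex cache.clear --state md --datefilter 20121106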
def cache_discrepancy(self):
# TODO: placeholder; discrepancy checking between cached files and
# upstream sources is not yet implemented
pass
| 25.372093 | 79 | 0.695692 | [
"MIT"
] | ColCarroll/openelections-core | openelex/tasks/cache.py | 1,091 | Python |
from tzscan.tzscan_reward_calculator import test_tzscan_reward_calculator
if __name__ == '__main__':
test_tzscan_reward_calculator()
| 34.25 | 77 | 0.846715 | [
"MIT"
] | Twente-Mining/tezos-reward-distributor | src/Test.py | 137 | Python |
import os
import sys
import numpy as np
# os.system('bash ./set_env.sh')
PARALLEL = 1 # assuming a quad-core machine
ATTRIBUTE = "organic_figure"
os.environ['FONDUERHOME'] = '/home/xiuyuan/private/839/fonduer_new/839_fonduer/'
os.environ['FONDUERDBNAME'] = ATTRIBUTE
os.environ['SNORKELDB'] = 'postgres://postgres:112233@localhost:5432/' + os.environ['FONDUERDBNAME']
from fonduer import SnorkelSession
session = SnorkelSession()
from fonduer import candidate_subclass
Org_Fig = candidate_subclass('Org_Fig', ['product','figure'])
from fonduer import HTMLPreprocessor, OmniParser
docs_path = os.environ['FONDUERHOME'] + 'tutorials/organic_synthesis_figures/data/html/'
pdf_path = os.environ['FONDUERHOME'] + 'tutorials/organic_synthesis_figures/data/pdf/'
max_docs = float(10)
doc_preprocessor = HTMLPreprocessor(docs_path, max_docs=max_docs)
corpus_parser = OmniParser(structural=True, lingual=True, visual=True, pdf_path=pdf_path,
# flatten=['sup', 'sub', 'small'],
# ignore=['italic', 'bold'],
blacklist=['style', 'script', 'meta', 'noscript'])
corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
from fonduer import Document
# divide train/dev/test
docs = session.query(Document).order_by(Document.name).all()
ld = len(docs)
train_docs = set()
dev_docs = set()
test_docs = set()
# Fractional train/dev boundaries; as set here, (1.0, 0.9) routes every
# document into the training set
splits = (1.0, 0.9)
data = [(doc.name, doc) for doc in docs]
data.sort(key=lambda x: x[0])
for i, (doc_name, doc) in enumerate(data):
if i < splits[0] * ld:
train_docs.add(doc)
elif i < splits[1] * ld:
dev_docs.add(doc)
else:
test_docs.add(doc)
from pprint import pprint
pprint([x.name for x in train_docs])
from fonduer.snorkel.matchers import RegexMatchSpan, RegexMatchSplitEach,\
DictionaryMatch, LambdaFunctionMatcher, Intersect, Union
prefix_rgx = '(\(?((mono|bi|di|tri|tetra|hex|hept|oct|iso|a?cycl|poly).+)?(meth|carb|benz|fluoro|chloro|bromo|iodo|hydro(xy)?|amino|alk).+)'
suffix_rgx = '(.+(ane|yl|adiene|atriene|yne|anol|anediol|anetriol|anone|acid|amine|xide|dine|(or?mone)|thiol)\)?)'
dash_rgx = '((\w+\-|\(?)([a-z|\d]\'?\-)\w*)'
comma_dash_rgx = '((\w+\-|\(?)([a-z|\d]\'?,[a-z|\d]\'?\-)\w*)'
inorganic_rgx = '(([A-Z][a-z]?\d*\+?){2,})'
org_rgx = '|'.join([prefix_rgx, suffix_rgx, dash_rgx, comma_dash_rgx, inorganic_rgx])
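# Quick illustrative sanity checks of the organic-name regex (example
# strings, not taken from the corpus):
#
#     re.search(org_rgx, 'ethanol')            # matched by the suffix rule
#     re.search(org_rgx, '2,3-dimethylbutane') # matched by the comma-dash rule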
rgx_matcher = RegexMatchSplitEach(rgx = org_rgx,
longest_match_only=False, ignore_case=False)
blacklist = ['CAS', 'PDF', 'RSC', 'SAR', 'TEM']
prod_blacklist_lambda_matcher = LambdaFunctionMatcher(func=lambda x: x.text not in blacklist, ignore_case=False)
blacklist_rgx = ['methods?.?']
prod_blacklist_rgx_lambda_matcher = LambdaFunctionMatcher(
func=lambda x: all([re.match(r, x.text) is None for r in blacklist_rgx]), ignore_case=False)
prod_matcher = Intersect(rgx_matcher, prod_blacklist_lambda_matcher)
from fonduer import CandidateExtractor
from fonduer.lf_helpers import *
import re
def candidate_filter(c):
(organic, figure) = c
if same_file(organic, figure):
if mentionsFig(organic, figure) or mentionsOrg(figure, organic):
return True
return False
from tutorials.organic_synthesis_figures.organic_spaces import OmniNgramsProd
prod_ngrams = OmniNgramsProd(parts_by_doc=None, n_max=3)
from fonduer.matchers import LambdaFunctionFigureMatcher
import time
def white_black_list_matcher(fig):
# print("enter filter 1")
# enter_time = time.time()
white_list = ['synthesis', 'plausible']
black_list = ['spectra', 'x-ray', 'copyright', 'structur', 'application']
fig_desc = fig.figure.description.lower()
in_white = in_black = False
if any(fig_desc.find(v) >= 0 for v in white_list): in_white = True
if any(fig_desc.find(v) >= 0 for v in black_list): in_black = True
if in_black and (not in_white):
# print('Filtered by f1!')
return False
# # print("{} has passed filter 1 in {} seconds!".format(fig.figure.name, time.time()-enter_time))
# elif in_black:
# desc_wordlist = fig.figure.description.lower().split(' ')
# if any(re.search(org_rgx, w) for w in desc_wordlist): return True
# if not fig.figure.text == '':
# orc_wordlist = fig.figure.text.lower().split('\n')
# orc_wordlist = [w for w in orc_wordlist if not w == '']
# if any(re.search(org_rgx, w) for w in orc_wordlist): return True
#
# print('Filtered by f2! Removed!')
# print(fig.figure.name + " " + fig.figure.description)
# return False
return True
def contain_organic_matcher(fig):
# print("{} has failed filter 1 in {} seconds!".format(fig.figure.name, time.time() - enter_time))
# filter 2
desc_wordlist = fig.figure.description.lower().split(' ')
if any(re.search(org_rgx, w) for w in desc_wordlist): return True
if not fig.figure.text == '':
orc_wordlist = fig.figure.text.lower().split('\n')
orc_wordlist = [w for w in orc_wordlist if not w == '']
if any(re.search(org_rgx, w) for w in orc_wordlist): return True
# print('Filtered by f2! Removed!')
# print(fig.figure.name + " " + fig.figure.description)
return False
fig_matcher1 = LambdaFunctionFigureMatcher(func=white_black_list_matcher)
fig_matcher2 = LambdaFunctionFigureMatcher(func=contain_organic_matcher)
fig_matcher = Union(fig_matcher1, fig_matcher2)
# fig_matcher = LambdaFunctionFigureMatcher(func=white_black_list_matcher)
from fonduer.candidates import OmniDetailedFigures
figs = OmniDetailedFigures()
# extract candidate
candidate_extractor = CandidateExtractor(Org_Fig,
[prod_ngrams, figs],
[prod_matcher, fig_matcher],
candidate_filter=candidate_filter)
candidate_extractor.apply(train_docs, split=0, parallelism=PARALLEL)
train_cands = session.query(Org_Fig).filter(Org_Fig.split == 0).all()
print("Number of candidates:", len(train_cands))
# extract feature
from fonduer import BatchFeatureAnnotator
from fonduer.features.features import get_organic_image_feats
featurizer = BatchFeatureAnnotator(Org_Fig, f=get_organic_image_feats)
F_train = featurizer.apply(split=0, replace_key_set=True, parallelism=PARALLEL)
F_train = featurizer.load_matrix(split=0)
# load gold label
from tutorials.organic_synthesis_figures.organic_utils import load_organic_labels
gold_file = os.environ['FONDUERHOME'] + 'tutorials/organic_synthesis_figures/data/organic_gold.csv'
load_organic_labels(session, Org_Fig, gold_file, ATTRIBUTE ,annotator_name='gold')
# labeling function
from fonduer.lf_helpers import *
from fuzzywuzzy import fuzz
import re
def LF_fig_name_match(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
product, img = args
if img.name == '':
return -1
else:
return 0
def LF_text_desc_match(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
product, img = args
if fuzz.partial_ratio(product.text, img.description) >= 70:
return 1
elif fuzz.partial_ratio(product.text, img.description) <= 40:
return -1
else:
return 0
def LF_ocr_text_match(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
product, img = args
ocr_wordlist = img.text.lower().split('\n')
ocr_wordlist = [w for w in ocr_wordlist if not w == '']
for w in ocr_wordlist:
if fuzz.partial_ratio(product.text, w) >= 90:
return 1
return 0
def LF_text_lenth_match(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
product, img = args
return -1 if len(product.text) < 5 else 0
def LF_match_keywords(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
organic, figure, = args
keywords = ['synthesis', 'reaction', 'produce', 'yield', 'formation', 'approach']
return 1 if both_contain_keywords(organic, figure, keywords) else 0
def LF_match_page(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
organic, figure, = args
return 1 if is_same_org_fig_page(organic, figure) else 0
def LF_page_not_match(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
organic, figure, = args
if abs(max(organic.sentence.page) - figure.page) > 1 or abs(min(organic.sentence.page) - figure.page) > 1:
return -1
else:
return 0
def LF_pos_near(c):
args = c.get_contexts()
if len(args) != 2:
raise NotImplementedError("Only handles binary candidates currently")
organic, figure, = args
return 1 if org_pos_near_fig(organic, figure) else 0
def LF_organic_compound(c):
args = c.get_contexts()
organic = args[0]
result = re.search(org_rgx, organic.text)
return 1 if result else 0
org_fig_lfs = [
LF_fig_name_match,
LF_text_desc_match,
LF_ocr_text_match,
LF_text_lenth_match,
LF_match_keywords,
LF_match_page,
LF_page_not_match,
LF_pos_near,
LF_organic_compound
]
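# Each labeling function votes +1 (true), -1 (false) or 0 (abstain) on a
# candidate; the generative model below combines these noisy votes into
# per-candidate marginals used as probabilistic training labels. E.g.:
#
#     votes = [lf(train_cands[0]) for lf in org_fig_lfs]  # each in {-1, 0, +1}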
from fonduer import BatchLabelAnnotator
labeler = BatchLabelAnnotator(Org_Fig, lfs = org_fig_lfs)
L_train = labeler.apply(split=0, clear=True, parallelism=PARALLEL)
print(L_train.shape)
L_train.get_candidate(session, 0)
from fonduer import GenerativeModel
gen_model = GenerativeModel()
gen_model.train(L_train, epochs=500, decay=0.9, step_size=0.001/L_train.shape[0], reg_param=0)
train_marginals = gen_model.marginals(L_train)
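# The marginals are per-candidate probabilities of being a true mention,
# used as probabilistic training labels for the discriminative model below.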
print(gen_model.weights.lf_accuracy)
from fonduer import SparseLogisticRegression
from fonduer import BatchFeatureAnnotator
from fonduer.features.features import get_organic_image_feats
### load feature
# featurizer = BatchFeatureAnnotator(Org_Fig, f=get_organic_image_feats)
# F_train = featurizer.load_matrix(split=0)
disc_model = SparseLogisticRegression()
disc_model.train(F_train, train_marginals, n_epochs=200, lr=0.001)
# Currently we only predict on the training set
test_candidates = [F_train.get_candidate(session, i) for i in range(F_train.shape[0])]
test_score = disc_model.predictions(F_train)
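# Keep only the candidates that the discriminative model scores as positive.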
true_pred = [test_candidates[_] for _ in np.nditer(np.where(test_score > 0))] | 33.625397 | 140 | 0.703078 | [
"Apache-2.0"
] | leewaymay/839_fonduer | tutorials/organic_synthesis_figures/parse_organic_figures_xiuyuan.py | 10,592 | Python |
"""Support for local control of entities by emulating a Philips Hue bridge."""
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant import util
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.deprecation import get_deprecated
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from homeassistant.components.http import real_ip
from .hue_api import (
HueUsernameView,
HueAllLightsStateView,
HueOneLightStateView,
HueOneLightChangeView,
HueGroupView,
HueAllGroupsStateView,
)
from .upnp import DescriptionXmlView, UPNPResponderThread
DOMAIN = "emulated_hue"
_LOGGER = logging.getLogger(__name__)
NUMBERS_FILE = "emulated_hue_ids.json"
CONF_ADVERTISE_IP = "advertise_ip"
CONF_ADVERTISE_PORT = "advertise_port"
CONF_ENTITIES = "entities"
CONF_ENTITY_HIDDEN = "hidden"
CONF_ENTITY_NAME = "name"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_HOST_IP = "host_ip"
CONF_LISTEN_PORT = "listen_port"
CONF_OFF_MAPS_TO_ON_DOMAINS = "off_maps_to_on_domains"
CONF_TYPE = "type"
CONF_UPNP_BIND_MULTICAST = "upnp_bind_multicast"
TYPE_ALEXA = "alexa"
TYPE_GOOGLE = "google_home"
DEFAULT_LISTEN_PORT = 8300
DEFAULT_UPNP_BIND_MULTICAST = True
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ["script", "scene"]
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
"switch",
"light",
"group",
"input_boolean",
"media_player",
"fan",
]
DEFAULT_TYPE = TYPE_GOOGLE
CONFIG_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ENTITY_NAME): cv.string,
vol.Optional(CONF_ENTITY_HIDDEN): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOST_IP): cv.string,
vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
vol.Optional(CONF_ADVERTISE_IP): cv.string,
vol.Optional(CONF_ADVERTISE_PORT): cv.port,
vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean,
vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list,
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): vol.Any(
TYPE_ALEXA, TYPE_GOOGLE
),
vol.Optional(CONF_ENTITIES): vol.Schema(
{cv.entity_id: CONFIG_ENTITY_SCHEMA}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
ATTR_EMULATED_HUE = "emulated_hue"
ATTR_EMULATED_HUE_NAME = "emulated_hue_name"
ATTR_EMULATED_HUE_HIDDEN = "emulated_hue_hidden"
async def async_setup(hass, yaml_config):
"""Activate the emulated_hue component."""
config = Config(hass, yaml_config.get(DOMAIN, {}))
app = web.Application()
app["hass"] = hass
real_ip.setup_real_ip(app, False, [])
# We misunderstood the startup signal. You're not allowed to change
# anything during startup. Temp workaround.
# pylint: disable=protected-access
app._on_startup.freeze()
await app.startup()
runner = None
site = None
DescriptionXmlView(config).register(app, app.router)
HueUsernameView().register(app, app.router)
HueAllLightsStateView(config).register(app, app.router)
HueOneLightStateView(config).register(app, app.router)
HueOneLightChangeView(config).register(app, app.router)
HueAllGroupsStateView(config).register(app, app.router)
HueGroupView(config).register(app, app.router)
upnp_listener = UPNPResponderThread(
config.host_ip_addr,
config.listen_port,
config.upnp_bind_multicast,
config.advertise_ip,
config.advertise_port,
)
async def stop_emulated_hue_bridge(event):
"""Stop the emulated hue bridge."""
upnp_listener.stop()
if site:
await site.stop()
if runner:
await runner.cleanup()
async def start_emulated_hue_bridge(event):
"""Start the emulated hue bridge."""
upnp_listener.start()
nonlocal site
nonlocal runner
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, config.host_ip_addr, config.listen_port)
try:
await site.start()
except OSError as error:
_LOGGER.error(
"Failed to create HTTP server at port %d: %s", config.listen_port, error
)
else:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge
)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
return True
class Config:
"""Hold configuration variables for the emulated hue bridge."""
def __init__(self, hass, conf):
"""Initialize the instance."""
self.hass = hass
self.type = conf.get(CONF_TYPE)
self.numbers = None
self.cached_states = {}
if self.type == TYPE_ALEXA:
_LOGGER.warning(
"Emulated Hue running in legacy mode because type has been "
"specified. More info at https://goo.gl/M6tgz8"
)
# Get the IP address that will be passed to the Echo during discovery
self.host_ip_addr = conf.get(CONF_HOST_IP)
if self.host_ip_addr is None:
self.host_ip_addr = util.get_local_ip()
_LOGGER.info(
"Listen IP address not specified, auto-detected address is %s",
self.host_ip_addr,
)
# Get the port that the Hue bridge will listen on
self.listen_port = conf.get(CONF_LISTEN_PORT)
if not isinstance(self.listen_port, int):
self.listen_port = DEFAULT_LISTEN_PORT
_LOGGER.info(
"Listen port not specified, defaulting to %s", self.listen_port
)
# Get whether or not UPNP binds to multicast address (239.255.255.250)
# or to the unicast address (host_ip_addr)
self.upnp_bind_multicast = conf.get(
CONF_UPNP_BIND_MULTICAST, DEFAULT_UPNP_BIND_MULTICAST
)
# Get domains that cause both "on" and "off" commands to map to "on"
# This is primarily useful for things like scenes or scripts, which
# don't really have a concept of being off
self.off_maps_to_on_domains = conf.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
if not isinstance(self.off_maps_to_on_domains, list):
self.off_maps_to_on_domains = DEFAULT_OFF_MAPS_TO_ON_DOMAINS
# Get whether or not entities should be exposed by default, or if only
# explicitly marked ones will be exposed
self.expose_by_default = conf.get(
CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT
)
# Get domains that are exposed by default when expose_by_default is
# True
self.exposed_domains = conf.get(CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
# Calculated effective advertised IP and port for network isolation
self.advertise_ip = conf.get(CONF_ADVERTISE_IP) or self.host_ip_addr
self.advertise_port = conf.get(CONF_ADVERTISE_PORT) or self.listen_port
self.entities = conf.get(CONF_ENTITIES, {})
def entity_id_to_number(self, entity_id):
"""Get a unique number for the entity id."""
if self.type == TYPE_ALEXA:
return entity_id
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
for number, ent_id in self.numbers.items():
if entity_id == ent_id:
return number
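        # Entity not seen before: allocate the next free number and persist the mapping.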
number = "1"
if self.numbers:
number = str(max(int(k) for k in self.numbers) + 1)
self.numbers[number] = entity_id
save_json(self.hass.config.path(NUMBERS_FILE), self.numbers)
return number
def number_to_entity_id(self, number):
"""Convert unique number to entity id."""
if self.type == TYPE_ALEXA:
return number
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
assert isinstance(number, str)
return self.numbers.get(number)
def get_entity_name(self, entity):
"""Get the name of an entity."""
if (
entity.entity_id in self.entities
and CONF_ENTITY_NAME in self.entities[entity.entity_id]
):
return self.entities[entity.entity_id][CONF_ENTITY_NAME]
return entity.attributes.get(ATTR_EMULATED_HUE_NAME, entity.name)
def is_entity_exposed(self, entity):
"""Determine if an entity should be exposed on the emulated bridge.
Async friendly.
"""
if entity.attributes.get("view") is not None:
# Ignore entities that are views
return False
domain = entity.domain.lower()
explicit_expose = entity.attributes.get(ATTR_EMULATED_HUE, None)
explicit_hidden = entity.attributes.get(ATTR_EMULATED_HUE_HIDDEN, None)
if (
entity.entity_id in self.entities
and CONF_ENTITY_HIDDEN in self.entities[entity.entity_id]
):
explicit_hidden = self.entities[entity.entity_id][CONF_ENTITY_HIDDEN]
if explicit_expose is True or explicit_hidden is False:
expose = True
elif explicit_expose is False or explicit_hidden is True:
expose = False
else:
expose = None
get_deprecated(
entity.attributes, ATTR_EMULATED_HUE_HIDDEN, ATTR_EMULATED_HUE, None
)
domain_exposed_by_default = (
self.expose_by_default and domain in self.exposed_domains
)
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = domain_exposed_by_default and expose is not False
return is_default_exposed or expose
def _load_json(filename):
"""Wrapper, because we actually want to handle invalid json."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
| 33.432177 | 88 | 0.663144 | [
"Apache-2.0"
] | 0x00-0xFF/home-assistant | homeassistant/components/emulated_hue/__init__.py | 10,598 | Python |
# coding: utf-8
"""
ESMInterfaceTypeData.py
The Clear BSD License
Copyright (c) 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class ESMInterfaceTypeData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ESMInterfaceTypeData - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'io_interface_type': 'str', # (required parameter)
'port_list': 'PortList'
}
self.attribute_map = {
'io_interface_type': 'ioInterfaceType', # (required parameter)
'port_list': 'portList'
}
self._io_interface_type = None
self._port_list = None
@property
def io_interface_type(self):
"""
Gets the io_interface_type of this ESMInterfaceTypeData.
This enumeration defines the different I/O interface types that may be reported as part of the configuration information associated with a controller.
:return: The io_interface_type of this ESMInterfaceTypeData.
:rtype: str
:required/optional: required
"""
return self._io_interface_type
@io_interface_type.setter
def io_interface_type(self, io_interface_type):
"""
Sets the io_interface_type of this ESMInterfaceTypeData.
This enumeration defines the different I/O interface types that may be reported as part of the configuration information associated with a controller.
:param io_interface_type: The io_interface_type of this ESMInterfaceTypeData.
:type: str
"""
allowed_values = ["notImplemented", "scsi", "fc", "sata", "sas", "iscsi", "ib", "fcoe", "nvmeof", "__UNDEFINED"]
if io_interface_type not in allowed_values:
raise ValueError(
"Invalid value for `io_interface_type`, must be one of {0}"
.format(allowed_values)
)
self._io_interface_type = io_interface_type
@property
def port_list(self):
"""
Gets the port_list of this ESMInterfaceTypeData.
A list of detailed information for each port.
:return: The port_list of this ESMInterfaceTypeData.
:rtype: PortList
:required/optional: optional
"""
return self._port_list
@port_list.setter
def port_list(self, port_list):
"""
Sets the port_list of this ESMInterfaceTypeData.
A list of detailed information for each port.
:param port_list: The port_list of this ESMInterfaceTypeData.
:type: PortList
"""
self._port_list = port_list
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ESMInterfaceTypeData):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 38.506329 | 844 | 0.640368 | [
"BSD-3-Clause-Clear"
] | NetApp/santricity-webapi-pythonsdk | netapp/santricity/models/symbol/esm_interface_type_data.py | 6,086 | Python |
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing lock performance"""
import os
import sys
import time
import optparse
import threading
import resource
from ganeti import locking
def ParseOptions():
"""Parses the command line options.
In case of command line errors, it will show the usage and exit the
program.
@return: the options in a tuple
"""
parser = optparse.OptionParser()
parser.add_option("-t", dest="thread_count", default=1, type="int",
help="Number of threads", metavar="NUM")
parser.add_option("-d", dest="duration", default=5, type="float",
help="Duration", metavar="SECS")
(opts, args) = parser.parse_args()
if opts.thread_count < 1:
parser.error("Number of threads must be at least 1")
return (opts, args)
class State(object):
def __init__(self, thread_count):
"""Initializes this class.
"""
self.verify = [0 for _ in range(thread_count)]
self.counts = [0 for _ in range(thread_count)]
self.total_count = 0
def _Counter(lock, state, me):
"""Thread function for acquiring locks.
"""
counts = state.counts
verify = state.verify
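  # Each thread marks its slot in `verify` while holding the lock; if the sum
  # ever differs from 1, two threads were inside the critical section at once.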
while True:
lock.acquire()
try:
verify[me] = 1
counts[me] += 1
state.total_count += 1
if state.total_count % 1000 == 0:
sys.stdout.write(" %8d\r" % state.total_count)
sys.stdout.flush()
if sum(verify) != 1:
print("Inconsistent state!")
os._exit(1) # pylint: disable=W0212
verify[me] = 0
finally:
lock.release()
def main():
(opts, _) = ParseOptions()
lock = locking.SharedLock("TestLock")
state = State(opts.thread_count)
lock.acquire(shared=0)
try:
for i in range(opts.thread_count):
t = threading.Thread(target=_Counter, args=(lock, state, i))
      t.daemon = True  # setDaemon() is deprecated
t.start()
    start = time.process_time()  # time.clock() was removed in Python 3.8
finally:
lock.release()
while True:
    if (time.process_time() - start) > opts.duration:
break
time.sleep(0.1)
# Make sure we get a consistent view
lock.acquire(shared=0)
  lock_cputime = time.process_time() - start
res = resource.getrusage(resource.RUSAGE_SELF)
print("Total number of acquisitions: %s" % state.total_count)
print("Per-thread acquisitions:")
for (i, count) in enumerate(state.counts):
print(" Thread %s: %d (%0.1f%%)" %
(i, count, (100.0 * count / state.total_count)))
print("Benchmark CPU time: %0.3fs" % lock_cputime)
print("Average time per lock acquisition: %0.5fms" %
(1000.0 * lock_cputime / state.total_count))
print("Process:")
print(" User time: %0.3fs" % res.ru_utime)
print(" System time: %0.3fs" % res.ru_stime)
print(" Total time: %0.3fs" % (res.ru_utime + res.ru_stime))
# Exit directly without attempting to clean up threads
os._exit(0) # pylint: disable=W0212
if __name__ == "__main__":
main()
| 27.180645 | 76 | 0.68431 | [
"BSD-2-Clause"
] | RegioHelden/ganeti | test/py/lockperf.py | 4,213 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ecl.compute import compute_service
from ecl.compute.v2 import metadata
from ecl import resource2
class Image(resource2.Resource, metadata.MetadataMixin):
resource_key = 'image'
resources_key = 'images'
base_path = '/images'
service = compute_service.ComputeService()
# capabilities
allow_get = True
allow_delete = True
allow_list = True
_query_mapping = resource2.QueryParameters("server", "name",
"status", "type",
min_disk="minDisk",
min_ram="minRam",
changes_since="changes-since")
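    # Maps SDK keyword arguments to the query parameter names the API
    # expects (e.g. min_disk -> minDisk).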
# Properties
#: Links pertaining to this image. This is a list of dictionaries,
#: each including keys ``href`` and ``rel``, and optionally ``type``.
links = resource2.Body('links')
#: The name of this image.
name = resource2.Body('name')
#: Timestamp when the image was created.
created_at = resource2.Body('created')
#: Metadata pertaining to this image. *Type: dict*
metadata = resource2.Body('metadata', type=dict)
    #: The minimum disk size. *Type: int*
min_disk = resource2.Body('minDisk', type=int)
#: The minimum RAM size. *Type: int*
min_ram = resource2.Body('minRam', type=int)
#: If this image is still building, its progress is represented here.
    #: Once an image is created, progress will be 100. *Type: int*
progress = resource2.Body('progress', type=int)
#: The status of this image.
status = resource2.Body('status')
#: Timestamp when the image was updated.
updated_at = resource2.Body('updated')
#: Size of the image in bytes. *Type: int*
size = resource2.Body('OS-EXT-IMG-SIZE:size', type=int)
class ImageDetail(Image):
base_path = '/images/detail'
allow_get = False
allow_delete = False
allow_list = True
| 37.818182 | 77 | 0.647837 | [
"Apache-2.0"
] | JiyeYu/eclsdk | ecl/compute/v2/image.py | 2,496 | Python |
from os import environ
from scrapy_heroku.poller import Psycopg2QueuePoller
from scrapy_heroku.scheduler import Psycopg2SpiderScheduler
from scrapyd.eggstorage import FilesystemEggStorage
from scrapyd.environ import Environment
from scrapyd.interfaces import (IEggStorage, IEnvironment, IPoller,
ISpiderScheduler)
from scrapyd.launcher import Launcher
from scrapyd.website import Root
from twisted.application.internet import TCPServer, TimerService
from twisted.application.service import Application
from twisted.python import log
from twisted.web import server
def application(config):
app = Application("Scrapyd")
http_port = int(environ.get('PORT', config.getint('http_port', 6800)))
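    # Heroku supplies the Postgres connection string via DATABASE_URL; expose
    # it through scrapyd's config so the Postgres-backed queue can reach it.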
config.cp.set('scrapyd', 'database_url', environ.get('DATABASE_URL'))
poller = Psycopg2QueuePoller(config)
eggstorage = FilesystemEggStorage(config)
scheduler = Psycopg2SpiderScheduler(config)
environment = Environment(config)
app.setComponent(IPoller, poller)
app.setComponent(IEggStorage, eggstorage)
app.setComponent(ISpiderScheduler, scheduler)
app.setComponent(IEnvironment, environment)
launcher = Launcher(config, app)
timer = TimerService(5, poller.poll)
webservice = TCPServer(http_port, server.Site(Root(config, app)))
log.msg("Scrapyd web console available at http://localhost:%s/ (HEROKU)" %
http_port)
launcher.setServiceParent(app)
timer.setServiceParent(app)
webservice.setServiceParent(app)
return app
| 35.697674 | 78 | 0.760912 | [
"BSD-3-Clause"
] | esavara/scrapy-heroku | scrapy_heroku/app.py | 1,535 | Python |
# -*- coding: utf-8 -*-
"""
Adjustment from the 2D version from Machine Learning & Simulation code and video:
https://www.youtube.com/watch?v=ZUXmO4hu-20&list=LL&index=1&ab_channel=MachineLearning%26Simulation
https://github.com/Ceyron/machine-learning-and-simulation/blob/main/english/simulation_scripts/lattice_boltzmann_method_python_jax.py
by Bart Davids. Originally made in Google Colab:
https://colab.research.google.com/drive/1F3EH9_2N3lkEpgQXOScR3lcQ6oqCARPk?usp=sharing
Additional notes and figures for clarification can be found there.
"""
# Import dependencies
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import cmasher as cmr
from tqdm import tqdm
if __name__ == '__main__':
# Enable 64bit JAX
jax.config.update("jax_enable_x64", True)
# Radius of the cylinder
radius = 5.5
# Dimensions of domain
ny = 50
nz = 60
nx = 300
KINEMATIC_VISCOSITY = 0.0025
HORIZONTAL_INFLOW_VELOCITY = 0.04
reynolds_number = (HORIZONTAL_INFLOW_VELOCITY * radius) / KINEMATIC_VISCOSITY
RELAXATION_OMEGA = (1.0 / (3.0 * KINEMATIC_VISCOSITY + 0.5))
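    # BGK relaxation rate derived from the kinematic viscosity: omega = 1 / (3*nu + 0.5)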
PLOT_EVERY_N_STEPS = 100
    SKIP_FIRST_N_ITERATIONS = 5000
N_ITERATIONS = 20000
print('Reynolds number:', reynolds_number)
# Define a mesh for the obstacle mask
x = jnp.arange(nx)
y = jnp.arange(ny)
z = jnp.arange(nz)
X, Y, Z = jnp.meshgrid(x, y, z, indexing="ij")
cylinder = jnp.sqrt((X - nx//5)**2 + (Y - ny//2)**2)
obstacle_mask = cylinder < radius
# Show topview of the cylinder:
plt.imshow(obstacle_mask[:, :, nz//2].T)
plt.show()
# Front view:
plt.imshow(obstacle_mask[nx//5, :, :].T)
plt.show()
# Side View:
plt.imshow(obstacle_mask[:, ny//2, :].T)
plt.show()
def get_density(discrete_velocities):
density = jnp.sum(discrete_velocities, axis=-1)
return density
def get_macroscopic_velocities(discrete_velocities, density):
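        # First moment of the distributions (momentum) divided by density.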
return jnp.einsum("NMLQ,dQ->NMLd", discrete_velocities, LATTICE_VELOCITIES) / density[..., jnp.newaxis]
def get_equilibrium_discrete_velocities(macroscopic_velocities, density):
projected_discrete_velocities = jnp.einsum("dQ,NMLd->NMLQ", LATTICE_VELOCITIES, macroscopic_velocities)
macroscopic_velocity_magnitude = jnp.linalg.norm(macroscopic_velocities, axis=-1, ord=2)
equilibrium_discrete_velocities = (density[..., jnp.newaxis] * LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, jnp.newaxis, :] *
(1 + 3 * projected_discrete_velocities + 9/2 * projected_discrete_velocities**2 -
3/2 * macroscopic_velocity_magnitude[..., jnp.newaxis]**2))
return equilibrium_discrete_velocities
N_DISCRETE_VELOCITIES = 19
# 3D lattice velocities and numbering used as in:
# https://www.researchgate.net/publication/290158292_An_introduction_to_Lattice-Boltzmann_methods
LATTICE_INDICES = jnp.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18])
    LATTICE_VELOCITIES_X = jnp.array([ 0, 1, 0,-1, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1, 0, 0, 0, 0])
    LATTICE_VELOCITIES_Y = jnp.array([ 0, 0, 1, 0,-1, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0, 1,-1,-1, 1])
    LATTICE_VELOCITIES_Z = jnp.array([ 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 1, 1,-1,-1, 1, 1,-1,-1])
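    # Each entry maps a lattice direction to its reverse; used by the bounce-back rule.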
OPPOSITE_LATTICE_INDICES = jnp.array([ 0, 3, 4, 1, 2, 6, 5, 9,10, 7, 8,13,14,11,12,17,18,15,16])
    LATTICE_VELOCITIES = jnp.array([LATTICE_VELOCITIES_X,
                                    LATTICE_VELOCITIES_Y,
                                    LATTICE_VELOCITIES_Z])
LATTICE_WEIGHTS = jnp.array([# rest particle
1/3,
# face-connected neighbors
1/18, 1/18, 1/18, 1/18, 1/18, 1/18,
# edge-connected neighbors
1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36])
# Velocity directions/planes
    RIGHT_VELOCITIES = jnp.array([1, 7, 10, 11, 14])            # LATTICE_VELOCITIES_X = 1
    LEFT_VELOCITIES = jnp.array([3, 8, 9, 12, 13])              # LATTICE_VELOCITIES_X = -1
    YZ_VELOCITIES = jnp.array([0, 2, 4, 5, 6, 15, 16, 17, 18])  # LATTICE_VELOCITIES_X = 0
VELOCITY_PROFILE = jnp.zeros((nx, ny, nz, 3))
VELOCITY_PROFILE = VELOCITY_PROFILE.at[:, :, :, 0].set(HORIZONTAL_INFLOW_VELOCITY)
discrete_velocities_prev = get_equilibrium_discrete_velocities(VELOCITY_PROFILE,
jnp.ones((nx, ny, nz)))
@jax.jit
def update(discrete_velocities_prev):
# (1) Prescribe the outflow BC on the right boundary. Flow can go out, but not back in.
discrete_velocities_prev = discrete_velocities_prev.at[-1, :, :, LEFT_VELOCITIES].set(discrete_velocities_prev[-2, :, :, LEFT_VELOCITIES])
# (2) Determine macroscopic velocities
density_prev = get_density(discrete_velocities_prev)
macroscopic_velocities_prev = get_macroscopic_velocities(
discrete_velocities_prev,
density_prev)
# (3) Prescribe Inflow Dirichlet BC using Zou/He scheme in 3D:
# https://arxiv.org/pdf/0811.4593.pdf
# https://terpconnect.umd.edu/~aydilek/papers/LB.pdf
macroscopic_velocities_prev = macroscopic_velocities_prev.at[0, 1:-1, 1:-1, :].set(VELOCITY_PROFILE[0, 1:-1, 1:-1, :])
lateral_densities = get_density(jnp.transpose(discrete_velocities_prev[0, :, :, YZ_VELOCITIES], axes = (1, 2, 0)))
left_densities = get_density(jnp.transpose(discrete_velocities_prev[0, :, :, LEFT_VELOCITIES], axes = (1, 2, 0)))
density_prev = density_prev.at[0, :, :].set((lateral_densities + 2 * left_densities) /
(1 - macroscopic_velocities_prev[0, :, :, 0]))
# (4) Compute discrete Equilibria velocities
equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(
macroscopic_velocities_prev,
density_prev)
# (3) Belongs to the Zou/He scheme
discrete_velocities_prev =\
discrete_velocities_prev.at[0, :, :, RIGHT_VELOCITIES].set(
equilibrium_discrete_velocities[0, :, :, RIGHT_VELOCITIES])
# (5) Collide according to BGK
discrete_velocities_post_collision = (discrete_velocities_prev - RELAXATION_OMEGA *
(discrete_velocities_prev - equilibrium_discrete_velocities))
        # (6) Bounce-back boundary conditions to enforce no-slip at the obstacle
for i in range(N_DISCRETE_VELOCITIES):
discrete_velocities_post_collision = discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(
discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]])
# (7) Stream alongside lattice velocities
discrete_velocities_streamed = discrete_velocities_post_collision
for i in range(N_DISCRETE_VELOCITIES):
discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, :, i].set(
jnp.roll(
jnp.roll(
jnp.roll(
discrete_velocities_post_collision[:, :, :, i], LATTICE_VELOCITIES[0, i], axis = 0),
LATTICE_VELOCITIES[1, i], axis = 1),
LATTICE_VELOCITIES[2, i], axis = 2))
return discrete_velocities_streamed
def run(discrete_velocities_prev):
for i in tqdm(range(N_ITERATIONS)):
discrete_velocities_next = update(discrete_velocities_prev)
discrete_velocities_prev = discrete_velocities_next
            if i % PLOT_EVERY_N_STEPS == 0 and i > SKIP_FIRST_N_ITERATIONS - PLOT_EVERY_N_STEPS:
density = get_density(discrete_velocities_next)
macroscopic_velocities = get_macroscopic_velocities(
discrete_velocities_next,
density)
                print('\nMax macroscopic velocity:', jnp.max(macroscopic_velocities))
velocity_magnitude = jnp.linalg.norm(
macroscopic_velocities,
axis=-1,
ord=2)
fig = plt.figure(figsize = (15, 3))
cont = plt.contourf(X[:, :, nz//2], Y[:, :, nz//2], jnp.flip(velocity_magnitude[:, :, nz//2], axis = 1), alpha=0.8, cmap=cmr.iceburn)
plt.axis('scaled')
plt.axis('off')
plt.show()
return
run(discrete_velocities_prev)
| 45.912821 | 153 | 0.598235 | [
"MIT"
] | bartdavids/machine-learning-and-simulation | english/simulation_scripts/D3Q19_lattice_boltzmann_method_python_jax.py | 8,953 | Python |
__author__ = 'ialbert'
from django.views.generic import DetailView, ListView, TemplateView, RedirectView, View
from haystack.views import SearchView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet, AutoQuery
from haystack.utils import Highlighter
from django.conf import settings
from biostar.server.views import BaseListMixin
from ajax import ajax_error, ajax_success, ajax_error_wrapper, json_response
from django.conf.urls import patterns
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from biostar.apps.posts.models import Post, Tag
from biostar.apps.planet.models import BlogPost
import logging
logger = logging.getLogger(__name__)
info_dict = {
'queryset': Post.objects.all(),
}
sitemaps = {
'flatpages': FlatPageSitemap,
'posts': GenericSitemap(info_dict, priority=0.6),
}
class SiteSearch(SearchView):
extra_context = lambda x: dict(topic="search", page_title="Search")
def slow_highlight(query, text):
"Invoked only if the search backend does not support highlighting"
highlight = Highlighter(query)
value = highlight.highlight(text)
return value
def join_highlights(row):
"Joins the highlighted text"
if type(row.highlighted) is dict:
return ''
if not row.highlighted:
return
return '<br>'.join(x for x in row.highlighted)
class Search(BaseListMixin):
template_name = "search/search.html"
paginate_by = settings.PAGINATE_BY
context_object_name = "results"
page_title = "Search"
def get_queryset(self):
self.q = self.request.GET.get('q', '')
if not self.q:
return []
content = AutoQuery(self.q)
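        # AutoQuery interprets user-style operators (quoted phrases,
        # -exclusions) when building the backend query.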
query = SearchQuerySet().filter(content=content).highlight()[:50]
for row in query:
if row is None:
continue
context = join_highlights(row)
context = context or slow_highlight(query=self.q, text=row.content)
row.context = context
return query
def get_context_data(self, **kwargs):
context = super(Search, self).get_context_data(**kwargs)
context['q'] = self.q
return context
def suggest_tags(request):
"Returns suggested tags"
    tags = Tag.objects.all().order_by('-count')  # [:10]
data = settings.POST_TAG_LIST + [t.name for t in tags]
data = filter(None, data)
return json_response(data)
#@ajax_error_wrapper
def search_title(request):
"Handles title searches"
q = request.GET.get('q', '')
content = AutoQuery(q)
results = SearchQuerySet().filter(content=content).highlight()[:50]
items = []
for row in results:
try:
ob = row.object
# Why can this happen?
if not ob:
continue
context = join_highlights(row)
context = context or slow_highlight(query=q, text=row.content)
text = "%s" % row.title
items.append(
dict(id=ob.get_absolute_url(), text=text, context=context, author=row.author,
url=ob.get_absolute_url()),
)
        except Exception as exc:
logger.error(content)
logger.error(exc)
pass
payload = dict(items=items)
return json_response(payload)
| 28.067797 | 93 | 0.658514 | [
"MIT"
] | biostars/support.bioconductor.org | biostar/server/search.py | 3,312 | Python |