ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b40acc78035a08e0a4a97e63fb9e970cf728f5f4 | """Utility methods"""
import datetime as dt
DEFAULT_TIME_ZONE: dt.tzinfo = dt.timezone.utc
def now(time_zone=None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
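# Illustrative usage (not part of the original module): with no argument the
# helper returns an aware UTC datetime; passing a tzinfo overrides the default.
#   now()                                    # aware datetime in UTC
#   now(dt.timezone(dt.timedelta(hours=2)))  # same instant, at a +02:00 offset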
|
py | b40acce9de3188045605e99c0f2e346991c10f89 | import graphene
from ...core.permissions import ProductPermissions
from ...warehouse import models
from ...warehouse.availability import get_available_quantity_for_customer
from ..account.enums import CountryCodeEnum
from ..core.connection import CountableDjangoObjectType
from ..decorators import permission_required
class WarehouseAddressInput(graphene.InputObjectType):
street_address_1 = graphene.String(description="Address.", required=True)
street_address_2 = graphene.String(description="Address.")
city = graphene.String(description="City.", required=True)
city_area = graphene.String(description="District.")
postal_code = graphene.String(description="Postal code.")
country = CountryCodeEnum(description="Country.", required=True)
country_area = graphene.String(description="State or province.")
phone = graphene.String(description="Phone number.")
class WarehouseInput(graphene.InputObjectType):
slug = graphene.String(description="Warehouse slug.")
company_name = graphene.String(description="Company name.")
email = graphene.String(description="The email address of the warehouse.")
class WarehouseCreateInput(WarehouseInput):
name = graphene.String(description="Warehouse name.", required=True)
address = WarehouseAddressInput(
description="Address of the warehouse.", required=True
)
shipping_zones = graphene.List(
graphene.ID, description="Shipping zones supported by the warehouse."
)
class WarehouseUpdateInput(WarehouseInput):
name = graphene.String(description="Warehouse name.", required=False)
address = WarehouseAddressInput(
description="Address of the warehouse.", required=False
)
class Warehouse(CountableDjangoObjectType):
class Meta:
description = "Represents warehouse."
model = models.Warehouse
interfaces = [graphene.relay.Node]
only_fields = [
"id",
"name",
"slug",
"company_name",
"shipping_zones",
"address",
"email",
]
class Stock(CountableDjangoObjectType):
stock_quantity = graphene.Int(
description="Quantity of a product available for sale.", required=True
)
quantity = graphene.Int(
required=True,
description="Quantity of a product in the warehouse's possession, "
"including the allocated stock that is waiting for shipment.",
)
quantity_allocated = graphene.Int(
required=True, description="Quantity allocated for orders"
)
class Meta:
description = "Represents stock."
model = models.Stock
interfaces = [graphene.relay.Node]
only_fields = ["warehouse", "product_variant", "quantity", "quantity_allocated"]
@staticmethod
def resolve_stock_quantity(root, *_args):
return get_available_quantity_for_customer(root)
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_quantity(root, *_args):
return root.quantity
@staticmethod
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_quantity_allocated(root, *_args):
return root.quantity_allocated
|
py | b40ace9dcdb95184fb45e54831aaba60526cefc4 | # Copyright 2016 Dravetech AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Import local modules
from incendio.junos.junos import JunOSDriver
__all__ = ("JunOSDriver",)
|
py | b40acede7bd80984d1ebde40211b37222223edf3 | import click
import codecs
import os
from collections import Counter
from nlppln.utils import get_files, out_file_name
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
def concat_files(in_dir, out_dir):
in_files = get_files(in_dir)
counts = Counter()
for in_file in in_files:
parts = os.path.basename(in_file).split(u'_')
prefix = u'_'.join(parts[:2])
counts[prefix] += 1
out_file = out_file_name(out_dir, prefix, ext='txt')
with codecs.open(in_file, 'r', encoding='utf-8') as fi:
text = fi.read()
text = text.replace(u'\n', u'')
text = text.strip()
with codecs.open(out_file, 'a', encoding='utf-8') as fo:
fo.write(text)
fo.write(u'\n')
if __name__ == '__main__':
concat_files()
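# Rough usage sketch (paths are hypothetical): input files whose basenames share
# the first two '_'-separated parts, e.g. book_001_page1.txt and
# book_001_page2.txt, each contribute one newline-stripped line to a single
# output file derived from the prefix book_001 by out_file_name().
#   python concat.py /data/pages -o /data/concatenated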
|
py | b40acf32d92add4d92a076b063dd6eb22be8a321 | from PyFina import getMeta, PyFina
import matplotlib.pylab as plt
import datetime
import time
import random
# download the file containing the Marc Bloch 2021 data
# unzip your files in C:/var/opt/emoncms
dir = "C:/var/opt/emoncms/phpfina"
feeds = { "Text" : 5, "TziqNord": 8, "TtecNord": 11, "TdepNord": 21, "TretNord": 22, "pompeNord": 23}
step = 3600
verbose = False
# epL : episode length : 8 days !!
epL = 8*24*3600
def analyse():
"""
étant donné un dictionnaire de numéros de flux
calcule les timestamps de départ et de fin communs à cet ensemble de flux
"""
starts = []
ends = []
for f in feeds:
meta = getMeta(feeds[f], dir)
if verbose:
print(meta)
start = meta["start_time"]
length = meta["npoints"] * meta["interval"]
end = start + length
starts.append(start)
ends.append(end)
start = max(starts)
end = min(ends)
length = end - start
nbpts = epL // step
if epL > length:
nbpts = length // step
return start, end, nbpts
start, end, nbpts = analyse()
def viewEpisode(start_ts):
"""
permet de visualiser un épisode commencant à start_ts
"""
Text = PyFina(feeds["Text"],dir,start_ts,step,nbpts)
TziqNord = PyFina(feeds["TziqNord"],dir,start_ts,step,nbpts)
TtecNord = PyFina(feeds["TtecNord"],dir,start_ts,step,nbpts)
TdepNord = PyFina(feeds["TdepNord"],dir,start_ts,step,nbpts)
TretNord = PyFina(feeds["TretNord"],dir,start_ts,step,nbpts)
localstart = datetime.datetime.fromtimestamp(start_ts)
utcstart = datetime.datetime.utcfromtimestamp(start_ts)
title = "starting on : UTC {}\n{} {}".format(utcstart,time.tzname[0],localstart)
ax1 = plt.subplot(211)
plt.title(title)
plt.ylabel("outdoor Temp °C")
plt.xlabel("time in hours")
plt.plot(Text, label="Text")
plt.legend(loc='upper left')
ax1 = ax1.twinx()
plt.ylabel("indoor Temp °C")
plt.plot(TziqNord, label = "TziqNord", color="green")
plt.plot(TtecNord, label = "TtecNord", color="orange")
plt.legend(loc='upper right')
ax3 = plt.subplot(212, sharex=ax1)
plt.ylabel("hot water Temp °C")
plt.plot(TdepNord, label = "TdepNord")
plt.plot(TretNord, label = "TretNord")
plt.legend(loc='upper right')
plt.show()
import signal
class Loop:
"""
visualisation des épisodes
"""
def __init__(self):
self._exit = False
def run(self):
"""
boucle
"""
signal.signal(signal.SIGINT, self._sigint_handler)
signal.signal(signal.SIGTERM, self._sigint_handler)
while not self._exit:
start_ts = random.randrange(start, end - epL)
viewEpisode(start_ts)
def _sigint_handler(self, signal, frame):
"""
Réception du signal de fermeture
"""
print("signal de fermeture reçu")
self._exit = True
def close(self):
print("fermeture effectuée")
if end - epL > start :
boucle = Loop()
boucle.run()
boucle.close()
else :
viewEpisode(start)
|
py | b40ad1903b938afb66418901e7b2f3d6e8ccfdbd | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import tempfile
from telemetry.internal.platform import profiler
class V8Profiler(profiler.Profiler):
_V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
@classmethod
def name(cls):
return 'v8'
@classmethod
def is_supported(cls, browser_type):
return not browser_type.startswith('cros')
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
if browser_type.startswith('android'):
dump_file = '/data/local/tmp/v8-profile.log'
else:
dump_file = tempfile.mkstemp()[1]
options.AppendExtraBrowserArgs([cls._V8_ARG % dump_file, '--no-sandbox'])
def CollectProfile(self):
# Find output filename from browser argument.
for i in self._browser_backend.browser_options.extra_browser_args:
match = re.match(self._V8_ARG % r'(\S+)', i)
if match:
output_file = match.groups(0)[0]
assert output_file
# On Android pull the output file to the host.
if self._platform_backend.GetOSName() == 'android':
host_output_file = '%s.log' % self._output_path
try:
self._browser_backend.device.PullFile(
output_file, host_output_file)
except:
logging.exception('New exception caused by DeviceUtils conversion')
raise
# Clean the device
self._browser_backend.device.RunShellCommand('rm %s' % output_file)
output_file = host_output_file
print 'V8 profile saved as %s' % output_file
print 'To view, open in ' \
'http://v8.googlecode.com/svn/trunk/tools/tick-processor.html'
return [output_file]
|
py | b40ad226e9e8eb2156d8c8df603de6aa827ef9af | from talon import Context, Module, actions, imgui, registry
ctx = Context()
mod = Module()
mod.list("code_common_function", desc="List of common functions for active language")
# global
function_list = []
@mod.capture(rule="{user.code_common_function}")
def code_common_function(m) -> str:
"""Returns a function name"""
return m.code_common_function
mod.tag("code_functions_common", desc="Tag for enabling support for common functions")
mod.tag(
"code_functions_common_gui_active",
desc="Active when the function picker GUI is showing",
)
@mod.action_class
class Actions:
def code_toggle_functions():
"""GUI: List functions for active language"""
global function_list
if gui_functions.showing:
function_list = []
gui_functions.hide()
ctx.tags.discard("user.code_functions_common_gui_active")
else:
update_function_list_and_freeze()
def code_select_function(number: int, selection: str):
"""Inserts the selected function when the imgui is open"""
if gui_functions.showing and number < len(function_list):
actions.user.code_insert_function(
registry.lists["user.code_common_function"][0][function_list[number]],
selection,
)
# TODO: clarify the relation between `code_insert_function`
# and the various functions declared in the functions
def code_insert_function(text: str, selection: str):
"""Inserts a function and positions the cursor appropriately"""
def update_function_list_and_freeze():
global function_list
if "user.code_common_function" in registry.lists:
function_list = sorted(registry.lists["user.code_common_function"][0].keys())
else:
function_list = []
gui_functions.show()
ctx.tags.add("user.code_functions_common_gui_active")
@imgui.open()
def gui_functions(gui: imgui.GUI):
gui.text("Functions")
gui.line()
# print(str(registry.lists["user.code_functions"]))
for i, entry in enumerate(function_list, 1):
if entry in registry.lists["user.code_common_function"][0]:
gui.text(
f"{i}. {entry}: {registry.lists['user.code_common_function'][0][entry]}"
)
gui.spacer()
if gui.button("Toggle funk (close window)"):
actions.user.code_toggle_functions_hide()
def commands_updated(_):
if gui_functions.showing:
update_function_list_and_freeze()
registry.register("update_commands", commands_updated)
|
py | b40ad34639467a27bbd6a84e2f1e6189c0eca5dd | # coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dlxapi.configuration import Configuration
class Tag(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'color': 'str',
'parent_id': 'str',
'is_default': 'bool',
'portfolio_id': 'str'
}
attribute_map = {
'name': 'name',
'color': 'color',
'parent_id': 'parentId',
'is_default': 'isDefault',
'portfolio_id': 'portfolioId'
}
def __init__(self, name=None, color=None, parent_id=None, is_default=None, portfolio_id=None, _configuration=None): # noqa: E501
"""Tag - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._name = None
self._color = None
self._parent_id = None
self._is_default = None
self._portfolio_id = None
self.discriminator = None
if name is not None:
self.name = name
if color is not None:
self.color = color
if parent_id is not None:
self.parent_id = parent_id
if is_default is not None:
self.is_default = is_default
if portfolio_id is not None:
self.portfolio_id = portfolio_id
@property
def name(self):
"""Gets the name of this Tag. # noqa: E501
:return: The name of this Tag. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Tag.
:param name: The name of this Tag. # noqa: E501
:type: str
"""
self._name = name
@property
def color(self):
"""Gets the color of this Tag. # noqa: E501
:return: The color of this Tag. # noqa: E501
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this Tag.
:param color: The color of this Tag. # noqa: E501
:type: str
"""
self._color = color
@property
def parent_id(self):
"""Gets the parent_id of this Tag. # noqa: E501
:return: The parent_id of this Tag. # noqa: E501
:rtype: str
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this Tag.
:param parent_id: The parent_id of this Tag. # noqa: E501
:type: str
"""
self._parent_id = parent_id
@property
def is_default(self):
"""Gets the is_default of this Tag. # noqa: E501
:return: The is_default of this Tag. # noqa: E501
:rtype: bool
"""
return self._is_default
@is_default.setter
def is_default(self, is_default):
"""Sets the is_default of this Tag.
:param is_default: The is_default of this Tag. # noqa: E501
:type: bool
"""
self._is_default = is_default
@property
def portfolio_id(self):
"""Gets the portfolio_id of this Tag. # noqa: E501
:return: The portfolio_id of this Tag. # noqa: E501
:rtype: str
"""
return self._portfolio_id
@portfolio_id.setter
def portfolio_id(self, portfolio_id):
"""Sets the portfolio_id of this Tag.
:param portfolio_id: The portfolio_id of this Tag. # noqa: E501
:type: str
"""
self._portfolio_id = portfolio_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Tag, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Tag):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Tag):
return True
return self.to_dict() != other.to_dict()
|
py | b40ad36315339ba81934cdedca063886dd8a9d16 | #!/usr/bin/env python
__author__ = "Peter Shipley"
import os
from ISY.IsyEvent import ISYEvent, _print_event
def main():
server = ISYEvent(debug=0x0000)
# you can subscribe to multiple devices
# server.subscribe('10.1.1.25')
server.subscribe(
addr=os.getenv('ISY_ADDR', '10.1.1.36'),
userl=os.getenv('ISY_USER', "admin"),
userp=os.getenv('ISY_PASS', "admin")
)
server.set_process_func(_print_event, "")
try:
print('Use Control-C to exit')
server.events_loop() #no return
# for d in server.event_iter( ignorelist=["_0", "_11"] ):
# server._print_event(d, "")
except KeyboardInterrupt:
print('Exiting')
if __name__ == '__main__':
main()
exit(0)
|
py | b40ad3bc90856f9837b288a9a0e3c4daa038f228 | from itertools import count
limit = 20
for num in count (0, limit):
isEvenlyDivisibleForAll = True
for divisor in count(limit -1, -1):
if divisor < 3:
break
if num%divisor != 0:
isEvenlyDivisibleForAll = False
break
if isEvenlyDivisibleForAll == True and num > 0:
print(num, " is evenly divisible by all numbers from 1 to ", limit)
break |
py | b40ad596f5925f80ad4f3d1032bf682135f16816 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mars.oscar as mo
class SvcActor1(mo.Actor):
def __init__(self, arg):
super().__init__()
self._arg = arg
def get_arg(self):
return self._arg
async def start(config: dict, address: str = None):
svc_config = config['test_svc1']
await mo.create_actor(
SvcActor1, uid=svc_config['uid'], arg=svc_config['arg1'],
address=address)
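# Shape of the config this test service expects, inferred from the code above
# (the uid / arg values are placeholders):
#   config = {'test_svc1': {'uid': 'SvcActor1', 'arg1': 'some_arg'}}
#   await start(config, address='127.0.0.1:12345')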
|
py | b40ad6494eea765354e068eb5a8ba03960dc99aa | """
Graph based SLAM example
author: Atsushi Sakai (@Atsushi_twi)
Ref
[A Tutorial on Graph-Based SLAM](http://www2.informatik.uni-freiburg.de/~stachnis/pdf/grisetti10titsmag.pdf)
"""
import numpy as np
import math
import copy
import itertools
import matplotlib.pyplot as plt
# Simulation parameter
Qsim = np.diag([0.2, np.deg2rad(1.0)])**2
Rsim = np.diag([0.1, np.deg2rad(10.0)])**2
DT = 2.0 # time tick [s]
SIM_TIME = 100.0 # simulation time [s]
MAX_RANGE = 30.0 # maximum observation range
STATE_SIZE = 3 # State size [x,y,yaw]
# Covariance parameter of Graph Based SLAM
C_SIGMA1 = 0.1
C_SIGMA2 = 0.1
C_SIGMA3 = np.deg2rad(1.0)
MAX_ITR = 20 # Maximum iteration
show_graph_dtime = 20.0 # [s]
show_animation = True
class Edge():
def __init__(self):
self.e = np.zeros((3, 1))
self.omega = np.zeros((3, 3)) # information matrix
self.d1 = 0.0
self.d2 = 0.0
self.yaw1 = 0.0
self.yaw2 = 0.0
self.angle1 = 0.0
self.angle2 = 0.0
self.id1 = 0
self.id2 = 0
def cal_observation_sigma(d):
sigma = np.zeros((3, 3))
sigma[0, 0] = C_SIGMA1**2
sigma[1, 1] = C_SIGMA2**2
sigma[2, 2] = C_SIGMA3**2
return sigma
def calc_rotational_matrix(angle):
Rt = np.array([[math.cos(angle), -math.sin(angle), 0],
[math.sin(angle), math.cos(angle), 0],
[0, 0, 1.0]])
return Rt
def calc_edge(x1, y1, yaw1, x2, y2, yaw2, d1,
angle1, phi1, d2, angle2, phi2, t1, t2):
edge = Edge()
tangle1 = pi_2_pi(yaw1 + angle1)
tangle2 = pi_2_pi(yaw2 + angle2)
tmp1 = d1 * math.cos(tangle1)
tmp2 = d2 * math.cos(tangle2)
tmp3 = d1 * math.sin(tangle1)
tmp4 = d2 * math.sin(tangle2)
edge.e[0, 0] = x2 - x1 - tmp1 + tmp2
edge.e[1, 0] = y2 - y1 - tmp3 + tmp4
edge.e[2, 0] = 0
Rt1 = calc_rotational_matrix(tangle1)
Rt2 = calc_rotational_matrix(tangle2)
sig1 = cal_observation_sigma(d1)
sig2 = cal_observation_sigma(d2)
edge.omega = np.linalg.inv(Rt1 @ sig1 @ Rt1.T + Rt2 @ sig2 @ Rt2.T)
edge.d1, edge.d2 = d1, d2
edge.yaw1, edge.yaw2 = yaw1, yaw2
edge.angle1, edge.angle2 = angle1, angle2
edge.id1, edge.id2 = t1, t2
return edge
def calc_edges(xlist, zlist):
edges = []
cost = 0.0
zids = list(itertools.combinations(range(len(zlist)), 2))
for (t1, t2) in zids:
x1, y1, yaw1 = xlist[0, t1], xlist[1, t1], xlist[2, t1]
x2, y2, yaw2 = xlist[0, t2], xlist[1, t2], xlist[2, t2]
if zlist[t1] is None or zlist[t2] is None:
continue # No observation
for iz1 in range(len(zlist[t1][:, 0])):
for iz2 in range(len(zlist[t2][:, 0])):
if zlist[t1][iz1, 3] == zlist[t2][iz2, 3]:
d1 = zlist[t1][iz1, 0]
angle1, phi1 = zlist[t1][iz1, 1], zlist[t1][iz1, 2]
d2 = zlist[t2][iz2, 0]
angle2, phi2 = zlist[t2][iz2, 1], zlist[t2][iz2, 2]
edge = calc_edge(x1, y1, yaw1, x2, y2, yaw2, d1,
angle1, phi1, d2, angle2, phi2, t1, t2)
edges.append(edge)
cost += (edge.e.T @ (edge.omega) @ edge.e)[0, 0]
print("cost:", cost, ",nedge:", len(edges))
return edges
def calc_jacobian(edge):
t1 = edge.yaw1 + edge.angle1
A = np.array([[-1.0, 0, edge.d1 * math.sin(t1)],
[0, -1.0, -edge.d1 * math.cos(t1)],
[0, 0, 0]])
t2 = edge.yaw2 + edge.angle2
B = np.array([[1.0, 0, -edge.d2 * math.sin(t2)],
[0, 1.0, edge.d2 * math.cos(t2)],
[0, 0, 0]])
return A, B
def fill_H_and_b(H, b, edge):
A, B = calc_jacobian(edge)
id1 = edge.id1 * STATE_SIZE
id2 = edge.id2 * STATE_SIZE
H[id1:id1 + STATE_SIZE, id1:id1 + STATE_SIZE] += A.T @ edge.omega @ A
H[id1:id1 + STATE_SIZE, id2:id2 + STATE_SIZE] += A.T @ edge.omega @ B
H[id2:id2 + STATE_SIZE, id1:id1 + STATE_SIZE] += B.T @ edge.omega @ A
H[id2:id2 + STATE_SIZE, id2:id2 + STATE_SIZE] += B.T @ edge.omega @ B
b[id1:id1 + STATE_SIZE] += (A.T @ edge.omega @ edge.e)
b[id2:id2 + STATE_SIZE] += (B.T @ edge.omega @ edge.e)
return H, b
def graph_based_slam(x_init, hz):
print("start graph based slam")
zlist = copy.deepcopy(hz)
x_opt = copy.deepcopy(x_init)
nt = x_opt.shape[1]
n = nt * STATE_SIZE
for itr in range(MAX_ITR):
edges = calc_edges(x_opt, zlist)
H = np.zeros((n, n))
b = np.zeros((n, 1))
for edge in edges:
H, b = fill_H_and_b(H, b, edge)
# to fix origin
H[0:STATE_SIZE, 0:STATE_SIZE] += np.identity(STATE_SIZE)
dx = - np.linalg.inv(H) @ b
for i in range(nt):
x_opt[0:3, i] += dx[i * 3:i * 3 + 3, 0]
diff = dx.T @ dx
print("iteration: %d, diff: %f" % (itr + 1, diff))
if diff < 1.0e-5:
break
return x_opt
def calc_input():
v = 1.0 # [m/s]
yawrate = 0.1 # [rad/s]
u = np.array([[v, yawrate]]).T
return u
def observation(xTrue, xd, u, RFID):
xTrue = motion_model(xTrue, u)
# add noise to gps x-y
z = np.zeros((0, 4))
for i in range(len(RFID[:, 0])):
dx = RFID[i, 0] - xTrue[0, 0]
dy = RFID[i, 1] - xTrue[1, 0]
d = math.sqrt(dx**2 + dy**2)
angle = pi_2_pi(math.atan2(dy, dx)) - xTrue[2, 0]
phi = pi_2_pi(math.atan2(dy, dx))
if d <= MAX_RANGE:
dn = d + np.random.randn() * Qsim[0, 0] # add noise
angle_noise = np.random.randn() * Qsim[1, 1]
angle += angle_noise
phi += angle_noise
zi = np.array([dn, angle, phi, i])
z = np.vstack((z, zi))
# add noise to input
ud1 = u[0, 0] + np.random.randn() * Rsim[0, 0]
ud2 = u[1, 0] + np.random.randn() * Rsim[1, 1]
ud = np.array([[ud1, ud2]]).T
xd = motion_model(xd, ud)
return xTrue, z, xd, ud
def motion_model(x, u):
F = np.array([[1.0, 0, 0],
[0, 1.0, 0],
[0, 0, 1.0]])
B = np.array([[DT * math.cos(x[2, 0]), 0],
[DT * math.sin(x[2, 0]), 0],
[0.0, DT]])
x = F @ x + B @ u
return x
def pi_2_pi(angle):
return (angle + math.pi) % (2 * math.pi) - math.pi
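# Quick check of the wrap-around behaviour (illustrative):
#   pi_2_pi(3 * math.pi / 2) == -math.pi / 2
#   pi_2_pi(-3 * math.pi / 2) == math.pi / 2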
def main():
print(__file__ + " start!!")
time = 0.0
# RFID positions [x, y, yaw]
RFID = np.array([[10.0, -2.0, 0.0],
[15.0, 10.0, 0.0],
[3.0, 15.0, 0.0],
[-5.0, 20.0, 0.0],
[-5.0, 5.0, 0.0]
])
# State Vector [x y yaw v]'
xTrue = np.zeros((STATE_SIZE, 1))
xDR = np.zeros((STATE_SIZE, 1)) # Dead reckoning
# history
hxTrue = []
hxDR = []
hz = []
dtime = 0.0
init = False
while SIM_TIME >= time:
if not init:
hxTrue = xTrue
hxDR = xTrue
init = True
else:
hxDR = np.hstack((hxDR, xDR))
hxTrue = np.hstack((hxTrue, xTrue))
time += DT
dtime += DT
u = calc_input()
xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID)
hz.append(z)
if dtime >= show_graph_dtime:
x_opt = graph_based_slam(hxDR, hz)
dtime = 0.0
if show_animation: # pragma: no cover
plt.cla()
plt.plot(RFID[:, 0], RFID[:, 1], "*k")
plt.plot(hxTrue[0, :].flatten(),
hxTrue[1, :].flatten(), "-b")
plt.plot(hxDR[0, :].flatten(),
hxDR[1, :].flatten(), "-k")
plt.plot(x_opt[0, :].flatten(),
x_opt[1, :].flatten(), "-r")
plt.axis("equal")
plt.grid(True)
plt.title("Time" + str(time)[0:5])
plt.pause(1.0)
if __name__ == '__main__':
main() |
py | b40ad737f79425d59f5e0f4d5059b3ae09ff3f89 | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, World! from azure"
|
py | b40ad84280128057a038526532131d09318aae6f | """
Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
Licensed under The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from PIL import Image, ImageOps
from torch.utils.data import Dataset
from torchvision import transforms
import os
import numpy as np
import torch
import cv2
import albumentations
import albumentations.augmentations as A
class HistogramEqualization(object):
def __call__(self, img):
img_eq = ImageOps.equalize(img)
return img_eq
class AdjustGamma(object):
def __init__(self, gamma):
self.gamma = gamma
def __call__(self, img):
img_gamma = transforms.functional.adjust_gamma(img, self.gamma)
return img_gamma
class CelebAMaskDataset(Dataset):
def __init__(self, args, dataroot, unlabel_transform=None, latent_dir=None, is_label=True, phase='train',
limit_size=None, unlabel_limit_size=None, aug=False, resolution=256):
self.args = args
self.is_label = is_label
if is_label == True:
self.latent_dir = latent_dir
self.data_root = os.path.join(dataroot, 'label_data')
if phase == 'train':
if limit_size is None:
self.idx_list = np.loadtxt(os.path.join(self.data_root, 'train_full_list.txt'), dtype=str)
else:
self.idx_list = np.loadtxt(os.path.join(self.data_root,
'train_{}_list.txt'.format(limit_size)), dtype=str).reshape(-1)
elif phase == 'val':
if limit_size is None:
self.idx_list = np.loadtxt(os.path.join(self.data_root, 'val_full_list.txt'), dtype=str)
else:
self.idx_list = np.loadtxt(os.path.join(self.data_root,
'val_{}_list.txt'.format(limit_size)), dtype=str).reshape(-1)
elif phase == 'train-val':
# concat both train and val
if limit_size is None:
train_list = np.loadtxt(os.path.join(self.data_root, 'train_full_list.txt'), dtype=str)
val_list = np.loadtxt(os.path.join(self.data_root, 'val_full_list.txt'), dtype=str)
self.idx_list = list(train_list) + list(val_list)
else:
train_list = np.loadtxt(os.path.join(self.data_root,
'train_{}_list.txt'.format(limit_size)), dtype=str).reshape(-1)
val_list = np.loadtxt(os.path.join(self.data_root,
'val_{}_list.txt'.format(limit_size)), dtype=str).reshape(-1)
self.idx_list = list(train_list) + list(val_list)
else:
self.idx_list = np.loadtxt(os.path.join(self.data_root, 'test_list.txt'), dtype=str)
else:
self.data_root = os.path.join(dataroot, 'unlabel_data')
if unlabel_limit_size is None:
self.idx_list = np.loadtxt(os.path.join(self.data_root, 'unlabel_list.txt'), dtype=str)
else:
self.idx_list = np.loadtxt(os.path.join(self.data_root, 'unlabel_{}_list.txt'.format(unlabel_limit_size)), dtype=str)
self.img_dir = os.path.join(self.data_root, 'image')
self.label_dir = os.path.join(self.data_root, 'label')
self.phase = phase
# need this to match the number of segments you have
self.color_map = {
0: [0, 0, 0],
1: [204, 0, 0],
2: [76, 153, 0],
3: [204, 204, 0],
4: [51, 51, 255],
5: [204, 0, 204],
6: [0, 255, 255],
7: [255, 204, 204],
8: [102, 51, 0],
9: [255, 0, 0],
10: [102, 204, 0],
11: [255, 255, 0],
12: [0, 0, 153],
13: [0, 0, 204],
14: [255, 51, 153],
15: [0, 204, 204],
16: [0, 51, 0],
17: [255, 153, 51],
18: [0, 204, 0]
}
# self.color_map = {
# 0: [ 0, 0, 0],
# 1: [ 0,0,205],
# 2: [132,112,255],
# 3: [ 25,25,112],
# 4: [187,255,255],
# 5: [ 102,205,170],
# 6: [ 227,207,87],
# 7: [ 142,142,56]
# }
self.data_size = len(self.idx_list)
self.resolution = resolution
self.aug = aug
if aug == True:
self.aug_t = albumentations.Compose([
A.transforms.HorizontalFlip(p=0.5),
A.transforms.ShiftScaleRotate(shift_limit=0.1,
scale_limit=0.2,
rotate_limit=15,
border_mode=cv2.BORDER_CONSTANT,
value=0,
mask_value=0,
p=0.5),
])
self.unlabel_transform = unlabel_transform
def _mask_labels(self, mask_np):
label_size = len(self.color_map.keys())
labels = np.zeros((label_size, mask_np.shape[0], mask_np.shape[1]))
for i in range(label_size):
labels[i][mask_np==i] = 1.0
return labels
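    # Illustrative: for a 2x2 mask [[0, 1], [1, 0]] and the 19-entry color_map
    # above, _mask_labels returns a (19, 2, 2) array where channel 0 is
    # [[1, 0], [0, 1]], channel 1 is [[0, 1], [1, 0]] and every other channel
    # is zero, i.e. a one-hot encoding per segmentation class.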
@staticmethod
def preprocess(img):
image_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5), inplace=True)
]
)
img_tensor = image_transform(img)
# normalize
# img_tensor = (img_tensor - img_tensor.min()) / (img_tensor.max() - img_tensor.min())
# img_tensor = (img_tensor - 0.5) / 0.5
return img_tensor
def __len__(self):
if hasattr(self.args, 'n_gpu') == False:
return self.data_size
# make sure dataloader size is larger than batchxngpu size
return max(self.args.batch*self.args.n_gpu, self.data_size)
def __getitem__(self, idx):
if idx >= self.data_size:
idx = idx % (self.data_size)
if self.is_label:
img_idx = self.idx_list[idx].split('.')[0]
mask_pil = Image.open(os.path.join(self.label_dir, img_idx + '.png')).convert('L').resize((self.resolution, self.resolution), resample=0)
img_pil = Image.open(os.path.join(self.img_dir, img_idx + '.jpg')).convert('RGB').resize((self.resolution, self.resolution))
if (self.phase == 'train' or self.phase == 'train-val') and self.aug:
augmented = self.aug_t(image=np.array(img_pil), mask=np.array(mask_pil))
aug_img_pil = Image.fromarray(augmented['image'])
# apply pixel-wise transformation
img_tensor = self.preprocess(aug_img_pil)
mask_np = np.array(augmented['mask'])
labels = self._mask_labels(mask_np)
mask_tensor = torch.tensor(labels, dtype=torch.float)
mask_tensor = (mask_tensor - 0.5) / 0.5
else:
img_tensor = self.preprocess(img_pil)
mask_np = np.array(mask_pil)
labels = self._mask_labels(mask_np)
mask_tensor = torch.tensor(labels, dtype=torch.float)
mask_tensor = (mask_tensor - 0.5) / 0.5
return {
'image': img_tensor,
'mask': mask_tensor
}
else:
img_idx = self.idx_list[idx]
img_pil = Image.open(os.path.join(self.img_dir, img_idx)).convert('RGB').resize((self.resolution, self.resolution))
if self.unlabel_transform is not None:
img_tensor = self.unlabel_transform(img_pil)
else:
img_tensor = self.preprocess(img_pil)
return {
'image': img_tensor,
}
|
py | b40ad97ea607018ec8de9d37bb324b709aacc660 | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
RegexSemI_CamShops.py - regular expression based CamShops SemI decoder
=========================================================================
HELPFUL: http://regexr.com
"""
import RegexSemI
import re,os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
from utils import ContextLogger
logger = ContextLogger.getLogger('')
class RegexSemI_CamShops(RegexSemI.RegexSemI):
"""
"""
def __init__(self, repoIn=None):
RegexSemI.RegexSemI.__init__(self) #better than super() here - wont need to be changed for other domains
self.domainTag = "CamShops" #FIXME
self.create_domain_dependent_regex()
def create_domain_dependent_regex(self):
"""Can overwrite any of the regular expressions set in RegexParser.RegexParser.init_regular_expressions().
This doesn't deal with slot,value (ie domain dependent) semantics. For those you need to handcraft
the _decode_[inform,request,confirm] etc.
"""
# REDEFINES OF BASIC SEMANTIC ACTS (ie those other than inform, request): (likely nothing needs to be done here)
#eg: self.rHELLO = "anion"
self._domain_init(dstring=self.domainTag)
# DOMAIN DEPENDENT SEMANTICS:
self.slot_vocab= dict.fromkeys(self.USER_REQUESTABLE,'')
# FIXME: define slot specific language - for requests
#---------------------------------------------------------------------------------------------------
exit("THESE NEED FIXING FOR THIS DOMAIN")
self.slot_vocab["addr"] = "(address)"
self.slot_vocab["pricerange"] = "(price|cost)(\ ?range)*"
self.slot_vocab["area"] = "(area|location)"
self.slot_vocab["near"] = "(near)"
self.slot_vocab["kind"] = "(kind)"
self.slot_vocab["stars"] = "(stars|rating)"
self.slot_vocab["phone"] = "(phone(\ number)*)"
self.slot_vocab["postcode"] = "(postcode)"
self.slot_vocab["hasinternet"] = "(internet)"
self.slot_vocab["hasparking"] = "(parking|car(\ ?park))"
self.slot_vocab["name"] = "(name)"
#---------------------------------------------------------------------------------------------------
# Generate regular expressions for requests:
self._set_request_regex()
# FIXME: many value have synonyms -- deal with this here:
self._set_value_synonyms() # At end of file - this can grow very long
self._set_inform_regex()
def _set_request_regex(self):
"""
"""
self.request_regex = dict.fromkeys(self.USER_REQUESTABLE)
for slot in self.request_regex.keys():
# FIXME: write domain dependent expressions to detext request acts
self.request_regex[slot] = self.rREQUEST+"\ "+self.slot_vocab[slot]
self.request_regex[slot] += "|(?<!"+self.DONTCAREWHAT+")(?<!want\ )"+self.IT+"\ "+self.slot_vocab[slot]
self.request_regex[slot] += "|(?<!"+self.DONTCARE+")"+self.WHAT+"\ "+self.slot_vocab[slot]
# FIXME: Handcrafted extra rules as required on a slot to slot basis:
self.request_regex["pricerange"] += "|(how\ much\ is\ it)"
self.request_regex["food"] += "|(what\ (type\ of\ )*food)"
def _set_inform_regex(self):
"""
"""
self.inform_regex = dict.fromkeys(self.USER_INFORMABLE)
for slot in self.inform_regex.keys():
self.inform_regex[slot] = {}
for value in self.slot_values[slot].keys():
self.inform_regex[slot][value] = self.rINFORM+"\ "+self.slot_values[slot][value]
self.inform_regex[slot][value] += "|"+self.slot_values[slot][value] + self.WBG
self.inform_regex[slot][value] += "|a\ (laptop\ with(\ a)*\ )*" +self.slot_values[slot][value]
self.inform_regex[slot][value] += "|((what|about|which)(\ (it\'*s*|the))*)\ "+slot+"(?!\ (is\ it))"
self.inform_regex[slot][value] += "|(\ |^)"+self.slot_values[slot][value] + "(\ (please|and))*"
# FIXME: Handcrafted extra rules as required on a slot to slot basis:
# FIXME: value independent rules:
if slot == "pricerange":
self.inform_regex[slot]['dontcare'] = "any\ (price|price(\ |-)*range)"
self.inform_regex[slot]['dontcare'] +=\
"|(don\'*t|do\ not)\ care\ (what|which|about|for)\ (the\ )*(price|price(\ |-)*range)"
def _generic_request(self,obs,slot):
"""
"""
if self._check(re.search(self.request_regex[slot],obs, re.I)):
self.semanticActs.append('request('+slot+')')
def _generic_inform(self,obs,slot):
"""
"""
DETECTED_SLOT_INTENT = False
for value in self.slot_values[slot].keys():
if self._check(re.search(self.inform_regex[slot][value],obs, re.I)):
#FIXME: Think easier to parse here for "dont want" and "dont care" - else we're playing "WACK A MOLE!"
ADD_SLOTeqVALUE = True
# Deal with -- DONTWANT --:
if self._check(re.search(self.rINFORM_DONTWANT+"\ "+self.slot_values[slot][value], obs, re.I)):
self.semanticActs.append('inform('+slot+'!='+value+')') #TODO - is this valid?
ADD_SLOTeqVALUE = False
# Deal with -- DONTCARE --:
if self._check(re.search(self.rINFORM_DONTCARE+"\ "+slot, obs, re.I)) and not DETECTED_SLOT_INTENT:
self.semanticActs.append('inform('+slot+'=dontcare)')
ADD_SLOTeqVALUE = False
DETECTED_SLOT_INTENT = True
# Deal with -- REQUESTS --: (may not be required...)
#TODO? - maybe just filter at end, so that inform(X) and request(X) can not both be there?
if ADD_SLOTeqVALUE and not DETECTED_SLOT_INTENT:
self.semanticActs.append('inform('+slot+'='+value+')')
def _decode_request(self, obs):
"""
"""
# if a slot needs its own code, then add it to this list and write code to deal with it differently
DO_DIFFERENTLY= [] #FIXME
for slot in self.USER_REQUESTABLE:
if slot not in DO_DIFFERENTLY:
self._generic_request(obs,slot)
# Domain independent requests:
self._domain_independent_requests(obs)
def _decode_inform(self, obs):
"""
"""
# if a slot needs its own code, then add it to this list and write code to deal with it differently
DO_DIFFERENTLY= [] #FIXME
for slot in self.USER_INFORMABLE:
if slot not in DO_DIFFERENTLY:
self._generic_inform(obs,slot)
# Check other statements that use context
self._contextual_inform(obs)
def _decode_type(self,obs):
"""
"""
# This is pretty ordinary - will just keyword spot for now since type really serves no point at all in our system
if self._check(re.search(self.inform_type_regex,obs, re.I)):
self.semanticActs.append('inform(type='+self.domains_type+')')
def _decode_confirm(self, obs):
"""
"""
#TODO?
pass
def _set_value_synonyms(self):
"""Starts like:
self.slot_values[slot] = {value:"("+str(value)+")" for value in domain_ontology["informable"][slot]}
# Can add regular expressions/terms to be recognised manually:
"""
#FIXME:
#---------------------------------------------------------------------------------------------------
exit("THESE NEED FIXING FOR THIS DOMAIN")
# TYPE:
self.inform_type_regex = r"(hotel|motel)"
# SLOT: area
slot = 'area'
# {u'west': '(west)', u'east': '(east)', u'north': '(north)', u'south': '(south)', u'centre': '(centre)'}
self.slot_values[slot]['north'] = "((the)\ )*(north)"
self.slot_values[slot]['east'] = "((the)\ )*(east)"
self.slot_values[slot]['west'] = "((the)\ )*(west)"
self.slot_values[slot]['south'] = "((the)\ )*(south)"
self.slot_values[slot]['dontcare'] = "any(\ )*(area|location|place|where)"
# SLOT: pricerange
slot = 'pricerange'
# {u'moderate': '(moderate)', u'budget': '(budget)', u'expensive': '(expensive)'}
self.slot_values[slot]['moderate'] = "(to\ be\ |any\ )*(moderate|moderately\ priced|mid|middle|average)"
self.slot_values[slot]['moderate']+="(?!(\ )*weight)"
self.slot_values[slot]['cheap'] = "(to\ be\ |any\ )*(budget|cheap|bargin|cheapest|low\ cost)"
self.slot_values[slot]['expensive'] = "(to\ be\ |any\ )*(expensive|expensively|dear|costly|pricey)"
self.slot_values[slot]['dontcare'] = "any\ (price|price(\ |-)*range)"
# SLOT: near
# rely on ontology for now
# SLOT: hasparking
slot = 'hasparking'
self.slot_values[slot]['0'] = ""
self.slot_values[slot]['1'] = ""
self.slot_values[slot]['dontcare'] = ""
# SLOT: stars
slot = 'stars'
self.slot_values[slot]['0'] = ""
self.slot_values[slot]['4'] = ""
self.slot_values[slot]['3'] = ""
self.slot_values[slot]['2'] = ""
self.slot_values[slot]['dontcare'] = ""
# SLOT: kind
slot = 'kind'
#---------------------------------------------------------------------------------------------------
#END OF FILE
|
py | b40ad97ea7136af2da9174ec14f1d5ce9290f1bb | import pytest
from dagster import DagsterInvalidConfigError, ModeDefinition, execute_pipeline, pipeline, solid
from docs_snippets.overview.configuration.config_map_example import unsigned_s3_session
from docs_snippets.overview.configuration.configured_example import east_unsigned_s3_session
from docs_snippets.overview.configuration.example import (
run_bad_example,
run_good_example,
run_other_bad_example,
)
def test_config_example():
assert run_good_example().success
with pytest.raises(DagsterInvalidConfigError):
run_bad_example()
with pytest.raises(DagsterInvalidConfigError):
run_other_bad_example()
def execute_pipeline_with_resource_def(resource_def, run_config=None):
@solid(required_resource_keys={"key"})
def a_solid(_):
pass
@pipeline(mode_defs=[ModeDefinition(resource_defs={"key": resource_def})])
def a_pipeline():
a_solid()
res = execute_pipeline(a_pipeline, run_config=run_config)
assert res.success
def test_configured_example():
execute_pipeline_with_resource_def(east_unsigned_s3_session)
def test_config_map_example():
execute_pipeline_with_resource_def(
unsigned_s3_session, run_config={"resources": {"key": {"config": {"region": "us-west-1"}}}}
)
|
py | b40ada04b306788872559e02244e0784bdae7ae8 | """Constants for the Shelly integration."""
COAP_CONTEXT = "coap_context"
DATA_CONFIG_ENTRY = "config_entry"
DOMAIN = "shelly"
|
py | b40ada570c7533c0c2010cb5cd241846aa7319cb |
import re, string
## 0-4 extra characters at the end of words, for the definite
## suffixes and inflectional endings
prapa = "[a-zA-Z0-9çÇëË_-]{0,4}"
## stems that are written with C/c instead of an initial Ç/ç
## the suffixes are preserved, so stems with alternative groups using | are not accepted
## cafk, caj, cajnik, cibuk, cift, cimk, cmim, co, corap, cudi, cun
pa_c_nis = "afk|aj|ajnik|akerdis|akmak|allm|arcaf|arçaf|art|ati|" + \
"ibuk|ift|imk|izme|" + \
"mend|mim|" + \
"o|orap|orodit|" + \
"udi|un"
## function for c -> ç substitutions
def korrigjo_c(text):
    ## initialization
t = text ; c_subs = 0
## ç'kemi, ç'ke, ç'keni,
t, c = re.subn(fr"(\b)(c|c'|ç|q|q')(ke)({prapa})(\b)", r"ç'\3\4", t) ; c_subs += c
## Ç'kemi, Ç'ke, Ç'keni,
t, c = re.subn(fr"(\b)(C|C'|Ç|Q|Q')(ke)({prapa})(\b)", r"Ç'\3\4", t) ; c_subs += c
## cka -> çka ; c'kam, ckam -> ç'kam ; c'ka(në) -> ç'ka(në)
t, c = re.subn(fr"(\b)(c|c'|ç|q|q')(ka)({prapa})(\b)", r"ç'\3\4", t) ; c_subs += c
## Cka -> Çka ; C'kam, Ckam -> Ç'kam ; C'ka(në) -> Ç'ka(në)
t, c = re.subn(fr"(\b)(C|C'|Ç|Q|Q')(ka)({prapa})(\b)", r"Ç'\3\4", t) ; c_subs += c
## çfarë
t, c = re.subn(fr"(\b)(c|ç|q)(far)(e|ë)?(\b)", r"çfarë", t) ; c_subs += c
## Çfarë
t, c = re.subn(fr"(\b)(C|Ç|Q)(far)(e|ë)?(\b)", r"Çfarë", t) ; c_subs += c
## çupë
t, c = re.subn(fr"(\b)(c|ç|q)(up)(e|ë)?(\b)", r"çupë", t) ; c_subs += c
## Çupë
t, c = re.subn(fr"(\b)(C|Ç|Q)(up)(e|ë)?(\b)", r"Çupë", t) ; c_subs += c
## çikë
t, c = re.subn(fr"(\b)(c|ç|q)(ik)(e|ë)?(\b)", r"çikë", t) ; c_subs += c
## Çikë
t, c = re.subn(fr"(\b)(C|Ç|Q)(ik)(e|ë)?(\b)", r"Çikë", t) ; c_subs += c
    ## words written with C/c instead of an initial Ç/ç - caj -> çaj
t, c = re.subn(fr"(\b)(c)({pa_c_nis})({prapa})(\b)", r"ç\3\4", t) ; c_subs += c
t, c = re.subn(fr"(\b)(C)({pa_c_nis})({prapa})(\b)", r"Ç\3\4", t) ; c_subs += c
    ## for the exact word, allowing the (e|ë) alternative in its written form
# t, c = re.subn(fr"(\b)(C)({pa_c_nis})(\b)", r"Ç\3", t) ; c_subs += c
return (t, c_subs)
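## illustrative example of the stem rule above (assumed behaviour):
## korrigjo_c("dua nje caj") should return ("dua nje çaj", 1),
## i.e. the corrected text together with the number of substitutions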
|
py | b40adc369ea288c0bccf64e5179de43d24ac9e33 | #! /usr/bin/python
__author__="Alexander Rush <[email protected]>"
__date__ ="$Sep 12, 2012"
# import sys and json modules
import sys, json
"""
Count rule frequencies in a binarized CFG.
"""
class Counts:
def __init__(self):
self.unary = {}
self.binary = {}
self.nonterm = {}
def show(self):
for symbol, count in self.nonterm.iteritems():
print count, "NONTERMINAL", symbol
for (sym, word), count in self.unary.iteritems():
print count, "UNARYRULE", sym, word
for (sym, y1, y2), count in self.binary.iteritems():
print count, "BINARYRULE", sym, y1, y2
def count(self, tree):
"""
Count the frequencies of non-terminals and rules in the tree.
"""
if isinstance(tree, basestring): return
# Count the non-terminal symbol.
symbol = tree[0]
self.nonterm.setdefault(symbol, 0)
self.nonterm[symbol] += 1
if len(tree) == 3:
# It is a binary rule.
y1, y2 = (tree[1][0], tree[2][0])
key = (symbol, y1, y2)
self.binary.setdefault(key, 0)
self.binary[(symbol, y1, y2)] += 1
# Recursively count the children.
self.count(tree[1])
self.count(tree[2])
elif len(tree) == 2:
# It is a unary rule.
y1 = tree[1]
key = (symbol, y1)
self.unary.setdefault(key, 0)
self.unary[key] += 1
def main(parse_file):
counter = Counts()
for l in open(parse_file):
t = json.loads(l)
counter.count(t)
counter.show()
def usage():
sys.stderr.write("""
Usage: python count_cfg_freq.py [tree_file]
Print the counts of a corpus of trees.\n""")
if __name__ == "__main__":
if len(sys.argv) != 2:
usage()
sys.exit(1)
main(sys.argv[1])
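# Illustrative input (hypothetical): each line of [tree_file] is a JSON-encoded
# binarized tree such as
#   ["S", ["NP", ["DET", "the"], ["NOUN", "dog"]], ["VP", ["VERB", "barks"]]]
# for which Counts.count() records NONTERMINAL, UNARYRULE and BINARYRULE counts.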
|
py | b40add83647b432e916f8e59a06cc867c27f0bb8 | #!/usr/bin/env python3
#
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache v2.0 license.
#
import re
import os
import sys
import json
import time
import copy
import html
import socket
import textwrap
import argparse
import requests
from http import HTTPStatus
interface_fields = [
"PCRCErrors",
"ECRCErrors",
"TXStompedECRC",
"RXStompedECRC",
"NonCRCTransientErrors",
"LLRRecovery",
"PacketDeadlineDiscards",
"MarkedECN",
"ReceivedECN",
"LinkNTE",
"AKEYViolations",
"TotalTransReqs",
"TotalTransReqBytes",
"TotalRecvReqs",
"TotalRecvReqBytes",
"TotalTransResps",
"TotalTransRespBytes",
"TotalRecvResps",
"TotalRecvRespBytes",
]
command_choices = [ 'up', 'down', 'reset', 'metrics', 'query' ]
endpoint_help = textwrap.dedent("""\
endpoint - node/port
node = Gen-Z fabric manager process (MP) hostname or IP address
port = Gen-Z interface port number""")
command_help = textwrap.dedent("""\
up - bring the port to the Enabled State
down - bring the port to the Disabled State
reset - bring the port down and then up
metrics - clear the metrics
query - dump port information """)
# -------------------------------------------------------------------------------------------------
def rest(f, url, data):
REST_RETRIES = 3
headers = { "Accept": "application/json", "Content-Type": "application/json" }
r = None
reply = None
retries = 0
status = HTTPStatus.REQUEST_TIMEOUT
while (retries < REST_RETRIES) and (status == HTTPStatus.REQUEST_TIMEOUT):
retries += 1
try:
r = f(url, headers=headers, data=data)
except requests.exceptions.Timeout as e:
status = HTTPStatus.REQUEST_TIMEOUT
except requests.exceptions.HTTPError as e:
status = e.response.status_code
except requests.exceptions.ConnectionError as e:
status = HTTPStatus.SERVICE_UNAVAILABLE
except requests.exceptions.RequestException as e:
status = e.response.status_code
except:
status = HTTPStatus.BAD_REQUEST
else:
status = r.status_code
if not r:
print('REST request failed with code {}'.format(status))
elif status//100 != 2:
print('REST request returned error code {}'.format(status))
elif r.text and len(r.text) > 0:
reply = r.text
if reply and reply.startswith('<pre>'): reply = reply[5:-6]
reply = json.loads(reply)
return status//100 == 2, reply
# -------------------------------------------------------------------------------------------------
def rest_get(server, attribute):
if attribute[0] == '/': attribute = attribute[1:]
url = 'http://{server}/{attribute}'.format(server=server, attribute=attribute)
return rest(requests.get, url, None)
def rest_patch(server, attribute, values):
if attribute[0] == '/': attribute = attribute[1:]
url = 'http://{server}/{attribute}'.format(server=server, attribute=attribute)
data = json.dumps(values)
return rest(requests.patch, url, data)
# ----------------------------------------------------------------------------------------------------------------------
def check_values(data, values):
#
# Traverse the local data struct and replace the appropriate element with new entries.
#
if values:
for key in values:
if type(data[key]) is dict and type(values[key]) is dict:
if not check_values(data[key], values[key]):
return False
elif data[key] != values[key]:
return False
return True
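# Illustrative: check_values() treats `values` as a (possibly nested) subset of
# `data`, for example
#   check_values({'Status': {'State': 'Enabled', 'Health': 'OK'}},
#                {'Status': {'State': 'Enabled'}})   # True
#   check_values({'Status': {'State': 'Disabled'}},
#                {'Status': {'State': 'Enabled'}})   # False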
def wait_for(server, attribute, values, retries):
port_ready = False
while not port_ready and retries >= 0:
retries -= 1
time.sleep(1)
status, data = rest_get(server, attribute)
if not status:
return False
port_ready = check_values(data, values)
return port_ready
# ----------------------------------------------------------------------------------------------------------------------
def port_metrics(args, attribute):
server = args['server']
interface_enabled = { 'InterfaceState' : 'Enabled' }
interface_disabled = { 'InterfaceState' : 'Disabled' }
#
# Clear the port metrics. We do this by disabling and then enabling the interface.
#
status1, _ = rest_patch(server, attribute, interface_disabled)
status2, _ = rest_patch(server, attribute, interface_enabled)
return status1 and status2
# ----------------------------------------------------------------------------------------------------------------------
def port_reset(args, attribute):
if not port_down(args, attribute):
return False
elif not port_up(args, attribute):
return False
else:
return True
# ----------------------------------------------------------------------------------------------------------------------
def port_up(args, attribute):
endpoint = args['endpoint']
server = args['server']
force = args['force']
#
# Fetch the port attribute.
#
status, data = rest_get(server, attribute)
if not status:
print('can\'t get the attribute for {}'.format(endpoint))
sys.exit(1)
port_state = data['Status']['State']
port_health = data['Status']['Health']
link_state = data['LinkState']
if_state = data['InterfaceState']
#
# Validate the state before proceeding.
#
if port_state != 'Disabled' or port_health != 'OK' or link_state != 'Disabled' or if_state != 'Disabled':
if not force:
print('{} is not in a proper state for bringing up'.format(endpoint))
print(' Status = {}/{}'.format(port_state, port_health))
print(' LinkState = {}'.format(link_state))
print(' InterfaceState = {}'.format(if_state))
return False
#
# Force the interface down.
#
if not port_down(args, attribute):
return False
#
# Start training.
#
values = { 'LinkState' : 'Enabled' }
status, _ = rest_patch(server, attribute, values)
if not status:
print('{} PATCH of LinkState failed'.format(endpoint))
return False
#
# Wait for training to complete.
#
if not wait_for(server, attribute, {'Status' : { 'State' : 'StandbyOffline', 'Health' : 'OK' }}, 5):
print('{} did not transition to StandbyOffline'.format(endpoint))
return False
#
# Set the interface state to enabled.
#
values = { 'InterfaceState' : 'Enabled' }
status, _ = rest_patch(server, attribute, values)
if not status:
print('{} PATCH of InterfaceState failed'.format(endpoint))
return False
#
# Wait for the interface to come ready.
#
if not wait_for(server, attribute, {'InterfaceState' : 'Enabled' }, 5):
print('{} did not transition to Enabled'.format(endpoint))
return True
# ----------------------------------------------------------------------------------------------------------------------
def port_down(args, attribute):
endpoint = args['endpoint']
server = args['server']
force = args['force']
#
# Fetch the port attribute.
#
status, data = rest_get(server, attribute)
if not data:
print('can\'t get the attribute for {}'.format(endpoint))
sys.exit(1)
port_state = data['Status']['State']
port_health = data['Status']['Health']
link_state = data['LinkState']
if_state = data['InterfaceState']
#
# Validate the state before proceeding.
#
if port_state not in ['StandbyOffline', 'Enabled'] or port_health != 'OK' or link_state != 'Enabled' or if_state != 'Enabled':
if not force:
print('{} is not in a proper state for bringing down'.format(endpoint))
print(' Status = {}/{}'.format(port_state, port_health))
print(' LinkState = {}'.format(link_state))
print(' InterfaceState = {}'.format(if_state))
return False
#
# Reset the port
#
values = { 'LinkState' : 'Disabled', 'InterfaceState' : 'Disabled' }
status, _ = rest_patch(server, attribute, values)
if not status:
print('{} PATCH failed'.format(endpoint))
return False
#
# Wait for the port state change to take affect.
#
if not wait_for(server, attribute, values, 5):
        print('{} did not transition to Disabled'.format(endpoint))
return False
#
# Reset the state
#
values = { 'Status' : { 'State' : 'Disabled', 'Health' : 'OK' }}
status, _ = rest_patch(server, attribute, values)
if not status:
print('{} PATCH failed'.format(endpoint))
return False
#
# Wait for the port state change to take affect.
#
if not wait_for(server, attribute, values, 5):
print('{} did not transition to Disabled'.format(endpoint))
return False
return True
# ----------------------------------------------------------------------------------------------------------------------
def port_query(args, attribute):
endpoint = args['endpoint']
server = args['server']
#
# Get the port attribute.
#
status, port_data = rest_get(server, attribute)
if not status:
print('{} GET failed'.format(endpoint))
return False
metrics_attribute = port_data['Metrics']['@odata.id']
metrics_attribute = html.unescape(metrics_attribute)
metrics_attribute = re.sub('<[^>]*>', '', metrics_attribute)
#
# Get the metrics attribute.
#
status, metrics_data = rest_get(server, metrics_attribute)
if not status:
print('{} GET failed'.format(endpoint))
return False
oem_data = port_data['Oem']['Hpe']
oem_metrics = metrics_data['Oem']['Hpe']['Metrics']
#
# For enabled nodes, all the fields should be valid.
#
print()
print('{}:'.format(endpoint))
print()
print(' State/Health {}/{}'.format(port_data['Status']['State'], port_data['Status']['Health']))
print(' Link State {}'.format(port_data['LinkState']))
print(' Interface State {}'.format(port_data['InterfaceState']))
print(' Remote neighbor 0x{:08X}/{}'.format(oem_data['RemoteComponentID']['UID'], oem_data['RemoteComponentID']['Port']))
print()
print()
#
# Format the metrics.
#
layout = ' {:<24} {:>20}'
print('Interface Statistics:')
interface_metrics = metrics_data['Gen-Z']
for s in interface_fields:
print(layout.format(s, interface_metrics[s]))
print()
print()
print()
#
# Header and value format for counters and bytes.
#
layout = '{:<12} {:>20} {:>20} {:>20} {:>20} {:>20}'
#
# Port Requestor/Responder statistics.
#
try:
request_metrics = oem_metrics['Request']
response_metrics = oem_metrics['Response']
print('Requestor/Responder Interface Statistics:')
print()
print(layout.format('', 'Xmit Count', 'Xmit Bytes', 'Recv Count', 'Recv Bytes', ''))
print(layout.format('Requests',
request_metrics['XmitCount'],
request_metrics['XmitBytes'],
request_metrics['RecvCount'],
request_metrics['RecvBytes'],
''))
print(layout.format('Responses',
response_metrics['XmitCount'],
response_metrics['XmitBytes'],
response_metrics['RecvCount'],
response_metrics['RecvBytes'],
''))
print()
print()
print()
except:
pass
#
# Port VC statistics.
#
try:
x = oem_metrics['VC0']['XmitCount'],
print('Packet Relay Interface Statistics:')
print()
print(layout.format('', 'Xmit Packets', 'Xmit Bytes', 'Recv Packets', 'Recv Bytes', 'Occupancy'))
for vc in range(16):
vc_key = 'VC{}'.format(vc)
print(layout.format(vc_key,
oem_metrics[vc_key]['XmitCount'],
oem_metrics[vc_key]['XmitBytes'],
oem_metrics[vc_key]['RecvCount'],
oem_metrics[vc_key]['RecvBytes'],
oem_metrics[vc_key]['Occupancy']))
except:
pass
return True
# ----------------------------------------------------------------------------------------------------------------------
def resolve(endpoint):
#
# An endpoint is of the form <name>:<int>/<int>. The :<int> is optional.
#
name,delimiter,port_number = endpoint.partition('/')
if not port_number:
print('invalid endpoint [{}] specified'.format(endpoint))
sys.exit(1)
hostname,delimiter,hostport = name.partition(':')
if not hostport:
hostport = '8081'
try:
hostname = socket.gethostbyname(hostname)
except:
print('can\'t resolve the node address {}', name)
sys.exit(0)
return '{}:{}'.format(hostname, hostport), int(port_number)
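# Illustrative (host names and addresses are hypothetical): resolve('mp-node/12')
# looks up mp-node, defaults the management port to 8081 and returns something
# like ('10.0.0.5:8081', 12), while resolve('mp-node:9090/3') keeps the explicit
# port and returns ('10.0.0.5:9090', 3).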
if __name__ == '__main__':
#
# Get the command line parameters.
#
parser = argparse.ArgumentParser(description='port state manipulator', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-f', '--force', help='force a transition', required=False, action='store_true')
parser.add_argument('endpoint', help=endpoint_help, metavar='Endpoint')
parser.add_argument('command', help=command_help, nargs='*', metavar='Command', choices=command_choices)
args = vars(parser.parse_args())
args['server'], port = resolve(args['endpoint'])
#
# Fetch the chassis attribute in order to determine the type of node.
#
status, chassis = rest_get(args['server'], '/redfish/v1/Chassis/1')
    if chassis is None:
print('{} GET chassis failed'.format(args['endpoint']))
sys.exit(1)
#
# Determine the attribute from the port number.
#
node_type = chassis['Oem']['Hpe']['NodeType']
if node_type == 'Switch':
attribute = 'redfish/v1/Fabrics/GenZ/Switches/Switch{}/Ports/{}'.format(1 + port//60, port%60)
elif node_type == 'Compute':
attribute = 'redfish/v1/Systems/1/FabricAdapters/1/Ports/{}'.format(port)
elif node_type == 'Memory':
attribute = 'redfish/v1/Fabrics/GenZ/Switches/Switch1/Ports/{}'.format(port)
    elif node_type == 'IO':
        attribute = 'redfish/v1/Systems/1/FabricAdapters/1/Ports/{}'.format(port)
    else:
        print('{} unknown node type {}'.format(args['endpoint'], node_type))
        sys.exit(1)
#
# Do what the user requested.
#
for command in args['command']:
if command == 'metrics':
port_metrics(args, attribute)
elif command == 'up':
port_up(args, attribute)
elif command == 'down':
port_down(args, attribute)
elif command == 'reset':
port_reset(args, attribute)
elif command == 'query':
port_query(args, attribute)
sys.exit(0)
|
py | b40ade91f2524678c7dd3348b1cb21af3926d53a | # -*- coding: utf-8 -*-
import numpy as np
from ..Line import Line
class Square(Line):
def __init__(self, side: float = 10.0, origin=np.array([0, 0]), *args, **kwargs):
self.side = side
self.origin = origin
kwargs["points"] = self._points
super().__init__(*args, **kwargs)
@property
def _points(self):
return np.array(
[
[self.origin[0], self.origin[1]],
[self.origin[0] + self.side, self.origin[1]],
[self.origin[0] + self.side, self.origin[1] + self.side],
[self.origin[0], self.origin[1] + self.side],
[self.origin[0], self.origin[1]],
]
)
def generate_gcode(self):
self.points = self._points
super().generate_gcode()
@property
def _cls(self):
return self.__class__.__name__
def __repr__(self):
return f"{self._cls}<O={self.origin}, L={self.side}>"
|
py | b40adf4b393ebd64823dd0e59ce60ee5aada5f9d | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 10:22:31 2019
@author: snoone
"""
### extract maximum temperature (tmax) from TD-13 large csv files and rename columns to IFF
import os
import pandas as pd
os.chdir ("C:/Users/snoone/Dropbox/PYTHON_TRAINING/NUS_China_weather/")
df = pd.read_csv("data_file_test.csv", usecols = ['year', 'month',"day"
,"longitude",
"latitude","elevation",
"station", "tmax"])
# add a Source_ID column and populate it with the source id
df["Source_ID"]="Source_ID"
df["Source_ID"] = 324
## add the IFF columns and rename the existing df columns to match the IFF format
df["Alias_station_name"]="Null"
df["Source_QC_flag"]="Null"
df['Original_observed_value']="Null"
df['Original_observed_value_units']="Null"
df['Gravity_corrected_by_source']='Null'
df['Homogenization_corrected_by_source']='Null'
df['Report_type_code']='Null'
df['Minute']='00'
df["Hour"]="0"
df = df.rename(columns=({'tmax':'Observed_value'}))
df['Station_ID']=df["station"]
df['Station_name']=df["station"]
df['Elevation']=df["elevation"]
df['Latitude']=df["latitude"]
df['Longitude']=df["longitude"]
df['Month']=df["month"]
df['Day']=df["day"]
df['Year']=df["year"]
# round converted values to 1 decimal place if needed
df['Observed_value']= round(df['Observed_value'],1)
##reorder the columns headers
df = df[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Gravity_corrected_by_source",
"Homogenization_corrected_by_source"]]
df.to_csv("output.csv",index=False)
##### separate the large csv file by station ID and save as IFF named Station_Id+variable+Source_ID
import csv
with open('output.csv') as fin:
csvin = csv.DictReader(fin)
os.chdir ("C:/Users/snoone/Dropbox/PYTHON_TRAINING/NUS_China_weather/output/max_temp/324")
#csvin.columns = [x.replace(' ', '_') for x in csvin.columns]
# Category -> open file lookup
outputs = {}
for row in csvin:
cat = row['Station_ID']
# Open a new file and write the header
if cat not in outputs:
fout = open ('{}_maximum_temperature_324.psv'.format(cat), "w", newline = "")
dw = csv.DictWriter(fout, fieldnames=csvin.fieldnames,delimiter='|')
dw.writeheader()
outputs[cat] = fout, dw
# Always write the row
outputs[cat][1].writerow(row)
# Close all the files
for fout, _ in outputs.values():
fout.close() |
py | b40adfa8b4a1c1b6d1f9278dd1c1bf77bff2dea7 | __all__ = ['ttypes', 'constants', 'SpotOnDocker']
|
py | b40ae0ecfcfcaf2ad56f0fc063a01f96d8c3afc3 | import torch
from torch.autograd import Variable
from torch import nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from pyCHAMP.solver.solver_base import SolverBase
import matplotlib.pyplot as plt
import time
class QMC_DataSet(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
return self.data[index, :]
class QMCLoss(nn.Module):
def __init__(self, wf, method='energy'):
super(QMCLoss, self).__init__()
self.wf = wf
self.method = method
def forward(self, out, pos):
if self.method == 'variance':
loss = self.wf.variance(pos)
elif self.method == 'energy':
loss = self.wf.energy(pos)
return loss
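# Illustrative use of QMCLoss (note that `out` is accepted to match the usual
# criterion(out, target) call signature, but the loss is computed from `pos` only):
#   criterion = QMCLoss(wf, method='energy')
#   loss = criterion(out, pos)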
class NN(SolverBase):
def __init__(self, wf=None, sampler=None, optimizer=None):
SolverBase.__init__(self, wf, sampler, None)
self.opt = optim.SGD(self.wf.model.parameters(),
lr=0.005, momentum=0.9, weight_decay=0.001)
self.batchsize = 32
def sample(self):
        pos = torch.tensor(self.sampler.generate(self.wf.pdf))
pos.requires_grad = True
return pos
def train(self, nepoch):
pos = self.sample()
pos = torch.rand(self.sampler.nwalkers, 3)
dataset = QMC_DataSet(pos)
dataloader = DataLoader(dataset, batch_size=self.batchsize)
qmc_loss = QMCLoss(self.wf, method='variance')
cumulative_loss = []
for n in range(nepoch):
cumulative_loss.append(0)
for data in dataloader:
data = Variable(data).float()
out = self.wf.model(data)
self.wf.model = self.wf.model.eval()
loss = qmc_loss(out, data)
cumulative_loss[n] += loss
self.wf.model = self.wf.model.train()
self.opt.zero_grad()
loss.backward()
self.opt.step()
print('epoch %d loss %f' % (n, cumulative_loss[n]))
pos = self.sample()
dataloader.dataset.data = pos.T
plt.plot(cumulative_loss)
plt.show()
class NN4PYSCF(SolverBase):
def __init__(self, wf=None, sampler=None, optimizer=None):
SolverBase.__init__(self, wf, sampler, None)
self.opt = optim.SGD(self.wf.parameters(), lr=0.005,
momentum=0.9, weight_decay=0.001)
self.batchsize = 32
def sample(self):
t0 = time.time()
pos = self.sampler.generate(self.wf.pdf)
print("Sampling on ", pos.shape, "done in %f" % (time.time()-t0))
return pos
def train(self, nepoch):
pos = self.sample()
dataset = QMC_DataSet(pos)
dataloader = DataLoader(dataset, batch_size=self.batchsize)
qmc_loss = QMCLoss(self.wf, method='variance')
cumulative_loss = []
for n in range(nepoch):
print('\n === epoch %d' % n)
cumulative_loss.append(0)
for data in dataloader:
print("\n data ", data.shape)
data = Variable(data).float()
t0 = time.time()
out = self.wf(data)
print("\t WF done in %f" % (time.time()-t0))
t0 = time.time()
loss = qmc_loss(out, data)
cumulative_loss[n] += loss
print("\t Loss (%f) done in %f" % (loss, time.time()-t0))
self.wf = self.wf.train()
self.opt.zero_grad()
t0 = time.time()
loss.backward()
print("\t Backward done in %f" % (time.time()-t0))
t0 = time.time()
self.opt.step()
print("\t opt done in %f" % (time.time()-t0))
print('=== epoch %d loss %f \n' % (n, cumulative_loss[n]))
pos = self.sample()
dataloader.dataset.data = pos
plt.plot(cumulative_loss)
plt.show()
if __name__ == "__main__":
# from pyCHAMP.solver.vmc import VMC
# from pyCHAMP.wavefunction.neural_wf_base import NeuralWF, WaveNet
from pyCHAMP.wavefunction.neural_pyscf_wf_base import NEURAL_PYSCF_WF
from pyCHAMP.sampler.metropolis import Metropolis_TORCH as Metropolis
# class HarmOsc3D(NeuralWF):
# def __init__(self,model,nelec,ndim):
# NeuralWF.__init__(self, model, nelec, ndim)
# def nuclear_potential(self,pos):
# return torch.sum(0.5*pos**2,1)
# def electronic_potential(self,pos):
# return 0
# wf = HarmOsc3D(model=WaveNet,nelec=1, ndim=3)
wf = NEURAL_PYSCF_WF(atom='O 0 0 0; H 0 1 0; H 0 0 1',
basis='dzp',
active_space=(2, 2))
sampler = Metropolis(nwalkers=64, nstep=10,
step_size=3, nelec=wf.nelec,
ndim=3, domain={'min': -5, 'max': 5})
nn = NN4PYSCF(wf=wf, sampler=sampler)
pos = nn.sample()
dataset = QMC_DataSet(pos)
dataloader = DataLoader(dataset, batch_size=nn.batchsize)
qmc_loss = QMCLoss(nn.wf, method='variance')
|
py | b40ae1ce09e8e60f8657e10fb97ff41084bbcfae |
"""
Collection of scripts used by the LHiCo on pre treatment CTD casts.
"""
import numpy as np
import gsw
import numpy.ma as ma
import pandas as pd
from scipy import signal as sign
from scipy.stats import linregress
from ctd.processing import _rolling_window
from pandas_flavor import register_dataframe_method, register_series_method
@register_series_method
@register_dataframe_method
def clean_invalid(df, flag='flag'):
""" Remove rows where flag is True. Is `flag` is not in the columns, return
the same dataframe.
Parameters
----------
flag : str
Column used as flag. Default if `flag`.
"""
df_new = df.copy()
_meta = df._metadata
if flag in df.columns:
filtr = (df.flag == False)
df_new = df_new[filtr]
df_new._metadata = _meta
return df_new
##############################################################################
@register_series_method
@register_dataframe_method
def loopedit2(df):
"""
Remove values with negative pressure gradient.
Credits
-------
Function extracted from an old OceanLab version:
https://github.com/iuryt/OceanLab
"""
df_new = df.copy()
try:
flag = df_new['dz/dtM'].values>0
df_new = df_new.iloc[flag,:]
except:
flag = np.hstack([1,np.diff(df_new.index.values)])>0
df_new = df_new.iloc[flag,:]
return df_new
##############################################################################
@register_series_method
@register_dataframe_method
def longest_pressure(df, thresh=2, indexname='Pressure [dbar]'):
"""Separates the dataframe based into pieces based on a pressure gradient
threshold and select the longest one.
Parameters
----------
data : pandas DataFrame
Pandas dataframe with the ctd data.
Notice the index must be the pressure values.
thresh : integer or float
gradient threshold used to separate the dataframe
indexname : str
The cast must be indexed by pressure, so this flag can be used to make sure that is the case.
Returns
-------
pandas DataFrame
DataFrame with the data selected with the longest pressure vector
"""
    # TODO: add a flag to choose the index, whether it is pressure itself or a pressure column
df_new = df.copy()
    # override the 'thresh' argument based on the maximum sampled depth
if df.index.max() > 1000:
thresh = 4
else:
thresh = 2
# -- find cut positions where the pressure surpasses a given threshold -- #
i = np.where(abs(np.gradient(df_new.index))>thresh)[0]
df_new.iloc[i] = np.nan # substitute values in cut positions with nan
# -- identify positions with nan and give a integer id for each section -- #
df_new['group'] = df_new.isnull().all(axis=1).cumsum()
groups = df_new.groupby('group')
gcount = groups.count() # counting the number of elements in each group
gcount1 = gcount[gcount==gcount.max()].dropna() # select the largest one
# -- select the largest group based on the integer id -- #
df_new = groups.get_group(gcount1.index.values[0]).dropna()
return df_new
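# Illustrative use (assumes a cast DataFrame indexed by pressure):
#   df_clean = df.longest_pressure()
# keeps only the longest contiguous segment between pressure jumps larger than
# the gradient threshold (2, or 4 for casts sampled deeper than 1000 dbar).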
##############################################################################
@register_series_method
@register_dataframe_method
def downup_cast(df, winsize=500, thresh=0.02):
df_new = df.copy()
if df_new.shape[0] > 3*winsize:
df_new = _downcast_upcast(df_new, winsize=winsize, thresh=thresh)
else:
df_new = _downcast_upcast(df_new, winsize=20, thresh=0.012)
return df_new
##############################################################################
@register_series_method
@register_dataframe_method
def bindata2(df, reference='depSM', delta=1.):
df_new = df.copy()
df_new = _binmov(df, reference=reference, delta=delta)
return df_new
##############################################################################
@register_dataframe_method
def check_pump(df, pumpname='pumps'):
df_new = df.copy()
return df_new[df_new[pumpname] != 0]
##############################################################################
@register_dataframe_method
def despike_(df, weight=2):
df_new = df.copy()
# get block size based on the cast shape
block = _block_size(df_new.shape[0])
# if series, use function directly. Otherwise, apply using .apply()
if isinstance(df_new, pd.Series):
df_new = _despike(df_new, block=block, weight=weight)
else:
indexname = df_new.index.name
if indexname in df_new.columns:
df_new = df_new.reset_index(drop=True)
else:
df_new = df_new.reset_index()
df_new = df_new.apply(_despike, block=block, weight=weight)
        df_new = df_new.set_index(indexname)
return df_new
##############################################################################
def _block_size(shp):
# check the half size
    if (shp < 105) & (shp > 10): # cast has fewer than 105 measurements
        # if half the number of observations is even
        if (shp//2)%2 == 0:
            # the block is the half + 1 (keeps the block size odd)
            block = (shp//2)+1
        else:
            # if the half is odd, the block is exactly the half
            block = shp//2
    # if the cast has 105 or more observations, the block is capped at 105
elif shp >= 105:
block = 105
else:
block = 3
return block
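# Worked examples for _block_size: shp=50 -> 50//2=25 (odd) -> block=25;
# shp=60 -> 30 (even) -> block=31; shp>=105 -> block=105; shp<=10 -> block=3.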
##############################################################################
def _despike(series, block=100, weight=2):
# weight is a factor that controls the cutoff threshold
prop = np.array(series)
roll = _rolling_window(prop, block)
std = weight * roll.std(axis=1)
mean = roll.mean(axis=1)
    # workaround for series with an even number of elements
if block % 2 == 1:
iblock0 = int((block - 1)/2)
iblock1 = int((block - 1)/2)
else:
iblock0 = int((block - 1)/2)
iblock1 = int((block - 1)/2)+1
std = np.hstack([np.tile(std[0], iblock0),
std,
np.tile(std[-1], iblock1)])
mean = np.hstack([np.tile(mean[0], iblock0),
mean,
np.tile(mean[-1], iblock1)])
series = series[np.abs(series-mean) < std]
clean = series.astype(float).copy()
return clean
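# In _despike, a sample is kept only if it lies within `weight` rolling standard
# deviations of the rolling mean computed over `block` points (e.g. block=105,
# weight=2 keeps samples within 2 sigma of a 105-point running mean).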
##############################################################################
def _binmov(df, reference='depSM', delta=1.):
indd = np.round(df[reference].values)
binned = df.groupby(indd).mean()
binned[reference] = binned.index.values
return binned
##############################################################################
def _local_slope(value):
d = value - sign.detrend(value.values)
slope = linregress(np.arange(d.size), d)[0]
return slope
##############################################################################
def _downcast_upcast(data, winsize=500, direction='down', thresh=0.02):
"""
TODO - ADD DOCSTRING
"""
    df = pd.DataFrame(data.index)
# -- extend dataframe to account for blackman window size -- #
index = df.index
bsize = np.floor(winsize/2)
if winsize % 2 == 0:
reindex = np.arange(index[0]-bsize,index[-1]+bsize)
else:
reindex = np.arange(index[0]-bsize,index[-1]+1+bsize)
    # fill the extended index by interpolation (edges filled in both directions)
filt_na = df.reindex(index=reindex)
filt_na = filt_na.interpolate(limit_direction='both')
trend = filt_na.rolling(winsize, center=True).apply(_local_slope)
trend = trend.dropna()
# i = np.where((trend>0) & (np.gradient(trend)>0))[0]
if direction=='down':
i = np.where((trend>thresh))[0]
dataaux = data.iloc[i]
elif direction=='up':
i = np.where((trend<-thresh))[0]
dataaux = data.iloc[i]
else:
raise IOError('wrong direction input')
return dataaux
def is_verbose(message, verbose):
if verbose:
print(message)
else:
pass
@register_dataframe_method
def teos10_salt_temp_rho(df0, lon, lat,
pstr='Pressure [db]',
tstr='t090C',
sstr='sal00',
ct_str_out='ct',
sa_str_out='sa',
verbose=True):
"""Calculates absolute salinity and absolute temperature from dataframe.
The pressure, temperature and salinity temperature must be in the dataframe.
Parameters
----------
df : pandas.core.frame.DataFrame
Description of parameter `df`.
lon : float
longitude of the section
lat : float
latitude of the section
pstr : string
dataframe column name (input): pressure
tstr : string
dataframe column name (input): temperature
sstr : string
dataframe column name (input): salintiy
ct_str_out : string
dataframe column name (output): conservative temperature
sa_str_out : string
dataframe column name (output): absolute salinity
verbose : boolean
Print messages that describes the actions that were taken
Returns
-------
dataframe
Description of returned object.
"""
df = df0.copy()
is_verbose(f'reading {pstr}, {tstr} and {sstr} from dataframe', verbose)
# -- attributing values to varbs (less verbose script) -- #
sal00 = df[sstr].values
t090C = df[tstr].values
index = df[pstr].values
is_verbose(f'Includes absolute salinity ({sa_str_out}) ' +
f'and conservative temperature({ct_str_out})' +
' to dataframe', verbose)
# -- define absolute salinty and conservative temp -- #
df['sa'] = gsw.SA_from_SP(sal00, index, lon=lon, lat=lat)
df['ct'] = gsw.CT_from_t(sal00, t090C, index)
df['rho'] = gsw.rho(df['sa'].values,
df['ct'].values,
index)
is_verbose('Done', verbose)
return df
def centered_diff(x, y, equally_spaced=False):
nx = len(set(np.diff(x)))
if (nx != 1) and (not equally_spaced):
        raise IOError('x must contain equally spaced values')
if len(x) != len(y):
raise ValueError('x and y must present the same length')
dx = np.diff(x)[0]
f = np.zeros(y.size) * np.nan
f[1:-1] = (y[2:] - y[:-2])/(2*dx)
f[0] = (y[1]- y[0])/dx
f[-1] = (y[-1]-y[-2])/dx
return f
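# Worked example: for x = [0, 1, 2, 3] and y = x**2 = [0, 1, 4, 9],
# centered_diff(x, y) returns [1., 2., 4., 5.]: interior points use
# (y[i+1] - y[i-1]) / (2*dx) and the endpoints fall back to one-sided differences.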
def centered_diff_general(x, y,maxoffset=1):
xi = np.arange(x.min(), x.max()+maxoffset)
yi = np.interp(xi, x, y)
gradient = centered_diff(xi, yi, equally_spaced=True)
return xi, gradient
def vertical_gradient(df,
rank=1,
tstr='ct',
sstr='sa',
rhostr='rho',
zstr=None):
# TODO: document.
# this method calculates the vertical gradient of salinity, temperature and
# density (parameters must be present in the dataframe)
# gradlim is the gradient threshold to determine the mixed layer depth
if rank not in [1,2]:
        raise IOError('rank should be 1 or 2')
if zstr is None:
z = df.index.values
else:
z = df[zstr]
# calculate vertical gradient
tsrho_grad = {}
for i in [tstr, sstr, rhostr]:
aux = df[i].values
# first order
zi, gradaux = centered_diff_general(z,aux)
# second order
if rank == 2:
zi, gradaux = centered_diff_general(zi, gradaux)
# save gradients in dictionary
tsrho_grad[i] = gradaux
# tsrho_grad['zi'] = zi
return tsrho_grad, zi
def mld_vertical_gradient(tsrho_grad, zi, gradlim=0.05):
# TODO: document
# this method calculates the depth of mld based on the vertical gradient
# of temperature, salinity and density
# -- calculates the mixed layer depth (mld) -- #
zthreshlist = []
# for x in [gradS, gradrho, gradT]:
cont=0
gradlimfac = [0.25,1,0.9]
for x in tsrho_grad.keys():
aux = np.zeros(tsrho_grad[x].size+2) # extending boundaries
aux[1:-1] = tsrho_grad[x] # gradient value at limits is defined as 0
# find indices where condition is met
i0 = np.where(aux > gradlim*gradlimfac[cont])[0]
i1 = np.where(aux < -gradlim*gradlimfac[cont])[0]
cont += 1
# unify the indices
i = np.sort(np.concatenate([i0,i1])) - 1
# find mixed layer depth
if i.size >0:
imax = i[0]
zmldmax = zi[imax+1]
if (imax == 0) & (i.size >= 2):
imax = i[1]
zmldmax = zi[imax+1]
elif (imax == 0):
zmldmax = None
zthreshlist.append(zmldmax)
# test if the condition is met
# if not the mixed layer depth is the max depth of the profile
if len(zthreshlist) != 0:
zthresh = min(zthreshlist)
else:
zthresh = zi.max()
return zthresh
@register_dataframe_method
def mld_depth_vgrad(df,
rank=1,
tstr='ct',
sstr='sa',
rhostr='rho',
z=None,
gradlim=0.05):
dfgrad = vertical_gradient(df,
rank=rank,
tstr=tstr,
sstr=sstr,
rhostr=rhostr,
zstr=z)
return mld_vertical_gradient(*dfgrad,gradlim=gradlim)
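# Illustrative call (column names as produced by teos10_salt_temp_rho above):
#   mld = df.mld_depth_vgrad(rank=1, tstr='ct', sstr='sa', rhostr='rho', gradlim=0.05)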
|
py | b40ae221a48200fc9a10f289801cbc79845c7826 | """Main Module"""
from rates_shared.some_pkg.some_mod import do_it
def main() -> None:
"""Main Function"""
do_it()
print("Welcome to Package rates_shared")
if __name__ == '__main__':
main()
|
py | b40ae38fb98a21ba3a97a8285752496c43f72838 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the koobitj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
class FullBlockTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(bytes("horsebattery"))
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
return block
# Create a block on top of self.tip, and advance self.tip to point to the new block
# if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
# and rest will go to fees.
def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
if self.tip == None:
base_block_hash = self.genesis_hash
else:
base_block_hash = self.tip.sha256
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
if (spend != None):
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, self.block_time)
if (spend != None):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), "", 0xffffffff)) # no signature yet
# This copies the java comparison tool testing behavior: the first
# txout has a garbage scriptPubKey, "to make sure we're not
# pre-verifying too much" (?)
tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
if script == None:
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
else:
tx.vout.append(CTxOut(1, script))
# Now sign it if necessary
scriptSig = ""
scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
scriptSig = CScript([OP_TRUE])
else:
# We have to actually sign it
(sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
tx.vin[0].scriptSig = scriptSig
# Now add the transaction to the block
block = self.add_transactions_to_block(block, [tx])
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previous marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# add transactions to a block produced by next_block
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
block.solve()
# Update the internal state just like in next_block
self.tip = block
self.block_heights[block.sha256] = self.block_heights[old_hash]
del self.block_heights[old_hash]
self.blocks[block_number] = block
return block
# creates a new block and advances the tip to that block
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(1000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
out0 = get_spendable_output()
block(1, spend=out0)
save_spendable_output()
yield accepted()
out1 = get_spendable_output()
b2 = block(2, spend=out1)
yield accepted()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out1)
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
out2 = get_spendable_output()
block(4, spend=out2)
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out2)
save_spendable_output()
yield rejected()
out3 = get_spendable_output()
block(6, spend=out3)
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out2)
yield rejected()
out4 = get_spendable_output()
block(8, spend=out4)
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out3)
yield rejected()
block(11, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out3)
save_spendable_output()
#yield TestInstance([[b12, False]])
b13 = block(13, spend=out4)
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out5, additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50 - 1))
tip(13)
block(15, spend=out5, script=lots_of_checksigs)
yield accepted()
# Test that a block with too many checksigs is rejected
out6 = get_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50))
block(16, spend=out6, script=too_many_checksigs)
yield rejected(RejectResult(16, 'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, 'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out6)
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
out7 = get_spendable_output()
block(20, spend=out7)
yield rejected(RejectResult(16, 'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out6)
yield rejected()
block(22, spend=out5)
yield rejected()
# Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out6)
old_hash = b23.sha256
tx = CTransaction()
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([chr(0)*script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out6)
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([chr(0)*(script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, 'bad-blk-length'))
b25 = block(25, spend=out7)
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out6)
b26.vtx[0].vin[0].scriptSig = chr(0)
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
# Extend the b26 chain to make sure koobitd isn't accepting b26
b27 = block(27, spend=out7)
yield rejected()
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out6)
b28.vtx[0].vin[0].scriptSig = chr(0)*101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
        # Extend the b28 chain to make sure koobitd isn't accepting b28
b29 = block(29, spend=out7)
# TODO: Should get a reject message back with "bad-prevblk", except
# there's a bug that prevents this from being detected. Just note
# failure for now, and add the reject result later.
yield rejected()
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = chr(0)*100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
if __name__ == '__main__':
FullBlockTest().main()
|
py | b40ae4e54325d614891229056226981410495d8f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author : AloneMonkey
# blog: www.alonemonkey.com
import sys
import codecs
import frida
import threading
import os
import shutil
import time
import argparse
import tempfile
import subprocess
import re
import paramiko
from paramiko import SSHClient
from scp import SCPClient
from tqdm import tqdm
import traceback
from log import *
script_dir = os.path.dirname(os.path.realpath(__file__))
DUMP_JS = os.path.join(script_dir, '../../methods/dump.js')
User = 'root'
Password = 'alpine'
Host = 'localhost'
Port = 2222
TEMP_DIR = tempfile.gettempdir()
PAYLOAD_DIR = 'Payload'
PAYLOAD_PATH = os.path.join(TEMP_DIR, PAYLOAD_DIR)
file_dict = {}
finished = threading.Event()
def get_usb_iphone():
Type = 'usb'
if int(frida.__version__.split('.')[0]) < 12:
Type = 'tether'
device_manager = frida.get_device_manager()
changed = threading.Event()
def on_changed():
changed.set()
device_manager.on('changed', on_changed)
device = None
while device is None:
devices = [dev for dev in device_manager.enumerate_devices() if dev.type == Type]
if len(devices) == 0:
print('Waiting for USB device...')
changed.wait()
else:
device = devices[0]
device_manager.off('changed', on_changed)
return device
def generate_ipa(path, display_name):
ipa_filename = display_name + '.ipa'
logger.info('Generating "{}"'.format(ipa_filename))
try:
app_name = file_dict['app']
for key, value in file_dict.items():
from_dir = os.path.join(path, key)
to_dir = os.path.join(path, app_name, value)
if key != 'app':
shutil.move(from_dir, to_dir)
target_dir = './' + PAYLOAD_DIR
zip_args = ('zip', '-qr', os.path.join(os.getcwd(), ipa_filename), target_dir)
subprocess.check_call(zip_args, cwd=TEMP_DIR)
shutil.rmtree(PAYLOAD_PATH)
except Exception as e:
print(e)
finished.set()
def on_message(message, data):
t = tqdm(unit='B',unit_scale=True,unit_divisor=1024,miniters=1)
last_sent = [0]
def progress(filename, size, sent):
t.desc = os.path.basename(filename).decode("utf-8")
t.total = size
t.update(sent - last_sent[0])
last_sent[0] = 0 if size == sent else sent
if 'payload' in message:
payload = message['payload']
if 'dump' in payload:
origin_path = payload['path']
dump_path = payload['dump']
scp_from = dump_path
scp_to = PAYLOAD_PATH + '/'
with SCPClient(ssh.get_transport(), progress = progress, socket_timeout = 60) as scp:
scp.get(scp_from, scp_to)
chmod_dir = os.path.join(PAYLOAD_PATH, os.path.basename(dump_path))
chmod_args = ('chmod', '655', chmod_dir)
try:
subprocess.check_call(chmod_args)
except subprocess.CalledProcessError as err:
print(err)
index = origin_path.find('.app/')
file_dict[os.path.basename(dump_path)] = origin_path[index + 5:]
if 'app' in payload:
app_path = payload['app']
scp_from = app_path
scp_to = PAYLOAD_PATH + '/'
with SCPClient(ssh.get_transport(), progress = progress, socket_timeout = 60) as scp:
scp.get(scp_from, scp_to, recursive=True)
chmod_dir = os.path.join(PAYLOAD_PATH, os.path.basename(app_path))
chmod_args = ('chmod', '755', chmod_dir)
try:
subprocess.check_call(chmod_args)
except subprocess.CalledProcessError as err:
print(err)
file_dict['app'] = os.path.basename(app_path)
if 'done' in payload:
finished.set()
t.close()
def compare_applications(a, b):
a_is_running = a.pid != 0
b_is_running = b.pid != 0
if a_is_running == b_is_running:
if a.name > b.name:
return 1
elif a.name < b.name:
return -1
else:
return 0
elif a_is_running:
return -1
else:
return 1
def get_applications(device):
try:
applications = device.enumerate_applications()
except Exception as e:
sys.exit('Failed to enumerate applications: %s' % e)
return applications
def load_js_file(session, filename):
source = ''
with codecs.open(filename, 'r', 'utf-8') as f:
source = source + f.read()
script = session.create_script(source)
script.on('message', on_message)
script.load()
return script
def create_dir(path):
path = path.strip()
path = path.rstrip('\\')
if os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except os.error as err:
print(err)
def open_target_app(device, name_or_bundleid):
logger.info('Start the target app {}'.format(name_or_bundleid))
pid = ''
session = None
display_name = ''
bundle_identifier = ''
for application in get_applications(device):
if name_or_bundleid == application.identifier or name_or_bundleid == application.name:
pid = application.pid
display_name = application.name
bundle_identifier = application.identifier
try:
if not pid:
pid = device.spawn([bundle_identifier])
session = device.attach(pid)
device.resume(pid)
else:
session = device.attach(pid)
except Exception as e:
print(e)
return session, display_name, bundle_identifier
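# Illustrative flow (hypothetical bundle id): open_target_app(device, 'com.example.app')
# spawns the app if it is not already running, otherwise attaches to the existing pid,
# and returns (session, display_name, bundle_identifier).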
def start_dump(session, ipa_name):
logger.info('Dumping {} to {}'.format(display_name, TEMP_DIR))
script = load_js_file(session, DUMP_JS)
script.post('dump')
finished.wait()
generate_ipa(PAYLOAD_PATH, ipa_name)
if session:
session.detach()
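# NOTE: list_applications is called under '--list' below but is not defined in
# this file; a minimal stand-in is sketched here (the upstream tool prints a
# fuller, aligned table).
def list_applications(device):
    from functools import cmp_to_key
    applications = get_applications(device)
    print('{:>8} {:<40} {}'.format('PID', 'Name', 'Identifier'))
    for application in sorted(applications, key=cmp_to_key(compare_applications)):
        pid = application.pid if application.pid != 0 else '-'
        print('{:>8} {:<40} {}'.format(pid, application.name, application.identifier))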
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='frida-ios-dump (by AloneMonkey v2.0)')
parser.add_argument('-l', '--list', dest='list_applications', action='store_true', help='List the installed apps')
parser.add_argument('-o', '--output', dest='output_ipa', help='Specify name of the decrypted IPA')
parser.add_argument('target', nargs='?', help='Bundle identifier or display name of the target app')
args = parser.parse_args()
exit_code = 0
ssh = None
if not len(sys.argv[1:]):
parser.print_help()
sys.exit(exit_code)
device = get_usb_iphone()
if args.list_applications:
list_applications(device)
else:
name_or_bundleid = args.target
output_ipa = args.output_ipa
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(Host, port=Port, username=User, password=Password)
create_dir(PAYLOAD_PATH)
(session, display_name, bundle_identifier) = open_target_app(device, name_or_bundleid)
if output_ipa is None:
output_ipa = display_name
output_ipa = re.sub('\.ipa$', '', output_ipa)
if session:
start_dump(session, output_ipa)
except paramiko.ssh_exception.NoValidConnectionsError as e:
print(e)
exit_code = 1
except paramiko.AuthenticationException as e:
print(e)
exit_code = 1
except Exception as e:
print('*** Caught exception: %s: %s' % (e.__class__, e))
traceback.print_exc()
exit_code = 1
if ssh:
ssh.close()
if os.path.exists(PAYLOAD_PATH):
shutil.rmtree(PAYLOAD_PATH)
sys.exit(exit_code)
|
py | b40ae50cd4ad232820a1353b18b2fa8a177f0981 | import os
import sys
import argparse
import random
import cv2
from tqdm import tqdm
from ssds.ssds import SSDDetector
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
def plot_one_box(img, x, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
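# Example (illustrative): draw a red box with a label on an image array
#   plot_one_box(image, [50, 60, 200, 220], color=(255, 0, 0), label='person: 0.92')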
def demo_image(model, image_path, display):
# 1. prepare image
image = cv2.imread(image_path)
image = cv2.resize(image, model.image_size)
# 2. model infer
scores, boxes, classes = model(image)
# 3. draw bounding box on the image
for score, box, labels in zip(scores, boxes, classes):
plot_one_box(image, box, COLORS[labels % 3], '{label}: {score:.3f}'.format(label=labels, score=score))
# 4. visualize result
if display:
cv2.imshow('result', image)
cv2.waitKey(0)
else:
path, _ = os.path.splitext(image_path)
cv2.imwrite(path + '_result.jpg', image)
print("output file save at '{}'".format(path + '_result.jpg'))
def demo_video(model, video_path, display):
# 0. prepare video
cap = cv2.VideoCapture(video_path)
frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if cap.isOpened() and (not display):
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = int(cap.get(cv2.CAP_PROP_FPS))
writer = cv2.VideoWriter(video_path+"_output.mp4", fourcc, fps, (width,height))
for fid in tqdm(range(frames)):
# 1. prepare image
flag, image = cap.read()
image = cv2.resize(image, model.image_size)
# 2. model infer
scores, boxes, classes = model(image)
# 3. draw bounding box on the image
for score, box, labels in zip(scores, boxes, classes):
plot_one_box(image, box, COLORS[labels % 3], '{label}: {score:.3f}'.format(label=labels, score=score))
image = cv2.resize(image, (width,height))
# 4. visualize result
if display:
cv2.imshow("Image", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
writer.write(image)
# 5. release the video resources
cap.release()
if display:
cv2.destroyAllWindows()
else:
writer.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Demo a ssds.pytorch network')
    parser.add_argument('-cfg', '--config-file',
                        help='path to the config file', default=None, type=str, required=True)
parser.add_argument('-i', '--demo-file',
                        help='path to the demo file (image or video)', default=None, type=str, required=True)
parser.add_argument('-t', '--type',
default='image', choices=['image', 'video'])
parser.add_argument('-d', '--display',
help='whether display the detection result', action="store_true")
parser.add_argument('-s', '--shift', action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
    model = SSDDetector(args.config_file, args.shift)
getattr(sys.modules[__name__], "demo_"+args.type)(model, args.demo_file, args.display) |
py | b40ae53c8c3db393eb69ac43846c5aaef7d329ed | import csv
from datetime import datetime
from nltk.corpus import stopwords
import nltk
import re
import unidecode
import json
#import random
from collections import defaultdict
from nltk.sentiment.vader import SentimentIntensityAnalyzer
csv.field_size_limit(100000000)
vader = SentimentIntensityAnalyzer()
with open('C:\\CLPsych Challenge\\Dataset\\PreProcessing\\word_contractions_expansion.json') as f:
cList = json.load(f)
c_re = re.compile('(%s)' % '|'.join(cList.keys()))
sw = stopwords.words("english")
extra_stop_words = ["cannot", "could", "would", "us", "may", "might", "need", "ought", "shall", "alls", "n't", "'s", "'ve", "'t", "'m", "'d", "'ll", "t"]
sw.extend(extra_stop_words)
#sw = []
def expandContractions(text, c_re=c_re):
def replace(match):
return cList[match.group(0)]
return c_re.sub(replace, text)
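# Illustrative only: the actual expansions depend on the mapping loaded from
# word_contractions_expansion.json, e.g. an entry such as "can't" -> "cannot".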
def humanize_unixtime(unix_time):
time = datetime.fromtimestamp(int(unix_time)).strftime('%d-%m-%Y %H.%M')
return time
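# Example: humanize_unixtime(1546300800) -> '01-01-2019 00.00' on a UTC machine;
# the exact output depends on the local timezone of the host.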
def word_cleaner(word):
word = unidecode.unidecode(word)
if(word.lower() in sw):
word = " "
word = word.replace("_PERSON_", " ")
word = word.replace("_IP_", " ")
word = word.replace("_EMAIL_", " ")
word = word.replace("_URL_", " ")
word = word.replace("tldr", " ")
word = word.replace("<", " ")
# word = word.replace(".", " ")
p = re.compile('([A-Za-z]+)[.]')
word = p.sub(r'\1 ', word)
p = re.compile('[.]([A-Za-z]+)')
word = p.sub(r' \1', word)
word = word.replace("!", " ")
word = word.replace(",", " ")
word = word.replace("/", " ")
word = word.replace("~", " ")
# word = word.replace("-", " ")
word = word.replace("--", " ")
word = word.replace("-", " ")
word = word.replace("(", " ")
word = word.replace(")", " ")
word = word.replace("#", " ")
word = word.replace("?", " ")
word = word.replace("..", " ")
word = word.replace("...", " ")
word = word.replace("’", " ")
word = word.replace(":", " ")
word = word.replace("[", " ")
word = word.replace("]", " ")
word = word.replace("*", " ")
word = word.replace("\"", " ")
word = word.replace("&", " ")
word = word.replace("{", " ")
word = word.replace("}", " ")
word = word.replace("@", " ")
word = word.replace("↑", " ")
word = word.replace("$", " ")
word = word.replace("^", " ")
word = word.replace("\n", " ")
word = word.replace("\t", " ")
word = word.replace("\r", " ")
word = word.replace("`", " ")
word = word.replace("'", " ")
word = word.replace(";", " ")
#if(word == "." or word == " ." or word == " . " or word == ". "):
if(len(word) == 1 or word == "." or word == " ." or word == " . " or word == ". "):
word = " "
return word
path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_test_data\\combined_data_Task_C_Test.csv"
all_data = dict()
file = open(path, 'r', encoding = 'utf8')
reader_data = csv.reader(file)
for i, row in enumerate(reader_data):
if(i == 0):
continue
all_data[(row[0], row[1])] = row
"""
train_user_label_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_training_data\\trainUserIds_TaskC_Final.csv"
file =open(train_user_label_path, 'r', encoding = 'utf8')
reader_train = csv.reader(file, delimiter=',')
train_user_id_label = dict()
for row in reader_train:
train_user_id_label[row[0]] = row[1]
test_user_label_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_training_data\\testUserIds_TaskB_Final.csv"
file =open(test_user_label_path, 'r', encoding = 'utf8')
reader_test = csv.reader(file, delimiter=',')
test_user_id_label = dict()
for row in reader_test:
test_user_id_label[row[0]] = row[1]
"""
taskA_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_test_data\\task_C_test.posts.csv"
#all_train_posts_of_users_combined = list()
#all_train_posts_of_users_combined.append(["User ID", "Post", "Label"])
#all_test_posts_of_users_combined = list()
#all_test_posts_of_users_combined.append(["User ID", "Post", "Label"])
file =open(taskA_path, 'r', encoding = 'utf8')
reader_user = csv.reader(file, delimiter=',')
taskA_user_posts = defaultdict(list)
for i, row in enumerate(reader_user):
if(i == 0):
continue
taskA_user_posts[row[1]].append(row[0])
just_user_posts_train = list()
#just_user_posts_test = list()
posts_users_individual = defaultdict(list)
for user in taskA_user_posts:
user_posts = list()
for row in taskA_user_posts[user]:
user_posts.append(all_data[(row, user)])
posts_sorted_by_date = sorted(user_posts, key=lambda x : x[3], reverse=True)
# for row in sorted_by_date:
# row[2] = humanize_unixtime(row[2])
# sorted_by_date
#user_post_combined = ""
for i, post in enumerate(posts_sorted_by_date):
user_post_combined = ""
user_id = post[1]
subreddit_name = post[2]
subreddit_name = expandContractions(subreddit_name)
subreddit_name =' '.join(subreddit_name.split('\t'))
subreddit_name ='.'.join(subreddit_name.split('\n'))
subreddit_name =' '.join(subreddit_name.split('|'))
subreddit_name =' '.join(subreddit_name.split('\r'))
post[4] = expandContractions(post[4])
post[4] =' '.join(post[4].split('\t'))
post[4] ='.'.join(post[4].split('\n'))
post[4] =' '.join(post[4].split('|'))
post[4] =' '.join(post[4].split('\r'))
post[5] = expandContractions(post[5])
post[5] =' '.join(post[5].split('\t'))
post[5] ='.'.join(post[5].split('\n'))
post[5] =' '.join(post[5].split('|'))
post[5] =' '.join(post[5].split('\r'))
#user_post_title = nltk.sent_tokenize(post[4])
#user_post = nltk.sent_tokenize(post[5])
#final_post_title_sentiment = ""
#final_post_sentiment = ""
#for sent in user_post_title:
# mydict = vader.polarity_scores(sent)
# if(mydict['compound'] <= -0.05 or mydict['compound'] >= 0.05):
# final_post_title_sentiment += sent
#for sent in user_post:
# mydict = vader.polarity_scores(sent)
# if(mydict['compound'] <= -0.05 or mydict['compound'] >= 0.05):
# final_post_sentiment += sent
word_tokenized_subreddit = nltk.word_tokenize(subreddit_name)
word_tokenized_title = nltk.word_tokenize(post[4])
word_tokenized_post = nltk.word_tokenize(post[5])
#word_tokenized_title = nltk.word_tokenize(final_post_title_sentiment)
#word_tokenized_post = nltk.word_tokenize(final_post_sentiment)
for word in word_tokenized_subreddit:
user_post_combined += word_cleaner(word) + " "
for word in word_tokenized_title:
user_post_combined += word_cleaner(word) + " "
for word in word_tokenized_post:
user_post_combined += word_cleaner(word) + " "
user_post_combined = re.sub(' +', ' ',user_post_combined)
#user_post_combined = ' '.join(user_post_combined.split(' '))
user_post_combined = user_post_combined.strip()
user_post_combined = user_post_combined.lower()
posts_users_individual[user_id].append(user_post_combined)
just_user_posts_train.append(user_post_combined)
#user_post_combined = re.sub(' +', ' ',user_post_combined)
#user_post_combined = ' '.join(user_post_combined.split(' '))
#user_post_combined = user_post_combined.strip()
#user_post_combined = user_post_combined.lower()
#print(user_post_combined)
#print("\n\n\n")
#label = random.randint(0,1)
#if user in train_user_id_label:
#label = train_user_id_label[user]
#posts_users_individual[user].append(label)
#all_train_posts_of_users_combined.append([user_id, user_post_combined, label])
#just_user_posts_train.append(user_post_combined)
#else:
#label = test_user_id_label[user]
#posts_users_individual[user].append(label)
#all_test_posts_of_users_combined.append([user_id, user_post_combined, label])
#just_user_posts_test.append(user_post_combined)
#with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Non-PreProcessed-Data\\User_Posts_Processed_Train_Full_Final.tsv",'w', encoding = 'utf8', newline='') as outcsv:
"""
with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Sentiment-Processed\\User_Posts_Processed_Train_Full_Final.tsv",'w', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter='\t',quotechar = '"')
for row in all_train_posts_of_users_combined:
writer.writerow(row)
#with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Non-PreProcessed-Data\\User_Posts_Processed_Test_Final.tsv",'w', encoding = 'utf8', newline='') as outcsv:
with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Sentiment-Processed\\User_Posts_Processed_Test_Final.tsv",'w', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter='\t', quotechar = '"')
for row in all_test_posts_of_users_combined:
writer.writerow(row)
"""
#with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Full_Train_Data.tsv",'w', encoding = 'utf8', newline='') as outcsv:
# writer = csv.writer(outcsv, delimiter='\t', quotechar = '"')
# for row in all_test_posts_of_users_combined:
# writer.writerow(row)
# for i, row in enumerate(all_train_posts_of_users_combined):
# if(i == 0):
# continue
# writer.writerow(row)
with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Only_User_Posts_Test_Task_C.txt",'w', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv,quotechar = '"')
for row in just_user_posts_train:
writer.writerow([row])
"""
with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Only_User_Posts_Test_Sentiment.txt",'w', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, quotechar = '"')
for row in just_user_posts_test:
writer.writerow([row])
"""
"""
len(all_test_posts_of_users_combined) + len(all_train_posts_of_users_combined)
count_label_class = dict()
for row in all_train_posts_of_users_combined:
if(row[2] in count_label_class):
count_label_class[row[2]] += 1
else:
count_label_class[row[2]] = 1
for row in all_test_posts_of_users_combined:
if(row[2] in count_label_class):
count_label_class[row[2]] += 1
else:
count_label_class[row[2]] = 1
"""
len(just_user_posts_train)
len(posts_users_individual.keys())
import json
with open('C:\\CLPsych Challenge\\Dataset\\PreProcessing\\User_To_Posts_Test_Task_C.json', 'w') as fp:
json.dump(posts_users_individual, fp) |
py | b40ae5e524a3339d26a4f9141d21c502b606c8fb | # Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import absl.testing
import iree.tflite.support.test_util as test_util
import numpy
import urllib.request
from PIL import Image
model_path = "https://storage.googleapis.com/iree-model-artifacts/tflite-integration-tests/posenet_i8.tflite"
model_input = "https://storage.googleapis.com/iree-model-artifacts/tflite-integration-tests/posenet_i8_input.jpg"
class PosenetI8Test(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(PosenetI8Test, self).__init__(model_path, *args, **kwargs)
def compare_results(self, iree_results, tflite_results, details):
super(PosenetI8Test, self).compare_results(iree_results, tflite_results,
details)
    # This value is a discretized location of the person's joints. If we are
# *close* to the expected position we can consider this good enough.
self.assertTrue(
numpy.isclose(iree_results[0][:, :, :, 0],
tflite_results[0][:, :, :, 0],
atol=25e-3).all())
self.assertTrue(
numpy.isclose(iree_results[0][:, :, :, 1],
tflite_results[0][:, :, :, 1],
atol=25e-3).all())
def generate_inputs(self, input_details):
local_path = "/".join([self.workdir, "person.jpg"])
urllib.request.urlretrieve(model_input, local_path)
shape = input_details[0]["shape"]
im = numpy.array(Image.open(local_path).resize((shape[1], shape[2])))
args = [im.reshape(shape)]
return args
def test_compile_tflite(self):
self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
|
py | b40ae7c372e7f76f49e1739ab301d8e46b5dcd3d | # Generated by Django 2.2.4 on 2020-03-19 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bikesharing", "0019_auto_20200317_2042"),
]
operations = [
migrations.AddField(
model_name="locationtracker",
name="internal_only",
field=models.BooleanField(
default=False,
help_text="""Internal trackers don't publish their locations
to the enduser. They are useful for backup trackers
with lower accuracy e.g. wifi trackers.""",
),
),
]
|
py | b40ae8fe768511865fd2a7052b220daa0afb8268 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes,
Decimal,
wait_until,
)
class WalletTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self):
self.add_nodes(4)
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_all([self.nodes[0:3]])
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['size']
def run_test(self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 250)
assert_equal(walletinfo['balance'], 0)
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(101)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 250)
assert_equal(self.nodes[1].getbalance(), 250)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Exercise locking of unspent outputs
unspent_0 = self.nodes[1].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[1].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].sendtoaddress, self.nodes[1].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[1].listlockunspent())
self.nodes[1].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Send 21 RSCOIN from 1 to 0 using sendtoaddress call.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 21)
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
fee_per_kbyte = Decimal('0.001')
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = float(utxo["amount"]) - float(fee_per_kbyte)
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 0)
node_2_expected_bal = Decimal('250') + Decimal('21') - 2 * fee_per_kbyte
node_2_bal = self.nodes[2].getbalance()
assert_equal(node_2_bal, node_2_expected_bal)
assert_equal(self.nodes[2].getbalance("from1"), node_2_expected_bal)
# Send 10 RSCOIN normal
address = self.nodes[0].getnewaddress("test")
self.nodes[2].settxfee(float(fee_per_kbyte))
txid = self.nodes[2].sendtoaddress(address, 10, "", "")
fee = self.nodes[2].gettransaction(txid)["fee"]
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal = self.nodes[0].getbalance()
assert_equal(node_0_bal, Decimal('10'))
# Sendmany 10 RSCOIN
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "")
fee = self.nodes[2].gettransaction(txid)["fee"]
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal += Decimal('10')
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
assert_equal(self.nodes[0].getbalance(), node_0_bal)
assert_fee_amount(-fee, self.get_vsize(self.nodes[2].getrawtransaction(txid)), fee_per_kbyte)
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
listunspent = self.nodes[1].listunspent(1, 9999999, [], 3)
assert_array_result(listunspent,
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
        # check if wallet or blockchain maintenance changes the balance
self.sync_all([self.nodes[0:3]])
blocks = self.nodes[0].generate(2)
self.sync_all([self.nodes[0:3]])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
]
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m])
self.start_node(1, [m])
self.start_node(2, [m])
if m == '-reindex':
                # reindex will leave rpc warmed up "early"; wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest().main()
|
py | b40ae94fb30d086a10bb65e9d593ce35931e2364 | import copy
import io
import struct
from dataclasses import dataclass
from datetime import datetime
from eilib.helpers import read_exactly
_SIGNATURE = 0x019CE23C
_HEADER_FORMAT = '<LLLL'
_HEADER_SIZE = struct.calcsize(_HEADER_FORMAT)
_TABLE_ENTRY_FORMAT = '<lLLLHL'
_TABLE_ENTRY_SIZE = struct.calcsize(_TABLE_ENTRY_FORMAT)
class InvalidResFile(Exception):
pass
@dataclass
class ResFileItemInfo:
name: str
file_size: int
file_offset: int
modify_time: datetime
class _ResSubFile(io.BufferedIOBase):
def __init__(self, file: io.BufferedIOBase, mode, entry: ResFileItemInfo, close_cb):
super().__init__()
self._file = file
self._mode = mode
self._entry = entry
self._close_cb = close_cb
assert mode in ('r', 'w')
self._file.seek(self._entry.file_offset)
@property
def mode(self):
return self._mode
def readable(self):
return self._mode == 'r'
def writable(self):
return self._mode == 'w'
def read(self, size=-1):
self._check_closed('read')
if not self.readable():
raise io.UnsupportedOperation('file not open for reading')
if size < 0:
size = self._entry.file_size
return self._file.read(min(size, self._entry.file_size - self.tell()))
def write(self, data):
self._check_closed('write')
if not self.writable():
raise io.UnsupportedOperation('file not open for writing')
self._file.write(data)
self._entry.file_size = max(self._entry.file_size, self.tell())
def tell(self):
self._check_closed('tell')
return self._file.tell() - self._entry.file_offset
def seek(self, pos, whence=0):
self._check_closed('seek')
cur_pos = self.tell()
if whence == 0:
new_pos = pos
elif whence == 1:
new_pos = cur_pos + pos
elif whence == 2:
new_pos = self._entry.file_size + pos
else:
raise ValueError('invalid whence value')
if new_pos < 0:
new_pos = 0
elif self._mode == 'r':
new_pos = min(new_pos, self._entry.file_size)
else:
self._entry.file_size = max(self._entry.file_size, new_pos)
self._file.seek(new_pos + self._entry.file_offset, 0)
return new_pos
def close(self):
if self.closed: # pylint: disable=using-constant-test
return
try:
super().close()
finally:
self._close_cb()
def _check_closed(self, operation):
if self.closed: # pylint: disable=using-constant-test
raise ValueError(f'{operation} on closed file')
class ResFile:
def __init__(self, file, mode='r'):
if mode not in ('r', 'w'):
raise ValueError('ResFile requires mode "r", "w"')
self._opened = isinstance(file, str)
self._file = open(file, mode + 'b') if self._opened else file
self._mode = mode
self._file_size = 0
self._table = {}
self._subfile = None
if self._mode == 'r':
self._file.seek(0, 2)
self._file_size = self._file.tell()
self._read_headers()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def open(self, name, mode='r'):
if not self._file:
raise ValueError('ResFile is closed')
if self._subfile:
raise ValueError('only one opened file is allowed')
if mode == 'r':
entry = self._table[name]
elif mode == 'w':
if self._mode == 'r':
raise ValueError('ResFile was opened in read mode, so open() requires mode "r"')
self._write_alignment()
entry = ResFileItemInfo(name, 0, max(_HEADER_SIZE, self._file.tell()), datetime.now())
self._table[name] = entry
else:
raise ValueError('open() requires mode "r" or "w"')
self._subfile = _ResSubFile(self._file, mode, entry, self._close_subfile)
return self._subfile
def get_info(self, name):
return copy.deepcopy(self._table[name])
def iter_files(self):
for entry in self._table.values():
yield copy.deepcopy(entry)
def close(self):
if not self._file:
return
if self._subfile:
if self._subfile.mode != 'r':
raise ValueError("can't close the ResFile while there is an opened subfile")
self._close_subfile()
if self._mode != 'r':
self._write_headers()
if self._opened:
self._file.close()
self._file = None
def _write_alignment(self):
self._file.seek(0, 2)
self._file.write(b'\0' * ((16 - self._file.tell() % 16) % 16))
def _close_subfile(self):
if self._subfile.mode != 'r':
self._write_alignment()
self._subfile = None
def _read(self, size, message='Unexpected EOF'):
try:
return read_exactly(self._file, size)
except Exception as ex:
raise InvalidResFile(message) from ex
def _lower_ascii(self, value):
return ''.join((c.lower() if ord(c) >= 128 else c) for c in value)
def _read_headers(self):
self._file.seek(0)
header_data = self._read(_HEADER_SIZE, 'File header is truncated')
magic, table_size, table_offset, names_size = struct.unpack(_HEADER_FORMAT, header_data)
if magic != _SIGNATURE:
raise InvalidResFile('Invalid signature')
        table_data_size = table_size * _TABLE_ENTRY_SIZE
if table_offset + table_data_size + names_size > self._file_size:
raise InvalidResFile('Files table is truncated')
self._file.seek(table_offset)
tables_data = self._read(table_data_size)
names_data = self._read(names_size)
for table_entry in struct.iter_unpack(_TABLE_ENTRY_FORMAT, tables_data):
_, file_size, file_offset, modify_timestamp, name_length, name_offset = table_entry
name = names_data[name_offset:name_offset+name_length].decode('cp1251')
self._table[self._lower_ascii(name)] = ResFileItemInfo(
name=name, file_size=file_size, file_offset=file_offset,
modify_time=datetime.fromtimestamp(modify_timestamp)
)
def _write_headers(self):
self._write_alignment()
table_offset = self._file.tell()
# Build hash table
hash_table = [[None, -1] for _ in self._table] # entry, next_index
last_free_index = len(hash_table) - 1
for entry in self._table.values():
# Calculate entry's hash
entry_hash = sum(b for b in self._lower_ascii(entry.name).encode('cp1251')) % (1 << 32)
index = entry_hash % len(hash_table)
# If index is busy, find another one
if hash_table[index][0] is not None:
while hash_table[index][1] >= 0:
index = hash_table[index][1]
while hash_table[last_free_index][0] is not None:
last_free_index -= 1
index = hash_table[index][1] = last_free_index
last_free_index -= 1
# Put entry in the hash table
hash_table[index][0] = entry
# Write hash table
encoded_names = []
name_offset = 0
for entry, next_index in hash_table:
encoded_names.append(entry.name.encode('cp1251'))
name_length = len(encoded_names[-1])
data = struct.pack(
_TABLE_ENTRY_FORMAT,
next_index,
entry.file_size,
entry.file_offset,
int(entry.modify_time.timestamp()),
name_length,
name_offset,
)
name_offset += name_length
self._file.write(data)
# Write file names
self._file.write(b''.join(encoded_names))
# Update file header
self._file.seek(0)
data = struct.pack(_HEADER_FORMAT, _SIGNATURE, len(hash_table), table_offset, name_offset)
self._file.write(data)
|
py | b40ae97f68e21f623031d10fdbcd1dc45b3a81a9 | from stripmaker import StripMaker
import numpy as np
class Thing(object):
def __init__(self,coords,foo):
self.coords = np.array(coords)
self.foo = foo
# a square should result in 4 filmstrips
def test_square():
faces = [
Thing([(0,0),(1,0)],'a'),
Thing([(1,0),(1,1)],'b'),
Thing([(1,1),(0,1)],'c'),
Thing([(0,1),(0,0)],'d')
]
assert len(StripMaker(faces).strips) == 4
# three sides of a square (and likewise a triangle) should result in 3 filmstrips
def test_3sides_of_square():
faces = [
Thing([(0,0),(1,0)],'a'),
Thing([(1,0),(1,1)],'b'),
Thing([(1,1),(0,1)],'c')
]
assert len(StripMaker(faces).strips) == 3
def test_triangle():
faces = [
Thing([(0,0),(1,0)],'a'),
Thing([(1,0),(1,1)],'b'),
Thing([(1,1),(0,0)],'c')
]
assert len(StripMaker(faces).strips) == 3
# a straight line should result in 1 filmstrip
def test_straight_line():
faces = [
Thing([(0,0),(1,0)],'a'),
Thing([(1,0),(2,0)],'b')
]
strips = StripMaker(faces).strips
assert len(strips) == 1
assert len(strips[0].pts) == 3
# a 45 degree angle should result in 1 filmstrip
def test_45deg_angle():
faces = [
Thing([(0,0),(1,0)],'a'),
Thing([(1,0),(2,1)],'b')
]
assert len(StripMaker(faces).strips) == 1
# an orphan
#def test_orphan():
# faces = [[(0,0),(1,0)],[(2,0),(3,0)]]
# assert len(StripMaker(faces).strips) == 2
# an orphan
#def test_orphan_complex():
# faces = [[(0,0),(1,0)],[(4,0),(5,0)],[(1,0),(2,0)]]
# assert len(StripMaker(faces).strips) == 2
|
py | b40ae984dc0988d02f93d0a060e19dc6caeaa70d | # -*- coding: utf-8 -*-
import contextlib
import logging
import threading
import random
import itertools
from . import connection as conn_impl, issues, settings as settings_impl, _apis
logger = logging.getLogger(__name__)
class EndpointInfo(object):
__slots__ = (
"address",
"endpoint",
"location",
"port",
"ssl",
"ipv4_addrs",
"ipv6_addrs",
"ssl_target_name_override",
"node_id",
)
def __init__(self, endpoint_info):
self.address = endpoint_info.address
self.endpoint = "%s:%s" % (endpoint_info.address, endpoint_info.port)
self.location = endpoint_info.location
self.port = endpoint_info.port
self.ssl = endpoint_info.ssl
self.ipv4_addrs = tuple(endpoint_info.ip_v4)
self.ipv6_addrs = tuple(endpoint_info.ip_v6)
self.ssl_target_name_override = endpoint_info.ssl_target_name_override
self.node_id = endpoint_info.node_id
def endpoints_with_options(self):
ssl_target_name_override = None
if self.ssl:
if self.ssl_target_name_override:
ssl_target_name_override = self.ssl_target_name_override
elif self.ipv6_addrs or self.ipv4_addrs:
ssl_target_name_override = self.address
endpoint_options = conn_impl.EndpointOptions(
ssl_target_name_override=ssl_target_name_override, node_id=self.node_id
)
if self.ipv6_addrs or self.ipv4_addrs:
for ipv6addr in self.ipv6_addrs:
yield ("ipv6:[%s]:%s" % (ipv6addr, self.port), endpoint_options)
for ipv4addr in self.ipv4_addrs:
yield ("ipv4:%s:%s" % (ipv4addr, self.port), endpoint_options)
else:
yield (self.endpoint, endpoint_options)
def __str__(self):
return "<Endpoint %s, location %s, ssl: %s>" % (
self.endpoint,
self.location,
self.ssl,
)
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash(self.endpoint)
def __eq__(self, other):
if not hasattr(other, "endpoint"):
return False
return self.endpoint == other.endpoint
def _list_endpoints_request_factory(connection_params):
request = _apis.ydb_discovery.ListEndpointsRequest()
request.database = connection_params.database
return request
class DiscoveryResult(object):
def __init__(self, self_location, endpoints):
self.self_location = self_location
self.endpoints = endpoints
def __str__(self):
return "DiscoveryResult <self_location: %s, endpoints %s>" % (
self.self_location,
self.endpoints,
)
def __repr__(self):
return self.__str__()
@classmethod
def from_response(cls, rpc_state, response, use_all_nodes=False):
issues._process_response(response.operation)
message = _apis.ydb_discovery.ListEndpointsResult()
response.operation.result.Unpack(message)
unique_local_endpoints = set()
unique_different_endpoints = set()
for info in message.endpoints:
if info.location == message.self_location:
unique_local_endpoints.add(EndpointInfo(info))
else:
unique_different_endpoints.add(EndpointInfo(info))
result = []
unique_local_endpoints = list(unique_local_endpoints)
unique_different_endpoints = list(unique_different_endpoints)
if use_all_nodes:
result.extend(unique_local_endpoints)
result.extend(unique_different_endpoints)
random.shuffle(result)
else:
random.shuffle(unique_local_endpoints)
random.shuffle(unique_different_endpoints)
result.extend(unique_local_endpoints)
result.extend(unique_different_endpoints)
return cls(message.self_location, result)
class DiscoveryEndpointsResolver(object):
def __init__(self, driver_config):
self.logger = logger.getChild(self.__class__.__name__)
self._driver_config = driver_config
self._ready_timeout = getattr(
self._driver_config, "discovery_request_timeout", 10
)
self._lock = threading.Lock()
self._debug_details_history_size = 20
self._debug_details_items = []
self._endpoints = []
self._endpoints.append(driver_config.endpoint)
self._endpoints.extend(driver_config.endpoints)
random.shuffle(self._endpoints)
self._endpoints_iter = itertools.cycle(self._endpoints)
def _add_debug_details(self, message, *args):
self.logger.debug(message, *args)
message = message % args
with self._lock:
self._debug_details_items.append(message)
if len(self._debug_details_items) > self._debug_details_history_size:
self._debug_details_items.pop()
def debug_details(self):
"""
Returns last resolver errors as a debug string.
"""
with self._lock:
return "\n".join(self._debug_details_items)
def resolve(self):
with self.context_resolve() as result:
return result
@contextlib.contextmanager
def context_resolve(self):
self.logger.debug("Preparing initial endpoint to resolve endpoints")
endpoint = next(self._endpoints_iter)
initial = conn_impl.Connection.ready_factory(
endpoint, self._driver_config, ready_timeout=self._ready_timeout
)
if initial is None:
self._add_debug_details(
'Failed to establish connection to YDB discovery endpoint: "%s". Check endpoint correctness.'
% endpoint
)
yield
return
self.logger.debug(
"Resolving endpoints for database %s", self._driver_config.database
)
try:
resolved = initial(
_list_endpoints_request_factory(self._driver_config),
_apis.DiscoveryService.Stub,
_apis.DiscoveryService.ListEndpoints,
DiscoveryResult.from_response,
settings=settings_impl.BaseRequestSettings().with_timeout(
self._ready_timeout
),
wrap_args=(self._driver_config.use_all_nodes,),
)
self._add_debug_details(
"Resolved endpoints for database %s: %s",
self._driver_config.database,
resolved,
)
yield resolved
except Exception as e:
self._add_debug_details(
'Failed to resolve endpoints for database %s. Endpoint: "%s". Error details:\n %s',
self._driver_config.database,
endpoint,
e,
)
yield
finally:
initial.close()
|
py | b40aeb541833ed48755a0357d4c39151ff5a4ee3 | from __future__ import generators
import math
class CoordsGroupStart(object):
pass
class CoordsGroupEnd(object):
pass
# AUTHORS: zfierstadt, leec
def is_line_start(token,line):
"check whether line begins with token"
return token==line[:len(token)]
def get_ori_letterunit(start,end,seq,gapchar='-'):
"""try to determine orientation (1 or -1) based on whether start>end,
and letterunit (1 or 3) depending on the ratio of end-start difference
vs the actual non-gap letter count. Returns tuple (ori,letterunit)"""
if end>start:
ori=1
else:
ori= -1
ngap=0
for l in seq:
if l==gapchar:
ngap+=1
seqlen=len(seq)-ngap
if ori*float(end-start)/seqlen >2.0:
letterunit=3
else:
letterunit=1
return ori,letterunit
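# A rough worked example of the heuristic above (values chosen purely for
# illustration): get_ori_letterunit(300, 3, 'MKV-LL') sees end < start so
# ori is -1; the sequence has 5 non-gap letters and -1*(3-300)/5 = 59.4 > 2.0,
# so letterunit is 3 and the call returns (-1, 3).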
class BlastIval(object):
def __repr__(self):
return '<BLAST-IVAL: ' + repr(self.__dict__) + '>'
class BlastHitParser(object):
"""reads alignment info from blastall standard output.
Method parse_file(fo) reads file object fo, and generates tuples
suitable for BlastIval.
Attributes:
query_seq
query_start
query_end
subject_seq
subject_start
subject_end
query_id
subject_id
e_value
blast_score
identity_percent
"""
gapchar='-'
def __init__(self):
self.hit_id=0
self.nline = 0
self.reset()
def reset(self):
"flush any alignment info, so we can start reading new alignment"
self.query_seq=""
self.subject_seq=""
self.hit_id+=1
def save_query(self,line):
self.query_id=line.split()[1]
def save_subject(self,line):
self.subject_id=line.split()[0][1:]
def save_score(self,line):
"save a Score: line"
self.blast_score=float(line.split()[2])
s=line.split()[7]
if s[0]=='e':
s='1'+s
if s.endswith(','): s = s.strip(',')
try:
self.e_value= -math.log(float(s))/math.log(10.0)
except (ValueError,OverflowError), e:
self.e_value=300.
def save_identity(self,line):
"save Identities line"
s=line.split()[3][1:]
self.identity_percent=int(s[:s.find('%')])
def save_query_line(self,line):
"save a Query: line"
c=line.split()
self.query_end=int(c[3])
if not self.query_seq:
self.query_start=int(c[1])
if self.query_start < self.query_end: # handles forward orientation
self.query_start -= 1
self.query_seq+=c[2]
self.seq_start_char=line.find(c[2], 5) # IN CASE BLAST SCREWS UP Sbjct:
def save_subject_line(self,line):
"save a Sbjct: line, attempt to handle various BLAST insanities"
c=line.split()
if len(c)<4: # OOPS, BLAST FORGOT TO PUT SPACE BEFORE 1ST NUMBER
# THIS HAPPENS IN TBLASTN... WHEN THE SUBJECT SEQUENCE
# COVERS RANGE 1-1200, THE FOUR DIGIT NUMBER WILL RUN INTO
# THE SEQUENCE, WITH NO SPACE!!
c=['Sbjct:',line[6:self.seq_start_char]] \
+line[self.seq_start_char:].split() # FIX BLAST SCREW-UP
self.subject_end=int(c[3])
if not self.subject_seq:
self.subject_start=int(c[1])
if self.subject_start < self.subject_end: # handles forward orientation
self.subject_start -= 1
self.subject_seq+=c[2]
lendiff=len(self.query_seq)-len(self.subject_seq)
if lendiff>0: # HANDLE TBLASTN SCREWINESS: Sbjct SEQ OFTEN TOO SHORT!!
# THIS APPEARS TO BE ASSOCIATED ESPECIALLY WITH STOP CODONS *
self.subject_seq+=lendiff*'A' # EXTEND TO SAME LENGTH AS QUERY...
elif lendiff<0 and not hasattr(self,'ignore_query_truncation'):
# WHAT THE HECK?!?! WARN THE USER: BLAST RESULTS ARE SCREWY...
raise ValueError(
"""BLAST appears to have truncated the Query: sequence
to be shorter than the Sbjct: sequence:
Query: %s
Sbjct: %s
This should not happen! To ignore this error, please
create an attribute ignore_query_truncation on the
BlastHitParser object.""" % (self.query_seq,self.subject_seq))
def get_interval_obj(self, q_start, q_end, s_start, s_end,
query_ori, query_factor, subject_ori, subject_factor):
"return interval result as an object with attributes"
o = BlastIval()
o.hit_id = self.hit_id
o.src_id = self.query_id
o.dest_id = self.subject_id
o.blast_score = self.blast_score
o.e_value = self.e_value
o.percent_id = self.identity_percent
o.src_ori = query_ori
o.dest_ori = subject_ori
query_start = self.query_start+q_start*query_ori*query_factor
query_end = self.query_start+q_end*query_ori*query_factor
subject_start = self.subject_start+s_start*subject_ori*subject_factor
subject_end = self.subject_start+s_end*subject_ori*subject_factor
if query_start<query_end:
o.src_start = query_start
o.src_end = query_end
else:
o.src_start = query_end
o.src_end = query_start
if subject_start<subject_end:
o.dest_start = subject_start
o.dest_end = subject_end
else:
o.dest_start = subject_end
o.dest_end = subject_start
return o
def is_valid_hit(self):
return self.query_seq and self.subject_seq
def generate_intervals(self):
"generate interval tuples for the current alignment"
yield CoordsGroupStart() # bracket with grouping markers
query_ori,query_factor=get_ori_letterunit(self.query_start,\
self.query_end,self.query_seq,self.gapchar)
subject_ori,subject_factor=get_ori_letterunit(self.subject_start,\
self.subject_end,self.subject_seq,self.gapchar)
q_start= -1
s_start= -1
i_query=0
i_subject=0
for i in range(len(self.query_seq)): # SCAN ALIGNMENT FOR GAPS
if self.query_seq[i]==self.gapchar or self.subject_seq[i]==self.gapchar:
if q_start>=0: # END OF AN UNGAPPED INTERVAL
yield self.get_interval_obj(q_start, i_query,
s_start, i_subject,
query_ori, query_factor,
subject_ori, subject_factor)
q_start= -1
elif q_start<0: # START OF AN UNGAPPED INTERVAL
q_start=i_query
s_start=i_subject
if self.query_seq[i]!=self.gapchar: # COUNT QUERY LETTERS
i_query+=1
if self.subject_seq[i]!=self.gapchar: # COUNT SUBJECT LETTERS
i_subject+=1
if q_start>=0: # REPORT THE LAST INTERVAL
yield self.get_interval_obj(q_start, i_query,
s_start, i_subject,
query_ori, query_factor,
subject_ori, subject_factor)
yield CoordsGroupEnd()
def parse_file(self,myfile):
"generate interval tuples by parsing BLAST output from myfile"
for line in myfile:
self.nline += 1
if self.is_valid_hit() and \
(is_line_start('>',line) or is_line_start(' Score =',line) \
or is_line_start(' Database:',line) \
or is_line_start('Query=',line)):
for t in self.generate_intervals(): # REPORT THIS ALIGNMENT
yield t # GENERATE ALL ITS INTERVAL MATCHES
self.reset() # RESET TO START A NEW ALIGNMENT
if is_line_start('Query=',line):
self.save_query(line)
elif is_line_start('>',line):
self.save_subject(line)
elif is_line_start(' Score =',line):
self.save_score(line)
elif 'Identities =' in line:
self.save_identity(line)
elif is_line_start('Query:',line):
self.save_query_line(line)
elif is_line_start('Sbjct:',line):
self.save_subject_line(line)
if self.nline == 0: # no blast output??
raise IOError('no BLAST output. Check that blastall is in your PATH')
if __name__=='__main__':
import sys
p=BlastHitParser()
for t in p.parse_file(sys.stdin):
print t
|
py | b40aec0a3bf36a0927232a3a04b7a3bd2c1bf559 | from ros.lib.models import System
from ros.lib.app import db
def db_get_host(host_id):
return db.session.query(System).filter_by(inventory_id=host_id).first()
def db_get_record(model, **filters):
return db.session.query(model).filter_by(**filters).first()
def db_get_records(model, **filters):
return db.session.query(model).filter_by(**filters)
|
py | b40aecca78d53c1b8c0dba584809207d605de9b9 | #!/usr/bin/env python
import os
import sys
from optparse import OptionParser
import django
from django.conf import settings
from django.core.management import call_command
def main():
parser = OptionParser()
parser.add_option("--DATABASE_ENGINE", dest="DATABASE_ENGINE", default="sqlite3")
parser.add_option("--DATABASE_NAME", dest="DATABASE_NAME", default="")
parser.add_option("--DATABASE_USER", dest="DATABASE_USER", default="")
parser.add_option("--DATABASE_PASSWORD", dest="DATABASE_PASSWORD", default="")
parser.add_option("--SITE_ID", dest="SITE_ID", type="int", default=1)
options, args = parser.parse_args()
app_path = 'bisnode'
parent_dir, app_name = os.path.split(app_path)
sys.path.insert(0, parent_dir)
settings.configure(**{
"DATABASES": {
'default': {
"ENGINE": 'django.db.backends.%s' % options.DATABASE_ENGINE,
"NAME": options.DATABASE_NAME,
"USER": options.DATABASE_USER,
"PASSWORD": options.DATABASE_PASSWORD,
}
},
"SITE_ID": options.SITE_ID,
"ROOT_URLCONF": app_name + ".urls",
"TEMPLATE_LOADERS": (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
"django.template.loaders.eggs.Loader",
),
"TEMPLATE_DIRS": (
os.path.join(os.path.dirname(__file__),
"bisnode/templates"),
),
"INSTALLED_APPS": (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
app_name,
),
"LOGGING": {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d '
'%(thread)d %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
}
},
'loggers': {
app_name: {
'handlers': ['console'],
'level': 'DEBUG',
'formatter': 'verbose',
'propagate': True,
}
}
}
})
django.setup()
call_command("test", app_name)
if __name__ == "__main__":
main()
|
py | b40aed9b989e360f272bf322a13ecc4f10338519 | import random
import string
def _get_random_string():
return ''.join(random.choice(string.printable) for _ in range(int(random.random() * MAX_LENGTH)))
def random_sign():
return [-1, 1][int(random.random() * 2)]
MAX_LENGTH = 1000
DATA_SIZE = 500
FUZZY_STRINGS = [_get_random_string() for _ in range(DATA_SIZE)]
FUZZY_FLOATS = [random.random() * random_sign() for _ in range(DATA_SIZE)]
FUZZY_INTS = [int(random.random()) * random_sign() for _ in range(DATA_SIZE)]
# NOTE: do not use pytest.mark.parametrize because it will create too many test cases
# and make the output confusing.
def test_dump_and_restore_fuzzy_strings(keyspace):
key1 = 'test1'
key2 = 'test2'
for fuzzy_value in FUZZY_STRINGS:
keyspace.set(key1, fuzzy_value)
keyspace.restore(key2, 0, keyspace.dump(key1), replace=True)
assert keyspace.get(key1) == fuzzy_value
assert keyspace.get(key2) == keyspace.get(key1)
def test_dump_and_restore_fuzzy_sets(keyspace):
key1 = 'test1'
key2 = 'test2'
for fuzzy_value in FUZZY_STRINGS:
keyspace.sadd(key1, fuzzy_value)
keyspace.restore(key2, 0, keyspace.dump(key1), replace=True)
assert keyspace.smembers(key1) == set(FUZZY_STRINGS)
assert keyspace.smembers(key2) == keyspace.smembers(key1)
def test_dump_and_restore_fuzzy_sorted_sets(keyspace):
key1 = 'test1'
key2 = 'test2'
fuzzy_sorted_set = {}
for score, value in zip(FUZZY_FLOATS, FUZZY_STRINGS):
keyspace.zadd(key1, score=score, value=value)
fuzzy_sorted_set[value] = score
keyspace.restore(key2, 0, keyspace.dump(key1), replace=True)
zrange = keyspace.zrange(key2, 0, -1, with_scores=True)
assert dict(zip(zrange[::2], zrange[1::2])) == fuzzy_sorted_set
|
py | b40aee5f9cab6775729d051560bd0d34419b6da7 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2015 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str and range equivalent to Py3's
str = type('')
try:
range = xrange
except NameError:
pass
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
mmal_check,
)
from .encoders import (
PiVideoFrame,
PiVideoFrameType,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, and *clock_mode* parameters
provide initial values for the :attr:`sensor_mode`, :attr:`resolution`,
:attr:`framerate`, and :attr:`clock_mode` attributes of the class (these
attributes are all relatively expensive to set individually, hence setting
them all upon construction is a speed optimization). Please refer to the
attribute documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
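    As a sketch only (the resolution and framerate values below are
    arbitrary), the initial configuration parameters described above can be
    passed directly to the constructor::
        with PiCamera(resolution=(1280, 720), framerate=30) as camera:
            camera.start_preview()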
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. _Compute Module: http://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 30
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset'):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._exif_tags = {
'IFD0.Model': 'RP_OV5647',
'IFD0.Make': 'RaspberryPi',
}
if camera_info.info_rev > 1:
self._exif_tags['IFD0.Model'] = 'RP_%s' % info.cameras[camera_num].camera_name.decode('ascii')
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiCameraResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self.exif_tags['IFD0.Model'].upper() == 'RP_OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiCameraResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enabled = True
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.connect(self._camera.outputs[self.CAMERA_VIDEO_PORT])
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
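        As a rough sketch of typical usage (the alpha value and sleep duration
        below are arbitrary)::
            import time
            camera.start_preview(alpha=200)
            time.sleep(10)
            camera.stop_preview()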
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` which has the same length as an image in `RGB`_ format
(colors represented as interleaved unsigned bytes) with the specified
*size* after the width has been rounded up to the nearest multiple of
32, and the height has been rounded up to the nearest multiple of 16.
For example, if *size* is ``(1280, 720)``, then *source* must be a
buffer with length 1280 × 720 × 3 bytes, or 2,764,800 bytes (because
1280 is a multiple of 32, and 720 is a multiple of 16 no extra rounding
is required). However, if *size* is ``(97, 57)``, then *source* must
be a buffer with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels
beyond column 97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
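        As a minimal sketch (assuming numpy is available; the size, fill value
        and layer below are arbitrary), a 97x57 source padded to 128x64 as
        described above could be shown with::
            import numpy as np
            buf = np.zeros((64, 128, 3), dtype=np.uint8)
            buf[:57, :97, :] = 0xff  # white rectangle in the visible region
            overlay = camera.add_overlay(buf.tobytes(), size=(97, 57), layer=3)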
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: http://en.wikipedia.org/wiki/RGB
.. versionadded:: 1.8
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
if not overlay in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'high', or
'constrained'.
* *level* - The H.264 level to use for encoding. Defaults to '4', but
can be one of '4', '4.1', or '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value is 25000000
(25Mbps), except for H.264 level 4.2 for which the maximum is
62500000 (62.5Mbps). Bitrate 0 indicates the encoder should not use
bitrate control (the encoder is limited by the quality only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
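For example, a minimal sketch (the filename is purely illustrative)
that records roughly ten seconds of H.264 video to a file::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('my_video.h264')
        camera.wait_recording(10)
        camera.stop_recording()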
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
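For example, a sketch (the filenames are purely illustrative) that
splits a recording into two consecutive five-second files::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('part1.h264')
        camera.wait_recording(5)
        camera.split_recording('part2.h264')
        camera.wait_recording(5)
        camera.stop_recording()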
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
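For example, a sketch (the filename is purely illustrative) that
requests a fresh key-frame every ten seconds while recording::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('stream.h264')
        for i in range(6):
            camera.wait_recording(10)
            camera.request_key_frame()
        camera.stop_recording()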
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
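For example, a sketch (the filename is purely illustrative) that
checks for recording errors once per second for a minute::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            for i in range(60):
                camera.wait_recording(1)
        finally:
            camera.stop_recording()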
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`under_the_hood` for more
information about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
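For example, a minimal sketch (the filename is purely illustrative)
that captures a single JPEG after letting the sensor settle::
    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # Give the auto-exposure and white balance algorithms time to settle
        time.sleep(2)
        camera.capture('image.jpg')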
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
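For example, a sketch (assuming RPi.GPIO is installed and the process
has sufficient privileges; the filename is illustrative) that turns
the LED off before capturing::
    import picamera

    with picamera.PiCamera() as camera:
        camera.led = False
        camera.capture('image.jpg')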
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to a GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: http://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
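For example, a sketch (the filename is purely illustrative) that
prints the index and timestamp of the frame currently being encoded::
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(1)
        info = camera.frame
        # info may be None very shortly after start_recording
        if info is not None:
            print(info.index, info.timestamp)
        camera.stop_recording()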
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.enabled = False
self._preview.renderer.connection.enabled = False
self._camera.enabled = False
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enabled = True
self._preview.renderer.connection.enabled = True
self._splitter.connection.enabled = True
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data received from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode* arguments
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 and sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, framerate // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Determine the FPS range for the requested framerate
if framerate >= 1.0:
fps_low = 1
fps_high = framerate
elif framerate >= 0.166:
fps_low = Fraction(166, 1000)
fps_high = Fraction(999, 1000)
else:
fps_low = Fraction(50, 1000)
fps_high = Fraction(166, 1000)
mp = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = mp
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
if framerate < 1:
port.framerate = 0
else:
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 <= value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance which can be easily converted to
an :class:`int` or :class:`float`.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. The
framerate can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>`, :class:`~fractions.Fraction`, or a ``(numerator,
denominator)`` tuple. For example, the following definitions are all
equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = self.framerate
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
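For example, a sketch that requests a specific sensor mode at
construction time (mode 4 is assumed here to be a valid mode for the
attached camera module; the filename is illustrative)::
    import picamera

    with picamera.PiCamera(sensor_mode=4) as camera:
        camera.capture('image.jpg')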
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = self.framerate
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
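For example, a sketch (the filename is purely illustrative) that
selects the "raw" clock mode at construction time and compares frame
timestamps with the camera's own clock::
    import picamera

    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(2)
        # In 'raw' mode these two values are directly comparable
        print(camera.frame.timestamp, camera.timestamp)
        camera.stop_recording()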
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiCameraResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = self.framerate
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_delta(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is reset to 0 when :attr:`framerate` is set.
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When ``True``,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
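As a rough sketch (the figures are illustrative and assume the sensor
supports a 2fps mode), a long exposure might be configured by lowering
the framerate first::
    import picamera
    with picamera.PiCamera() as camera:
        camera.framerate = 2           # permits exposures up to 500,000µs
        camera.shutter_speed = 500000  # request a 0.5 second exposure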
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
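For example, stronger compression might be requested with::
    camera.drc_strength = 'high'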
.. _dynamic range compression: http://en.wikipedia.org/wiki/Gain_compression
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera. Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
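As an illustrative sketch (the filename is arbitrary), a low-light
still might be captured with a fixed sensitivity as follows::
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 800                  # request high sensitivity
        camera.capture('low_light.jpg')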
.. note::
You can query the :attr:`analog_gain` and :attr:`digital_gain`
attributes to determine the actual gains being used by the camera.
If both are 1.0 this equates to ISO 100. Please note that this
capability requires an up to date firmware (#692 or later).
.. note::
With iso settings other than 0 (auto), the :attr:`exposure_mode`
property becomes non-functional.
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. _sensitivity of the camera to light: http://en.wikipedia.org/wiki/Film_speed#Digital
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
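For example, metering could be biased towards the centre of the frame
with::
    camera.meter_mode = 'spot'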
.. _determines the exposure: http://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: http://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: http://www.raspberrypi.org/phpBB3/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
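For example, a capture roughly one stop brighter might be requested
with::
    camera.exposure_compensation = 6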
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`. Please note that these properties are not
directly settable, and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control, otherwise
all frames captured will appear black.
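A rough sketch of that procedure (the delay shown is arbitrary) might
look like::
    import time
    import picamera
    with picamera.PiCamera() as camera:
        camera.iso = 100
        time.sleep(2)                 # give the gains time to settle
        camera.exposure_mode = 'off'  # gains are now fixed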
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: http://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned as :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``.
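For example, fixed gains might be applied roughly as follows (the gain
values shown are arbitrary placeholders)::
    camera.awb_mode = 'off'
    camera.awb_gains = (1.5, 1.2)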
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
| | *x0*, *y0*, | processed as RGB (0) or YUV(1). Input |
| | *y1*, *y2* | values from 0 to *x0* - 1 are remapped |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *u* and *v* default |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults |
| | *r*, *b* | to 1.0. |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
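As a brief illustration (parameter values chosen purely for
demonstration), the ``'colorbalance'`` effect might be configured
with::
    camera.image_effect = 'colorbalance'
    camera.image_effect_params = (0.0, 1.0, 0.5, 1.5)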
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
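A minimal illustration (black and white output)::
    camera.color_effects = (128, 128)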
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
mp = self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR]
return mp.value in (mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
mp = mmal.MMAL_PARAMETER_MIRROR_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_MIRROR,
ct.sizeof(mmal.MMAL_PARAMETER_MIRROR_T)
),
{
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = mp
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
mp = self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR]
return mp.value in (mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
mp = mmal.MMAL_PARAMETER_MIRROR_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_MIRROR,
ct.sizeof(mmal.MMAL_PARAMETER_MIRROR_T)
),
{
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = mp
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
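For instance, a region covering roughly the central quarter of the
frame might be selected with (the values are illustrative)::
    camera.zoom = (0.25, 0.25, 0.5, 0.5)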
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
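For example (the text itself is arbitrary)::
    camera.annotate_text = 'Hello world!'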
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
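For example, a colored backdrop might be drawn behind the annotation
roughly as follows (the color is arbitrary)::
    camera.annotate_background = picamera.Color('blue')
    camera.annotate_text = 'Hello world!'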
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute is backward compatible, although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
|
py | b40aee8dc907b86f5dfe0ec68f0808c1b091225d | from abc import ABCMeta, abstractmethod
from itertools import groupby
from soccer.data import LEAGUE_IDS
__all__ = ['BaseWriter']
class BaseWriter(object):
__metaclass__ = ABCMeta
def __init__(self, output_file):
self.output_filename = output_file
@abstractmethod
def live_scores(self, live_scores):
pass
@abstractmethod
def team_scores(self, team_scores, time):
pass
@abstractmethod
def team_players(self, team):
pass
@abstractmethod
def standings(self, league_table, league):
pass
@abstractmethod
def league_scores(self, total_data, time):
pass
def supported_leagues(self, total_data):
"""Filters out scores of unsupported leagues"""
supported_leagues = {val: key for key, val in LEAGUE_IDS.items()}
get_league_id = lambda x: int(x["_links"]["soccerseason"]["href"].split("/")[-1])
fixtures = (fixture for fixture in total_data["fixtures"]
if get_league_id(fixture) in supported_leagues)
# Sort the scores by league to make it easier to read
fixtures = sorted(fixtures, key=get_league_id)
for league, scores in groupby(fixtures, key=get_league_id):
league = supported_leagues[league]
for score in scores:
yield league, score
|
py | b40aefa5aa6caeabbe5f31d562842d51720130bf | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import shutil
import tempfile
import time
from marionette import MarionetteTestCase, EnduranceTestCaseMixin, \
B2GTestCaseMixin, MemoryEnduranceTestCaseMixin
from marionette.by import By
from marionette import expected
from marionette.errors import NoSuchElementException
from marionette.errors import StaleElementException
from marionette.errors import InvalidResponseException
from marionette.wait import Wait
from file_manager import GaiaDeviceFileManager, GaiaLocalFileManager
class GaiaApp(object):
def __init__(self, origin=None, name=None, frame=None, src=None):
self.frame = frame
self.frame_id = frame
self.src = src
self.name = name
self.origin = origin
def __eq__(self, other):
return self.__dict__ == other.__dict__
class GaiaApps(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
def get_permission(self, app_name, permission_name):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaApps.getPermission('%s', '%s')" % (app_name, permission_name))
def set_permission(self, app_name, permission_name, value):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaApps.setPermission('%s', '%s', '%s')" %
(app_name, permission_name, value))
def launch(self, name, switch_to_frame=True, launch_timeout=None):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("GaiaApps.launchWithName('%s')" % name, script_timeout=launch_timeout)
assert result, "Failed to launch app with name '%s'" % name
app = GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
if app.frame_id is None:
raise Exception("App failed to launch; there is no app frame")
if switch_to_frame:
self.marionette.switch_to_frame(app.frame_id)
return app
@property
def displayed_app(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_script('return GaiaApps.getDisplayedApp();')
return GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
def switch_to_displayed_app(self):
self.marionette.switch_to_default_content()
self.marionette.switch_to_frame(self.displayed_app.frame)
def is_app_installed(self, app_name):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("GaiaApps.locateWithName('%s')" % app_name)
def uninstall(self, name):
self.marionette.switch_to_frame()
self.marionette.execute_async_script("GaiaApps.uninstallWithName('%s')" % name)
def kill(self, app):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("GaiaApps.kill('%s');" % app.origin)
assert result, "Failed to kill app with name '%s'" % app.name
def kill_all(self):
# First we attempt to kill the FTU, we treat it as a user app
for app in self.running_apps(include_system_apps=True):
if app.origin == 'app://ftu.gaiamobile.org':
self.kill(app)
break
# Now kill the user apps
self.marionette.switch_to_frame()
self.marionette.execute_async_script("GaiaApps.killAll();")
@property
def installed_apps(self):
apps = self.marionette.execute_async_script(
'return GaiaApps.getInstalledApps();')
result = []
for app in [a for a in apps if not a['manifest'].get('role')]:
entry_points = app['manifest'].get('entry_points')
if entry_points:
for ep in entry_points.values():
result.append(GaiaApp(
origin=app['origin'],
name=ep['name']))
else:
result.append(GaiaApp(
origin=app['origin'],
name=app['manifest']['name']))
return result
def running_apps(self, include_system_apps=False):
''' Returns a list of running apps
Args:
include_system_apps: Includes otherwise hidden System apps in the list
Returns:
A list of GaiaApp objects representing the running apps.
'''
include_system_apps = json.dumps(include_system_apps)
self.marionette.switch_to_frame()
apps = self.marionette.execute_script(
"return GaiaApps.getRunningApps(%s);" % include_system_apps)
result = []
for app in [a[1] for a in apps.items()]:
result.append(GaiaApp(origin=app['origin'], name=app['name']))
return result
class GaiaData(object):
def __init__(self, marionette, testvars=None):
self.apps = GaiaApps(marionette)
self.marionette = marionette
self.testvars = testvars or {}
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_data_layer.js"))
self.marionette.import_script(js)
def set_time(self, date_number):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.marionette.execute_script("window.navigator.mozTime.set(%s);" % date_number)
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
@property
def all_contacts(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True)
@property
def sim_contacts(self):
self.marionette.switch_to_frame()
adn_contacts = self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts("adn");', special_powers=True)
sdn_contacts = self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts("sdn");', special_powers=True)
return adn_contacts + sdn_contacts
def insert_contact(self, contact):
self.marionette.switch_to_frame()
mozcontact = contact.create_mozcontact()
result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(mozcontact), special_powers=True)
assert result, 'Unable to insert contact %s' % contact
def remove_all_contacts(self):
self.marionette.switch_to_frame()
timeout = max(self.marionette.timeout or 60000, 1000 * len(self.all_contacts))
result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True, script_timeout=timeout)
assert result, 'Unable to remove all contacts'
def get_setting(self, name):
return self.marionette.execute_async_script('return GaiaDataLayer.getSetting("%s")' % name, special_powers=True)
@property
def all_settings(self):
return self.get_setting('*')
def set_setting(self, name, value):
import json
value = json.dumps(value)
result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting("%s", %s)' % (name, value), special_powers=True)
assert result, "Unable to change setting with name '%s' to '%s'" % (name, value)
def _get_pref(self, datatype, name):
self.marionette.switch_to_frame()
pref = self.marionette.execute_script("return SpecialPowers.get%sPref('%s');" % (datatype, name), special_powers=True)
return pref
def _set_pref(self, datatype, name, value):
value = json.dumps(value)
self.marionette.switch_to_frame()
self.marionette.execute_script("SpecialPowers.set%sPref('%s', %s);" % (datatype, name, value), special_powers=True)
def get_bool_pref(self, name):
"""Returns the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._get_pref('Bool', name)
def set_bool_pref(self, name, value):
"""Sets the value of a Gecko boolean pref, which is different from a Gaia setting."""
return self._set_pref('Bool', name, value)
def get_int_pref(self, name):
"""Returns the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._get_pref('Int', name)
def set_int_pref(self, name, value):
"""Sets the value of a Gecko integer pref, which is different from a Gaia setting."""
return self._set_pref('Int', name, value)
def get_char_pref(self, name):
"""Returns the value of a Gecko string pref, which is different from a Gaia setting."""
return self._get_pref('Char', name)
def set_char_pref(self, name, value):
"""Sets the value of a Gecko string pref, which is different from a Gaia setting."""
return self._set_pref('Char', name, value)
def set_volume(self, value):
channels = ['alarm', 'content', 'notification']
for channel in channels:
self.set_setting('audio.volume.%s' % channel, value)
def bluetooth_enable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.enableBluetooth()")
def bluetooth_disable(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.disableBluetooth()")
@property
def bluetooth_is_enabled(self):
return self.marionette.execute_script("return window.navigator.mozBluetooth.enabled")
@property
def is_cell_data_enabled(self):
return self.get_setting('ril.data.enabled')
def connect_to_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToCellData()", special_powers=True)
assert result, 'Unable to connect to cell data'
def disable_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableCellData()", special_powers=True)
assert result, 'Unable to disable cell data'
@property
def is_cell_data_connected(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection.data.connected;')
def enable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', True)
def disable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', False)
@property
def is_wifi_enabled(self):
return self.marionette.execute_script("return window.navigator.mozWifiManager && "
"window.navigator.mozWifiManager.enabled;")
def enable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.enableWiFi()", special_powers=True)
assert result, 'Unable to enable WiFi'
def disable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableWiFi()", special_powers=True)
assert result, 'Unable to disable WiFi'
def connect_to_wifi(self, network=None):
network = network or self.testvars.get('wifi')
assert network, 'No WiFi network provided'
self.enable_wifi()
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToWiFi(%s)" % json.dumps(network),
script_timeout = max(self.marionette.timeout, 60000))
assert result, 'Unable to connect to WiFi network'
def forget_all_networks(self):
self.marionette.switch_to_frame()
self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()')
def is_wifi_connected(self, network=None):
network = network or self.testvars.get('wifi')
self.marionette.switch_to_frame()
return self.marionette.execute_script("return GaiaDataLayer.isWiFiConnected(%s)" % json.dumps(network))
@property
def known_networks(self):
known_networks = self.marionette.execute_async_script(
'return GaiaDataLayer.getKnownNetworks()')
return [n for n in known_networks if n]
@property
def active_telephony_state(self):
# Returns the state of only the currently active call or None if no active call
return self.marionette.execute_script("return GaiaDataLayer.getMozTelephonyState()")
@property
def is_antenna_available(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable')
@property
def is_fm_radio_enabled(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled')
@property
def fm_radio_frequency(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency')
@property
def media_files(self):
result = []
result.extend(self.music_files)
result.extend(self.picture_files)
result.extend(self.video_files)
return result
def delete_all_sms(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.deleteAllSms();", special_powers=True)
def get_all_sms(self):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script("return GaiaDataLayer.getAllSms();", special_powers=True)
def delete_all_call_log_entries(self):
"""The call log needs to be open and focused in order for this to work."""
self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();')
def kill_active_call(self):
self.marionette.execute_script("var telephony = window.navigator.mozTelephony; " +
"if(telephony.active) telephony.active.hangUp();")
@property
def music_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllMusic();')
@property
def picture_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllPictures();')
@property
def video_files(self):
return self.marionette.execute_async_script(
'return GaiaDataLayer.getAllVideos();')
def sdcard_files(self, extension=''):
files = self.marionette.execute_async_script(
'return GaiaDataLayer.getAllSDCardFiles();')
if len(extension):
return [filename for filename in files if filename.endswith(extension)]
return files
def send_sms(self, number, message):
self.marionette.switch_to_frame()
import json
number = json.dumps(number)
message = json.dumps(message)
result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True)
assert result, 'Unable to send SMS to recipient %s with text %s' % (number, message)
# FIXME: Bug 1011000: will make use of SoundManager instead
def wait_for_audio_channel_changed(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.waitForAudioChannelChanged();")
assert result, "Failed to get a mozChromeEvent audio-channel-changed"
return result
# FIXME: Bug 1011000: will make use of SoundManager instead
def wait_for_visible_audio_channel_changed(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.waitForVisibleAudioChannelChanged();")
assert result, "Failed to get a mozChromeEvent visible-audio-channel-changed"
return result
class Accessibility(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir,
'atoms', "accessibility.js"))
self.marionette.import_script(js)
def is_hidden(self, element):
return self._run_async_script('isHidden', [element])
def is_visible(self, element):
return self._run_async_script('isVisible', [element])
def is_disabled(self, element):
return self._run_async_script('isDisabled', [element])
def click(self, element):
self._run_async_script('click', [element])
def wheel(self, element, direction):
self.marionette.execute_script('Accessibility.wheel.apply(Accessibility, arguments)', [
element, direction])
def get_name(self, element):
return self._run_async_script('getName', [element])
def get_role(self, element):
return self._run_async_script('getRole', [element])
def dispatchEvent(self):
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new CustomEvent(" +
"'accessibility-action'));")
def _run_async_script(self, func, args):
result = self.marionette.execute_async_script(
'return Accessibility.%s.apply(Accessibility, arguments)' % func,
args, special_powers=True)
if not result:
return
        if 'error' in result:
message = 'accessibility.js error: %s' % result['error']
raise Exception(message)
return result.get('result', None)
class FakeUpdateChecker(object):
def __init__(self, marionette):
self.marionette = marionette
self.fakeupdatechecker_atom = os.path.abspath(
os.path.join(__file__, os.path.pardir, 'atoms', "fake_update-checker.js"))
def check_updates(self):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.marionette.import_script(self.fakeupdatechecker_atom)
self.marionette.execute_script("GaiaUITests_FakeUpdateChecker();")
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
class GaiaDevice(object):
def __init__(self, marionette, testvars=None, manager=None):
self.manager = manager
self.marionette = marionette
self.testvars = testvars or {}
if self.is_desktop_b2g:
self.file_manager = GaiaLocalFileManager(self)
# Use a temporary directory for storage
self.storage_path = tempfile.mkdtemp()
self._set_storage_path()
elif self.manager:
self.file_manager = GaiaDeviceFileManager(self)
# Use the device root for storage
self.storage_path = self.manager.deviceRoot
self.lockscreen_atom = os.path.abspath(
os.path.join(__file__, os.path.pardir, 'atoms', "gaia_lock_screen.js"))
def _set_storage_path(self):
if self.is_desktop_b2g:
# Override the storage location for desktop B2G. This will only
# work if the B2G instance is running locally.
GaiaData(self.marionette).set_char_pref(
'device.storage.overrideRootDir', self.storage_path)
@property
def is_android_build(self):
if self.testvars.get('is_android_build') is None:
self.testvars['is_android_build'] = 'android' in self.marionette.session_capabilities['platformName'].lower()
return self.testvars['is_android_build']
@property
def is_emulator(self):
if not hasattr(self, '_is_emulator'):
self._is_emulator = self.marionette.session_capabilities['device'] == 'qemu'
return self._is_emulator
@property
def is_desktop_b2g(self):
if self.testvars.get('is_desktop_b2g') is None:
self.testvars['is_desktop_b2g'] = self.marionette.session_capabilities['device'] == 'desktop'
return self.testvars['is_desktop_b2g']
@property
def is_online(self):
# Returns true if the device has a network connection established (cell data, wifi, etc)
return self.marionette.execute_script('return window.navigator.onLine;')
@property
def has_mobile_connection(self):
# XXX: check bug-926169
# this is used to keep all tests passing while introducing multi-sim APIs
return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +
'window.navigator.mozMobileConnections && ' +
'window.navigator.mozMobileConnections[0]; ' +
'return mobileConnection !== undefined')
@property
def has_wifi(self):
if not hasattr(self, '_has_wifi'):
self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined')
return self._has_wifi
def restart_b2g(self):
self.stop_b2g()
time.sleep(2)
self.start_b2g()
def start_b2g(self, timeout=60):
if self.marionette.instance:
# launch the gecko instance attached to marionette
self.marionette.instance.start()
elif self.is_android_build:
self.manager.shellCheckOutput(['start', 'b2g'])
else:
raise Exception('Unable to start B2G')
self.marionette.wait_for_port()
self.marionette.start_session()
self.wait_for_b2g_ready(timeout)
# Reset the storage path for desktop B2G
self._set_storage_path()
def wait_for_b2g_ready(self, timeout):
# Wait for the homescreen to finish loading
Wait(self.marionette, timeout).until(expected.element_present(
By.CSS_SELECTOR, '#homescreen[loading-state=false]'))
@property
def is_b2g_running(self):
return 'b2g' in self.manager.shellCheckOutput(['toolbox', 'ps'])
def stop_b2g(self, timeout=5):
if self.marionette.instance:
# close the gecko instance attached to marionette
self.marionette.instance.close()
elif self.is_android_build:
self.manager.shellCheckOutput(['stop', 'b2g'])
Wait(self.marionette, timeout=timeout).until(
lambda m: not self.is_b2g_running,
message='b2g failed to stop.')
else:
raise Exception('Unable to stop B2G')
self.marionette.client.close()
self.marionette.session = None
self.marionette.window = None
def press_sleep_button(self):
self.marionette.execute_script("""
window.wrappedJSObject.dispatchEvent(new CustomEvent('mozChromeEvent', {
detail: {
type: 'sleep-button-press'
}
}));""")
def press_release_volume_up_then_down_n_times(self, n_times):
self.marionette.execute_script("""
function sendEvent(aName, aType) {
window.wrappedJSObject.dispatchEvent(new CustomEvent('mozChromeEvent', {
detail: {
type: aName + '-button-' + aType
}
}));
}
for (var i = 0; i < arguments[0]; ++i) {
sendEvent('volume-up', 'press');
sendEvent('volume-up', 'release');
sendEvent('volume-down', 'press');
sendEvent('volume-down', 'release');
};""", script_args=[n_times])
def turn_screen_off(self):
self.marionette.execute_script("window.wrappedJSObject.ScreenManager.turnScreenOff(true)")
def turn_screen_on(self):
self.marionette.execute_script("window.wrappedJSObject.ScreenManager.turnScreenOn(true)")
@property
def is_screen_enabled(self):
return self.marionette.execute_script('return window.wrappedJSObject.ScreenManager.screenEnabled')
def touch_home_button(self):
apps = GaiaApps(self.marionette)
if apps.displayed_app.name.lower() != 'homescreen':
# touching home button will return to homescreen
self._dispatch_home_button_event()
Wait(self.marionette).until(
lambda m: apps.displayed_app.name.lower() == 'homescreen')
apps.switch_to_displayed_app()
else:
apps.switch_to_displayed_app()
mode = self.marionette.find_element(By.TAG_NAME, 'body').get_attribute('class')
self._dispatch_home_button_event()
apps.switch_to_displayed_app()
if mode == 'edit-mode':
# touching home button will exit edit mode
Wait(self.marionette).until(lambda m: m.find_element(
By.TAG_NAME, 'body').get_attribute('class') != mode)
else:
# touching home button inside homescreen will scroll it to the top
Wait(self.marionette).until(lambda m: m.execute_script(
"return document.querySelector('.scrollable').scrollTop") == 0)
def _dispatch_home_button_event(self):
self.marionette.switch_to_frame()
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
def hold_home_button(self):
self.marionette.switch_to_frame()
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('holdhome'));")
def hold_sleep_button(self):
self.marionette.switch_to_frame()
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('holdsleep'));")
@property
def is_locked(self):
self.marionette.switch_to_frame()
return self.marionette.execute_script('return window.wrappedJSObject.lockScreen.locked')
def lock(self):
self.marionette.import_script(self.lockscreen_atom)
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.lock()')
assert result, 'Unable to lock screen'
Wait(self.marionette).until(lambda m: m.find_element(By.CSS_SELECTOR, 'div.lockScreenWindow.active'))
def unlock(self):
self.marionette.import_script(self.lockscreen_atom)
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('GaiaLockScreen.unlock()')
assert result, 'Unable to unlock screen'
def change_orientation(self, orientation):
""" There are 4 orientation states which the phone can be passed in:
portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary
"""
self.marionette.execute_async_script("""
if (arguments[0] === arguments[1]) {
marionetteScriptFinished();
}
else {
var expected = arguments[1];
window.screen.onmozorientationchange = function(e) {
console.log("Received 'onmozorientationchange' event.");
waitFor(
function() {
window.screen.onmozorientationchange = null;
marionetteScriptFinished();
},
function() {
return window.screen.mozOrientation === expected;
}
);
};
console.log("Changing orientation to '" + arguments[1] + "'.");
window.screen.mozLockOrientation(arguments[1]);
};""", script_args=[self.screen_orientation, orientation])
@property
def screen_width(self):
return self.marionette.execute_script('return window.screen.width')
@property
def screen_orientation(self):
return self.marionette.execute_script('return window.screen.mozOrientation')
class GaiaTestCase(MarionetteTestCase, B2GTestCaseMixin):
def __init__(self, *args, **kwargs):
self.restart = kwargs.pop('restart', False)
MarionetteTestCase.__init__(self, *args, **kwargs)
B2GTestCaseMixin.__init__(self, *args, **kwargs)
def setUp(self):
try:
MarionetteTestCase.setUp(self)
except (InvalidResponseException, IOError):
if self.restart:
pass
self.device = GaiaDevice(self.marionette,
manager=self.device_manager,
testvars=self.testvars)
if self.restart and (self.device.is_android_build or self.marionette.instance):
# Restart if it's a device, or we have passed a binary instance with --binary command arg
self.device.stop_b2g()
if self.device.is_android_build:
self.cleanup_data()
self.device.start_b2g()
# Run the fake update checker
FakeUpdateChecker(self.marionette).check_updates()
# We need to set the default timeouts because we may have a new session
if self.marionette.timeout is None:
# if no timeout is passed in, we detect the hardware type and set reasonable defaults
timeouts = {}
if self.device.is_desktop_b2g:
self.marionette.timeout = 5000
timeouts[self.marionette.TIMEOUT_SEARCH] = 5000
timeouts[self.marionette.TIMEOUT_SCRIPT] = 10000
timeouts[self.marionette.TIMEOUT_PAGE] = 10000
elif self.device.is_emulator:
self.marionette.timeout = 30000
timeouts[self.marionette.TIMEOUT_SEARCH] = 30000
timeouts[self.marionette.TIMEOUT_SCRIPT] = 60000
timeouts[self.marionette.TIMEOUT_PAGE] = 60000
else:
# else, it is a device, the type of which is difficult to detect
self.marionette.timeout = 10000
timeouts[self.marionette.TIMEOUT_SEARCH] = 10000
timeouts[self.marionette.TIMEOUT_SCRIPT] = 20000
timeouts[self.marionette.TIMEOUT_PAGE] = 20000
for k, v in timeouts.items():
self.marionette.timeouts(k, v)
else:
# if the user has passed in --timeout then we override everything
self.marionette.timeouts(self.marionette.TIMEOUT_SEARCH, self.marionette.timeout)
self.marionette.timeouts(self.marionette.TIMEOUT_SCRIPT, self.marionette.timeout)
self.marionette.timeouts(self.marionette.TIMEOUT_PAGE, self.marionette.timeout)
self.apps = GaiaApps(self.marionette)
self.data_layer = GaiaData(self.marionette, self.testvars)
self.accessibility = Accessibility(self.marionette)
self.cleanup_storage()
if self.restart:
self.cleanup_gaia(full_reset=False)
else:
self.cleanup_gaia(full_reset=True)
def cleanup_data(self):
self.device.file_manager.remove('/cache/*')
self.device.file_manager.remove('/data/b2g/mozilla')
self.device.file_manager.remove('/data/local/debug_info_trigger')
self.device.file_manager.remove('/data/local/indexedDB')
self.device.file_manager.remove('/data/local/OfflineCache')
self.device.file_manager.remove('/data/local/permissions.sqlite')
self.device.file_manager.remove('/data/local/storage/persistent')
# remove remembered networks
self.device.file_manager.remove('/data/misc/wifi/wpa_supplicant.conf')
def cleanup_storage(self):
"""Remove all files from the device's storage paths"""
storage_paths = [self.device.storage_path]
if self.device.is_android_build:
# TODO: Remove hard-coded paths once bug 1018079 is resolved
storage_paths.extend(['/mnt/sdcard',
'/mnt/extsdcard',
'/storage/sdcard',
'/storage/sdcard0',
'/storage/sdcard1'])
for path in storage_paths:
if self.device.file_manager.dir_exists(path):
for item in self.device.file_manager.list_items(path):
self.device.file_manager.remove('/'.join([path, item]))
def cleanup_gaia(self, full_reset=True):
# restore settings from testvars
        for name, value in self.testvars.get('settings', {}).items():
            self.data_layer.set_setting(name, value)
# restore prefs from testvars
for name, value in self.testvars.get('prefs', {}).items():
if type(value) is int:
self.data_layer.set_int_pref(name, value)
elif type(value) is bool:
self.data_layer.set_bool_pref(name, value)
else:
self.data_layer.set_char_pref(name, value)
# unlock
if self.data_layer.get_setting('lockscreen.enabled'):
self.device.unlock()
# kill the FTU and any open, user-killable apps
self.apps.kill_all()
if full_reset:
# disable passcode
self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111')
self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False)
# change language back to english
self.data_layer.set_setting("language.current", "en-US")
# reset keyboard to default values
self.data_layer.set_setting("keyboard.enabled-layouts",
"{'app://keyboard.gaiamobile.org/manifest.webapp': {'en': True, 'number': True}}")
# reset do not track
self.data_layer.set_setting('privacy.donottrackheader.value', '-1')
if self.data_layer.get_setting('airplaneMode.enabled'):
# enable the device radio, disable airplane mode
self.data_layer.set_setting('airplaneMode.enabled', False)
# Re-set edge gestures pref to False
self.data_layer.set_setting('edgesgesture.enabled', False)
# disable carrier data connection
if self.device.has_mobile_connection:
self.data_layer.disable_cell_data()
self.data_layer.disable_cell_roaming()
if self.device.has_wifi:
# Bug 908553 - B2G Emulator: support wifi emulation
if not self.device.is_emulator:
self.data_layer.enable_wifi()
self.data_layer.forget_all_networks()
self.data_layer.disable_wifi()
# remove data
self.data_layer.remove_all_contacts()
# reset to home screen
self.device.touch_home_button()
# disable sound completely
self.data_layer.set_volume(0)
# disable auto-correction of keyboard
self.data_layer.set_setting('keyboard.autocorrect', False)
def connect_to_network(self):
if not self.device.is_online:
try:
self.connect_to_local_area_network()
except:
self.marionette.log('Failed to connect to wifi, trying cell data instead.')
if self.device.has_mobile_connection:
self.data_layer.connect_to_cell_data()
else:
raise Exception('Unable to connect to network')
assert self.device.is_online
def connect_to_local_area_network(self):
if not self.device.is_online:
if self.testvars.get('wifi') and self.device.has_wifi:
self.data_layer.connect_to_wifi()
assert self.device.is_online
else:
raise Exception('Unable to connect to local area network')
def push_resource(self, filename, remote_path=None, count=1):
# push to the test storage space defined by device root
self.device.file_manager.push_file(
self.resource(filename), remote_path, count)
def resource(self, filename):
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename))
def wait_for_element_present(self, by, locator, timeout=None):
return Wait(self.marionette, timeout, ignored_exceptions=NoSuchElementException).until(
lambda m: m.find_element(by, locator))
    def wait_for_element_not_present(self, by, locator, timeout=None):
        self.marionette.set_search_timeout(0)
        try:
            return Wait(self.marionette, timeout).until(
                lambda m: not m.find_element(by, locator))
        except NoSuchElementException:
            pass
        finally:
            # always restore the search timeout, even if the wait times out
            self.marionette.set_search_timeout(self.marionette.timeout or 10000)
def wait_for_element_displayed(self, by, locator, timeout=None):
Wait(self.marionette, timeout, ignored_exceptions=[NoSuchElementException, StaleElementException]).until(
lambda m: m.find_element(by, locator).is_displayed())
    def wait_for_element_not_displayed(self, by, locator, timeout=None):
        self.marionette.set_search_timeout(0)
        try:
            Wait(self.marionette, timeout, ignored_exceptions=StaleElementException).until(
                lambda m: not m.find_element(by, locator).is_displayed())
        except NoSuchElementException:
            pass
        finally:
            # always restore the search timeout, even if the wait times out
            self.marionette.set_search_timeout(self.marionette.timeout or 10000)
def wait_for_condition(self, method, timeout=None, message=None):
Wait(self.marionette, timeout).until(method, message=message)
def is_element_present(self, by, locator):
self.marionette.set_search_timeout(0)
try:
self.marionette.find_element(by, locator)
return True
except NoSuchElementException:
return False
finally:
self.marionette.set_search_timeout(self.marionette.timeout or 10000)
def is_element_displayed(self, by, locator):
self.marionette.set_search_timeout(0)
try:
return self.marionette.find_element(by, locator).is_displayed()
except NoSuchElementException:
return False
finally:
self.marionette.set_search_timeout(self.marionette.timeout or 10000)
def tearDown(self):
if self.device.is_desktop_b2g and self.device.storage_path:
shutil.rmtree(self.device.storage_path, ignore_errors=True)
self.apps = None
self.data_layer = None
MarionetteTestCase.tearDown(self)
class GaiaEnduranceTestCase(GaiaTestCase, EnduranceTestCaseMixin, MemoryEnduranceTestCaseMixin):
def __init__(self, *args, **kwargs):
GaiaTestCase.__init__(self, *args, **kwargs)
EnduranceTestCaseMixin.__init__(self, *args, **kwargs)
MemoryEnduranceTestCaseMixin.__init__(self, *args, **kwargs)
kwargs.pop('iterations', None)
kwargs.pop('checkpoint_interval', None)
def close_app(self):
# Close the current app (self.app) by using the home button
self.device.touch_home_button()
# Bring up the cards view
_cards_view_locator = ('id', 'cards-view')
self.device.hold_home_button()
self.wait_for_element_displayed(*_cards_view_locator)
# Sleep a bit
time.sleep(5)
# Tap the close icon for the current app
locator_part_two = '#cards-view li.card[data-origin*="%s"] .close-card' % self.app_under_test.lower()
_close_button_locator = ('css selector', locator_part_two)
close_card_app_button = self.marionette.find_element(*_close_button_locator)
close_card_app_button.tap()
|
py | b40af0db80c12573cb56eedab55bebb31990e6a3 | import os
import shutil
import tempfile
import json
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework import status
from hs_core import hydroshare
from hs_core.views import add_metadata_element, update_metadata_element, delete_metadata_element
from hs_core.testing import MockIRODSTestCaseMixin, ViewTestCase
class TestCRUDMetadata(MockIRODSTestCaseMixin, ViewTestCase):
def setUp(self):
        super(TestCRUDMetadata, self).setUp()
        # tearDown() removes self.temp_dir, so create it here (assumed to be a
        # temporary scratch directory; it is not defined elsewhere in this snippet)
        self.temp_dir = tempfile.mkdtemp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.username = 'john'
self.password = 'jhmypassword'
self.user = hydroshare.create_account(
'[email protected]',
username=self.username,
first_name='John',
last_name='Clarson',
superuser=False,
password=self.password,
groups=[]
)
self.gen_res = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.user,
title='Generic Resource Key/Value Metadata Testing'
)
def tearDown(self):
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
super(TestCRUDMetadata, self).tearDown()
def test_CRUD_metadata(self):
# here we are testing the add_metadata_element view function
# There should be no keywords (subject element) now
self.assertEqual(self.gen_res.metadata.subjects.count(), 0)
# add keywords
url_params = {'shortkey': self.gen_res.short_id, 'element_name': 'subject'}
post_data = {'value': 'kw-1, kw 2, key word'}
url = reverse('add_metadata_element', kwargs=url_params)
request = self.factory.post(url, data=post_data)
request.user = self.user
        # make it an AJAX request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
self.set_request_message_attributes(request)
response = add_metadata_element(request, shortkey=self.gen_res.short_id,
element_name='subject')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertEqual(response_dict['status'], 'success')
self.assertEqual(response_dict['element_name'], 'subject')
self.gen_res.refresh_from_db()
self.assertEqual(self.gen_res.metadata.subjects.count(), 3)
# here we are testing the update_metadata_element view function
# update title metadata
self.assertEqual(self.gen_res.metadata.title.value,
'Generic Resource Key/Value Metadata Testing')
title_element = self.gen_res.metadata.title
url_params = {'shortkey': self.gen_res.short_id, 'element_name': 'title',
'element_id': title_element.id}
post_data = {'value': 'Updated Resource Title'}
url = reverse('update_metadata_element', kwargs=url_params)
request = self.factory.post(url, data=post_data)
request.user = self.user
        # make it an AJAX request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
self.set_request_message_attributes(request)
response = update_metadata_element(request, shortkey=self.gen_res.short_id,
element_name='title', element_id=title_element.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertEqual(response_dict['status'], 'success')
self.gen_res.refresh_from_db()
self.assertEqual(self.gen_res.metadata.title.value, 'Updated Resource Title')
# here we are testing the delete_metadata_element view function
# first create a contributor element and then delete it
# there should be no contributors now
self.assertEqual(self.gen_res.metadata.contributors.count(), 0)
url_params = {'shortkey': self.gen_res.short_id, 'element_name': 'contributor'}
post_data = {'name': 'John Smith', 'email': '[email protected]'}
url = reverse('add_metadata_element', kwargs=url_params)
request = self.factory.post(url, data=post_data)
request.user = self.user
        # make it an AJAX request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
self.set_request_message_attributes(request)
response = add_metadata_element(request, shortkey=self.gen_res.short_id,
element_name='contributor')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertEqual(response_dict['status'], 'success')
self.assertEqual(response_dict['element_name'], 'contributor')
self.gen_res.refresh_from_db()
# there should be one contributor now
self.assertEqual(self.gen_res.metadata.contributors.count(), 1)
# now delete the contributor we created above
contributor = self.gen_res.metadata.contributors.first()
url_params = {'shortkey': self.gen_res.short_id, 'element_name': 'contributor',
'element_id': contributor.id}
url = reverse('delete_metadata_element', kwargs=url_params)
request = self.factory.post(url, data={})
request.user = self.user
request.META['HTTP_REFERER'] = 'some-url'
self.set_request_message_attributes(request)
self.add_session_to_request(request)
response = delete_metadata_element(request, shortkey=self.gen_res.short_id,
element_name='contributor', element_id=contributor.id)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['Location'], request.META['HTTP_REFERER'])
self.gen_res.refresh_from_db()
# there should be no contributors
self.assertEqual(self.gen_res.metadata.contributors.count(), 0)
hydroshare.delete_resource(self.gen_res.short_id)
|
py | b40af133cb90f177ca2a22a4b94a1c10a190c2a2 | from baseapp import db
from flask_login import UserMixin
class Users(UserMixin, db.Model):
id = db.Column(db.BigInteger, primary_key=True)
username = db.Column(db.String(50), unique=True)
password = db.Column(db.String(150))
email = db.Column(db.String(50))
admin = db.Column(db.Boolean)
group = db.Column(db.String(50))
|
py | b40af14df3cecf62fe715f449e657ade651b6747 | import numpy as np
import os
import os.path as osp
from .globals import dir_path
import ctypes
ctypes.CDLL(osp.join(dir_path, "libamirstan_plugin.so"))
import tensorrt as trt
import torchvision.ops
def create_roipool_plugin(layer_name,
out_size,
featmap_strides,
roi_scale_factor,
finest_scale):
creator = trt.get_plugin_registry().get_plugin_creator(
'RoiPoolPluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_out_size = trt.PluginField("out_size", np.array(
[out_size], dtype=np.int32), trt.PluginFieldType.INT32)
pfc.append(pf_out_size)
pf_featmap_strides = trt.PluginField("featmap_strides", np.array(
featmap_strides).astype(np.float32), trt.PluginFieldType.FLOAT32)
pfc.append(pf_featmap_strides)
pf_roi_scale_factor = trt.PluginField("roi_scale_factor", np.array(
[roi_scale_factor], dtype=np.float32), trt.PluginFieldType.FLOAT32)
pfc.append(pf_roi_scale_factor)
pf_finest_scale = trt.PluginField("finest_scale", np.array(
[finest_scale], dtype=np.int32), trt.PluginFieldType.INT32)
pfc.append(pf_finest_scale)
return creator.create_plugin(layer_name, pfc) |
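# --- Illustrative usage sketch (assumed, not part of the original module) ---
# The IPluginV2 returned by create_roipool_plugin() would typically be wired
# into a TensorRT network with INetworkDefinition.add_plugin_v2, passing the
# FPN feature maps and the rois tensor as inputs. The tensor names, parameter
# values, and input order below are assumptions for illustration only:
#
#   plugin = create_roipool_plugin("roi_pool", out_size=7,
#                                  featmap_strides=[4., 8., 16., 32.],
#                                  roi_scale_factor=1.0, finest_scale=56)
#   layer = network.add_plugin_v2(inputs=[rois, feat0, feat1, feat2, feat3],
#                                 plugin=plugin)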
py | b40af21ae7301b962c37241fed5d44e9a367187d | import qrcode
import os
from django.conf import settings
from .services import (
_device,
_rack,
)
def _img_name(pk, device):
"""
File name
"""
    if device:
img_name = '/device_qr/d-' + str(pk) + '.png'
else:
img_name = '/rack_qr/r-' + str(pk) + '.png'
return img_name
def _create_qr(data, pk, device):
"""
Generate QR
"""
qr = qrcode.QRCode(
version=1,
box_size=2,
error_correction=qrcode.constants.ERROR_CORRECT_M,
border=1)
qr.add_data(data)
qr.make(fit=True)
    img = qr.make_image(fill_color='black', back_color='white')
img.save(settings.BASE_DIR + '/mainapp/static' + _img_name(pk, device))
def _remove_qr(pk, device):
"""
Delete QR
"""
img_name = settings.BASE_DIR + '/mainapp/static' + _img_name(pk, device)
if os.path.isfile(img_name):
os.remove(img_name)
def _show_qr(data, pk, device):
"""
Show (create/update) QR
"""
_create_qr(data, pk, device)
return _img_name(pk, device)
def _qr_data(pk, device):
"""
QR data
"""
    if device:
return 'http://127.0.0.1:80001/device_detail/' + str(pk) + \
'\nResp: ' + _device(pk).responsible + \
'\nFResp: ' + _device(pk).financially_responsible_person + \
'\nInv: ' + _device(pk).device_inventory_number + \
'\nAsset: ' + _device(pk).fixed_asset
else:
return 'http://127.0.0.1:80001/rack_detail/' + str(pk) + \
'\nResp: ' + _rack(pk).responsible + \
'\nFResp: ' + _rack(pk).rack_financially_responsible_person + \
'\nInv: ' + _rack(pk).rack_inventory_number + \
'\nAsset: ' + _rack(pk).fixed_asset
|
py | b40af2675653da43a4ced5fe6b17d9911a2833b8 | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import fastestimator as fe
class TestData(unittest.TestCase):
def setUp(self):
self.d = fe.util.Data({"a": 0, "b": 1, "c": 2})
def test_write_with_log(self):
self.d.write_with_log("d", 3)
self.assertEqual(self.d.read_logs(), {'d': 3})
def test_write_without_log(self):
self.d.write_without_log("e", 5)
self.assertEqual(self.d.read_logs(), {})
def test_read_logs(self):
self.d.write_with_log("d", 3)
self.d.write_with_log("a", 4)
self.assertEqual(self.d.read_logs(), {"d": 3, "a": 4})
|
py | b40af42717fe27addb29d14bfad3006cb685f231 | def setup_module():
import pytest
from nltk.inference.mace import Mace
try:
m = Mace()
m._find_binary("mace4")
except LookupError:
pytest.skip("Mace4/Prover9 is not available so inference.doctest was skipped")
|
py | b40af576f201ed13e3f57c2fbe948023f6c281fa | ################################################################################################################################################
######################################################## Import required modules ###############################################################
################################################################################################################################################
import argparse
import pprint
import json
import logging
import os
import sys
import pandas as pd
import random
import time
import glob
import numpy as np
from collections import defaultdict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset, DataLoader
from transformers import RobertaModel, RobertaConfig
from transformers import RobertaForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
################################################################################################################################################
###################################################### Parse input arguments ###################################################################
################################################################################################################################################
def parse_args():
parser = argparse.ArgumentParser()
# CLI args
parser.add_argument('--train_batch_size',
type=int,
default=64)
parser.add_argument('--train_steps_per_epoch',
type=int,
default=64)
parser.add_argument('--validation_batch_size',
type=int,
default=64)
parser.add_argument('--validation_steps_per_epoch',
type=int,
default=64)
parser.add_argument('--epochs',
type=int,
default=1)
parser.add_argument('--freeze_bert_layer',
type=eval,
default=False)
parser.add_argument('--learning_rate',
type=float,
default=0.01)
parser.add_argument('--momentum',
type=float,
default=0.5)
parser.add_argument('--seed',
type=int,
default=42)
parser.add_argument('--log_interval',
type=int,
default=100)
parser.add_argument('--backend',
type=str,
default=None)
parser.add_argument('--max_seq_length',
type=int,
default=128)
parser.add_argument('--run_validation',
type=eval,
default=False)
# Container environment
parser.add_argument('--hosts',
type=list,
default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current_host',
type=str,
default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--model_dir',
type=str,
default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train_data',
type=str,
default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--validation_data',
type=str,
default=os.environ['SM_CHANNEL_VALIDATION'])
parser.add_argument('--output_dir',
type=str,
default=os.environ['SM_OUTPUT_DIR'])
parser.add_argument('--num_gpus',
type=int,
default=os.environ['SM_NUM_GPUS'])
# Debugger args
parser.add_argument("--save-frequency",
type=int,
default=10,
help="frequency with which to save steps")
parser.add_argument("--smdebug_path",
type=str,
help="output directory to save data in",
default="/opt/ml/output/tensors",)
parser.add_argument("--hook-type",
type=str,
choices=["saveall", "module-input-output", "weights-bias-gradients"],
default="saveall",)
return parser.parse_args()
################################################################################################################################################
########################################################### Tools and variables ################################################################
################################################################################################################################################
# Model name according to the PyTorch documentation:
# https://github.com/aws/sagemaker-pytorch-inference-toolkit/blob/6936c08581e26ff3bac26824b1e4946ec68ffc85/src/sagemaker_pytorch_serving_container/torchserve.py#L45
MODEL_NAME = 'model.pth'
# Hugging face list of models: https://huggingface.co/models
PRE_TRAINED_MODEL_NAME = 'roberta-base'
def create_list_input_files(path):
input_files = glob.glob('{}/*.tsv'.format(path))
print(input_files)
return input_files
def save_transformer_model(model, model_dir):
path = '{}/transformer'.format(model_dir)
os.makedirs(path, exist_ok=True)
print('Saving Transformer model to {}'.format(path))
model.save_pretrained(path)
def save_pytorch_model(model, model_dir):
os.makedirs(model_dir, exist_ok=True)
print('Saving PyTorch model to {}'.format(model_dir))
save_path = os.path.join(model_dir, MODEL_NAME)
torch.save(model.state_dict(), save_path)
################################################################################################################################################
########################################################### Configure the model ################################################################
################################################################################################################################################
def configure_model():
classes = [-1, 0, 1]
config = RobertaConfig.from_pretrained(
PRE_TRAINED_MODEL_NAME,
num_labels=len(classes),
id2label={
### BEGIN SOLUTION - DO NOT delete this comment for grading purposes
0: -1, # Replace all None
1: 0, # Replace all None
2: 1, # Replace all None
### END SOLUTION - DO NOT delete this comment for grading purposes
},
label2id={
-1: 0,
0: 1,
1: 2,
}
)
config.output_attentions=True
return config
################################################################################################################################################
####################################################### PyTorch Dataset and DataLoader #########################################################
################################################################################################################################################
# PyTorch dataset retrieves the dataset’s features and labels one sample at a time
# Create a custom Dataset class for the reviews
class ReviewDataset(Dataset):
def __init__(self, input_ids_list, label_id_list):
self.input_ids_list = input_ids_list
self.label_id_list = label_id_list
def __len__(self):
return len(self.input_ids_list)
def __getitem__(self, item):
# convert list of token_ids into an array of PyTorch LongTensors
input_ids = json.loads(self.input_ids_list[item])
label_id = self.label_id_list[item]
input_ids_tensor = torch.LongTensor(input_ids)
label_id_tensor = torch.tensor(label_id, dtype=torch.long)
return input_ids_tensor, label_id_tensor
# PyTorch DataLoader helps to organise the input training data in “minibatches” and reshuffle the data at every epoch
# It takes Dataset as an input
def create_data_loader(path, batch_size):
print("Get data loader")
df = pd.DataFrame(columns=['input_ids', 'label_id'])
input_files = create_list_input_files(path)
for file in input_files:
df_temp = pd.read_csv(file,
sep='\t',
usecols=['input_ids', 'label_id'])
df = df.append(df_temp)
ds = ReviewDataset(
input_ids_list=df.input_ids.to_numpy(),
label_id_list=df.label_id.to_numpy(),
)
return DataLoader(
ds,
batch_size=batch_size,
shuffle=True,
drop_last=True,
), df
################################################################################################################################################
################################################################ Train model ###################################################################
################################################################################################################################################
def train_model(model,
train_data_loader,
df_train,
val_data_loader,
df_val,
args):
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=model.parameters(), lr=args.learning_rate)
if args.freeze_bert_layer:
print('Freezing BERT base layers...')
for name, param in model.named_parameters():
if 'classifier' not in name: # classifier layer
param.requires_grad = False
print('Set classifier layers to `param.requires_grad=False`.')
train_correct = 0
train_total = 0
for epoch in range(args.epochs):
print('EPOCH -- {}'.format(epoch))
for i, (sent, label) in enumerate(train_data_loader):
if i < args.train_steps_per_epoch:
model.train()
optimizer.zero_grad()
sent = sent.squeeze(0)
if torch.cuda.is_available():
sent = sent.cuda()
label = label.cuda()
output = model(sent)[0]
_, predicted = torch.max(output, 1)
loss = loss_function(output, label)
loss.backward()
optimizer.step()
if args.run_validation and i % args.validation_steps_per_epoch == 0:
print('RUNNING VALIDATION:')
correct = 0
total = 0
model.eval()
for sent, label in val_data_loader:
sent = sent.squeeze(0)
if torch.cuda.is_available():
sent = sent.cuda()
label = label.cuda()
output = model(sent)[0]
_, predicted = torch.max(output.data, 1)
total += label.size(0)
correct += (predicted.cpu() ==label.cpu()).sum()
accuracy = 100.00 * correct.numpy() / total
print('[epoch/step: {0}/{1}] val_loss: {2:.2f} - val_acc: {3:.2f}%'.format(epoch, i, loss.item(), accuracy))
else:
break
print('TRAINING COMPLETED.')
return model
################################################################################################################################################
#################################################################### Main ######################################################################
################################################################################################################################################
if __name__ == '__main__':
# Parse args
args = parse_args()
print('Loaded arguments:')
print(args)
# Get environment variables
env_var = os.environ
print('Environment variables:')
pprint.pprint(dict(env_var), width = 1)
# Check if distributed training
is_distributed = len(args.hosts) > 1 and args.backend is not None
print("Distributed training - {}".format(is_distributed))
use_cuda = args.num_gpus > 0
print("Number of gpus available - {}".format(args.num_gpus))
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
device = torch.device('cuda' if use_cuda else 'cpu')
# Initialize the distributed environment.
if is_distributed:
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
print('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# Set the seed for generating random numbers
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
# Instantiate model
config = None
model = None
successful_download = False
retries = 0
while (retries < 5 and not successful_download):
try:
# Configure model
config = configure_model()
model = RobertaForSequenceClassification.from_pretrained(
'roberta-base',
config=config
)
model.to(device)
successful_download = True
            print('Successfully downloaded after {} retries.'.format(retries))
except:
retries = retries + 1
random_sleep = random.randint(1, 30)
print('Retry #{}. Sleeping for {} seconds'.format(retries, random_sleep))
time.sleep(random_sleep)
if not model:
print('Not properly initialized...')
# Create data loaders
train_data_loader, df_train = create_data_loader(args.train_data, args.train_batch_size)
val_data_loader, df_val = create_data_loader(args.validation_data, args.validation_batch_size)
print("Processes {}/{} ({:.0f}%) of train data".format(
len(train_data_loader.sampler), len(train_data_loader.dataset),
100. * len(train_data_loader.sampler) / len(train_data_loader.dataset)
))
print("Processes {}/{} ({:.0f}%) of validation data".format(
len(val_data_loader.sampler), len(val_data_loader.dataset),
100. * len(val_data_loader.sampler) / len(val_data_loader.dataset)
))
print('model_dir: {}'.format(args.model_dir))
print('model summary: {}'.format(model))
callbacks = []
initial_epoch_number = 0
# Start training
model = train_model(
model,
train_data_loader,
df_train,
val_data_loader,
df_val,
args
)
save_transformer_model(model, args.model_dir)
save_pytorch_model(model, args.model_dir)
# Prepare for inference which will be used in deployment
# You will need three files for it: inference.py, requirements.txt, config.json
inference_path = os.path.join(args.model_dir, "code/")
os.makedirs(inference_path, exist_ok=True)
os.system("cp inference.py {}".format(inference_path))
os.system("cp requirements.txt {}".format(inference_path))
os.system("cp config.json {}".format(inference_path))
|
py | b40af592869e6c43b5fe10aed554ca07863abd8e | import frappe
from frappe import _
from frappe.database.schema import DBTable
class MariaDBTable(DBTable):
def create(self):
add_text = ''
# columns
column_defs = self.get_column_definitions()
if column_defs: add_text += ',\n'.join(column_defs) + ',\n'
# index
index_defs = self.get_index_definitions()
if index_defs: add_text += ',\n'.join(index_defs) + ',\n'
# create table
frappe.db.sql("""create table `%s` (
name varchar({varchar_len}) not null primary key,
creation datetime(6),
modified datetime(6),
modified_by varchar({varchar_len}),
owner varchar({varchar_len}),
docstatus int(1) not null default '0',
parent varchar({varchar_len}),
parentfield varchar({varchar_len}),
parenttype varchar({varchar_len}),
idx int(8) not null default '0',
%sindex parent(parent),
index modified(modified))
ENGINE={engine}
ROW_FORMAT=COMPRESSED
CHARACTER SET=utf8mb4
COLLATE=utf8mb4_unicode_ci""".format(varchar_len=frappe.db.VARCHAR_LEN,
engine=self.meta.get("engine") or 'InnoDB') % (self.table_name, add_text))
def alter(self):
for col in self.columns.values():
col.build_for_alter_table(self.current_columns.get(col.fieldname.lower()))
add_column_query = []
modify_column_query = []
add_index_query = []
drop_index_query = []
columns_to_modify = set(self.change_type + self.add_unique + self.set_default)
for col in self.add_column:
add_column_query.append("ADD COLUMN `{}` {}".format(col.fieldname, col.get_definition()))
for col in columns_to_modify:
modify_column_query.append("MODIFY `{}` {}".format(col.fieldname, col.get_definition()))
for col in self.add_index:
# if index key not exists
if not frappe.db.sql("SHOW INDEX FROM `%s` WHERE key_name = %s" %
(self.table_name, '%s'), col.fieldname):
add_index_query.append("ADD INDEX `{}`(`{}`)".format(col.fieldname, col.fieldname))
for col in self.drop_index:
if col.fieldname != 'name': # primary key
# if index key exists
if frappe.db.sql("""SHOW INDEX FROM `{0}`
WHERE key_name=%s
AND Non_unique=%s""".format(self.table_name), (col.fieldname, col.unique)):
drop_index_query.append("drop index `{}`".format(col.fieldname))
try:
for query_parts in [add_column_query, modify_column_query, add_index_query, drop_index_query]:
if query_parts:
query_body = ", ".join(query_parts)
query = "ALTER TABLE `{}` {}".format(self.table_name, query_body)
frappe.db.sql(query)
except Exception as e:
# sanitize
if e.args[0]==1060:
frappe.throw(str(e))
elif e.args[0]==1062:
fieldname = str(e).split("'")[-2]
frappe.throw(_("{0} field cannot be set as unique in {1}, as there are non-unique existing values").format(
fieldname, self.table_name))
elif e.args[0]==1067:
frappe.throw(str(e.args[1]))
else:
raise e
|
py | b40af6ab6b4609ad7c86a1d9bbe7a31fb4c7dfe3 | # -*- coding: utf-8 -*-
# With thanks to https://stackoverflow.com/users/6792743/perl
# import libraries
import pandas as pd
import requests
import xml.etree.ElementTree as ET
import io
from io import StringIO
import time
import datetime
# Get the year, month, and day of the previous day
# (using timedelta avoids an invalid day 0 on the first day of a month)
dt = datetime.datetime.today() - datetime.timedelta(days=1)
year = dt.year
month = dt.month
day = dt.day
print(year, month, day)
# Define XML query URL based on date
url = f"https://fogos.icnf.pt/localizador/webserviceocorrencias.asp" \
f"?ANO={year}" \
f"&MES={month}" \
f"&DIA={day}" \
# Get last update
df_actual=pd.read_csv("icnf_2022_raw.csv")
# XML Query
# Get data
resp = requests.get(url)
# Parse XML
et = ET.parse(io.StringIO(resp.text))
# Create DataFrame
df = pd.DataFrame([
{f.tag: f.text for f in e.findall('./')} for e in et.findall('./')]
)
df = df.reset_index()
# Append only new records: DataFrame.append returns a new frame, so keep the
# result, then drop rows that were already present
df_actual = df_actual.append(df, ignore_index=True).drop_duplicates()
df_sorted = df_actual.sort_values(["ANO", "MES", "DIA"])
df_sorted = df_sorted.reset_index(drop=True)
# Save to CSV
df_sorted.to_csv("icnf_2022_raw.csv",index=False)
|
py | b40af70d004f6a5c5d43071241423c0f5379edb1 | #!/usr/bin/env python3
# Description: classes to start a job in a non-ui thread, and provide updates about progress
from PyQt5.QtCore import QThread, pyqtProperty, pyqtSignal, pyqtSlot
class Worker(QThread):
"""
Description: A class that is used by UI to get notifications about event
progress.
Usage: Make a subclass and reimplement run() method to do useful work.
           While running you can use the log() or log_line() methods to log
           some text for the UI.
For ease of use with QML, it's best for the parameters of the
worker function to be properties of the class. The QML code would
assign them some values, and then call start().
"""
#Q_SIGNALS
logUpdated = pyqtSignal('QString') # gives only last update
logChanged = pyqtSignal('QString') # gives whole log
def __init__(self, parent=None):
super().__init__(parent)
self._full_log = ""
#Q_PROPERTY
@pyqtProperty('QString', notify=logChanged)
def fullLog(self):
return self._full_log
def log(self, text):
self._full_log += text
self.logUpdated.emit(text)
self.logChanged.emit(self._full_log)
def log_line(self, line):
self.log(line + "\n")
class ProgressWorker(Worker):
"""
Description: A worker that can provide a progress bar.
Usage: Same as above + when creating you can provide amount of steps it
will take to complete an action. When working you can advance steps
by calling the advance() method.
"""
#Q_SIGNALS
progressChanged = pyqtSignal(int)
progressEndChanged = pyqtSignal(int)
def __init__(self, end=1, start=0, parent=None):
super().__init__(parent)
self._progress = start
self._progress_end = end
#Q_PROPERTY
@pyqtProperty(int, notify=progressChanged)
def progress(self):
return self._progress
@pyqtProperty(int, notify=progressEndChanged)
def progressEnd(self):
return self._progress_end
def set_progress(self, val):
self._progress = val
self.progressChanged.emit(self._progress)
def set_end(self, val):
self._progress_end = val
self.progressEndChanged.emit(self._progress_end)
# increase progress by 1
def advance(self):
if self._progress == self._progress_end:
return
self.set_progress(self.progress + 1)
|
py | b40af95752b26ff124e16505b1f3948128406f36 | # Generated by Django 3.0.7 on 2020-09-10 13:44
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('analysis', '0075_auto_20200906_2117'),
]
operations = [
migrations.CreateModel(
name='StrategyTargetPctTestQuantiles',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
('last_mod_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='最后更新时间')),
('strategy_code', models.CharField(blank=True, db_index=True, max_length=25, null=True, verbose_name='策略代码')),
('ts_code', models.CharField(max_length=15, verbose_name='股票代码')),
('qt_10pct', models.FloatField(blank=True, null=True, verbose_name='10%分位数')),
('qt_25pct', models.FloatField(blank=True, null=True, verbose_name='25%分位数')),
('qt_50pct', models.FloatField(blank=True, null=True, verbose_name='50%分位数')),
('qt_75pct', models.FloatField(blank=True, null=True, verbose_name='75%分位数')),
('qt_90pct', models.FloatField(blank=True, null=True, verbose_name='90%分位数')),
('mean_val', models.FloatField(blank=True, null=True, verbose_name='平均数')),
('min_val', models.FloatField(blank=True, null=True, verbose_name='最小值')),
('max_val', models.FloatField(blank=True, null=True, verbose_name='最大值')),
('target_pct', models.IntegerField(verbose_name='目标涨幅')),
('ranking', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='排名')),
('test_freq', models.CharField(default='D', max_length=5, verbose_name='K线周期')),
],
options={
'verbose_name': '目标涨幅四分位统计',
'verbose_name_plural': '目标涨幅四分位统计',
'ordering': ['ts_code'],
},
),
migrations.CreateModel(
name='StrategyUpDownTestQuantiles',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
('last_mod_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='最后更新时间')),
('strategy_code', models.CharField(blank=True, db_index=True, max_length=25, null=True, verbose_name='策略代码')),
('test_type', models.CharField(blank=True, max_length=25, null=True, verbose_name='测试类型')),
('ts_code', models.CharField(max_length=15, verbose_name='股票代码')),
('qt_10pct', models.FloatField(blank=True, null=True, verbose_name='10%分位数')),
('qt_25pct', models.FloatField(blank=True, null=True, verbose_name='25%分位数')),
('qt_50pct', models.FloatField(blank=True, null=True, verbose_name='50%分位数')),
('qt_75pct', models.FloatField(blank=True, null=True, verbose_name='75%分位数')),
('qt_90pct', models.FloatField(blank=True, null=True, verbose_name='90%分位数')),
('mean_val', models.FloatField(blank=True, null=True, verbose_name='平均数')),
('min_val', models.FloatField(blank=True, null=True, verbose_name='最小值')),
('max_val', models.FloatField(blank=True, null=True, verbose_name='最大值')),
('test_period', models.IntegerField(verbose_name='测试周期')),
('ranking', models.IntegerField(db_index=True, verbose_name='排名')),
('test_freq', models.CharField(default='D', max_length=5, verbose_name='K线周期')),
],
options={
'verbose_name': '涨跌四分位统计',
'verbose_name_plural': '涨跌四分位统计',
'ordering': ['ts_code'],
},
),
migrations.AlterField(
model_name='stockstrategytestlog',
name='event_type',
field=models.CharField(choices=[('MARK_CP', '标记临界点'), ('UPD_DOWNLOAD', '更新下载历史交易'), ('DOWNLOAD', '下载历史交易'), ('UPD_CP', '更新临界点'), ('EXP_PCT_UPD', '更新预期涨幅'), ('PERIOD_TEST', '标记高低点涨幅'), ('EXP_PCT_TEST', '标记预期涨幅'), ('PERIOD_UPD', '更新高低点涨幅')], max_length=50, verbose_name='日志类型'),
),
migrations.AlterField(
model_name='tradestrategystat',
name='applied_period',
field=models.CharField(blank=True, choices=[('60', '60分钟'), ('D', '日线'), ('30', '30分钟'), ('15', '15分钟'), ('M', '月线'), ('W', '周线')], default='60', max_length=2, null=True, verbose_name='应用周期'),
),
]
|
py | b40af9838484908f640a30612aa4f7c764288a1f | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from py_insightvm_sdk.models.link import Link # noqa: F401,E501
from py_insightvm_sdk.models.page_info import PageInfo # noqa: F401,E501
from py_insightvm_sdk.models.software import Software # noqa: F401,E501
class PageOfSoftware(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'page': 'PageInfo',
'resources': 'list[Software]'
}
attribute_map = {
'links': 'links',
'page': 'page',
'resources': 'resources'
}
def __init__(self, links=None, page=None, resources=None): # noqa: E501
"""PageOfSoftware - a model defined in Swagger""" # noqa: E501
self._links = None
self._page = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if page is not None:
self.page = page
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this PageOfSoftware. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this PageOfSoftware. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this PageOfSoftware.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this PageOfSoftware. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def page(self):
"""Gets the page of this PageOfSoftware. # noqa: E501
The details of pagination indicating which page was returned, and how the remaining pages can be retrieved. # noqa: E501
:return: The page of this PageOfSoftware. # noqa: E501
:rtype: PageInfo
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this PageOfSoftware.
The details of pagination indicating which page was returned, and how the remaining pages can be retrieved. # noqa: E501
:param page: The page of this PageOfSoftware. # noqa: E501
:type: PageInfo
"""
self._page = page
@property
def resources(self):
"""Gets the resources of this PageOfSoftware. # noqa: E501
The page of resources returned. # noqa: E501
:return: The resources of this PageOfSoftware. # noqa: E501
:rtype: list[Software]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this PageOfSoftware.
The page of resources returned. # noqa: E501
:param resources: The resources of this PageOfSoftware. # noqa: E501
:type: list[Software]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PageOfSoftware, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageOfSoftware):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b40afb26ee1acb8731dfd429fafa79092e2ddbc8 | import logging
from tqdm import tqdm
import torch
from smoke.utils import comm
from smoke.utils.timer import Timer, get_time_str
from smoke.data.datasets.evaluation import evaluate
def compute_on_dataset(model, data_loader, device, timer=None):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for batch in tqdm(data_loader):
images, targets, image_ids = batch["images"], batch["targets"], batch["img_ids"]
images = images.to(device)
with torch.no_grad():
if timer:
timer.tic()
output = model(images, targets)
if timer:
torch.cuda.synchronize()
timer.toc()
output = output.to(cpu_device)
if targets[0].has_field("global_T_ego"):
output = (output, torch.stack([t.get_field("ego_T_cam") for t in targets]).squeeze().to(cpu_device), torch.stack([t.get_field("global_T_ego") for t in targets]).squeeze().to(cpu_device))
results_dict.update(
            {img_id: output for img_id in image_ids}  # TODO: image_ids appears to always contain a single id here; this could be written more cleanly
)
return results_dict
def inference(
model,
data_loader,
dataset_name,
eval_type="detection",
device="cuda",
output_folder=None,
):
device = torch.device(device)
num_devices = comm.get_world_size()
logger = logging.getLogger(__name__)
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
predictions = compute_on_dataset(model, data_loader, device, inference_timer)
comm.synchronize()
total_time = total_timer.toc()
total_time_str = get_time_str(total_time)
logger.info(
"Total run time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
total_infer_time = get_time_str(inference_timer.total_time)
logger.info(
"Model inference time: {} ({} s / img per device, on {} devices)".format(
total_infer_time,
inference_timer.total_time * num_devices / len(dataset),
num_devices,
)
)
if not comm.is_main_process():
return
return evaluate(eval_type=eval_type,
dataset=dataset,
predictions=predictions,
output_folder=output_folder, )
|
py | b40afb3edf688e39ad460a36db16de3aa6becdcd | import should_be.all # noqa
import unittest
class TestSequenceMixin(unittest.TestCase):
def setUp(self):
self.lst = [1, 2, 3]
def test_should_have_same_items_as(self):
self.assertRaises(AssertionError,
self.lst.should_have_same_items_as,
[1, 2])
self.assertRaises(AssertionError,
self.lst.should_have_same_items_as,
[1, 3, 2, 4])
self.assertRaises(AssertionError,
self.lst.should_have_same_items_as,
[1, 4, 2])
self.lst.should_have_same_items_as([3, 1, 2])
def test_list_should_be(self):
self.assertRaisesRegexp(AssertionError, r'lengths',
self.lst.should_be, [1])
self.assertRaisesRegexp(AssertionError, r'item',
self.lst.should_be, [1, 3, 4])
self.lst.should_be([1, 2, 3])
def test_list_should_be_falls_back(self):
self.assertRaises(AssertionError, self.lst.should_be, 1)
|
py | b40afb5005afb0432cea2e20bd08a7606b7de5e0 | """Type mapping helpers."""
from __future__ import division
__copyright__ = "Copyright (C) 2011 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from . import gpuarray
# {{{ registry
NAME_TO_DTYPE = {}
def register_dtype(dtype, c_names):
"""
Associate a numpy dtype with its C equivalents.
:param dtype: type to associate
:type dtype: numpy.dtype or string
:param c_names: list of C type names
:type c_names: str or list
Will register `dtype` for use with the gpuarray module. If the
c_names argument is a list then the first element of that list is
taken as the primary association and will be used for generated C
code. The other types will be mapped to the provided dtype when
going in the other direction.
"""
if isinstance(c_names, str):
c_names = [c_names]
dtype = np.dtype(dtype)
# register if not already there
try:
gpuarray.dtype_to_ctype(dtype)
except ValueError:
gpuarray.register_dtype(dtype, c_names[0])
for nm in c_names:
if nm in NAME_TO_DTYPE and NAME_TO_DTYPE[nm] != dtype:
raise RuntimeError("name '%s' already registered" % nm)
NAME_TO_DTYPE[nm] = dtype
def _fill_dtype_registry(respect_windows):
from sys import platform
register_dtype(np.bool, ["ga_bool", "bool"])
register_dtype(np.int8, ["ga_byte", "char", "signed char"])
register_dtype(np.uint8, ["ga_ubyte", "unsigned char"])
register_dtype(np.int16, ["ga_short", "short", "signed short", "signed short int", "short signed int"])
register_dtype(np.uint16, ["ga_ushort", "unsigned short", "unsigned short int", "short unsigned int"])
register_dtype(np.int32, ["ga_int", "int", "signed int"])
register_dtype(np.uint32, ["ga_uint", "unsigned", "unsigned int"])
register_dtype(np.int64, ["ga_long"])
register_dtype(np.uint64, ["ga_ulong"])
is_64_bit = tuple.__itemsize__ * 8 == 64
if is_64_bit:
if 'win32' in platform and respect_windows:
i64_name = "long long"
else:
i64_name = "long"
register_dtype(np.int64, [i64_name, "%s int" % i64_name,
"signed %s int" % i64_name,
"%s signed int" % i64_name])
register_dtype(np.uint64, ["unsigned %s" % i64_name,
"unsigned %s int" % i64_name,
"%s unsigned int" % i64_name])
# According to this uintp may not have the same hash as uint32:
# http://projects.scipy.org/numpy/ticket/2017
# Failing tests tell me this is the case for intp too.
if is_64_bit:
register_dtype(np.intp, ["ga_long"])
register_dtype(np.uintp, ["ga_ulong"])
else:
register_dtype(np.intp, ["ga_int"])
register_dtype(np.uintp, ["ga_uint"])
register_dtype(np.float32, ["ga_float", "float"])
register_dtype(np.float64, ["ga_double", "double"])
# }}}
# {{{ dtype -> ctype
def dtype_to_ctype(dtype, with_fp_tex_hack=False):
"""
Return the C type that corresponds to `dtype`.
:param dtype: a numpy dtype
"""
if dtype is None:
raise ValueError("dtype may not be None")
dtype = np.dtype(dtype)
if with_fp_tex_hack:
if dtype == np.float32:
return "fp_tex_float"
elif dtype == np.float64:
return "fp_tex_double"
return gpuarray.dtype_to_ctype(dtype)
# }}}
# {{{ c declarator parsing
def parse_c_arg_backend(c_arg, scalar_arg_class, vec_arg_class):
c_arg = c_arg.replace("const", "").replace("volatile", "")
# process and remove declarator
import re
decl_re = re.compile(r"(\**)\s*([_a-zA-Z0-9]+)(\s*\[[ 0-9]*\])*\s*$")
decl_match = decl_re.search(c_arg)
if decl_match is None:
raise ValueError("couldn't parse C declarator '%s'" % c_arg)
name = decl_match.group(2)
if decl_match.group(1) or decl_match.group(3) is not None:
arg_class = vec_arg_class
else:
arg_class = scalar_arg_class
tp = c_arg[:decl_match.start()]
tp = " ".join(tp.split())
try:
dtype = NAME_TO_DTYPE[tp]
except KeyError:
raise ValueError("unknown type '%s'" % tp)
return arg_class(dtype, name)
# }}}
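# Illustrative sketch (not part of the original module): what parse_c_arg_backend()
# produces for a couple of C declarators. The ScalarArg/VecArg namedtuples are
# hypothetical stand-ins for the real argument classes a caller would supply, and the
# demo seeds NAME_TO_DTYPE directly so it runs without a compiled gpuarray backend.
def _demo_parse_c_arg():
    from collections import namedtuple
    ScalarArg = namedtuple("ScalarArg", "dtype name")
    VecArg = namedtuple("VecArg", "dtype name")
    NAME_TO_DTYPE.setdefault("float", np.dtype(np.float32))
    # "const float alpha" has no pointer or array declarator, so it becomes a scalar argument...
    scalar = parse_c_arg_backend("const float alpha", ScalarArg, VecArg)
    assert isinstance(scalar, ScalarArg) and scalar.name == "alpha"
    # ...while "float *out" is parsed as a vector argument of the same dtype.
    vec = parse_c_arg_backend("float *out", ScalarArg, VecArg)
    assert isinstance(vec, VecArg) and vec.dtype == np.float32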
def get_np_obj(obj):
"""
    Returns a numpy object of the same dtype and behaviour as the
    source, suitable for output dtype determination.
    This is used since the casting rules of numpy are rather obscure
    and the best way to imitate them is to try an operation and see
    what it does.
"""
if isinstance(obj, np.ndarray) and obj.shape == ():
return obj
try:
return np.ones(1, dtype=obj.dtype)
except AttributeError:
return np.asarray(obj)
def get_common_dtype(obj1, obj2, allow_double):
"""
Returns the proper output type for a numpy operation involving the
two provided objects. This may not be suitable for certain
obscure numpy operations.
If `allow_double` is False, a return type of float64 will be
forced to float32 and complex128 will be forced to complex64.
"""
# Yes, numpy behaves differently depending on whether
# we're dealing with arrays or scalars.
np1 = get_np_obj(obj1)
np2 = get_np_obj(obj2)
result = (np1 + np2).dtype
if not allow_double:
if result == np.float64:
result = np.dtype(np.float32)
elif result == np.complex128:
result = np.dtype(np.complex64)
return result
def upcast(*args):
a = np.array([0], dtype=args[0])
for t in args[1:]:
a = a + np.array([0], dtype=t)
return a.dtype
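# Illustrative sketch (not part of the original module): how the casting helpers above
# behave for plain numpy inputs. Only numpy is needed, so this can be run without a
# compiled gpuarray backend; the asserted dtypes follow standard numpy promotion rules.
def _demo_casting_helpers():
    a = np.ones(4, dtype=np.float64)
    b = np.ones(4, dtype=np.float32)
    # float64 + float32 promotes to float64, but with allow_double=False the helper
    # downgrades the result to float32 (and complex128 to complex64).
    assert get_common_dtype(a, b, allow_double=True) == np.float64
    assert get_common_dtype(a, b, allow_double=False) == np.float32
    # upcast() folds several dtypes together: int32 combined with float32 gives float64.
    assert upcast(np.int32, np.float32) == np.float64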
# vim: foldmethod=marker
|
py | b40afbeb65a62004bebec0d7422a7d7431ec1f63 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from cancerPrediction.utils import model
# Load the dataset
df = pd.read_csv('./data/data.csv')
# print(df.head())
# print(df.info())
# Number of rows and columns in the dataset
print(df.shape)
# Count null/empty values in each column
print(df.isna().sum())
# Drop the empty column
df = df.dropna(axis=1)
# Shape after dropping the column (sanity check)
print(df.shape)
# Count the malignant (M) and benign (B) cases
benignVsMalignant = df['diagnosis'].value_counts()
print(benignVsMalignant)
# Visualize the diagnosed cases
# sns.countplot(x='diagnosis', data=df)
# plt.show()
# Check the column data types
# print(df.dtypes)
# Encode the string column (diagnosis) as 0/1
labelencoder_Y = LabelEncoder()
df.iloc[:, 1] = labelencoder_Y.fit_transform(df.iloc[:, 1].values)
# sns.pairplot(data=df.iloc[:,1:5], hue="diagnosis")
# plt.show()
# Show the correlation between the dataset columns
# print(df.iloc[:, 1:32].corr())
# Visualize the correlation
# plt.figure(figsize=(10, 10))
# sns.heatmap(df.iloc[:, 1:12].corr(), annot=True, fmt='.0%')
# plt.show()
# Split the dataset into independent (X) and dependent (Y) sets
X = df.iloc[:, 2:31].values
Y = df.iloc[:, 1].values
# Split the dataset into train and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=0)
# Feature scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
# Build the models
model = model.makeModel(X_train, Y_train, X, Y)
print('============================================================================')
print('CONFUSION MATRIX')
# Evaluate the models on the test data (confusion matrix)
for i in range(len(model)):
print('Model ', i)
cm = confusion_matrix(Y_test, model[i].predict(X_test))
print(cm)
    TN = cm[0][0]
    TP = cm[1][1]
    FP = cm[0][1]
    FN = cm[1][0]
    Accuracy = (TP + TN) / (TP + TN + FN + FP)
    print('Model accuracy ', Accuracy)
print('============================================================================\n')
print('ACCURACY SCORE')
# Alternative way of computing model metrics
for i in range(len(model)):
print('Model ', i)
print(classification_report(Y_test, model[i].predict(X_test)))
    print('Accuracy : ', accuracy_score(Y_test, model[i].predict(X_test)))
print('============================================================================')
# Predictions of the Random Forest classifier model
randomForest = model[2]
prediction = randomForest.predict(X_test)
print('Model predictions: ', prediction)
print()
print('Actual results   : ', Y_test)
|
py | b40afcc3cff44367b85d1c8c935ac80f331ca69d | #!/usr/bin/env python
__doc__ = '''Reads a designSpace file and creates a Glyphs file from its linked UFOs'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2018 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
from silfont.core import execute, splitfn
from glyphsLib import to_glyphs
from fontTools.designspaceLib import DesignSpaceDocument
import os
argspec = [
('designspace', {'help': 'Input designSpace file'}, {'type': 'filename'}),
('glyphsfile', {'help': 'Output glyphs file name', 'nargs': '?' }, {'type': 'filename', 'def': None}),
('--glyphsformat', {'help': "Format for glyphs file (2 or 3)", 'default': "2"}, {}),
('--nofea', {'help': 'Do not process features.fea', 'action': 'store_true', 'default': False}, {}),
# ('--nofixes', {'help': 'Bypass code fixing data', 'action': 'store_true', 'default': False}, {}),
('-l', '--log', {'help': 'Log file'}, {'type': 'outfile', 'def': '_ufo2glyphs.log'})]
# This is just bare-bones code at present, so it does the same as glyphsLib's ufo2glyphs!
# It is designed so that data could be massaged, if necessary, on the way. No such need has been found so far.
def doit(args):
glyphsfile = args.glyphsfile
logger = args.logger
gformat = args.glyphsformat
if gformat in ("2","3"):
gformat = int(gformat)
else:
logger.log("--glyphsformat must be 2 or 3", 'S')
if glyphsfile is None:
(path,base,ext) = splitfn(args.designspace)
glyphsfile = os.path.join(path, base + ".glyphs" )
else:
(path, base, ext) = splitfn(glyphsfile)
backupname = os.path.join(path, base + "-backup.glyphs" )
logger.log("Opening designSpace file", "I")
ds = DesignSpaceDocument()
ds.read(args.designspace)
if args.nofea: # Need to rename the features.fea files so they are not processed
origfeas = []; hiddenfeas = []
for source in ds.sources:
origfea = os.path.join(source.path, "features.fea")
hiddenfea = os.path.join(source.path, "features.tmp")
if os.path.exists(origfea):
logger.log(f'Renaming {origfea} to {hiddenfea}', "I")
os.rename(origfea, hiddenfea)
origfeas.append(origfea)
hiddenfeas.append(hiddenfea)
else:
                logger.log(f'No features.fea found in {source.path}', "I")
logger.log("Now creating glyphs object", "I")
glyphsfont = to_glyphs(ds)
if args.nofea: # Now need to reverse renamimg of features.fea files
for i, origfea in enumerate(origfeas):
logger.log(f'Renaming {hiddenfeas[i]} back to {origfea}', "I")
os.rename(hiddenfeas[i], origfea)
glyphsfont.format_version = gformat
if os.path.exists(glyphsfile): # Create a backup
logger.log("Renaming existing glyphs file to " + backupname, "I")
os.renames(glyphsfile, backupname)
logger.log("Writing glyphs file: " + glyphsfile, "I")
glyphsfont.save(glyphsfile)
def cmd(): execute(None, doit, argspec)
if __name__ == "__main__": cmd()
|
py | b40afd4861e5c099cb282fbb3e5eae5c7248a2d3 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdRuleEvaluationSpec(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdRuleEvaluationSpec = True
super(AdRuleEvaluationSpec, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
evaluation_type = 'evaluation_type'
filters = 'filters'
trigger = 'trigger'
id = 'id'
class EvaluationType:
schedule = 'SCHEDULE'
trigger = 'TRIGGER'
_field_types = {
'evaluation_type': 'EvaluationType',
'filters': 'list<AdRuleFilters>',
'trigger': 'AdRuleTrigger',
'id': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['EvaluationType'] = AdRuleEvaluationSpec.EvaluationType.__dict__.values()
return field_enum_info
|
py | b40afda8021e1575f90d4b8371fd741123d092ed | import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class MyLSTM(nn.Module):
def __init__(self,
emb_vectors,
lstm_layers,
hidden_dim,
target_size,
dropout_prob,
device,
seq_len=250):
super().__init__()
# variable definitions
self.hidden_dim = hidden_dim
self.n_layers = lstm_layers
self.device = device
        # Add an all-zero vector at index 0 to act as the PADDING embedding.
        # Because of this, every token index coming from the dataloader is shifted up by 1.
emb_vectors = np.insert(emb_vectors, 0, [np.zeros(300)], 0)
self.embedding_dim = emb_vectors.shape[1]
self.word_embeddings = nn.Embedding.from_pretrained(torch.Tensor(emb_vectors))
self.lstm = nn.LSTM(self.embedding_dim, hidden_dim,
batch_first=True, num_layers=lstm_layers,
dropout=dropout_prob)
self.dropout = nn.Dropout(0.1)
self.hidden2tag = nn.Linear(hidden_dim * seq_len, target_size)
self.sigmoid = nn.Sigmoid()
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, sentence, hidden):
embeds = self.word_embeddings(sentence)
x, (hid, out) = self.lstm(embeds, hidden)
x = x.contiguous().view(x.shape[0], -1)
x = self.dropout(x)
x = self.hidden2tag(x)
#x = self.sigmoid(x)
x = self.log_softmax(x)
return x, (hid, out)
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(self.device),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(self.device))
return hidden
def switch_train(self):
self.train() |
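# Illustrative sketch (not part of the original module): a minimal forward pass through
# the MyLSTM class defined above, assuming it is importable and that torch/numpy are
# installed. The random matrix stands in for real pretrained 300-dim word vectors, and
# the vocabulary size, batch size, and hyperparameters below are arbitrary choices.
import numpy as np
import torch

def _demo_mylstm_forward():
    vocab_size = 1000
    emb_vectors = np.random.randn(vocab_size, 300).astype(np.float32)  # fake embeddings
    device = torch.device("cpu")
    model = MyLSTM(emb_vectors, lstm_layers=2, hidden_dim=64, target_size=2,
                   dropout_prob=0.3, device=device, seq_len=250)
    # Token indices start at 1 because index 0 is reserved for the padding row
    # inserted in __init__; input sequences must match seq_len (250 here).
    batch = torch.randint(1, vocab_size + 1, (4, 250))
    hidden = model.init_hidden(batch_size=4)
    log_probs, hidden = model(batch, hidden)
    return log_probs.shape  # torch.Size([4, 2]) -- log-probabilities per class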
py | b40aff8981ed6013e48e409cb1af7030eb10b3cf | # -*- coding:utf-8 -*-
# /usr/bin/env python
from .api import app_core
|
py | b40b001f7cb24e87f62b7ea40677e3c7afb2396e | import os, sys
import glob
import argparse
import time
from mpi4py import MPI
MPI_COMM = MPI.COMM_WORLD
MPI_RANK = MPI_COMM.Get_rank()
MPI_SIZE = MPI_COMM.Get_size()
N_GPUS = 2
rank_gpu = MPI_RANK % N_GPUS
os.environ['CUDA_VISIBLE_DEVICES'] = f'{rank_gpu}'
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import torch
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from windflow.datasets.g5nr import g5nr
from windflow.inference.inference_flows import FlowRunner
from windflow.datasets.utils import cartesian_to_speed
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="raft", type=str)
parser.add_argument("--checkpoint", default="models/raft-size_512/checkpoint.pth.tar", type=str)
parser.add_argument("--data_directory", default="data/G5NR/", type=str)
parser.add_argument("--output", default="data/G5NR_Flows/raft/", type=str)
parser.add_argument("--n_files", default=None, type=int)
parser.add_argument("--batch_size", default=2, type=int)
args = parser.parse_args()
# Set model information
model_name = args.model_name
# Set data variables
data_directory = args.data_directory
to_directory = args.output
if (MPI_RANK == 0) and (not os.path.exists(to_directory)):
os.makedirs(to_directory)
# get file list
files = g5nr.get_files(data_directory, 'test')
print("Number of OSSE test files", len(files))
if args.n_files is None:
N_files = len(files)
else:
N_files = args.n_files
N_per_rank = N_files // MPI_SIZE
files = files.iloc[N_per_rank * MPI_RANK: N_per_rank * (MPI_RANK+1) + 1]
# load model
tile_size = 512
overlap = 128
if model_name in ['pwc-net-rmsvd', 'pwc-net-l1']:
runner = FlowRunner('pwc-net',
tile_size=tile_size,
overlap=overlap,
batch_size=args.batch_size)
else:
runner = FlowRunner(model_name.replace('-guided','').replace('-unflow', ''),
tile_size=tile_size,
overlap=overlap,
batch_size=args.batch_size)
runner.load_checkpoint(args.checkpoint)
stats = []
# iterate and perform inference, compute test statistics
ds1 = xr.open_mfdataset(files.iloc[0].values, engine='netcdf4')
for i in range(1, files.shape[0]):
# open dataset
ds2 = xr.open_mfdataset(files.iloc[i].values, engine='netcdf4')
f = os.path.basename(files.iloc[i-1]['U']) # get ds1 file
to_flow_file = os.path.join(to_directory, f.replace('_U_', '_WindFlow_').replace('.nc', '.zarr'))
if os.path.exists(to_flow_file):
ds1 = ds2.copy()
continue
t = ds1['time'].values
output_ds = xr.zeros_like(ds1)
del output_ds['QV'], output_ds['tpw']
U_flows = np.zeros(ds1.U.shape)
V_flows = np.zeros(ds1.V.shape)
t0 = time.time()
    for lev_idx, lev in enumerate(ds1.lev):
        qv1_lev = ds1.sel(lev=lev)['QV'].values[0]
        qv2_lev = ds2.sel(lev=lev)['QV'].values[0]
        _, flows_lev = runner.forward(qv1_lev, qv2_lev)
        U_flows[0, lev_idx] = flows_lev[0]
        V_flows[0, lev_idx] = flows_lev[1]
output_ds['U'] = output_ds['U'] + U_flows
output_ds['V'] = output_ds['V'] + V_flows
output_ds = cartesian_to_speed(output_ds)
output_ds.attrs['Source'] = 'NEX'
output_ds.attrs['Title'] = 'Optical Flow Feature Tracking'
output_ds.attrs['Contact'] = '[email protected]'
output_ds.attrs['History'] = 'G5NR outputs from GEOS-5 by gmao processed by NEX optical flow.'
output_ds.attrs['Model'] = model_name
output_ds.attrs['Pytorch_Checkpoint'] = args.checkpoint
#output_ds.to_netcdf(to_flow_file)
output_ds.to_zarr(to_flow_file)
#print(f'Wrote to file {to_flow_file}')
print(f"Wrote to file: {to_flow_file} -- Processing time {time.time()-t0} (seconds)")
ds1 = ds2.copy()
|
py | b40b035e53e744a4e75e8efd55e229c4de8b49ec | #===============================================================================
# Copyright 2007 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""This module contains functions/classes using a Whoosh index
as a backend for a spell-checking engine.
"""
from collections import defaultdict
from whoosh import analysis, fields, query, searching
from whoosh.support.levenshtein import relative, distance
class SpellChecker(object):
"""Implements a spell-checking engine using a search index for the
backend storage and lookup. This class is based on the Lucene
contributed spell-checker code.
To use this object::
st = store.FileStorage("spelldict")
sp = SpellChecker(st)
sp.add_words([u"aardvark", u"manticore", u"zebra", ...])
# or
ix = index.open_dir("index")
sp.add_field(ix, "content")
suggestions = sp.suggest(u"ardvark", number = 2)
"""
def __init__(self, storage, indexname = "SPELL",
booststart = 2.0, boostend = 1.0,
mingram = 3, maxgram = 4,
minscore = 0.5):
"""
:param storage: The storage object in which to create the
spell-checker's dictionary index.
:param indexname: The name to use for the spell-checker's
dictionary index. You only need to change this if you
have multiple spelling indexes in the same storage.
:param booststart: How much to boost matches of the first
N-gram (the beginning of the word).
:param boostend: How much to boost matches of the last
N-gram (the end of the word).
:param mingram: The minimum gram length to store.
:param maxgram: The maximum gram length to store.
:param minscore: The minimum score matches much achieve to
be returned.
"""
self.storage = storage
self.indexname = indexname
self._index = None
self.booststart = booststart
self.boostend = boostend
self.mingram = mingram
        self.maxgram = maxgram
        self.minscore = minscore
def index(self, create = False):
"""Returns the backend index of this object (instantiating it if
it didn't already exist).
"""
import index
if create or not self._index:
create = create or not index.exists(self.storage, indexname = self.indexname)
if create:
self._index = self.storage.create_index(self._schema(), self.indexname)
else:
self._index = self.storage.open_index(self.indexname)
return self._index
def _schema(self):
# Creates a schema given this object's mingram and maxgram attributes.
from fields import Schema, FieldType, Frequency, ID, STORED
from analysis import SimpleAnalyzer
idtype = ID()
freqtype = FieldType(format=Frequency(SimpleAnalyzer()))
fls = [("word", STORED), ("score", STORED)]
for size in xrange(self.mingram, self.maxgram + 1):
fls.extend([("start%s" % size, idtype),
("end%s" % size, idtype),
("gram%s" % size, freqtype)])
return Schema(**dict(fls))
def suggest(self, text, number = 3, usescores = False):
"""Returns a list of suggested alternative spellings of 'text'. You must
add words to the dictionary (using add_field, add_words, and/or add_scored_words)
before you can use this.
:param text: The word to check.
:param number: The maximum number of suggestions to return.
:param usescores: Use the per-word score to influence the suggestions.
:rtype: list
"""
grams = defaultdict(list)
for size in xrange(self.mingram, self.maxgram + 1):
key = "gram%s" % size
nga = analysis.NgramAnalyzer(size)
for t in nga(text):
grams[key].append(t.text)
queries = []
for size in xrange(self.mingram, min(self.maxgram + 1, len(text))):
key = "gram%s" % size
gramlist = grams[key]
queries.append(query.Term("start%s" % size, gramlist[0], boost = self.booststart))
queries.append(query.Term("end%s" % size, gramlist[-1], boost = self.boostend))
for gram in gramlist:
queries.append(query.Term(key, gram))
q = query.Or(queries)
ix = self.index()
s = searching.Searcher(ix)
try:
results = s.search(q)
length = len(results)
if len(results) > number*2:
length = len(results)//2
fieldlist = results[:length]
suggestions = [(fs["word"], fs["score"])
for fs in fieldlist
if fs["word"] != text]
if usescores:
def keyfn(a):
return 0 - (1/distance(text, a[0])) * a[1]
else:
def keyfn(a):
return distance(text, a[0])
suggestions.sort(key = keyfn)
finally:
s.close()
return [word for word, _ in suggestions[:number]]
def add_field(self, ix, fieldname):
"""Adds the terms in a field from another index to the backend dictionary.
This method calls add_scored_words() and uses each term's frequency as the
score. As a result, more common words will be suggested before rare words.
If you want to calculate the scores differently, use add_scored_words()
directly.
:param ix: The index.Index object from which to add terms.
:param fieldname: The field name (or number) of a field in the source
index. All the indexed terms from this field will be added to the
dictionary.
"""
tr = ix.term_reader()
try:
self.add_scored_words((w, freq) for w, _, freq in tr.iter_field(fieldname))
finally:
tr.close()
def add_words(self, ws, score = 1):
"""Adds a list of words to the backend dictionary.
:param ws: A sequence of words (strings) to add to the dictionary.
:param score: An optional score to use for ALL the words in 'ws'.
"""
self.add_scored_words((w, score) for w in ws)
def add_scored_words(self, ws):
"""Adds a list of ("word", score) tuples to the backend dictionary.
Associating words with a score lets you use the 'usescores' keyword
argument of the suggest() method to order the suggestions using the
scores.
:param ws: A sequence of ("word", score) tuples.
"""
writer = self.index().writer()
for text, score in ws:
if text.isalpha():
fields = {"word": text, "score": score}
for size in xrange(self.mingram, self.maxgram + 1):
nga = analysis.NgramAnalyzer(size)
gramlist = [t.text for t in nga(text)]
if len(gramlist) > 0:
fields["start%s" % size] = gramlist[0]
fields["end%s" % size] = gramlist[-1]
fields["gram%s" % size] = " ".join(gramlist)
writer.add_document(**fields)
writer.commit()
if __name__ == '__main__':
pass
|
py | b40b03a335f9e091ccb2a758284d00c391f1e103 | # Servo Control
import time
import wiringpi
#gpio pwm-ms
#pio pwmc 192
#gpio pwmr 2000
#gpio -g pwm 18 100 #1.0ms left
#gpio -g pwm 18 150 #1.5ms middle
#gpio -g pwm 18 200 #2.0ms right
# use 'GPIO naming'
wiringpi.wiringPiSetupGpio()
# set #13 & 18 to be PWM outputs
wiringpi.pinMode(13, wiringpi.GPIO.PWM_OUTPUT)
wiringpi.pinMode(18, wiringpi.GPIO.PWM_OUTPUT)
# set the PWM mode to milliseconds stype
wiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)
# divide down clock for 50Hz
wiringpi.pwmSetClock(192)
wiringpi.pwmSetRange(2000)
delay_period = 0.05 #0.1 .. 0.001, slower .. faster
#55 (0.55ms) .. 252 (2.52ms)
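# With the 19.2 MHz PWM base clock divided by 192, each count is 10 us; a range of 2000
# counts gives a 20 ms period (50 Hz), so 100 = 1.0 ms, 150 = 1.5 ms, 200 = 2.0 ms.
# The value 98 used below therefore produces ~0.98 ms pulses.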
while True:
wiringpi.pwmWrite(13, 98)
    wiringpi.pwmWrite(18, 98)  # second servo is on GPIO 18; GPIO 15 was never configured for PWM
time.sleep(delay_period)
# for i, j in zip(range(55, 252, 1), range(252, 55, -1)):
# wiringpi.pwmWrite(13, i)
# wiringpi.pwmWrite(18, j)
# print('{}, {}'.format(i, j))
# time.sleep(delay_period)
# for i, j in zip(range(252, 55, -1), range(55, 252, 1)):
# wiringpi.pwmWrite(13, i)
# wiringpi.pwmWrite(18, j)
# print('{}, {}'.format(i, j))
# time.sleep(delay_period) |
py | b40b03a5e6f372e0432aad099906b365ed0e3f9c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module 2: Input Requests and Support Details
A User is asked if they want to get Help or provide Help.
"""
from datetime import datetime
from datetime import date
from geopy.geocoders import Nominatim
def geo(loc):
"""
Function defines the location of student.
Paramters
---------
loc: string
The location name of a user.
Returns
-------
Geolocated code for user.
"""
geolocator = Nominatim(user_agent="http")
return geolocator.geocode(loc)
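# Illustrative usage (a live Nominatim lookup, so results may vary):
#   geo("Alexanderplatz Berlin, Deutschland").address          -> full address string
#   (geo("Alexanderplatz Berlin, Deutschland").latitude,
#    geo("Alexanderplatz Berlin, Deutschland").longitude)      -> coordinate pair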
class Request:
"""
A class to input details of help requested.
Attributes
----------
type: category
The category of help requested.
"""
def __init__(self, type):
"""
Function defines the type of help requested.
Parameters
----------
Category : dict
Type of help requested.
optionsDict : dict
Offer options for help based on selection in Category.
Returns
-------
None.
"""
self.type = type
self.Category = {"1": "Errands",
"2": "Ride",
"3": "Translate",
"4": "Tutor"}
self.optionsDict = {"Errands": {"1": "Flat maintenance",
"2": "Government services",
"3": "Grocery shopping",
"4": "Mall shopping",
"5": "Move in/out",
"6": "Take care of pets/plants"},
"Ride": {"1": "Charlottenburg",
"2": "Friedrichshain",
"3": "Kreuzberg",
"4": "Litchtenberg",
"5": "Mitte",
"6": "Neukoelln",
"7": "Pankow",
"8": "Spandau",
"9": "Steglitz",
"10": "Tempelhof",
"11": "Schoeneberg",
"12": "Treptow-Koepenick"},
"Translate": {"1": "English",
"2": "French",
"3": "German",
"4": "Hindi",
"5": "Japanese",
"6": "Mandarin",
"7": "Polish",
"8": "Russian",
"9": "Spanish",
"10": "Swedish"},
"Tutor": {"1": "Economics",
"2": "Finance",
"3": "History",
"4": "Law",
"5": "Literature",
"6": "Mathematics",
"7": "Programming Language: Python",
"8": "Programming Language: R",
"9": "Statistics",
"10": "Sciences"}
}
# general function for selecting from dictionary
def OptionsSelect(self, options, name):
"""
Function allows for selecting from help options
Returns
-------
Selected option.
"""
index = 0
indexValidList = []
print('Select ' + name + ':')
for optionName in options:
"""
For multiple help requests
"""
index = index + 1
indexValidList.extend([options[optionName]])
print(str(index) + '. ' + options[optionName])
inputValid = False
while not inputValid:
"""
Defining response for invalid input
"""
inputRaw = input(name + ': ')
inputNo = int(inputRaw) - 1
if inputNo > -1 and inputNo < len(indexValidList):
selected = indexValidList[inputNo]
print('Selected ' + name + ': ' + selected)
inputValid = True
break
else:
print('Please select a valid ' + name + ' number')
return selected
# select category
def CatSelect(self):
"""
Defines selected category
Returns
-------
None.
"""
self.selectCat = self.OptionsSelect(self.Category, self.type)
# select option
def OptSelect(self):
"""
Defines selected options
Returns
-------
None.
"""
if self.selectCat != "Ride":
self.catOptions = self.optionsDict[self.selectCat]
self.selectOption= self.OptionsSelect(self.catOptions, (self.type + " Option"))
else:
self.selectOption = "NA"
# input location
def LocSelect(self):
"""
Defines location, if a ride is requested
Returns
-------
None.
"""
while True:
try:
                if self.selectCat == "Ride":
                    self.OptOrg = str(input("Please enter the complete address of your origin: "))
                    org_loc = geo(str(self.OptOrg) + " Berlin, Deutschland")  # must always be within Berlin state
                    self.OrgAdd = org_loc.address
                    self.OrgCoord = (org_loc.latitude, org_loc.longitude)  # geocode once, reuse for address and coordinates
                    self.OptDest = str(input("Please enter the complete address of your destination: "))
                    dest_loc = geo(str(self.OptDest) + " Berlin, Deutschland")
                    self.DestAdd = dest_loc.address
                    self.DestCoord = (dest_loc.latitude, dest_loc.longitude)
                else:  # also enter address for other categories
                    self.OptOrg = str(input("Please enter the complete address of your preferred location: "))
                    org_loc = geo(str(self.OptOrg) + " Berlin, Deutschland")  # must always be within Berlin state
                    self.OrgAdd = org_loc.address
                    self.OrgCoord = (org_loc.latitude, org_loc.longitude)
                    self.OptDest = "NA"
                    self.DestAdd = "NA"
                    self.DestCoord = "NA"
except AttributeError:
print ("Invalid address. Please enter address within Berlin only.")
continue
break
# input date
def validDate(self):
"""
Defines data of request.
Returns
-------
None.
"""
while True:
try:
self.requestdate = datetime.date(datetime.strptime(input("Enter date for request (YYYY-MM-DD): ") ,'%Y-%m-%d'))
if self.requestdate < date.today():
print("Invalid date. Please enter a future date.")
continue
except ValueError:
print ("Invalid date. Please enter date in YYYY-MM-DD format.")
continue
break
# input time
def validTime(self):
"""
Defines time of request.
Returns
-------
None.
"""
while True:
try:
self.requesttime = datetime.time(datetime.strptime(input("Enter time for request (HH:MM): "), "%H:%M"))
except ValueError:
print ("Invalid time. Please enter date in HH:MM format.")
continue
else:
break
# input additional information:
def AddInfo(self):
"""
Allows user to put in any additional information
Returns
-------
None.
"""
self.info = str(input("Please provide any additional information regarding your request (enter NONE if no further details are needed): "))
def TimeStamp(self):
"""
Gets the timestamp when request/support is sent
Returns
-------
timenow: timestamp.
"""
self.timenow = datetime.now()
# print request details
def printDetails(self):
"""
Prints the details of the User's request or support
Returns
-------
selectcat: str
Category
OrgAdd:
Exact location of the Origin
DestAdd:
Exact location of the Desitnation
requestdate: date
Date of request or support
requesttime: time
Time of request or support
info: str
Additional information
Timestamp: time
Timestamp when request/support is sent
"""
if self.selectCat == "Ride":
print("Thank you! Your", self.type, "has been recorded with the following details:" +
"\n Category: ", self.selectCat +
#"\n Option: ", self.selectOption +
"\n Origin: ", self.OrgAdd +
"\n Destination: ", self.DestAdd +
"\n Date: ", str(self.requestdate) +
"\n Time: ", str(self.requesttime) +
"\n Additional Information: ", self.info,
"\n Timestamp: ", self.timenow)
else:
print("Thank you! Your", self.type, "has been recorded with the following details:" +
"\n Category: ", self.selectCat +
"\n Option: ", self.selectOption +
"\n Location: ", self.OrgAdd +
#"\n Destination: ", self.DestAdd +
"\n Date: ", str(self.requestdate) +
"\n Time: ", str(self.requesttime) +
"\n Additional Information: ", self.info,
"\n Timestamp: ", self.timenow)
def runAll(self):
"""
Runs all functions necessary of Class Request
Returns
-------
All objects generated from functions inside
"""
self.CatSelect()
self.OptSelect()
self.LocSelect()
self.validDate()
self.validTime()
self.AddInfo()
self.TimeStamp()
self.printDetails()
# getters
    def getreqCat(self):
        """
        Returns
        -------
        Selected category.
        """
        return self.selectCat
    def getreqOpt(self):
        """
        Returns
        -------
        Selected option.
        """
        return self.selectOption
    def getreqOrg(self):
        """
        Returns
        -------
        Identified pick-up address.
        """
        return self.OptOrg
    def getreqOrg_add(self):
        """
        Returns
        -------
        Identified pick-up location.
        """
        return self.OrgAdd
    def getreqOrg_coord(self):
        """
        Returns
        -------
        Pick-up coordinates.
        """
        return self.OrgCoord
    def getreqDest(self):
        """
        Returns
        -------
        Identified destination.
        """
        return self.OptDest
    def getreqDest_add(self):
        """
        Returns
        -------
        Identified destination location.
        """
        return self.DestAdd
    def getreqDest_coord(self):
        """
        Returns
        -------
        Destination coordinates.
        """
        return self.DestCoord
    def getreqDate(self):
        """
        Returns
        -------
        Date that help is requested.
        """
        return self.requestdate
    def getreqTime(self):
        """
        Returns
        -------
        Time that help is requested.
        """
        return self.requesttime
    def getreqInfo(self):
        """
        Returns
        -------
        Additional information provided.
        """
        return self.info
    def getTimestamp(self):
        """
        Returns
        -------
        Current time.
        """
        return self.timenow
|
py | b40b049553b0f61b902f3b7d5ef22a113829c055 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0); |
py | b40b04e61fbf65b590734baefb7f33be42be2764 | #!/usr/bin/env python
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from base import Request, sendApdu
import argparse
import struct
parser = argparse.ArgumentParser()
parser.add_argument('--acc', help="Account number to sign with", default=0)
parser.add_argument('--message', help="Message to be sign",
default="test message")
args = parser.parse_args()
accNumber = struct.pack(">I", int(args.acc))
messageLength = struct.pack(">I", len(args.message))
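# Payload layout, as constructed above: 4-byte big-endian account index,
# 4-byte big-endian message length, then the raw message bytes.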
result = sendApdu(Request['SignMessage'], Request['NoneVerify'],
accNumber, messageLength, args.message)
signature = result[0: 1 + 32 + 32]
print "Signature " + ''.join(format(x, '02x') for x in signature)
|
py | b40b0574d87ff31f34853a71aceb1deed2d89274 | from pysearpc import searpc_func, SearpcError, NamedPipeClient
class RpcClient(NamedPipeClient):
"""RPC used in client"""
def __init__(self, socket_path, *args, **kwargs):
NamedPipeClient.__init__(
self,
socket_path,
"seafile-rpcserver",
*args,
**kwargs
)
@searpc_func("string", ["int"])
def seafile_sync_error_id_to_str():
pass
sync_error_id_to_str = seafile_sync_error_id_to_str
@searpc_func("int", ["int"])
def seafile_del_file_sync_error_by_id():
pass
del_file_sync_error_by_id = seafile_del_file_sync_error_by_id
@searpc_func("int", ["string"])
def seafile_calc_dir_size(path):
pass
calc_dir_size = seafile_calc_dir_size
@searpc_func("int64", [])
def seafile_get_total_block_size():
pass
get_total_block_size = seafile_get_total_block_size
@searpc_func("string", ["string"])
def seafile_get_config(key):
pass
get_config = seafile_get_config
@searpc_func("int", ["string", "string"])
def seafile_set_config(key, value):
pass
set_config = seafile_set_config
@searpc_func("int", ["string"])
def seafile_get_config_int(key):
pass
get_config_int = seafile_get_config_int
@searpc_func("int", ["string", "int"])
def seafile_set_config_int(key, value):
pass
set_config_int = seafile_set_config_int
@searpc_func("int", ["int"])
def seafile_set_upload_rate_limit(limit):
pass
set_upload_rate_limit = seafile_set_upload_rate_limit
@searpc_func("int", ["int"])
def seafile_set_download_rate_limit(limit):
pass
set_download_rate_limit = seafile_set_download_rate_limit
# repo
@searpc_func("objlist", ["int", "int"])
def seafile_get_repo_list():
pass
get_repo_list = seafile_get_repo_list
@searpc_func("object", ["string"])
def seafile_get_repo():
pass
get_repo = seafile_get_repo
@searpc_func("string", ["string", "string", "string", "string", "string", "int"])
def seafile_create_repo(name, desc, passwd, base, relay_id, keep_history):
pass
create_repo = seafile_create_repo
@searpc_func("int", ["string"])
def seafile_destroy_repo(repo_id):
pass
remove_repo = seafile_destroy_repo
@searpc_func("objlist", ["string", "string", "string", "int"])
def seafile_diff():
pass
get_diff = seafile_diff
@searpc_func("object", ["string", "int", "string"])
def seafile_get_commit(repo_id, version, commit_id):
pass
get_commit = seafile_get_commit
@searpc_func("objlist", ["string", "int", "int"])
def seafile_get_commit_list():
pass
get_commit_list = seafile_get_commit_list
@searpc_func("objlist", ["string"])
def seafile_branch_gets(repo_id):
pass
branch_gets = seafile_branch_gets
@searpc_func("int", ["string", "string"])
def seafile_branch_add(repo_id, branch):
pass
branch_add = seafile_branch_add
# clone related
@searpc_func("string", ["string", "string"])
def gen_default_worktree(worktree_parent, repo_name):
pass
@searpc_func("string", ["string", "int", "string", "string", "string", "string", "string", "string", "int", "string"])
def seafile_clone(repo_id, repo_version, repo_name, worktree, token, password, magic, email, random_key, enc_version, more_info):
pass
clone = seafile_clone
@searpc_func("string", ["string", "int", "string", "string", "string", "string", "string", "string", "int", "string"])
def seafile_download(repo_id, repo_version, repo_name, wt_parent, token, password, magic, email, random_key, enc_version, more_info):
pass
download = seafile_download
@searpc_func("int", ["string"])
def seafile_cancel_clone_task(repo_id):
pass
cancel_clone_task = seafile_cancel_clone_task
@searpc_func("objlist", [])
def seafile_get_clone_tasks():
pass
get_clone_tasks = seafile_get_clone_tasks
@searpc_func("object", ["string"])
def seafile_find_transfer_task(repo_id):
pass
find_transfer_task = seafile_find_transfer_task
# sync
@searpc_func("int", ["string", "string"])
def seafile_sync(repo_id, peer_id):
pass
sync = seafile_sync
@searpc_func("object", ["string"])
def seafile_get_repo_sync_task():
pass
get_repo_sync_task = seafile_get_repo_sync_task
@searpc_func("int", [])
def seafile_is_auto_sync_enabled():
pass
is_auto_sync_enabled = seafile_is_auto_sync_enabled
@searpc_func("objlist", ["int", "int"])
def seafile_get_file_sync_errors():
pass
get_file_sync_errors = seafile_get_file_sync_errors
###### Property Management #########
@searpc_func("int", ["string", "string"])
def seafile_set_repo_passwd(repo_id, passwd):
pass
set_repo_passwd = seafile_set_repo_passwd
@searpc_func("int", ["string", "string", "string"])
def seafile_set_repo_property(repo_id, key, value):
pass
set_repo_property = seafile_set_repo_property
@searpc_func("string", ["string", "string"])
def seafile_get_repo_property(repo_id, key):
pass
get_repo_property = seafile_get_repo_property
@searpc_func("string", ["string"])
def seafile_get_repo_relay_address(repo_id):
pass
get_repo_relay_address = seafile_get_repo_relay_address
@searpc_func("string", ["string"])
def seafile_get_repo_relay_port(repo_id):
pass
get_repo_relay_port = seafile_get_repo_relay_port
@searpc_func("int", ["string", "string", "string"])
def seafile_update_repo_relay_info(repo_id, addr, port):
pass
update_repo_relay_info = seafile_update_repo_relay_info
@searpc_func("int", ["string", "string"])
def seafile_set_repo_token(repo_id, token):
pass
set_repo_token = seafile_set_repo_token
@searpc_func("string", ["string"])
def seafile_get_repo_token(repo_id):
pass
get_repo_token = seafile_get_repo_token
@searpc_func("object", ["int", "string", "string"])
def seafile_generate_magic_and_random_key(enc_version, repo_id, password):
pass
generate_magic_and_random_key = seafile_generate_magic_and_random_key
@searpc_func("int", [])
def seafile_shutdown():
pass
shutdown = seafile_shutdown
|
py | b40b059e6a5e1122b0c60d8bc01ce58a59ca9f1c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
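# Usage (illustrative): python <this script> builds.json
# where builds.json is presumed to be the JSON list returned by CircleCI's recent-builds API;
# prints the build number of the first successful 'cross' build on master, otherwise exits 1.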
def main():
assert (len (sys.argv) == 2)
items = json.load (open (sys.argv[1], 'r'))
for item in items:
job_name = item['workflows']['job_name']
build_status = item['status']
branch = item['branch']
if job_name == 'cross' and build_status == 'success' and branch == 'master':
print (item['build_num'])
exit(0)
exit (1)
main()
|
py | b40b06d25505a8324197cab81900f127ef6d9f6f | from gym.envs.registration import register
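# Registering these ids lets the environments be created by name, e.g. (illustrative):
#   import gym, gym_symbol
#   env = gym.make('GridNav_2-v0')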
# gridnav: square ##############################################################
register(
id='GridNav_2-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'gridnav_2_v0.yaml'}
)
register(
id='GridNav_2-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'gridnav_2_v1.yaml'}
)
register(
id='GridNav_3-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'gridnav_3_v0.yaml'}
)
register(
id='GridNav_3-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'gridnav_3_v1.yaml'}
)
# nchain modified ##############################################################
# gym.error.Error: Cannot re-register id: NChain-v0
register(
id='NChain_mod-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'nchain_mod_v0.yaml'}
)
register(
id='NChain_mod-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'nchain_mod_v1.yaml'}
)
# tor ##########################################################################
register(
id='Tor_20201121a-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20201121a.yaml'}
)
register(
id='Tor_20201121a-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20201121a_v1.yaml'}
)
register(
id='hordijk_example-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'hordijk_example_v0.yaml'}
)
register(
id='Hordijk_example-v3',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'hordijk_example_v3.yaml'}
)
register(
id='Hordijk_example-v4',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'hordijk_example_v4.yaml'}
)
register(
id='Tor_20210306-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20210306_v0.yaml'}
)
register(
id='Tor_20210306-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20210306_v1.yaml'}
)
register(
id='Tor_20210307-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20210307_v0.yaml'}
)
register(
id='Tor_20210307-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'tor_20210307_v1.yaml'}
)
# feinberg_2002_hmdp ###########################################################
register(
id='Example_3_1-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_3_1.yaml'}
)
register(
id='Example_3_3-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_3_3.yaml'}
)
register(
id='Example_8_1-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_8_1.yaml'}
)
# puterman_1994_mdp ############################################################
register(
id='Example_10_1_1-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_10_1_1.yaml'}
)
register(
id='Example_10_1_2-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_10_1_2.yaml'}
)
register(
id='Example_10_1_2-v1',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_10_1_2_v1.yaml'}
)
register(
id='Example_10_2_2-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'example_10_2_2.yaml'}
)
register(
id='Problem_10_7-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'problem_10_7.yaml'}
)
register(
id='Problem_10_9-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'problem_10_9.yaml'}
)
register(
id='Problem_6_64-v0',
entry_point='gym_symbol.envs:SymbolicRepresentation',
kwargs={'cfg_fname': 'problem_6_64.yaml'}
)
|
py | b40b06f4c087054f91b6544e477489197357db8c | """ global parameters"""
import logging
from vehiclemodels.parameters_vehicle1 import parameters_vehicle1 # Ford Escort - front wheel drive
from vehiclemodels.parameters_vehicle2 import parameters_vehicle2 # BMW 320i - rear wheel drive
from vehiclemodels.parameters_vehicle3 import parameters_vehicle3 # VW Vanagon - rear wheel drive
from vehiclemodels.parameters_vehicle4 import parameters_vehicle4 # semi-trailer truck - complex
from vehiclemodels.init_ks import init_ks
from vehiclemodels.init_st import init_st
from vehiclemodels.init_mb import init_mb
from vehiclemodels.init_std import init_std
from vehiclemodels.vehicle_dynamics_ks import vehicle_dynamics_ks # kinematic single track, no slip
from vehiclemodels.vehicle_dynamics_st import vehicle_dynamics_st # single track bicycle with slip
from vehiclemodels.vehicle_dynamics_std import vehicle_dynamics_std # single track bicycle with slip
from vehiclemodels.vehicle_dynamics_mb import vehicle_dynamics_mb # fancy multibody model
LOGGING_LEVEL=logging.INFO # set the overall default leval, change with --log option
import scipy.constants
G=scipy.constants.value('standard acceleration of gravity')
#######################################################
# client
# SERVER_HOST='telluridevm.iniforum.ch' # metanet 16-core model server
SERVER_HOST='localhost'
SERVER_PING_INTERVAL_S = 1 # interval between trying for server
SERVER_TIMEOUT_SEC = 1 # timeout in seconds for UDP socket reads during game running
ENABLE_UPNP = True # set True to try unpnp to forward CLIENT_PORT_RANGE ports to local machine
UPNP_LEASE_TIME = 1200 # the lease time for these ports in seconds
# your autodrive controller module (i.e. folder) and class name, must be a class that has read method that returns the car_command() object
# AUTODRIVE_MODULE='src.controllers.pid_next_waypoint_car_controller'
# AUTODRIVE_CLASS='pid_next_waypoint_car_controller'
# overridden by command line --autodrive
# AUTODRIVE_MODULE='src.controllers.pure_pursuit_controller'
# AUTODRIVE_CLASS = 'pure_pursuit_controller'
AUTODRIVE_MODULE='src.controllers.pure_pursuit_controller_v2'
AUTODRIVE_CLASS = 'pure_pursuit_controller_v2'
# your model class that takes car state and control and predicts the next state given a future time.
# overridden by command line --model
CAR_MODEL_MODULE= 'src.models.models' # the module (i.e. folder.file without .py)
CAR_MODEL_CLASS = 'linear_extrapolation_model' # the class within the file
# CAR_MODEL_CLASS= 'RNN_model'
#display
FPS=20 # frames per second for simulation and animation
GAME_FONT_NAME = 'Consolas' # local display font, default is Consolas
GAME_FONT_SIZE = 16 # default is 16
# Joystick connectivity
CHECK_FOR_JOYSTICK_INTERVAL = 100 # check for missing joystick every this many cycles
JOYSTICK_NUMBER = 0 # in case multiple joysticks, use this to set the desired one, starts from zero
# recording data
DATA_FILENAME_BASE= 'l2race'
DATA_FOLDER_NAME= 'data'
# car and track options
CAR_NAME='l2racer' # label stuck on car
TRACK_NAME='oval_easy' # tracks are stored in the 'media' folder. Data for a track must be extracted using scripts in Track_Preparation before using in l2race
TRACKS_FOLDER='./media/tracks/' # location of tracks relative to root of l2race
# Other possible track names:
# track_names = [
# 'Sebring',
# 'oval',
# 'oval_easy',
# 'track_1',
# 'track_2',
# 'track_3',
# 'track_4',
# 'track_5',
# 'track_6']
# track_name + '.png'
# track_name + '_map.npy'
# track_name + 'Info.npy'
# help message printed by hitting h or ? key
HELP="""Keyboard commands:
drive with LEFT/UP/RIGHT/DOWN or AWDS keys
hold SPACE pressed to reverse with drive keys\n
y runs automatic control (if implemented)
m runs user model (if implemented)
r resets car
R restarts client from scratch (if server went down)
l toggles recording logging to uniquely-named CSV file
ESC quits
h|? shows this help
"""
#######################################################
# server and model settings. Client cannot affect these model server settings
#
# DO NOT CHANGE THESE VALUES unless you want to control model server server.py
#########################
# DO NOT CHANGE UNLESS they are also changed on model server
# Define screen area, track is scaled to fill this area, note 4:3 aspect ratio
# Track information must also be generated at this size so client cannot change the values easily.
SCREEN_WIDTH_PIXELS = 1024 # pixels
SCREEN_HEIGHT_PIXELS = 768 # pixels
# meters per screen pixel, e.g. 4m car would be 40 pixels, so about 4% of width
# increase M_PER_PIXEL to make cars smaller relative to track
M_PER_PIXEL = 0.20 # Overall scale parameter: 0.2 makes the cars really small on track. 0.1 makes them fill about 1/3 of track width.
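# At the current M_PER_PIXEL = 0.20 a 4 m car spans 4 / 0.20 = 20 pixels, i.e. about 2% of the
# 1024-pixel screen width; the "40 pixels / about 4%" figure above corresponds to M_PER_PIXEL = 0.1.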
# car model and solver
MODEL = vehicle_dynamics_st # vehicle_dynamics_ks vehicle_dynamics_ST vehicle_dynamics_MB
SOLVER = 'euler' # 'RK23' # DOP853 LSODA BDF RK45 RK23 # faster, no overhead but no checking
PARAMETERS = parameters_vehicle2()
EULER_TIMESTEP_S=1e-3 # fixed timestep for Euler solver (except for last one)
RTOL = 1e-2 # tolerance value for RK and other gear-shifting solvers (anything but euler)
ATOL = 1e-4
SERVER_PORT = 50000 # client starts game on this port on the SERVER_HOST
CLIENT_PORT_RANGE = '50010-50020' # range of ports used for client that server uses for game
# client needs to open/forward this port range for receiving state from server and sending commands to server
# The ENABLE_UPNP flag turns on automatic forwarding but it does not work with all routers.
KILL_ZOMBIE_TRACK_TIMEOUT_S = 10 # if track process gets no input for this long, it terminates itself
FRICTION_FACTOR = .5 # overall friction parameter multiplier for some models, not used for now
SAND_SLOWDOWN = 0.985 # If in sand, at every update the resulting velocity is multiplied by the slowdown factor
REVERSE_TO_FORWARD_GEAR = 0.5 # You get less acceleration on reverse gear than while moving forwards.
MODEL_UPDATE_RATE_HZ = 50 # rate that server attempts to update all the car models for each track process (models run serially in each track process)
MAX_CARS_PER_TRACK = 6 # only this many cars can run on each track
MAX_SPECTATORS_PER_TRACK = 10 # only this many spectators can connect to each track
KS_TO_ST_SPEED_M_PER_SEC = 2.0 # transistion speed from KS to ST model types
### Constants for RNN0 model:
import pandas as pd
import numpy as np
normalization_distance = M_PER_PIXEL*(np.sqrt((SCREEN_HEIGHT_PIXELS ** 2) + (SCREEN_WIDTH_PIXELS ** 2)))
normalization_velocity = 50.0 # from Mark 24
normalization_acceleration = 5.0 # 2.823157895
normalization_angle = 180.0
normalization_dt = 1.0e-1
normalization_x = SCREEN_WIDTH_PIXELS
normalization_y = SCREEN_HEIGHT_PIXELS
NORMALIZATION_INFO = pd.DataFrame({
'time': None,
'dt': normalization_dt,
'command.autodrive_enabled': None,
'command.steering': None,
'command.throttle': None,
'command.brake': None,
'command.reverse': None,
'position_m.x': normalization_distance,
'position_m.y': normalization_distance,
'velocity_m_per_sec.x': normalization_velocity,
'velocity_m_per_sec.y': normalization_velocity,
'speed_m_per_sec': normalization_velocity,
'accel_m_per_sec_2.x': normalization_acceleration,
'accel_m_per_sec_2.y': normalization_acceleration,
'steering_angle_deg': None,
'body_angle_deg': normalization_angle,
'body_angle.cos': None,
'body_angle.sin': None,
'yaw_rate_deg_per_sec': None,
'drift_angle_deg': None,
'hit_distance': normalization_distance,
'nearest_waypoint_idx': None,
'first_next_waypoint.x': normalization_distance,
'first_next_waypoint.y': normalization_distance,
'fifth_next_waypoint.x': normalization_distance,
'fifth_next_waypoint.y': normalization_distance,
'twentieth_next_waypoint.x': normalization_distance,
'twentieth_next_waypoint.y': normalization_distance
}, index=[0])
|
py | b40b06f5c09882794bd33a440aac749e9e6eb6da | # -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import pprint
import tempfile
from unittest import mock
import iterfzf
import pytest
from jiraohsofancy import jiraohsofancy
from . import fixtures
@pytest.mark.usefixtures("reset_env")
class TestJIC():
def test_get_config_env(self, monkeypatch):
monkeypatch.setenv("JIRA_USERNAME", "foo")
monkeypatch.setenv("JIRA_PASSWORD", "bar")
monkeypatch.setenv("JIRA_SERVER", "https://blah")
j = jiraohsofancy.JIC(None)
ret = j.set_config()
assert (ret['username'] == "foo")
def test_get_config_file(self, monkeypatch, tmp_path):
tmpfile = tmp_path / "config.ini"
fd = open(tmpfile, 'wb')
fd.write(b"""[jira]\n
server=http://hahaha\n
username=hello\n
password=moto\n
""")
fd.close()
argsetup = argparse.Namespace(config_file=tmpfile)
j = jiraohsofancy.JIC(argsetup)
j.set_config()
assert (j.config["username"] == "hello")
def test_get_objects(self):
j = jiraohsofancy.JIC(None)
fake = fixtures.FakeJIRA()
j._cnx = fake
projects = ["INI", "MANI", "MOH"]
iterfzf.iterfzf = mock.MagicMock(return_value=projects[0])
fake.set_projects(projects)
assert (j.get_project().name == projects[0])
priorities = ["OYLO", "ROUKO", "DEAG"]
iterfzf.iterfzf = mock.MagicMock(return_value=priorities[-1])
fake.set_priorities(priorities)
assert (j.get_priorities().name == priorities[-1])
components = ["ATTA", "BOYA", "KASHA"]
iterfzf.iterfzf = mock.MagicMock(return_value=components[-2])
fake.set_components(components)
assert (j.get_component("fake").name == components[-2])
versions = ["ATTA", "BOYA", "KASHA"]
iterfzf.iterfzf = mock.MagicMock(return_value=versions[-2])
fake.set_versions(versions)
assert (j.get_versions("fake").name == versions[-2])
def test_new_issue(self, monkeypatch, reset_env, tmp_path):
tmpfile = tmp_path / "config.ini"
fd = open(tmpfile, 'wb')
fd.write(b"""Alatouki la marakena""")
fd.close()
argsetup = argparse.Namespace(
test=True,
open=False,
project="SRVKP",
component="CLI",
priority="Low",
summary="Hello Moto",
assign="me",
version='v0.1',
description_file=tmpfile,
issuetype="Bug")
monkeypatch.setenv("JIRA_USERNAME", "foo")
monkeypatch.setenv("JIRA_PASSWORD", "bar")
monkeypatch.setenv("JIRA_SERVER", "https://blah")
def mypp(_output):
assert (_output["description"] == "Alatouki la marakena")
assert (_output["versions"][0]['name'] == argsetup.version)
assert (_output["summary"] == argsetup.summary)
assert (_output["components"][0]['name'] == argsetup.component)
monkeypatch.setattr(pprint, "pprint", mypp)
ji = jiraohsofancy.JIC(argsetup)
ji._cnx = mock.MagicMock()
ji._cnx.permalink = mock.MagicMock()
ji.set_config()
ji.issue()
ji.args.test = False
ji.issue()
ji._cnx.create_issue.assert_called()
@mock.patch('tempfile.mkstemp')
@mock.patch('subprocess.call')
def test_edit(self, sc, ts):
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(b"Hello Moto")
tmpfile.close()
ts.return_value = [0, tmpfile.name]
argsetup = argparse.Namespace(editor=None)
ji = jiraohsofancy.JIC(argsetup)
ret = ji.edit()
assert (ret == "Hello Moto")
def test_complete(self, capsys):
ff = fixtures.FakeJIRA()
# Python is weird, wtf i need that for
ff._projects = []
ff._versions = []
ff._components = []
for o in [("project", ff.set_projects, ["PROJ1", "PRJ2", "PRJ3"]),
("component", ff.set_components, ["COMP1", "COMP2",
"COMP3"]),
(["version", ff.set_versions, ["v1", "v2", "v3"]])]:
argsetup = argparse.Namespace(complete=o[0], project="BLAH")
o[1](o[2])
ji = jiraohsofancy.JIC(argsetup)
ji._cnx = ff
ret = ji.complete()
assert (ret == " ".join(o[2]))
|
py | b40b077277d640f1c3b1ec2fec914bc3a86ce2c8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
from pathlib import Path
from typing import Tuple, List, Set, Dict, Any
from toolbox.data.DatasetSchema import (
RelationalTripletDatasetSchema,
BaseDatasetSchema,
DBP15k,
)
from toolbox.data.functional import (
read_cache,
read_triple_hrt,
read_attribute_triple_eav,
build_map_tr_h,
build_map_hr_t,
read_seeds, cache_data,
)
# region 2. relational triplet data
class DatasetCachePath:
def __init__(self, cache_path: Path):
self.cache_path = cache_path
self.cache_metadata_path = self.cache_path / 'metadata.pkl'
def is_meta_cache_exists(self):
""" Checks if the metadata of the knowledge graph if available"""
return self.cache_metadata_path.exists()
class RelationalTripletDatasetCachePath(DatasetCachePath):
def __init__(self, cache_path: Path):
DatasetCachePath.__init__(self, cache_path)
self.cache_all_triples_path = self.cache_path / 'triplets_all.pkl'
self.cache_train_triples_path = self.cache_path / 'triplets_train.pkl'
self.cache_test_triples_path = self.cache_path / 'triplets_test.pkl'
self.cache_valid_triples_path = self.cache_path / 'triplets_valid.pkl'
self.cache_all_triples_ids_path = self.cache_path / 'triplets_ids_all.pkl'
self.cache_train_triples_ids_path = self.cache_path / 'triplets_ids_train.pkl'
self.cache_test_triples_ids_path = self.cache_path / 'triplets_ids_test.pkl'
self.cache_valid_triples_ids_path = self.cache_path / 'triplets_ids_valid.pkl'
self.cache_all_entities_path = self.cache_path / 'entities.pkl'
self.cache_all_relations_path = self.cache_path / 'relations.pkl'
self.cache_entities_ids_path = self.cache_path / "entities_ids.pkl"
self.cache_relations_ids_path = self.cache_path / "relations_ids.pkl"
self.cache_idx2entity_path = self.cache_path / 'idx2entity.pkl'
self.cache_idx2relation_path = self.cache_path / 'idx2relation.pkl'
self.cache_entity2idx_path = self.cache_path / 'entity2idx.pkl'
self.cache_relation2idx_path = self.cache_path / 'relation2idx.pkl'
self.cache_hr_t_path = self.cache_path / 'hr_t.pkl'
self.cache_tr_h_path = self.cache_path / 'tr_h.pkl'
self.cache_hr_t_train_path = self.cache_path / 'hr_t_train.pkl'
self.cache_tr_h_train_path = self.cache_path / 'tr_h_train.pkl'
self.cache_hr_t_valid_path = self.cache_path / 'hr_t_valid.pkl'
self.cache_tr_h_valid_path = self.cache_path / 'tr_h_valid.pkl'
self.cache_hr_t_test_path = self.cache_path / 'hr_t_test.pkl'
self.cache_tr_h_test_path = self.cache_path / 'tr_h_test.pkl'
self.cache_relation_property_path = self.cache_path / 'relation_property.pkl'
class BaseData:
def __init__(self,
dataset: BaseDatasetSchema,
cache_path: DatasetCachePath):
self.dataset = dataset
self.cache_path = cache_path
def force_prepare_data(self):
self.read_all_origin_data()
self.transform_all_data()
self.cache_all_data()
def preprocess_data_if_needed(self):
"""Function to prepare the dataset"""
if self.is_cache_exists():
print("data already prepared, using cache")
return
print("preparing data")
self.force_prepare_data()
print("")
print("done!")
        self.print()
def is_cache_exists(self):
"""Function to check if the dataset is cached in the memory"""
return self.cache_path.is_meta_cache_exists()
def read_all_origin_data(self):
pass
def transform_all_data(self):
pass
def cache_all_data(self):
cache_data(self.meta(), self.cache_path.cache_metadata_path)
def clear_cache(self):
pass
def meta(self) -> Dict[str, Any]:
return {}
def dump(self) -> List[str]:
""" Function to dump statistic information of a dataset """
# dump key information
dump = [
"",
"-" * 15 + "Metadata Info for Dataset: " + self.dataset.name + "-" * (15 - len(self.dataset.name)),
"-" * (30 + len("Metadata Info for Dataset: ")),
"",
]
return dump
def print(self, log=print):
for i in self.dump():
log(i)
def __str__(self):
return "\n".join(self.dump())
class RelationalTripletData(BaseData):
""" The class is the main module that handles the knowledge graph.
KnowledgeGraph is responsible for downloading, parsing, processing and preparing
the training, testing and validation dataset.
Args:
dataset (RelationalTripletDatasetSchema): custom dataset.
cache_path (RelationalTripletDatasetCachePath): cache path.
Attributes:
dataset (object): The dataset object isntance.
all_relations (list):list of all the relations.
all_entities (list): List of all the entities.
entity2idx (dict): Dictionary for mapping string name of entities to unique numerical id.
idx2entity (dict): Dictionary for mapping the id to string.
relation2idx (dict): Dictionary for mapping the id to string.
idx2relation (dict): Dictionary for mapping the id to string.
hr_t (dict): Dictionary with set as a default key and list as values.
tr_h (dict): Dictionary with set as a default key and list as values.
hr_t_train (dict): Dictionary with set as a default key and list as values.
tr_h_train (dict): Dictionary with set as a default key and list as values.
relation_property (list): list storing the entities tied to a specific relation.
Examples:
>>> from toolbox.data.DataSchema import RelationalTripletData, RelationalTripletDatasetCachePath
>>> from toolbox.data.DatasetSchema import FreebaseFB15k_237
>>> dataset = FreebaseFB15k_237()
>>> cache = RelationalTripletDatasetCachePath(dataset.cache_path)
>>> data = RelationalTripletData(dataset=dataset, cache_path=cache)
>>> data.preprocess_data_if_needed()
"""
def __init__(self,
dataset: RelationalTripletDatasetSchema,
cache_path: RelationalTripletDatasetCachePath):
BaseData.__init__(self, dataset, cache_path)
self.dataset = dataset
self.cache_path = cache_path
# KG data structure stored in triplet format
self.all_triples: List[Tuple[str, str, str]] = []
self.train_triples: List[Tuple[str, str, str]] = []
self.test_triples: List[Tuple[str, str, str]] = []
self.valid_triples: List[Tuple[str, str, str]] = []
self.all_triples_ids: List[Tuple[int, int, int]] = []
self.train_triples_ids: List[Tuple[int, int, int]] = []
self.test_triples_ids: List[Tuple[int, int, int]] = []
self.valid_triples_ids: List[Tuple[int, int, int]] = []
self.all_relations: List[str] = []
self.all_entities: List[str] = []
self.entities_ids: List[int] = []
self.relations_ids: List[int] = []
self.entity2idx: Dict[str, int] = {}
self.idx2entity: Dict[int, str] = {}
self.relation2idx: Dict[str, int] = {}
self.idx2relation: Dict[int, str] = {}
self.hr_t: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.tr_h: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.hr_t_train: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.tr_h_train: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.hr_t_valid: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.tr_h_valid: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.hr_t_test: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.tr_h_test: Dict[Tuple[int, int], Set[int]] = defaultdict(set)
self.relation_property: Dict[int, float] = {}
# meta
self.entity_count = 0
self.relation_count = 0
self.valid_triples_count = 0
self.test_triples_count = 0
self.train_triples_count = 0
self.triple_count = 0
def read_all_origin_data(self):
self.read_all_triplets()
def read_all_triplets(self):
self.train_triples = read_triple_hrt(self.dataset.data_paths['train'])
self.valid_triples = read_triple_hrt(self.dataset.data_paths['valid'])
self.test_triples = read_triple_hrt(self.dataset.data_paths['test'])
self.all_triples = self.train_triples + self.valid_triples + self.test_triples
self.valid_triples_count = len(self.valid_triples)
self.test_triples_count = len(self.test_triples)
self.train_triples_count = len(self.train_triples)
self.triple_count = self.valid_triples_count + self.test_triples_count + self.train_triples_count
def transform_all_data(self):
self.transform_entities_relations()
self.transform_mappings()
self.transform_all_triplets_ids()
self.transform_entity_ids()
self.transform_relation_ids()
self.transform_hr_t()
self.transform_tr_h()
self.transform_hr_t_train()
self.transform_tr_h_train()
self.transform_hr_t_valid()
self.transform_tr_h_valid()
self.transform_hr_t_test()
self.transform_tr_h_test()
self.transform_relation_property()
def transform_entities_relations(self):
""" Function to read the entities. """
entities: Set[str] = set()
relations: Set[str] = set()
# print("entities_relations")
# bar = Progbar(len(self.all_triples))
# i = 0
for h, r, t in self.all_triples:
entities.add(h)
entities.add(t)
relations.add(r)
# i += 1
# bar.update(i, [("h", h.split("/")[-1]), ("r", r.split("/")[-1]), ("t", t.split("/")[-1])])
self.all_entities = sorted(list(entities))
self.all_relations = sorted(list(relations))
self.entity_count = len(self.all_entities)
self.relation_count = len(self.all_relations)
def transform_mappings(self):
""" Function to generate the mapping from string name to integer ids. """
for k, v in enumerate(self.all_entities):
self.entity2idx[v] = k
self.idx2entity[k] = v
for k, v in enumerate(self.all_relations):
self.relation2idx[v] = k
self.idx2relation[k] = v
def transform_all_triplets_ids(self):
entity2idx = self.entity2idx
relation2idx = self.relation2idx
self.train_triples_ids = [(entity2idx[h], relation2idx[r], entity2idx[t]) for h, r, t in self.train_triples]
self.test_triples_ids = [(entity2idx[h], relation2idx[r], entity2idx[t]) for h, r, t in self.test_triples]
self.valid_triples_ids = [(entity2idx[h], relation2idx[r], entity2idx[t]) for h, r, t in self.valid_triples]
self.all_triples_ids = self.train_triples_ids + self.valid_triples_ids + self.test_triples_ids
def transform_entity_ids(self):
entity2idx = self.entity2idx
print("entities_ids")
# bar = Progbar(len(self.all_entities))
# i = 0
for e in self.all_entities:
self.entities_ids.append(entity2idx[e])
# i += 1
# bar.update(i, [("entity", e.split("/")[-1])])
def transform_relation_ids(self):
relation2idx = self.relation2idx
print("relations_ids")
# bar = Progbar(len(self.all_relations))
# i = 0
for r in self.all_relations:
self.relations_ids.append(relation2idx[r])
# i += 1
# bar.update(i, [("relation", r.split("/")[-1])])
def transform_hr_t(self):
""" Function to read the list of tails for the given head and relation pair. """
self.hr_t = build_map_hr_t(self.all_triples_ids)
def transform_tr_h(self):
""" Function to read the list of heads for the given tail and relation pair. """
self.tr_h = build_map_tr_h(self.all_triples_ids)
def transform_hr_t_train(self):
""" Function to read the list of tails for the given head and relation pair for the training set. """
self.hr_t_train = build_map_hr_t(self.train_triples_ids)
def transform_tr_h_train(self):
""" Function to read the list of heads for the given tail and relation pair for the training set. """
self.tr_h_train = build_map_tr_h(self.train_triples_ids)
def transform_hr_t_valid(self):
""" Function to read the list of tails for the given head and relation pair for the valid set. """
self.hr_t_valid = build_map_hr_t(self.valid_triples_ids)
def transform_tr_h_valid(self):
""" Function to read the list of heads for the given tail and relation pair for the valid set. """
self.tr_h_valid = build_map_tr_h(self.valid_triples_ids)
def transform_hr_t_test(self):
""" Function to read the list of tails for the given head and relation pair for the valid set. """
self.hr_t_test = build_map_hr_t(self.test_triples_ids)
def transform_tr_h_test(self):
""" Function to read the list of heads for the given tail and relation pair for the valid set. """
self.tr_h_test = build_map_tr_h(self.test_triples_ids)
def transform_relation_property(self):
""" Function to read the relation property.
Returns:
list: Returns the list of relation property.
"""
relation_property_head = {x: [] for x in range(len(self.all_relations))}
relation_property_tail = {x: [] for x in range(len(self.all_relations))}
# print("relation_property")
# bar = Progbar(len(self.train_triples_ids))
# i = 0
for h, r, t in self.train_triples_ids:
relation_property_head[r].append(h)
relation_property_tail[r].append(t)
# i += 1
# bar.update(i, [])
self.relation_property = {}
for x in relation_property_head.keys():
value_up = len(set(relation_property_tail[x]))
value_bot = len(set(relation_property_head[x])) + len(set(relation_property_tail[x]))
if value_bot == 0:
value = 0
else:
value = value_up / value_bot
self.relation_property[x] = value
return self.relation_property
def cache_all_data(self):
"""Function to cache the prepared dataset in the memory"""
cache_data(self.all_triples, self.cache_path.cache_all_triples_path)
cache_data(self.train_triples, self.cache_path.cache_train_triples_path)
cache_data(self.test_triples, self.cache_path.cache_test_triples_path)
cache_data(self.valid_triples, self.cache_path.cache_valid_triples_path)
cache_data(self.all_triples_ids, self.cache_path.cache_all_triples_ids_path)
cache_data(self.train_triples_ids, self.cache_path.cache_train_triples_ids_path)
cache_data(self.test_triples_ids, self.cache_path.cache_test_triples_ids_path)
cache_data(self.valid_triples_ids, self.cache_path.cache_valid_triples_ids_path)
cache_data(self.all_entities, self.cache_path.cache_all_entities_path)
cache_data(self.all_relations, self.cache_path.cache_all_relations_path)
cache_data(self.entities_ids, self.cache_path.cache_entities_ids_path)
cache_data(self.relations_ids, self.cache_path.cache_relations_ids_path)
cache_data(self.idx2entity, self.cache_path.cache_idx2entity_path)
cache_data(self.idx2relation, self.cache_path.cache_idx2relation_path)
cache_data(self.relation2idx, self.cache_path.cache_relation2idx_path)
cache_data(self.entity2idx, self.cache_path.cache_entity2idx_path)
cache_data(self.hr_t, self.cache_path.cache_hr_t_path)
cache_data(self.tr_h, self.cache_path.cache_tr_h_path)
cache_data(self.hr_t_train, self.cache_path.cache_hr_t_train_path)
cache_data(self.tr_h_train, self.cache_path.cache_tr_h_train_path)
cache_data(self.hr_t_valid, self.cache_path.cache_hr_t_valid_path)
cache_data(self.tr_h_valid, self.cache_path.cache_tr_h_valid_path)
cache_data(self.hr_t_test, self.cache_path.cache_hr_t_test_path)
cache_data(self.tr_h_test, self.cache_path.cache_tr_h_test_path)
cache_data(self.relation_property, self.cache_path.cache_relation_property_path)
cache_data(self.meta(), self.cache_path.cache_metadata_path)
def load_cache(self, keys: List[str]):
for key in keys:
self.read_cache_data(key)
def read_cache_data(self, key):
"""Function to read the cached dataset from the memory"""
path = "cache_%s_path" % key
if hasattr(self, key) and hasattr(self.cache_path, path):
key_path = getattr(self.cache_path, path)
value = read_cache(key_path)
setattr(self, key, value)
return value
elif key == "meta":
meta = read_cache(self.cache_path.cache_metadata_path)
self.read_meta(meta)
else:
raise ValueError('Unknown cache data key %s' % key)
def read_meta(self, meta):
self.relation_count = meta["relation_count"]
self.entity_count = meta["entity_count"]
self.valid_triples_count = meta["valid_triples_count"]
self.test_triples_count = meta["test_triples_count"]
self.train_triples_count = meta["train_triples_count"]
self.triple_count = meta["triple_count"]
def meta(self) -> Dict[str, Any]:
return {
"relation_count": self.relation_count,
"entity_count": self.entity_count,
"valid_triples_count": self.valid_triples_count,
"test_triples_count": self.test_triples_count,
"train_triples_count": self.train_triples_count,
"triple_count": self.triple_count,
}
def dump(self) -> List[str]:
""" Function to dump statistic information of a dataset """
# dump key information
dump = [
"",
"-" * 15 + "Metadata Info for Dataset: " + self.dataset.name + "-" * (15 - len(self.dataset.name)),
"Total Training Triples :%s" % self.train_triples_count,
"Total Testing Triples :%s" % self.test_triples_count,
"Total validation Triples :%s" % self.valid_triples_count,
"Total Entities :%s" % self.entity_count,
"Total Relations :%s" % self.relation_count,
"-" * (30 + len("Metadata Info for Dataset: ")),
"",
]
return dump
# endregion
class DBP15kCachePath(RelationalTripletDatasetCachePath):
def __init__(self, cache_path: Path):
RelationalTripletDatasetCachePath.__init__(self, cache_path)
self.cache_entity_align_file = self.cache_path / "ref_ent_ids.txt"
self.cache_all_entity_file = self.cache_path / "ent_ids_all.txt"
self.cache_all_attr_file = self.cache_path / "att2id_all.txt"
self.cache_all_value_file = self.cache_path / "att_value2id_all.txt"
self.cache_all_triple_file = self.cache_path / "triples_struct_all.txt"
self.cache_all_triple_file_enhance = self.cache_path / "triples_struct_all_enhance.txt"
self.cache_kg1_entity_file = self.cache_path / "ent_ids_1.txt"
self.cache_kg2_entity_file = self.cache_path / "ent_ids_2.txt"
self.cache_seeds_path = self.cache_path / "seeds.pkl"
self.cache_train_seeds_path = self.cache_path / "train_seeds.pkl"
self.cache_test_seeds_path = self.cache_path / "test_seeds.pkl"
self.cache_seeds_ids_path = self.cache_path / "seeds_ids.pkl"
self.cache_train_seeds_ids_path = self.cache_path / "train_seeds_ids.pkl"
self.cache_test_seeds_ids_path = self.cache_path / "test_seeds_ids.pkl"
self.cache_left_ids_path = self.cache_path / "left_ids.pkl"
self.cache_right_ids_path = self.cache_path / "right_ids.pkl"
self.cache_kg1_triples_path = self.cache_path / "kg1_triples.pkl"
self.cache_kg2_triples_path = self.cache_path / "kg2_triples.pkl"
self.cache_kg1_triples_ids_path = self.cache_path / "kg1_triples_ids.pkl"
self.cache_kg2_triples_ids_path = self.cache_path / "kg2_triples_ids.pkl"
self.cache_all_attribute_triples_path = self.cache_path / "all_attribute_triples.pkl"
self.cache_kg1_attribute_triples_path = self.cache_path / "kg1_attribute_triples.pkl"
self.cache_kg2_attribute_triples_path = self.cache_path / "kg2_attribute_triples.pkl"
self.cache_all_attribute_triples_ids_path = self.cache_path / "all_attribute_triples_ids.pkl"
self.cache_kg1_attribute_triples_ids_path = self.cache_path / "kg1_attribute_triples_ids.pkl"
self.cache_kg2_attribute_triples_ids_path = self.cache_path / "kg2_attribute_triples_ids.pkl"
self.cache_all_attribute_names_path = self.cache_path / "all_attribute_names.pkl"
self.cache_all_attribute_values_path = self.cache_path / "all_attribute_values.pkl"
self.cache_kg1_entities_path = self.cache_path / "kg1_entities.pkl"
self.cache_kg2_entities_path = self.cache_path / "kg2_entities.pkl"
self.cache_kg1_entities_ids_path = self.cache_path / "kg1_entities_ids.pkl"
self.cache_kg2_entities_ids_path = self.cache_path / "kg2_entities_ids.pkl"
self.cache_kg1_relations_path = self.cache_path / "kg1_relations.pkl"
self.cache_kg2_relations_path = self.cache_path / "kg2_relations.pkl"
self.cache_kg1_relations_ids_path = self.cache_path / "kg1_relations_ids.pkl"
self.cache_kg2_relations_ids_path = self.cache_path / "kg2_relations_ids.pkl"
self.cache_kg1_attribute_names_path = self.cache_path / "kg1_attribute_names.pkl"
self.cache_kg2_attribute_names_path = self.cache_path / "kg2_attribute_names.pkl"
self.cache_kg1_attribute_names_ids_path = self.cache_path / "kg1_attribute_names_ids.pkl"
self.cache_kg2_attribute_names_ids_path = self.cache_path / "kg2_attribute_names_ids.pkl"
self.cache_attribute_names_ids_path = self.cache_path / "attribute_names_ids.pkl"
self.cache_kg1_attribute_values_path = self.cache_path / "kg1_attribute_values.pkl"
self.cache_kg2_attribute_values_path = self.cache_path / "kg2_attribute_values.pkl"
self.cache_kg1_attribute_values_ids_path = self.cache_path / "kg1_attribute_values_ids.pkl"
self.cache_kg2_attribute_values_ids_path = self.cache_path / "kg2_attribute_values_ids.pkl"
self.cache_attribute_values_ids_path = self.cache_path / "attribute_values_ids.pkl"
self.cache_attribute_name2idx_path = self.cache_path / "attribute_name2idx.pkl"
self.cache_idx2attribute_name_path = self.cache_path / "idx2attribute_name.pkl"
self.cache_attribute_value2idx_path = self.cache_path / "attribute_value2idx.pkl"
self.cache_idx2attribute_value_path = self.cache_path / "idx2attribute_value.pkl"
class DBP15kData(RelationalTripletData):
def __init__(self,
dataset: DBP15k,
cache_path: DBP15kCachePath,
train_seeds_percent=0.3):
RelationalTripletData.__init__(self, dataset, cache_path)
self.cache_path = cache_path
self.train_seeds_percent = train_seeds_percent
self.kg1_triples: List[Tuple[str, str, str]] = []
self.kg2_triples: List[Tuple[str, str, str]] = []
self.kg1_triples_ids: List[Tuple[int, int, int]] = []
self.kg2_triples_ids: List[Tuple[int, int, int]] = []
self.all_attribute_triples: List[Tuple[str, str, str]] = []
self.kg1_attribute_triples: List[Tuple[str, str, str]] = []
self.kg2_attribute_triples: List[Tuple[str, str, str]] = []
self.all_attribute_triples_ids: List[Tuple[int, int, int]] = []
self.kg1_attribute_triples_ids: List[Tuple[int, int, int]] = []
self.kg2_attribute_triples_ids: List[Tuple[int, int, int]] = []
self.all_attribute_names: List[str] = []
self.all_attribute_values: List[str] = []
self.all_attribute_names_ids: List[int] = []
self.all_attribute_values_ids: List[int] = []
self.kg1_entities: List[str] = []
self.kg2_entities: List[str] = []
self.kg1_entities_ids: List[int] = []
self.kg2_entities_ids: List[int] = []
self.entities_ids: List[int] = []
self.kg1_relations: List[str] = []
self.kg2_relations: List[str] = []
self.kg1_relations_ids: List[int] = []
self.kg2_relations_ids: List[int] = []
self.relations_ids: List[int] = []
self.kg1_attribute_names: List[str] = []
self.kg2_attribute_names: List[str] = []
self.kg1_attribute_names_ids: List[int] = []
self.kg2_attribute_names_ids: List[int] = []
self.attribute_names_ids: List[int] = []
self.kg1_attribute_values: List[str] = []
self.kg2_attribute_values: List[str] = []
self.kg1_attribute_values_ids: List[int] = []
self.kg2_attribute_values_ids: List[int] = []
self.attribute_values_ids: List[int] = []
self.attribute_name2idx: Dict[str, int] = {}
self.idx2attribute_name: Dict[int, str] = {}
self.attribute_value2idx: Dict[str, int] = {}
self.idx2attribute_value: Dict[int, str] = {}
        self.seeds: List[Tuple[str, str]] = []  # (m, 2) m aligned entity pairs (a, b); a is called the left entity, b the right entity
        self.train_seeds: List[Tuple[str, str]] = []  # (0.3m, 2)
        self.test_seeds: List[Tuple[str, str]] = []  # (0.7m, 2)
        self.seeds_ids: List[Tuple[int, int]] = []  # (m, 2) m aligned entity pairs (a, b); a is called the left entity, b the right entity
        self.train_seeds_ids: List[Tuple[int, int]] = []  # (0.3m, 2)
        self.test_seeds_ids: List[Tuple[int, int]] = []  # (0.7m, 2)
        self.left_ids: List[int] = []  # ids of the left entities of the aligned pairs in test_seeds
        self.right_ids: List[int] = []  # ids of the right entities of the aligned pairs in test_seeds
self.kg1_triples_count = 0
self.kg2_triples_count = 0
self.all_attribute_triples_count = 0
self.kg1_attribute_triples_count = 0
self.kg2_attribute_triples_count = 0
self.alignment_seeds_count = 0
self.valid_alignment_seeds_count = 0
self.test_alignment_seeds_count = 0
self.train_alignment_seeds_count = 0
self.kg1_entities_count = 0
self.kg2_entities_count = 0
self.kg1_relations_count = 0
self.kg2_relations_count = 0
self.all_attribute_names_count = 0
self.kg1_attribute_names_count = 0
self.kg2_attribute_names_count = 0
self.all_attribute_values_count = 0
self.kg1_attribute_values_count = 0
self.kg2_attribute_values_count = 0
def read_all_origin_data(self):
self.read_all_triplets()
self.read_attribute_triplets()
self.read_entity_align_list()
def read_all_triplets(self):
self.kg1_triples = read_triple_hrt(self.dataset.data_paths['kg1_relational_triples'])
self.kg2_triples = read_triple_hrt(self.dataset.data_paths['kg2_relational_triples'])
self.all_triples = self.kg1_triples + self.kg2_triples
self.train_triples = self.all_triples
self.test_triples = []
self.valid_triples = []
self.kg1_triples_count = len(self.kg1_triples)
self.kg2_triples_count = len(self.kg2_triples)
        self.triple_count = len(self.all_triples)
self.train_triples_count = len(self.train_triples)
self.test_triples_count = len(self.test_triples)
self.valid_triples_count = len(self.valid_triples)
def read_attribute_triplets(self):
self.kg1_attribute_triples = read_attribute_triple_eav(self.dataset.data_paths['kg1_attribute_triples'])
self.kg2_attribute_triples = read_attribute_triple_eav(self.dataset.data_paths['kg2_attribute_triples'])
self.all_attribute_triples = self.kg1_attribute_triples + self.kg2_attribute_triples
self.kg1_attribute_triples_count = len(self.kg1_attribute_triples)
self.kg2_attribute_triples_count = len(self.kg2_attribute_triples)
self.all_attribute_triples_count = len(self.all_attribute_triples)
def read_entity_align_list(self):
self.seeds = read_seeds(self.dataset.data_paths['seeds'])
train_max_idx = int(self.train_seeds_percent * len(self.seeds))
self.train_seeds = self.seeds[:train_max_idx]
self.test_seeds = self.seeds[train_max_idx:]
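        # For example, with the 15,000 aligned reference pairs that DBP15k
        # ships with and the default train_seeds_percent of 0.3, the first
        # 4,500 pairs become training seeds and the remaining 10,500 are held
        # out for testing.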
self.alignment_seeds_count = len(self.seeds)
self.valid_alignment_seeds_count = 0
self.test_alignment_seeds_count = len(self.test_seeds)
self.train_alignment_seeds_count = len(self.train_seeds)
def transform_all_data(self):
RelationalTripletData.transform_all_data(self)
self.transform_attribute_names_values()
self.transform_attribute_mappings()
self.transform_all_attribute_triplets_ids()
self.transform_attribute_name_ids()
self.transform_attribute_value_ids()
self.transform_entity_align_ids()
def transform_entities_relations(self):
entities: Set[str] = set()
kg1_entities: Set[str] = set()
kg2_entities: Set[str] = set()
relations: Set[str] = set()
kg1_relations: Set[str] = set()
kg2_relations: Set[str] = set()
print("kg1_entities_relations")
# bar = Progbar(len(self.kg1_triples))
# i = 0
for h, r, t in self.kg1_triples:
kg1_entities.add(h)
kg1_entities.add(t)
kg1_relations.add(r)
# i += 1
# bar.update(i, [("h", h.split("/")[-1]), ("r", r.split("/")[-1]), ("t", t.split("/")[-1])])
print("kg2_entities_relations")
# bar = Progbar(len(self.kg2_triples))
# i = 0
for h, r, t in self.kg2_triples:
kg2_entities.add(h)
kg2_entities.add(t)
kg2_relations.add(r)
# i += 1
# bar.update(i, [("h", h.split("/")[-1]), ("r", r.split("/")[-1]), ("t", t.split("/")[-1])])
entities = kg2_entities.union(kg1_entities)
relations = kg2_relations.union(kg1_relations)
self.all_entities = sorted(list(entities))
self.kg1_entities = sorted(list(kg1_entities))
self.kg2_entities = sorted(list(kg2_entities))
self.all_relations = sorted(list(relations))
self.kg1_relations = sorted(list(kg1_relations))
self.kg2_relations = sorted(list(kg2_relations))
self.entity_count = len(self.all_entities)
self.kg1_entities_count = len(self.kg1_entities)
self.kg2_entities_count = len(self.kg2_entities)
self.relation_count = len(self.all_relations)
self.kg1_relations_count = len(self.kg1_relations)
self.kg2_relations_count = len(self.kg2_relations)
def transform_attribute_names_values(self):
attribute_names: Set[str] = set()
kg1_attribute_names: Set[str] = set()
kg2_attribute_names: Set[str] = set()
attribute_values: Set[str] = set()
kg1_attribute_values: Set[str] = set()
kg2_attribute_values: Set[str] = set()
print("kg1_attribute_names_values")
# bar = Progbar(len(self.kg1_attribute_triples))
# i = 0
for e, a, v in self.kg1_attribute_triples:
kg1_attribute_names.add(a)
kg1_attribute_values.add(v)
# i += 1
# bar.update(i, [("name", a.split("/")[-1]), ("value", v)])
print("kg2_attribute_names_values")
# bar = Progbar(len(self.kg2_attribute_triples))
# i = 0
for e, a, v in self.kg2_attribute_triples:
kg2_attribute_names.add(a)
kg2_attribute_values.add(v)
# i += 1
# bar.update(i, [("name", a.split("/")[-1]), ("value", v)])
attribute_names = kg1_attribute_names.union(kg2_attribute_names)
attribute_values = kg1_attribute_values.union(kg2_attribute_values)
self.all_attribute_names = sorted(list(attribute_names))
self.kg1_attribute_names = sorted(list(kg1_attribute_names))
self.kg2_attribute_names = sorted(list(kg2_attribute_names))
self.all_attribute_names_count = len(self.all_attribute_names)
self.kg1_attribute_names_count = len(self.kg1_attribute_names)
self.kg2_attribute_names_count = len(self.kg2_attribute_names)
self.all_attribute_values = sorted(list(attribute_values))
self.kg1_attribute_values = sorted(list(kg1_attribute_values))
self.kg2_attribute_values = sorted(list(kg2_attribute_values))
self.all_attribute_values_count = len(self.all_attribute_values)
self.kg1_attribute_values_count = len(self.kg1_attribute_values)
self.kg2_attribute_values_count = len(self.kg2_attribute_values)
def transform_attribute_mappings(self):
""" Function to generate the mapping from string name to integer ids. """
self.idx2attribute_name = {k: v for k, v in enumerate(self.all_attribute_names)}
self.attribute_name2idx = {v: k for k, v in self.idx2attribute_name.items()}
self.idx2attribute_value = {k: v for k, v in enumerate(self.all_attribute_values)}
self.attribute_value2idx = {v: k for k, v in self.idx2attribute_value.items()}
def transform_all_triplets_ids(self):
entity2idx = self.entity2idx
relation2idx = self.relation2idx
print("kg1_triples_ids")
# bar = Progbar(len(self.kg1_triples))
# i = 0
for h, r, t in self.kg1_triples:
self.kg1_triples_ids.append((entity2idx[h], relation2idx[r], entity2idx[t]))
# i += 1
# bar.update(i, [("h", h.split("/")[-1]), ("r", r.split("/")[-1]), ("t", t.split("/")[-1])])
print("kg2_triples_ids")
# bar = Progbar(len(self.kg2_triples))
# i = 0
for h, r, t in self.kg2_triples:
self.kg2_triples_ids.append((entity2idx[h], relation2idx[r], entity2idx[t]))
# i += 1
# bar.update(i, [("h", h.split("/")[-1]), ("r", r.split("/")[-1]), ("t", t.split("/")[-1])])
self.all_triples_ids = self.kg1_triples_ids + self.kg2_triples_ids
self.train_triples_ids = self.all_triples_ids
self.test_triples_ids = []
self.valid_triples_ids = []
def transform_entity_align_ids(self):
entity2idx = self.entity2idx
for left_entity, right_entity in self.seeds:
self.seeds_ids.append((entity2idx[left_entity], entity2idx[right_entity]))
train_max_idx = int(self.train_seeds_percent * len(self.seeds))
self.train_seeds_ids = self.seeds_ids[:train_max_idx]
self.test_seeds_ids = self.seeds_ids[train_max_idx:]
self.left_ids = []
self.right_ids = []
for left_entity, right_entity in self.test_seeds_ids:
            self.left_ids.append(left_entity)  # left entity of the aligned pair
            self.right_ids.append(right_entity)  # right entity of the aligned pair
def transform_all_attribute_triplets_ids(self):
entity2idx = self.entity2idx
attribute_name2idx = self.attribute_name2idx
attribute_value2idx = self.attribute_value2idx
print("kg1_attribute_triples_ids")
# bar = Progbar(len(self.kg1_attribute_triples))
# i = 0
for e, a, v in self.kg1_attribute_triples:
self.kg1_attribute_triples_ids.append((entity2idx[e], attribute_name2idx[a], attribute_value2idx[v]))
# i += 1
# bar.update(i, [("e", e.split("/")[-1]), ("a", a.split("/")[-1]), ("v", v)])
print("kg2_attribute_triples_ids")
# bar = Progbar(len(self.kg2_attribute_triples))
# i = 0
for e, a, v in self.kg2_attribute_triples:
self.kg2_attribute_triples_ids.append((entity2idx[e], attribute_name2idx[a], attribute_value2idx[v]))
# i += 1
# bar.update(i, [("e", e.split("/")[-1]), ("a", a.split("/")[-1]), ("v", v)])
self.all_attribute_triples_ids = self.kg1_attribute_triples_ids + self.kg2_attribute_triples_ids
def transform_entity_ids(self):
entity2idx = self.entity2idx
print("kg1_entities_ids")
# bar = Progbar(len(self.kg1_entities))
# i = 0
for e in self.kg1_entities:
self.kg1_entities_ids.append(entity2idx[e])
# i += 1
# bar.update(i, [("entity", e.split("/")[-1])])
print("kg2_entities_ids")
# bar = Progbar(len(self.kg2_entities))
# i = 0
for e in self.kg2_entities:
self.kg2_entities_ids.append(entity2idx[e])
# i += 1
# bar.update(i, [("entity", e.split("/")[-1])])
self.entities_ids = self.kg1_entities_ids + self.kg2_entities_ids
def transform_relation_ids(self):
relation2idx = self.relation2idx
print("kg1_relations_ids")
# bar = Progbar(len(self.kg1_relations))
# i = 0
for r in self.kg1_relations:
self.kg1_relations_ids.append(relation2idx[r])
# i += 1
# bar.update(i, [("relation", r.split("/")[-1])])
print("kg2_relations_ids")
# bar = Progbar(len(self.kg2_relations))
# i = 0
for r in self.kg2_relations:
self.kg2_relations_ids.append(relation2idx[r])
# i += 1
# bar.update(i, [("relation", r.split("/")[-1])])
self.relations_ids = self.kg1_relations_ids + self.kg2_relations_ids
def transform_attribute_name_ids(self):
attribute_name2idx = self.attribute_name2idx
print("kg1_attribute_names_ids")
# bar = Progbar(len(self.kg1_attribute_names))
# i = 0
for r in self.kg1_attribute_names:
self.kg1_attribute_names_ids.append(attribute_name2idx[r])
# i += 1
# bar.update(i, [("attribute_names", r.split("/")[-1])])
print("kg2_attribute_names_ids")
# bar = Progbar(len(self.kg2_attribute_names))
# i = 0
for r in self.kg2_attribute_names:
self.kg2_attribute_names_ids.append(attribute_name2idx[r])
# i += 1
# bar.update(i, [("attribute_names", r.split("/")[-1])])
self.attribute_names_ids = self.kg1_attribute_names_ids + self.kg2_attribute_names_ids
def transform_attribute_value_ids(self):
attribute_value2idx = self.attribute_value2idx
print("kg1_attribute_values_ids")
# bar = Progbar(len(self.kg1_attribute_values))
# i = 0
for r in self.kg1_attribute_values:
self.kg1_attribute_values_ids.append(attribute_value2idx[r])
# i += 1
# bar.update(i, [("attribute_value", r)])
print("kg2_attribute_values_ids")
# bar = Progbar(len(self.kg2_attribute_values))
# i = 0
for r in self.kg2_attribute_values:
self.kg2_attribute_values_ids.append(attribute_value2idx[r])
# i += 1
# bar.update(i, [("attribute_value", r)])
self.attribute_values_ids = self.kg1_attribute_values_ids + self.kg2_attribute_values_ids
def cache_all_data(self):
cache_data(self.kg1_triples, self.cache_path.cache_kg1_triples_path)
cache_data(self.kg2_triples, self.cache_path.cache_kg2_triples_path)
cache_data(self.kg1_triples_ids, self.cache_path.cache_kg1_triples_ids_path)
cache_data(self.kg2_triples_ids, self.cache_path.cache_kg2_triples_ids_path)
cache_data(self.all_attribute_triples, self.cache_path.cache_all_attribute_triples_path)
cache_data(self.kg1_attribute_triples, self.cache_path.cache_kg1_attribute_triples_path)
cache_data(self.kg2_attribute_triples, self.cache_path.cache_kg2_attribute_triples_path)
cache_data(self.all_attribute_triples_ids, self.cache_path.cache_all_attribute_triples_ids_path)
cache_data(self.kg1_attribute_triples_ids, self.cache_path.cache_kg1_attribute_triples_ids_path)
cache_data(self.kg2_attribute_triples_ids, self.cache_path.cache_kg2_attribute_triples_ids_path)
cache_data(self.all_attribute_names, self.cache_path.cache_all_attribute_names_path)
cache_data(self.all_attribute_values, self.cache_path.cache_all_attribute_values_path)
cache_data(self.kg1_entities, self.cache_path.cache_kg1_entities_path)
cache_data(self.kg2_entities, self.cache_path.cache_kg2_entities_path)
cache_data(self.kg1_entities_ids, self.cache_path.cache_kg1_entities_ids_path)
cache_data(self.kg2_entities_ids, self.cache_path.cache_kg2_entities_ids_path)
cache_data(self.entities_ids, self.cache_path.cache_entities_ids_path)
cache_data(self.kg1_relations, self.cache_path.cache_kg1_relations_path)
cache_data(self.kg2_relations, self.cache_path.cache_kg2_relations_path)
cache_data(self.kg1_relations_ids, self.cache_path.cache_kg1_relations_ids_path)
cache_data(self.kg2_relations_ids, self.cache_path.cache_kg2_relations_ids_path)
cache_data(self.relations_ids, self.cache_path.cache_relations_ids_path)
cache_data(self.kg1_attribute_names, self.cache_path.cache_kg1_attribute_names_path)
cache_data(self.kg2_attribute_names, self.cache_path.cache_kg2_attribute_names_path)
cache_data(self.kg1_attribute_names_ids, self.cache_path.cache_kg1_attribute_names_ids_path)
cache_data(self.kg2_attribute_names_ids, self.cache_path.cache_kg2_attribute_names_ids_path)
cache_data(self.attribute_names_ids, self.cache_path.cache_attribute_names_ids_path)
cache_data(self.kg1_attribute_values, self.cache_path.cache_kg1_attribute_values_path)
cache_data(self.kg2_attribute_values, self.cache_path.cache_kg2_attribute_values_path)
cache_data(self.kg1_attribute_values_ids, self.cache_path.cache_kg1_attribute_values_ids_path)
cache_data(self.kg2_attribute_values_ids, self.cache_path.cache_kg2_attribute_values_ids_path)
cache_data(self.attribute_values_ids, self.cache_path.cache_attribute_values_ids_path)
cache_data(self.attribute_name2idx, self.cache_path.cache_attribute_name2idx_path)
cache_data(self.idx2attribute_name, self.cache_path.cache_idx2attribute_name_path)
cache_data(self.attribute_value2idx, self.cache_path.cache_attribute_value2idx_path)
cache_data(self.idx2attribute_value, self.cache_path.cache_idx2attribute_value_path)
cache_data(self.seeds, self.cache_path.cache_seeds_path)
cache_data(self.train_seeds, self.cache_path.cache_train_seeds_path)
cache_data(self.test_seeds, self.cache_path.cache_test_seeds_path)
cache_data(self.seeds_ids, self.cache_path.cache_seeds_ids_path)
cache_data(self.train_seeds_ids, self.cache_path.cache_train_seeds_ids_path)
cache_data(self.test_seeds_ids, self.cache_path.cache_test_seeds_ids_path)
cache_data(self.left_ids, self.cache_path.cache_left_ids_path)
cache_data(self.right_ids, self.cache_path.cache_right_ids_path)
RelationalTripletData.cache_all_data(self)
def read_meta(self, meta):
self.entity_count = meta["entity_count"]
self.relation_count = meta["relation_count"]
self.triple_count = meta["triple_count"]
self.train_triples_count = meta["train_triples_count"]
self.test_triples_count = meta["test_triples_count"]
self.valid_triples_count = meta["valid_triples_count"]
self.kg1_triples_count = meta["kg1_triples_count"]
self.kg2_triples_count = meta["kg2_triples_count"]
self.all_attribute_triples_count = meta["all_attribute_triples_count"]
self.kg1_attribute_triples_count = meta["kg1_attribute_triples_count"]
self.kg2_attribute_triples_count = meta["kg2_attribute_triples_count"]
self.alignment_seeds_count = meta["alignment_seeds_count"]
self.valid_alignment_seeds_count = meta["valid_alignment_seeds_count"]
self.test_alignment_seeds_count = meta["test_alignment_seeds_count"]
self.train_alignment_seeds_count = meta["train_alignment_seeds_count"]
self.all_attribute_names_count = meta["all_attribute_names_count"]
self.all_attribute_values_count = meta["all_attribute_values_count"]
self.kg1_entities_count = meta["kg1_entities_count"]
self.kg2_entities_count = meta["kg2_entities_count"]
self.kg1_relations_count = meta["kg1_relations_count"]
self.kg2_relations_count = meta["kg2_relations_count"]
self.kg1_attribute_names_count = meta["kg1_attribute_names_count"]
self.kg2_attribute_names_count = meta["kg2_attribute_names_count"]
self.kg1_attribute_values_count = meta["kg1_attribute_values_count"]
self.kg2_attribute_values_count = meta["kg2_attribute_values_count"]
def meta(self) -> Dict[str, Any]:
return {
"dataset": self.dataset.name,
"entity_count": self.entity_count,
"relation_count": self.relation_count,
"triple_count": self.triple_count,
"train_triples_count": self.train_triples_count,
"test_triples_count": self.test_triples_count,
"valid_triples_count": self.valid_triples_count,
"kg1_triples_count": self.kg1_triples_count,
"kg2_triples_count": self.kg2_triples_count,
"all_attribute_triples_count": self.all_attribute_triples_count,
"kg1_attribute_triples_count": self.kg1_attribute_triples_count,
"kg2_attribute_triples_count": self.kg2_attribute_triples_count,
"alignment_seeds_count": self.alignment_seeds_count,
"valid_alignment_seeds_count": self.valid_alignment_seeds_count,
"test_alignment_seeds_count": self.test_alignment_seeds_count,
"train_alignment_seeds_count": self.train_alignment_seeds_count,
"all_attribute_names_count": self.all_attribute_names_count,
"all_attribute_values_count": self.all_attribute_values_count,
"kg1_entities_count": self.kg1_entities_count,
"kg2_entities_count": self.kg2_entities_count,
"kg1_relations_count": self.kg1_relations_count,
"kg2_relations_count": self.kg2_relations_count,
"kg1_attribute_names_count": self.kg1_attribute_names_count,
"kg2_attribute_names_count": self.kg2_attribute_names_count,
"kg1_attribute_values_count": self.kg1_attribute_values_count,
"kg2_attribute_values_count": self.kg2_attribute_values_count,
}
def dump(self) -> List[str]:
dump = [
"",
"-" * 15 + "Metadata Info for Dataset: " + self.dataset.name + "-" * (15 - len(self.dataset.name)),
"Total Entities :%s" % self.entity_count,
"Total Relations :%s" % self.relation_count,
"Total Attribute Names :%s" % self.all_attribute_names_count,
"Total Attribute Values :%s" % self.all_attribute_values_count,
"Total Triples :%s" % self.triple_count,
"Total Training Triples :%s" % self.train_triples_count,
"Total Testing Triples :%s" % self.test_triples_count,
"Total Validation Triples :%s" % self.valid_triples_count,
"Total Attribute Triples :%s" % self.all_attribute_triples_count,
"Total Alignment Seeds :%s" % self.alignment_seeds_count,
"Total Validation Alignment Seeds :%s" % self.valid_alignment_seeds_count,
"Total Testing Alignment Seeds :%s" % self.test_alignment_seeds_count,
"Total Training Alignment Seeds :%s" % self.train_alignment_seeds_count,
"KG1",
"triples :%d" % self.kg1_triples_count,
"attribute triples :%d" % self.kg1_attribute_triples_count,
"entities :%d" % self.kg1_entities_count,
"relations :%d" % self.kg1_relations_count,
"attribute_names :%d" % self.kg1_attribute_names_count,
"attribute_values :%d" % self.kg1_attribute_values_count,
"KG2",
"triples :%d" % self.kg2_triples_count,
"attribute triples :%d" % self.kg2_attribute_triples_count,
"entities :%d" % self.kg2_entities_count,
"relations :%d" % self.kg2_relations_count,
"attribute_names :%d" % self.kg2_attribute_names_count,
"attribute_values :%d" % self.kg2_attribute_values_count,
"-" * (30 + len("Metadata Info for Dataset: ")),
"",
]
return dump
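# A minimal usage sketch (not part of the original module), assuming a DBP15k
# dataset instance and a cache directory have already been constructed
# elsewhere in this package; the argument names below are placeholders.
def _example_preprocess_dbp15k(dataset: DBP15k, cache_dir: Path) -> DBP15kData:
    data = DBP15kData(dataset, DBP15kCachePath(cache_dir), train_seeds_percent=0.3)
    data.read_all_origin_data()  # read relational/attribute triples and alignment seeds
    data.transform_all_data()    # build id mappings and id-based triples/seeds
    data.cache_all_data()        # persist everything under the cache path
    return data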
|
py | b40b0779d96acb7ba7ace8fa11e6ece5156d6036 | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for GCP."""
from datetime import datetime
import json
import sys
from typing import TYPE_CHECKING
from google.auth import default
# pylint: disable=line-too-long
from libcloudforensics.providers.gcp.internal import compute as gcp_compute
from libcloudforensics.providers.gcp.internal import log as gcp_log
from libcloudforensics.providers.gcp.internal import monitoring as gcp_monitoring
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import storage as gcp_storage
from libcloudforensics.providers.gcp.internal import storagetransfer as gcp_st
from libcloudforensics.providers.gcp.internal import cloudsql as gcp_cloudsql
from libcloudforensics.providers.gcp import forensics
from libcloudforensics import logging_utils
# pylint: enable=line-too-long
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import argparse
def ListInstances(args: 'argparse.Namespace') -> None:
"""List GCE instances in GCP project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
project = gcp_project.GoogleCloudProject(args.project)
instances = project.compute.ListInstances()
logger.info('Instances found:')
for instance in instances:
bootdisk = instances[instance].GetBootDisk()
if bootdisk:
logger.info('Name: {0:s}, Bootdisk: {1:s}'.format(
instance, bootdisk.name))
def ListDisks(args: 'argparse.Namespace') -> None:
"""List GCE disks in GCP project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
project = gcp_project.GoogleCloudProject(args.project)
disks = project.compute.ListDisks()
logger.info('Disks found:')
for disk in disks:
logger.info('Name: {0:s}, Zone: {1:s}'.format(disk, disks[disk].zone))
def CreateDiskCopy(args: 'argparse.Namespace') -> None:
"""Copy GCE disks to other GCP project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
disk = forensics.CreateDiskCopy(args.project,
args.dst_project,
args.zone,
instance_name=args.instance_name,
disk_name=args.disk_name,
disk_type=args.disk_type)
logger.info('Disk copy completed.')
logger.info('Name: {0:s}'.format(disk.name))
def DeleteInstance(args: 'argparse.Namespace') -> None:
"""Deletes a GCE instance.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
compute_client = gcp_compute.GoogleCloudCompute(args.project)
instance = compute_client.GetInstance(instance_name=args.instance_name)
instance.Delete(delete_disks=args.delete_all_disks,
force_delete=args.force_delete)
print('Instance deleted.')
def ListLogs(args: 'argparse.Namespace') -> None:
"""List GCP logs for a project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
logs = gcp_log.GoogleCloudLog(args.project.split(','))
results = logs.ListLogs()
logger.info('Found {0:d} available log types:'.format(len(results)))
for line in results:
logger.info(line)
def QueryLogs(args: 'argparse.Namespace') -> None:
"""Query GCP logs.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
ValueError: If the start or end date is not properly formatted.
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
logs = gcp_log.GoogleCloudLog(args.project.split(','))
try:
if args.start:
datetime.strptime(args.start, '%Y-%m-%dT%H:%M:%SZ')
if args.end:
datetime.strptime(args.end, '%Y-%m-%dT%H:%M:%SZ')
except ValueError as error:
sys.exit(str(error))
qfilter = ''
if args.start:
qfilter += 'timestamp>="{0:s}" '.format(args.start)
if args.start and args.end:
qfilter += 'AND '
if args.end:
qfilter += 'timestamp<="{0:s}" '.format(args.end)
if args.filter and (args.start or args.end):
qfilter += 'AND '
qfilter += args.filter
elif args.filter:
qfilter += args.filter
results = logs.ExecuteQuery(qfilter.split(',') if qfilter else None)
logger.info('Found {0:d} log entries:'.format(len(results)))
for line in results:
logger.info(json.dumps(line))
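# Illustrative example of the filter assembled above (values are made up): with
# --start 2020-01-01T00:00:00Z, --end 2020-01-02T00:00:00Z and
# --filter 'resource.type="gce_instance"', qfilter becomes
# 'timestamp>="2020-01-01T00:00:00Z" AND timestamp<="2020-01-02T00:00:00Z" AND resource.type="gce_instance"'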
def CreateDiskFromGCSImage(args: 'argparse.Namespace') -> None:
"""Creates GCE persistent disk from image in GCS.
Please refer to doc string of forensics.CreateDiskFromGCSImage
function for more details on how the image is created.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
result = forensics.CreateDiskFromGCSImage(
args.project, args.gcs_path, args.zone, name=args.disk_name)
logger.info('Disk creation completed.')
logger.info('Project ID: {0:s}'.format(result['project_id']))
logger.info('Disk name: {0:s}'.format(result['disk_name']))
logger.info('Zone: {0:s}'.format(result['zone']))
logger.info('size in bytes: {0:s}'.format(result['bytes_count']))
logger.info('MD5 hash of source image in hex: {0:s}'.format(
result['md5Hash']))
def StartAnalysisVm(args: 'argparse.Namespace') -> None:
"""Start forensic analysis VM.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
attach_disks = []
if args.attach_disks:
attach_disks = args.attach_disks.split(',')
# Check if attach_disks parameter exists and if there
# are any empty entries.
if not (attach_disks and all(elements for elements in attach_disks)):
logger.error('parameter --attach_disks: {0:s}'.format(
args.attach_disks))
return
logger.info('Starting analysis VM...')
vm = forensics.StartAnalysisVm(args.project,
args.instance_name,
args.zone,
int(args.disk_size),
args.disk_type,
int(args.cpu_cores),
attach_disks=attach_disks)
logger.info('Analysis VM started.')
logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1])))
def ListServices(args: 'argparse.Namespace') -> None:
"""List active GCP services (APIs) for a project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
apis = gcp_monitoring.GoogleCloudMonitoring(args.project)
results = apis.ActiveServices()
logger.info('Found {0:d} APIs:'.format(len(results)))
sorted_apis = sorted(results.items(), key=lambda x: x[1], reverse=True)
for apiname, usage in sorted_apis:
logger.info('{0:s}: {1:d}'.format(apiname, usage))
def GetBucketACLs(args: 'argparse.Namespace') -> None:
"""Retrieve the Access Controls for a GCS bucket.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
bucket_acls = gcs.GetBucketACLs(args.path)
for role in bucket_acls:
logger.info('{0:s}: {1:s}'.format(role, ', '.join(bucket_acls[role])))
def GetGCSObjectMetadata(args: 'argparse.Namespace') -> None:
"""List the details of an object in a GCS bucket.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
results = gcs.GetObjectMetadata(args.path)
if results.get('kind') == 'storage#objects':
for item in results.get('items', []):
for key, value in item.items():
logger.info('{0:s}: {1:s}'.format(key, value))
logger.info('---------')
else:
for key, value in results.items():
logger.info('{0:s}: {1:s}'.format(key, value))
def ListBuckets(args: 'argparse.Namespace') -> None:
"""List the buckets in a GCP project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
results = gcs.ListBuckets()
for obj in results:
logger.info('{0:s} : {1:s}'.format(
obj.get('id', 'ID not found'), obj.get('selfLink', 'No link')))
def CreateBucket(args: 'argparse.Namespace') -> None:
"""Create a bucket in a GCP project.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
result = gcs.CreateBucket(args.name, labels={'created_by': 'cfu'})
logger.info(
'{0:s} : {1:s}'.format(
result.get('id', 'ID not found'), result.get('selfLink', 'No link')))
def ListBucketObjects(args: 'argparse.Namespace') -> None:
"""List the objects in a GCS bucket.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
results = gcs.ListBucketObjects(args.path)
for obj in results:
logger.info('{0:s} {1:s}b [{2:s}]'.format(
obj.get('id', 'ID not found'), obj.get('size', 'Unknown size'),
obj.get('contentType', 'Unknown Content-Type')))
def GetBucketSize(args: 'argparse.Namespace') -> None:
"""Get the size of a GCS bucket.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
results = gcs.GetBucketSize(args.path)
for bucket_name, bucket_size in results.items():
logger.info('{0:s}: {1:d}b'.format(bucket_name, bucket_size))
def ListCloudSqlInstances(args: 'argparse.Namespace') -> None:
"""List the CloudSQL instances of a Project.
Args:
    args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcsql = gcp_cloudsql.GoogleCloudSQL(args.project)
results = gcsql.ListCloudSQLInstances()
for obj in results:
logger.info('{0:s} {1:s} [{2:s}]'.format(
obj.get('instanceType', 'type not found'),
obj.get('name', 'name not known'),
obj.get('state', 'state not known')))
def DeleteObject(args: 'argparse.Namespace') -> None:
"""Deletes an object in GCS.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
gcs = gcp_storage.GoogleCloudStorage(args.project)
gcs.DeleteObject(args.path)
print('Object deleted.')
def InstanceNetworkQuarantine(args: 'argparse.Namespace') -> None:
"""Put a Google Cloud instance in network quarantine.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
exempted_ips = []
if args.exempted_src_ips:
exempted_ips = args.exempted_src_ips.split(',')
# Check if exempted_src_ips argument exists and if there
# are any empty entries.
if not (exempted_ips and all(exempted_ips)):
logger.error('parameter --exempted_src_ips: {0:s}'.format(
args.exempted_src_ips))
return
forensics.InstanceNetworkQuarantine(args.project,
args.instance_name, exempted_ips, args.enable_logging )
def VMRemoveServiceAccount(args: 'argparse.Namespace') -> None:
"""Removes an attached service account from a VM instance.
Requires the instance to be stopped, if it isn't already.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
AssignProjectID(args)
forensics.VMRemoveServiceAccount(args.project, args.instance_name,
args.leave_stopped)
def AssignProjectID(args: 'argparse.Namespace') -> None:
"""Configures the project_id to be used by the tool.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
AttributeError: If no project_id was provided and none was inferred
from the gcloud environment.
"""
if not args.project:
_, project_id = default()
if project_id:
args.project = project_id
else:
raise AttributeError(
"No project_id was found. Either pass a --project=project_id"
" to the CLI (`cloudforensics gcp --project=project_id`), or set "
"one in your gcloud SDK: `gcloud config set project project_id`")
def S3ToGCS(args: 'argparse.Namespace') -> None:
"""Transfer a file from S3 to a GCS bucket.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
gcst = gcp_st.GoogleCloudStorageTransfer(args.project)
gcst.S3ToGCS(args.s3_path, args.zone, args.gcs_path)
logger.info('File successfully transferred.')
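# A minimal sketch (not part of the original tool) showing how one of the
# handlers above can be driven without the CLI wrapper. The project id is a
# placeholder assumption and application-default credentials are assumed to be
# configured.
def _example_list_instances():  # illustrative only
  import argparse
  args = argparse.Namespace(project='my-forensics-project')
  ListInstances(args)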
|
py | b40b08b60a9509dd75c249159e2a55db1f808100 | # coding: utf-8
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
OpenAPI spec version: 1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BulkPriceQuantityResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'responses': 'list[PriceQuantityResponse]'
}
attribute_map = {
'responses': 'responses'
}
def __init__(self, responses=None): # noqa: E501
"""BulkPriceQuantityResponse - a model defined in Swagger""" # noqa: E501
self._responses = None
self.discriminator = None
if responses is not None:
self.responses = responses
@property
def responses(self):
"""Gets the responses of this BulkPriceQuantityResponse. # noqa: E501
This container will return an HTTP status code, offer ID, and SKU value for each offer/inventory item being updated, as well as an <strong>errors</strong> and/or <strong>warnings</strong> container if any errors or warnings are triggered while trying to update those offers/inventory items. # noqa: E501
:return: The responses of this BulkPriceQuantityResponse. # noqa: E501
:rtype: list[PriceQuantityResponse]
"""
return self._responses
@responses.setter
def responses(self, responses):
"""Sets the responses of this BulkPriceQuantityResponse.
This container will return an HTTP status code, offer ID, and SKU value for each offer/inventory item being updated, as well as an <strong>errors</strong> and/or <strong>warnings</strong> container if any errors or warnings are triggered while trying to update those offers/inventory items. # noqa: E501
:param responses: The responses of this BulkPriceQuantityResponse. # noqa: E501
:type: list[PriceQuantityResponse]
"""
self._responses = responses
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BulkPriceQuantityResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BulkPriceQuantityResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b40b0a34434c16543ab1aa14f37444ce744c3275 | #!/usr/bin/env python3
# Copyright 2021 PickNik LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the PickNik LLC nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import subprocess
import sys
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='List packages that have changed files since point in git history.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'path',
help='<path> is the root of a git repo containing ros packages')
parser.add_argument(
'point',
help='<point> is a git branch, tag, or commit')
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--names-only', '-n',
action='store_true',
default=False,
help='Output only the name of each package but not the path')
group.add_argument(
'--paths-only', '-p',
action='store_true',
default=False,
help='Output only the path of each package but not the name')
args = parser.parse_args(argv)
if not os.path.isdir(os.path.join(args.path, '.git')):
print("'%s' is not the base of a git repo" % args.path, file=sys.stderr)
return 1
packages = get_packages_changed_since(args.path, args.point)
lines = []
for package in packages:
if args.names_only:
lines.append(package['name'])
elif args.paths_only:
lines.append(package['path'])
else:
lines.append('%s\t%s\t%s' % (package['name'], package['path'], package['type']))
lines.sort()
for line in lines:
print(line)
return 0
def find_executable(file_names):
paths = os.getenv('PATH').split(os.path.pathsep)
for file_name in file_names:
for path in paths:
file_path = os.path.join(path, file_name)
if os.path.isfile(file_path) and os.access(file_path, os.X_OK):
return file_path
return None
def get_packages_in_repo(repo_path):
bin_names = [
'colcon',
]
colcon_bin = find_executable(bin_names)
if not colcon_bin:
print('Could not find %s executable' %
' / '.join(["'%s'" % n for n in bin_names]), file=sys.stderr)
return 1
cmd = [colcon_bin, 'list', '--base-paths', repo_path]
output = []
try:
output = subprocess.check_output(cmd).strip().decode().split()
except subprocess.CalledProcessError as e:
print('The invocation of "%s" failed with error code %d: %s' %
(os.path.basename(colcon_bin), e.returncode, e),
file=sys.stderr)
return [
{
'name': output[x],
'path': output[x+1],
'type': output[x+2]
}
for x in range(0, len(output), 3)
]
def get_packages_changed_since(repo_path, point):
packages = get_packages_in_repo(repo_path)
bin_names = [
'git',
]
git_bin = find_executable(bin_names)
if not git_bin:
print('Could not find %s executable' %
' / '.join(["'%s'" % n for n in bin_names]), file=sys.stderr)
return 1
def modified_files_test(package):
cmd = [
'git', 'diff', '--name-only', '--diff-filter=MA',
point + '..HEAD', os.path.relpath(package['path'], repo_path)
]
modified_files = []
try:
modified_files = subprocess.check_output(cmd, cwd=repo_path).strip().decode().split()
except subprocess.CalledProcessError as e:
print('The invocation of "%s" failed with error code %d: %s' %
(os.path.basename(git_bin), e.returncode, ' '.join(cmd)),
file=sys.stderr)
return False
return (len(modified_files) > 0)
filtered_packages = list(filter(modified_files_test, packages))
return filtered_packages
if __name__ == '__main__':
sys.exit(main())
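# Example invocation (the script name, repo path and branch are illustrative):
#   python3 list_changed_packages.py ~/ws/src/my_repo main --names-only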
|
py | b40b0a72c5f907e4c9c7eeac8da45ff2c9075e7b | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_test_45.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | b40b0a7de37735174b6006afd272eb515773926c | from __future__ import absolute_import
import os
import sys
import click
import json
from .io import error
from .config import read_global_config, global_config_path, set_global_config_path # noqa, ditto
from .galaxy import get_galaxy_instance, get_toolshed_instance
from parsec import __version__ # noqa, ditto
CONTEXT_SETTINGS = dict(auto_envvar_prefix='PARSEC', help_option_names=['-h', '--help'])
class Context(object):
def __init__(self):
self.verbose = False
self.home = os.getcwd()
self._global_config = None
@property
def global_config(self):
if self._global_config is None:
self._global_config = read_global_config()
return self._global_config
def log(self, msg, *args):
"""Logs a message to stderr."""
if args:
msg %= args
click.echo(msg, file=sys.stderr)
def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args)
def exit(self, exit_code):
self.vlog("Exiting parsec with exit code [%d]" % exit_code)
sys.exit(exit_code)
pass_context = click.make_pass_decorator(Context, ensure=True)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'commands'))
def list_cmds():
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and \
filename.startswith('cmd_'):
rv.append(filename[len("cmd_"):-len(".py")])
rv.sort()
return rv
def list_subcmds(parent):
rv = []
for filename in os.listdir(os.path.join(cmd_folder, parent)):
if filename.endswith('.py') and \
not filename.startswith('__'):
rv.append(filename[:-len(".py")])
rv.sort()
return rv
def name_to_command(parent, name):
try:
if sys.version_info[0] == 2:
if parent:
parent = parent.encode('ascii', 'replace')
name = name.encode('ascii', 'replace')
if parent:
mod_name = 'parsec.commands.%s.%s' % (parent, name)
else:
mod_name = 'parsec.commands.cmd_' + name
mod = __import__(mod_name, None, None, ['cli'])
except ImportError as e:
error("Problem loading command %s, exception %s" % (name, e))
return
return mod.cli
class ParsecCLI(click.MultiCommand):
def list_commands(self, ctx):
# We pre-calculate this so it works more nicely within packaged
# versions of parsec. Please feel free to fix this?
commands = ['config', 'dataset_collections', 'datasets', 'datatypes',
'folders', 'forms', 'ftpfiles', 'genomes', 'groups',
'histories', 'init', 'invocations', 'jobs', 'libraries',
'quotas', 'roles', 'tool_data', 'tool_dependencies',
'toolshed_categories', 'toolshed', 'toolShed',
'toolshed_repositories', 'toolshed_tools', 'tools',
'users', 'utils', 'visual', 'workflows']
return commands
def get_command(self, ctx, name):
return name_to_command(None, name)
@click.command(cls=ParsecCLI, context_settings=CONTEXT_SETTINGS)
@click.version_option(__version__)
@click.option('-v', '--verbose', is_flag=True,
help='Enables verbose mode.')
@click.option(
"-g",
"--galaxy_instance",
help='Name of instance in %s. This parameter can also be set via the environment variable PARSEC_GALAXY_INSTANCE' % global_config_path(),
default='__default',
show_default=True,
required=True
)
@click.option(
"--path", "-f",
help="config file path",
type=str
)
@pass_context
def parsec(ctx, galaxy_instance, verbose, path=None):
"""Command line wrappers around BioBlend functions. While this sounds
unexciting, with parsec and jq you can easily build powerful command line
scripts."""
# set config_path if provided
if path is not None and len(path) > 0:
set_global_config_path(path)
# We abuse this, knowing that calls to one will fail.
try:
ctx.gi = get_galaxy_instance(galaxy_instance)
except TypeError:
pass
# ctx.log("Could not access Galaxy instance configuration")
try:
ctx.ti = get_toolshed_instance(galaxy_instance)
except TypeError:
pass
# ctx.log("Could not access Toolshed instance configuration")
ctx.verbose = verbose
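# Illustrative shell usage combining parsec with jq (the exact subcommand names
# depend on the installed parsec version, so treat this as an assumption):
#   parsec -g mygalaxy histories get_histories | jq -r '.[0].id'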
def json_loads(data):
"""Load json data, allowing - to represent stdin."""
if data is None:
return ""
if data == "-":
return json.load(sys.stdin)
elif os.path.exists(data):
with open(data, 'r') as handle:
return json.load(handle)
else:
return json.loads(data)
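# json_loads accepts three input forms, e.g. (the file name is an assumption):
#   json_loads('-')                    # read a JSON document from stdin
#   json_loads('payload.json')         # read from an existing file on disk
#   json_loads('{"name": "example"}')  # parse an inline JSON literal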
|
py | b40b0af1cd7ca082d53d862bf3d037c7c7fbd06d | from . import configLog, log, logOnce, startLogging, endLogging
""" shortcut import for time logging """
configLog()
|
py | b40b0b70b8c01278ea8a97fca986d0e5e17c40f2 | import random
import string
import stripe
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views.generic import ListView, DetailView, View
from .forms import CheckoutForm, CouponForm, RefundForm, PaymentForm
from .models import Item, OrderItem, Order, Address, Payment, Coupon, Refund, UserProfile
stripe.api_key = settings.STRIPE_SECRET_KEY
def create_ref_code():
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))
def products(request):
print(request)
context = {
'items': Item.objects.all()
}
return render(request, "product.html", context)
def is_valid_form(values):
valid = True
for field in values:
if field == '':
valid = False
return valid
class CheckoutView(View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
form = CheckoutForm()
context = {
'form': form,
'couponform': CouponForm(),
'order': order,
'DISPLAY_COUPON_FORM': True
}
shipping_address_qs = Address.objects.filter(
user=self.request.user,
address_type='S',
default=True
)
if shipping_address_qs.exists():
context.update(
{'default_shipping_address': shipping_address_qs[0]})
billing_address_qs = Address.objects.filter(
user=self.request.user,
address_type='B',
default=True
)
if billing_address_qs.exists():
context.update(
{'default_billing_address': billing_address_qs[0]})
return render(self.request, "checkout.html", context)
except ObjectDoesNotExist:
messages.info(self.request, "You do not have an active order")
return redirect("core:checkout")
def post(self, *args, **kwargs):
form = CheckoutForm(self.request.POST or None)
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if form.is_valid():
use_default_shipping = form.cleaned_data.get(
'use_default_shipping')
if use_default_shipping:
print("Using the defualt shipping address")
address_qs = Address.objects.filter(
user=self.request.user,
address_type='S',
default=True
)
if address_qs.exists():
shipping_address = address_qs[0]
order.shipping_address = shipping_address
order.save()
else:
messages.info(
self.request, "No default shipping address available")
return redirect('core:checkout')
else:
print("User is entering a new shipping address")
shipping_address1 = form.cleaned_data.get(
'shipping_address')
shipping_address2 = form.cleaned_data.get(
'shipping_address2')
shipping_country = form.cleaned_data.get(
'shipping_country')
shipping_zip = form.cleaned_data.get('shipping_zip')
if is_valid_form([shipping_address1, shipping_country, shipping_zip]):
shipping_address = Address(
user=self.request.user,
street_address=shipping_address1,
apartment_address=shipping_address2,
country=shipping_country,
zip=shipping_zip,
address_type='S'
)
shipping_address.save()
order.shipping_address = shipping_address
order.save()
set_default_shipping = form.cleaned_data.get(
'set_default_shipping')
if set_default_shipping:
shipping_address.default = True
shipping_address.save()
else:
messages.info(
self.request, "Please fill in the required shipping address fields")
use_default_billing = form.cleaned_data.get(
'use_default_billing')
same_billing_address = form.cleaned_data.get(
'same_billing_address')
if same_billing_address:
billing_address = shipping_address
billing_address.pk = None
billing_address.save()
billing_address.address_type = 'B'
billing_address.save()
order.billing_address = billing_address
order.save()
elif use_default_billing:
print("Using the defualt billing address")
address_qs = Address.objects.filter(
user=self.request.user,
address_type='B',
default=True
)
if address_qs.exists():
billing_address = address_qs[0]
order.billing_address = billing_address
order.save()
else:
messages.info(
self.request, "No default billing address available")
return redirect('core:checkout')
else:
print("User is entering a new billing address")
billing_address1 = form.cleaned_data.get(
'billing_address')
billing_address2 = form.cleaned_data.get(
'billing_address2')
billing_country = form.cleaned_data.get(
'billing_country')
billing_zip = form.cleaned_data.get('billing_zip')
if is_valid_form([billing_address1, billing_country, billing_zip]):
billing_address = Address(
user=self.request.user,
street_address=billing_address1,
apartment_address=billing_address2,
country=billing_country,
zip=billing_zip,
address_type='B'
)
billing_address.save()
order.billing_address = billing_address
order.save()
set_default_billing = form.cleaned_data.get(
'set_default_billing')
if set_default_billing:
billing_address.default = True
billing_address.save()
else:
messages.info(
self.request, "Please fill in the required billing address fields")
payment_option = form.cleaned_data.get('payment_option')
if payment_option == 'S':
return redirect('core:payment', payment_option='stripe')
elif payment_option == 'P':
return redirect('core:payment', payment_option='paypal')
else:
messages.warning(
self.request, "Invalid payment option selected")
return redirect('core:checkout')
except ObjectDoesNotExist:
messages.warning(self.request, "You do not have an active order")
return redirect("core:order-summary")
class PaymentView(View):
def get(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
if order.billing_address:
context = {
'order': order,
'DISPLAY_COUPON_FORM': False,
'STRIPE_PUBLIC_KEY': settings.STRIPE_PUBLIC_KEY
}
userprofile = self.request.user.userprofile
if userprofile.one_click_purchasing:
# fetch the users card list
cards = stripe.Customer.list_sources(
userprofile.stripe_customer_id,
limit=3,
object='card'
)
card_list = cards['data']
if len(card_list) > 0:
# update the context with the default card
context.update({
'card': card_list[0]
})
return render(self.request, "payment.html", context)
else:
messages.warning(
self.request, "You have not added a billing address")
return redirect("core:checkout")
def post(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
form = PaymentForm(self.request.POST)
userprofile = UserProfile.objects.get(user=self.request.user)
if form.is_valid():
token = form.cleaned_data.get('stripeToken')
save = form.cleaned_data.get('save')
use_default = form.cleaned_data.get('use_default')
if save:
if userprofile.stripe_customer_id != '' and userprofile.stripe_customer_id is not None:
customer = stripe.Customer.retrieve(
userprofile.stripe_customer_id)
customer.sources.create(source=token)
else:
customer = stripe.Customer.create(
email=self.request.user.email,
)
customer.sources.create(source=token)
userprofile.stripe_customer_id = customer['id']
userprofile.one_click_purchasing = True
userprofile.save()
amount = int(order.get_total() * 100)
try:
if use_default or save:
# charge the customer because we cannot charge the token more than once
charge = stripe.Charge.create(
amount=amount, # cents
currency="usd",
customer=userprofile.stripe_customer_id
)
else:
# charge once off on the token
charge = stripe.Charge.create(
amount=amount, # cents
currency="usd",
source=token
)
# create the payment
payment = Payment()
payment.stripe_charge_id = charge['id']
payment.user = self.request.user
payment.amount = order.get_total()
payment.save()
# assign the payment to the order
order_items = order.items.all()
order_items.update(ordered=True)
for item in order_items:
item.save()
order.ordered = True
order.payment = payment
order.ref_code = create_ref_code()
order.save()
messages.success(self.request, "Your order was successful!")
return redirect("/")
except stripe.error.CardError as e:
body = e.json_body
err = body.get('error', {})
messages.warning(self.request, f"{err.get('message')}")
return redirect("/")
except stripe.error.RateLimitError as e:
# Too many requests made to the API too quickly
messages.warning(self.request, "Rate limit error")
return redirect("/")
except stripe.error.InvalidRequestError as e:
# Invalid parameters were supplied to Stripe's API
print(e)
messages.warning(self.request, "Invalid parameters")
return redirect("/")
except stripe.error.AuthenticationError as e:
# Authentication with Stripe's API failed
# (maybe you changed API keys recently)
messages.warning(self.request, "Not authenticated")
return redirect("/")
except stripe.error.APIConnectionError as e:
# Network communication with Stripe failed
messages.warning(self.request, "Network error")
return redirect("/")
except stripe.error.StripeError as e:
# Display a very generic error to the user, and maybe send
# yourself an email
messages.warning(
self.request, "Something went wrong. You were not charged. Please try again.")
return redirect("/")
except Exception as e:
# send an email to ourselves
messages.warning(
self.request, "A serious error occurred. We have been notifed.")
return redirect("/")
messages.warning(self.request, "Invalid data received")
return redirect("/payment/stripe/")
class HomeView(ListView):
model = Item
paginate_by = 10
template_name = "home.html"
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'object': order
}
return render(self.request, 'order_summary.html', context)
except ObjectDoesNotExist:
messages.warning(self.request, "You do not have an active order")
return redirect("/")
class ItemDetailView(DetailView):
model = Item
template_name = "product.html"
@login_required
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(
item=item,
user=request.user,
ordered=False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item.quantity += 1
order_item.save()
messages.info(request, "This item quantity was updated.")
return redirect("core:order-summary")
else:
order.items.add(order_item)
messages.info(request, "This item was added to your cart.")
return redirect("core:order-summary")
else:
ordered_date = timezone.now()
order = Order.objects.create(
user=request.user, ordered_date=ordered_date)
order.items.add(order_item)
messages.info(request, "This item was added to your cart.")
return redirect("core:order-summary")
@login_required
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
order.items.remove(order_item)
order_item.delete()
messages.info(request, "This item was removed from your cart.")
return redirect("core:order-summary")
else:
messages.info(request, "This item was not in your cart")
return redirect("core:product", slug=slug)
else:
messages.info(request, "You do not have an active order")
return redirect("core:product", slug=slug)
@login_required
def remove_single_item_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
messages.info(request, "This item quantity was updated.")
return redirect("core:order-summary")
else:
messages.info(request, "This item was not in your cart")
return redirect("core:product", slug=slug)
else:
messages.info(request, "You do not have an active order")
return redirect("core:product", slug=slug)
def get_coupon(request, code):
try:
coupon = Coupon.objects.get(code=code)
return coupon
except ObjectDoesNotExist:
messages.info(request, "This coupon does not exist")
return redirect("core:checkout")
class AddCouponView(View):
def post(self, *args, **kwargs):
form = CouponForm(self.request.POST or None)
if form.is_valid():
try:
code = form.cleaned_data.get('code')
order = Order.objects.get(
user=self.request.user, ordered=False)
                coupon = get_coupon(self.request, code)
                if coupon is None:
                    return redirect("core:checkout")
                order.coupon = coupon
                order.save()
messages.success(self.request, "Successfully added coupon")
return redirect("core:checkout")
except ObjectDoesNotExist:
messages.info(self.request, "You do not have an active order")
return redirect("core:checkout")
class RequestRefundView(View):
def get(self, *args, **kwargs):
form = RefundForm()
context = {
'form': form
}
return render(self.request, "request_refund.html", context)
def post(self, *args, **kwargs):
form = RefundForm(self.request.POST)
if form.is_valid():
ref_code = form.cleaned_data.get('ref_code')
message = form.cleaned_data.get('message')
email = form.cleaned_data.get('email')
# edit the order
try:
order = Order.objects.get(ref_code=ref_code)
order.refund_requested = True
order.save()
# store the refund
refund = Refund()
refund.order = order
refund.reason = message
refund.email = email
refund.save()
messages.info(self.request, "Your request was received.")
return redirect("core:request-refund")
except ObjectDoesNotExist:
messages.info(self.request, "This order does not exist.")
return redirect("core:request-refund")
|
py | b40b0b78a9ec148310ed503613a77bfe41216f5b | import numpy as np
# ========================================================================
def p0_printer(par):
iproc = par.rank
def printer(*args, **kwargs):
if iproc == 0:
print(*args, **kwargs)
return printer
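# Hedged usage sketch (not part of the original module): `_SerialPar` stands in
# for whatever parallel/MPI wrapper exposes a `rank` attribute; only rank 0 prints.
def _demo_p0_printer():
    class _SerialPar:
        rank = 0
    printer = p0_printer(_SerialPar())
    printer("only printed on rank 0")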
# ========================================================================
def hill(x):
h = 28.0
xstar = x * h
xstar[xstar > 128] = 252 - xstar[xstar > 128]
ystar = np.zeros(x.shape)
idx = (0.0 <= xstar) & (xstar < 9.0)
ystar[idx] = np.minimum(
28 * np.ones(x[idx].shape),
2.800000000000e01
+ 0.000000000000e00 * xstar[idx]
+ 6.775070969851e-03 * xstar[idx] ** 2
- 2.124527775800e-03 * xstar[idx] ** 3,
)
idx = (9.0 <= xstar) & (xstar < 14.0)
ystar[idx] = (
2.507355893131e01
+ 9.754803562315e-01 * xstar[idx]
- 1.016116352781e-01 * xstar[idx] ** 2
+ 1.889794677828e-03 * xstar[idx] ** 3
)
idx = (14.0 <= xstar) & (xstar < 20.0)
ystar[idx] = (
2.579601052357e01
+ 8.206693007457e-01 * xstar[idx]
- 9.055370274339e-02 * xstar[idx] ** 2
+ 1.626510569859e-03 * xstar[idx] ** 3
)
idx = (20.0 <= xstar) & (xstar < 30.0)
ystar[idx] = (
4.046435022819e01
- 1.379581654948e00 * xstar[idx]
+ 1.945884504128e-02 * xstar[idx] ** 2
- 2.070318932190e-04 * xstar[idx] ** 3
)
idx = (30.0 <= xstar) & (xstar < 40.0)
ystar[idx] = (
1.792461334664e01
+ 8.743920332081e-01 * xstar[idx]
- 5.567361123058e-02 * xstar[idx] ** 2
+ 6.277731764683e-04 * xstar[idx] ** 3
)
idx = (40.0 <= xstar) & (xstar < 50.0)
ystar[idx] = np.maximum(
np.zeros(x[idx].shape),
5.639011190988e01
- 2.010520359035e00 * xstar[idx]
+ 1.644919857549e-02 * xstar[idx] ** 2
+ 2.674976141766e-05 * xstar[idx] ** 3,
)
return ystar / h
# ========================================================================
def xplanes():
return [0.05, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
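# Hedged usage sketch (not part of the original module): evaluates the hill
# profile at the sampling stations returned by xplanes(); x is assumed to be
# given in hill heights, matching hill()'s own normalization by h.
def _demo_hill_profile():
    x = np.array(xplanes())
    y = hill(x)
    for xi, yi in zip(x, y):
        print(f"x/h = {xi:5.2f} -> y/h = {yi:6.3f}")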
|
py | b40b0bc16d60a25f48e9cd2b1ab766b6a37041d7 | import logging
from flask import Flask
from flask_appbuilder import AppBuilder, SQLA
"""
Logging configuration
"""
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
logging.getLogger().setLevel(logging.DEBUG)
app = Flask(__name__)
app.config.from_object("config")
db = SQLA(app)
appbuilder = AppBuilder(app, db.session)
from . import api # noqa
|
py | b40b0c12b08dcb8bc82347028a70896218d3db85 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit and system tests for metricscaler.py"""
import os
import time
import random
import pytest
from google.cloud import bigtable
from google.cloud.bigtable import enums
from mock import patch
from metricscaler import get_cpu_load
from metricscaler import main
from metricscaler import scale_bigtable
PROJECT = os.environ['GCLOUD_PROJECT']
BIGTABLE_ZONE = os.environ['BIGTABLE_ZONE']
SIZE_CHANGE_STEP = 3
INSTANCE_ID_FORMAT = 'metric-scale-test-{}'
INSTANCE_ID_RANGE = 10000
BIGTABLE_INSTANCE = INSTANCE_ID_FORMAT.format(
random.randrange(INSTANCE_ID_RANGE))
# System tests to verify API calls succeed
def test_get_cpu_load():
assert float(get_cpu_load()) > 0.0
@pytest.fixture()
def instance():
cluster_id = BIGTABLE_INSTANCE
client = bigtable.Client(project=PROJECT, admin=True)
serve_nodes = 3
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
labels = {'prod-label': 'prod-label'}
instance = client.instance(BIGTABLE_INSTANCE, instance_type=production,
labels=labels)
if not instance.exists():
cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE,
serve_nodes=serve_nodes,
default_storage_type=storage_type)
instance.create(clusters=[cluster])
yield
instance.delete()
def test_scale_bigtable(instance):
bigtable_client = bigtable.Client(admin=True)
instance = bigtable_client.instance(BIGTABLE_INSTANCE)
instance.reload()
cluster = instance.cluster(BIGTABLE_INSTANCE)
cluster.reload()
original_node_count = cluster.serve_nodes
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
for n in range(10):
time.sleep(10)
cluster.reload()
new_node_count = cluster.serve_nodes
try:
assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP))
except AssertionError:
if n == 9:
raise
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
for n in range(10):
time.sleep(10)
cluster.reload()
final_node_count = cluster.serve_nodes
try:
assert final_node_count == original_node_count
except AssertionError:
if n == 9:
raise
# Unit test for logic
@patch('time.sleep')
@patch('metricscaler.get_cpu_load')
@patch('metricscaler.scale_bigtable')
def test_main(scale_bigtable, get_cpu_load, sleep):
SHORT_SLEEP = 5
LONG_SLEEP = 10
get_cpu_load.return_value = 0.5
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_not_called()
scale_bigtable.reset_mock()
get_cpu_load.return_value = 0.7
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
get_cpu_load.return_value = 0.2
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, False)
scale_bigtable.reset_mock()
|
py | b40b0d3f240690ecb66ee7369d5a49ebfd383818 | # -*- coding: utf-8 -*-
#
# Modified from: https://it.wikipedia.org/wiki/Utente:BimBot/Scripts#discussioneanonimi3.py
#
import pywikibot
import re
import subprocess
from pywikibot import pagegenerators
from pywikibot.exceptions import LockedPageError
from time import *
start = time()
site = pywikibot.Site('it', 'wikipedia')
def main():
subprocess.check_call('mysql --defaults-file=~/replica.my.cnf -h ' +
'itwiki.analytics.db.svc.eqiad.wmflabs -BN < talk_ip.sql > talk_ip.out', shell=True)
talkpages = pagegenerators.TextIOPageGenerator('talk_ip.out')
for talk in talkpages:
if talk.namespace() != 3 or not talk.exists() or not talk.botMayEdit():
continue
pywikibot.output("\n>>>>> " + talk.title() + " <<<<<")
oldtext = talk.get()
try:
if checkStatic(talk.title(with_ns=False)):
newtext = u'{{IPcondiviso}}\n' + oldtext
talk.put(
newtext, u'Bot: aggiungo template IPcondiviso ([[Utente:IncolaBot/FAQ|FAQ]])')
else:
newtext = u'{{BenvenutoIP}}'
talk.put(
newtext, u'Bot: aggiungo template BenvenutoIP ([[Utente:IncolaBot/FAQ|FAQ]])')
except LockedPageError:
continue
def checkStatic(ip):
    response = subprocess.check_output(['dig', '-x', ip, '+short']).decode()
pywikibot.output('Dig response: ' + response)
return bool(re.search('[Ss]tatic', response))
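# Hedged usage sketch (not part of the original script): checkStatic shells out
# to the system `dig` binary, so this only works where dig is installed; the IP
# below is an illustrative placeholder from the TEST-NET-1 range.
def _demo_check_static(ip='192.0.2.1'):
    if checkStatic(ip):
        pywikibot.output(ip + ' reverse-resolves to a static hostname')
    else:
        pywikibot.output(ip + ' does not look static')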
if __name__ == "__main__":
try:
main()
finally:
end = time()
print("Run time:", end-start)
|
py | b40b0efe98a4d00f010d69bbc854b37bb9f77995 | import json
from json.decoder import JSONDecodeError
import requests
from Classes.Metadata import Metadata
from Classes.PortablePacket import PortablePacket
from extension import write
from colorama import Fore
from zip_utils import *
import os
import sys
home = os.path.expanduser('~')
def update_portable(ctx, packet: PortablePacket, metadata: Metadata):
import shutil
import click
from difflib import get_close_matches
write(
f'Updating [ {Fore.LIGHTCYAN_EX}{packet.display_name}{Fore.RESET} ]', 'white', metadata)
options = os.listdir(rf'{home}\electric')
matches = get_close_matches(
rf'{home}\electric\{packet.json_name}@{packet.latest_version}', options)
if len(matches) == 1:
# similar package exists and we need to get the version of the currently installed package.
current_version = matches[0].split('@')[-1].replace('.json', '')
if current_version != packet.latest_version:
write(f'{packet.display_name} Will Be Updated From ({current_version}) => ({packet.latest_version})', 'green', metadata)
write('Requesting Currently Installed Version', 'yellow', metadata)
REQA = 'http://electric-env.eba-9m7janw8.us-east-1.elasticbeanstalk.com/package/'
try:
response = requests.get(
REQA + packet.json_name + '.json', timeout=5)
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
click.echo(click.style(
f'Failed to request {packet.json_name}.json from server', 'red'))
sys.exit()
try:
res = json.loads(response.text)
except JSONDecodeError:
click.echo(click.style(f'{packet.json_name} not found!', 'red'))
sys.exit()
pkg = res
pkg = pkg['portable']
keys = list(pkg[current_version].keys())
data = {
'display-name': res['display-name'],
'package-name': res['package-name'],
'latest-version': res['latest-version'],
'url': pkg[current_version]['url'],
'file-type': pkg[current_version]['file-type'] if 'file-type' in keys else None,
'extract-dir': res['package-name'],
'chdir': pkg[current_version]['chdir'] if 'chdir' in keys else [],
'bin': pkg[current_version]['bin'] if 'bin' in keys else [],
'shortcuts': pkg[current_version]['shortcuts'] if 'shortcuts' in keys else [],
'pre-install': pkg[current_version]['pre-install'] if 'pre-install' in keys else [],
'post-install': pkg[current_version]['post-install'] if 'post-install' in keys else [],
'install-notes': pkg[current_version]['install-notes'] if 'install-notes' in keys else None,
'uninstall-notes': pkg[current_version]['uninstall-notes'] if 'uninstall-notes' in keys else None,
'set-env': pkg[current_version]['set-env'] if 'set-env' in keys else None,
'persist': pkg[current_version]['persist'] if 'persist' in keys else None,
'checksum': pkg[current_version]['checksum'] if 'checksum' in keys else None,
'dependencies': pkg[current_version]['dependencies'] if 'dependencies' in keys else None,
}
old_packet = PortablePacket(data)
# continue updating the package
# if a directory has to be saved before uninstallation and installation of the portable
if old_packet.persist:
install_directory = rf'{home}\electric\{old_packet.json_name}@{current_version}\\'
if old_packet.chdir:
install_directory += old_packet.chdir + '\\'
install_directory = install_directory.replace('\\\\', '\\')
if isinstance(old_packet.persist, list):
for path in old_packet.persist:
# multiple directories to backup
try:
shutil.copytree(
install_directory + path, rf'{home}\electric\Persist\{old_packet.json_name}@{current_version}\{path}')
except FileExistsError:
pass
else:
# only 1 directory to backup
if old_packet.persist:
try:
shutil.copytree(install_directory + old_packet.persist,
rf'{home}\electric\Persist\{old_packet.json_name}@{current_version}\{old_packet.persist}')
except FileExistsError:
pass
os.system(f'electric uninstall {packet.json_name} --portable')
os.system(f'electric install {packet.json_name} --portable')
new_install_dir = rf'{home}\electric\{packet.json_name}@{packet.latest_version}\\'
if packet.chdir:
new_install_dir += packet.chdir + '\\'
new_install_dir = new_install_dir.replace('\\\\', '\\')
if old_packet.persist:
write('Restoring Old Files And Data', 'green', metadata)
if isinstance(old_packet.persist, list):
for path in old_packet.persist:
shutil.rmtree(new_install_dir + path)
shutil.copytree(
rf'{home}\electric\Persist\{old_packet.json_name}@{current_version}\{path}', new_install_dir + path)
else:
shutil.rmtree(new_install_dir.replace(
'\\\\', '\\') + old_packet.persist.replace('\\\\', '\\'))
shutil.copytree(
rf'{home}\electric\Persist\{old_packet.json_name}@{current_version}\{old_packet.persist}', new_install_dir + old_packet.persist)
# completed backup of files to backups directory
write(
rf'Successfully Completed Backup Of Required Data To {home}\electric\Persist', 'cyan', metadata)
else:
write(
f'Could not find any existing installations of {packet.display_name}', 'red', metadata)
write(f'Successfully Updated {packet.display_name}',
'bright_magenta', metadata)
sys.exit()
|
py | b40b0ff954467e3255e72a7c315ea04ac948b4dc | import os
import subprocess
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision.datasets import CIFAR10, CIFAR100, SVHN
import torch
import math
import torch.nn.functional as F
import tarfile
from torchvision.datasets import ImageFolder
# Custom
from datasets.transforms import get_transforms
from datasets.tiny_imagenet import TinyImagenetDataset
from datasets.caltech import Caltech256
from datasets.flower import Flower102
from datasets.imagenet import ImageNetDataset
from datasets.oxford_pet import OxfordPet
def get_datasets(args, transform='default', target_transform='default'):
"""
Return the PyTorch datasets.
"""
# Transforms
transform = get_transforms(args)[0] if transform == 'default' else transform
target_transform = get_transforms(args)[1] if target_transform == 'default' else target_transform
ds_args = {
'root': args.data_path,
'download': True,
}
if args.dataset == 'tiny_imagenet':
ds_train = TinyImagenetDataset(train=True, transform=transform['train'],
target_transform=target_transform['train'], **ds_args)
ds_train_aug = TinyImagenetDataset(train=True, transform=transform['train_aug'],
target_transform=target_transform['train_aug'], **ds_args)
ds_validation = TinyImagenetDataset(train=False, transform=transform['validation'],
target_transform=target_transform['validation'], **ds_args)
ds_test = None
elif args.dataset in ['cifar10']:
ds_train = CIFAR10(train=True, transform=transform['train'], target_transform=target_transform['train'],
**ds_args)
ds_train_aug = CIFAR10(train=True, transform=transform['train_aug'],
target_transform=target_transform['train_aug'], **ds_args)
ds_validation = CIFAR10(train=False, transform=transform['validation'],
target_transform=target_transform['validation'], **ds_args)
ds_test = None
elif args.dataset == 'cifar100':
ds_train = CIFAR100(train=True, transform=transform['train'], target_transform=target_transform['train'],
**ds_args)
ds_train_aug = CIFAR100(train=True, transform=transform['train_aug'],
target_transform=target_transform['train_aug'], **ds_args)
ds_validation = CIFAR100(train=False, transform=transform['validation'],
target_transform=target_transform['validation'], **ds_args)
ds_test = None
elif args.dataset == 'imagenet':
ds_args = {
'root_path': os.path.join(args.data_base_path, 'imagenet'),
}
ds_train = ImageNetDataset(partition='train', transform=transform['train'], target_transform=target_transform['train'], **ds_args)
ds_train_aug = ImageNetDataset(partition='train', transform=transform['train_aug'], target_transform=target_transform['train_aug'], **ds_args)
ds_validation = ImageNetDataset(partition='val', transform=transform['validation'], target_transform=target_transform['validation'], **ds_args)
ds_test = ImageNetDataset(partition='test', transform=transform['test'], target_transform=target_transform['test'], **ds_args)
elif args.dataset == 'caltech256':
ds_train = Caltech256(train=True, transform=transform['train'], target_transform=target_transform['train'], **ds_args)
ds_train_aug = Caltech256(train=True, transform=transform['train_aug'], target_transform=target_transform['train_aug'], **ds_args)
ds_validation = Caltech256(train=False, transform=transform['validation'], target_transform=target_transform['validation'], **ds_args)
ds_test = None
elif args.dataset == 'flower102':
ds_train = Flower102(split='train', transform=transform['train'], target_transform=target_transform['train'], **ds_args)
ds_train_aug = Flower102(split='train', transform=transform['train_aug'], target_transform=target_transform['train_aug'], **ds_args)
ds_validation = Flower102(split='val', transform=transform['validation'], target_transform=target_transform['validation'], **ds_args)
ds_test = Flower102(split='test', transform=transform['test'], target_transform=target_transform['test'], **ds_args)
elif args.dataset == 'oxford_pet':
ds_train = OxfordPet(train=True, transform=transform['train'], target_transform=target_transform['train'], **ds_args)
ds_train_aug = OxfordPet(train=True, transform=transform['train_aug'], target_transform=target_transform['train_aug'], **ds_args)
ds_validation = OxfordPet(train=False, transform=transform['validation'], target_transform=target_transform['validation'], **ds_args)
ds_test = None
elif args.dataset == 'svhn':
ds_train = SVHN(split='train', transform=transform['train'], target_transform=target_transform['train'], **ds_args)
ds_train_aug = SVHN(split='train', transform=transform['train_aug'], target_transform=target_transform['train_aug'], **ds_args)
ds_validation = SVHN(split='test', transform=transform['validation'], target_transform=target_transform['validation'], **ds_args)
ds_test = None
else:
raise Exception(f'Error. Dataset {args.dataset} not supported.')
# Datasets
dss = {
'train': ds_train,
'train_aug': ds_train_aug,
'validation': ds_validation,
'test': ds_test,
}
return dss
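# Hedged usage sketch (not from the original module): `args` is normally the
# parsed CLI namespace; only the attributes read in get_datasets are filled in
# here, and get_transforms(args) may require further fields in a real run.
def _demo_get_datasets():
    from types import SimpleNamespace
    args = SimpleNamespace(dataset='cifar10', data_path='./data', data_base_path='./data')
    dss = get_datasets(args)
    print({split: (len(ds) if ds is not None else None) for split, ds in dss.items()})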
|
py | b40b104cc8381966c21ec20166da8edeaa8ed386 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from typing import Dict, Any
from pathlib import Path
from acktest.k8s import resource as k8s
from acktest.resources import load_resource_file
SERVICE_NAME = "applicationautoscaling"
CRD_GROUP = "applicationautoscaling.services.k8s.aws"
CRD_VERSION = "v1alpha1"
# PyTest marker for the current service
service_marker = pytest.mark.service(arg=SERVICE_NAME)
bootstrap_directory = Path(__file__).parent
resource_directory = Path(__file__).parent / "resources"
def load_autoscaling_resource(
resource_name: str, additional_replacements: Dict[str, Any] = {}
):
"""Overrides the default `load_resource_file` to access the specific resources
directory for the current service.
"""
return load_resource_file(
resource_directory,
resource_name,
additional_replacements=additional_replacements,
)
def create_applicationautoscaling_resource(
resource_plural, resource_name, spec_file, replacements, namespace="default"
):
"""
Wrapper around k8s.load_and_create_resource to create a ApplicationAutoscaling resource
"""
reference, spec, resource = k8s.load_and_create_resource(
resource_directory,
CRD_GROUP,
CRD_VERSION,
resource_plural,
resource_name,
spec_file,
replacements,
namespace,
)
return reference, spec, resource
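# Hedged usage sketch (not part of the original conftest): the resource plural,
# names and spec file below are illustrative placeholders; `replacements` would
# normally be built from the bootstrapped test resources.
def _demo_create_scalable_target(replacements):
    return create_applicationautoscaling_resource(
        resource_plural="scalabletargets",
        resource_name="my-scalable-target",
        spec_file="scalabletarget_base",
        replacements=replacements,
    )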
|
py | b40b10bf47533d1ca3e22cd8e5af189694b747a5 | from soupsieve import select
from sqlalchemy import Column, String, delete
from database import BaseObject, session
from utils import print_title
from inputs import get_input_item
class Book(BaseObject):
__tablename__ = 'T_BOOK'
type = Column('F_TYPE', String(50), nullable=False)
title = Column('F_TITLE', String(300), nullable=False)
author = Column('F_AUTHOR', String(50), nullable=False)
isbn = Column('F_ISBN', String(13), nullable=False, unique=True)
genre = Column('F_GENRE', String(50), nullable=False)
price = Column('F_PRICE', String(15), nullable=True)
language = Column('F_LANGUAGE', String(50), nullable=False)
series = Column('F_SERIES', String(200), nullable=True)
size = Column('F_SIZE', String(50), nullable=False) # size stands for pages, length(minutes), characters
def __str__(self):
        return '{} by {} (ISBN {})'.format(self.title, self.author, self.isbn)
def add_book():
b = Book()
b.type = input('Which type of book would you like to add? (Choose from "ebook", "audiobook" or "physicalbook"): ')
b.title = input('Give book title: ')
b.author = input('Give author name: ')
b.isbn = input('Give isbn-number: ')
b.genre = input('Choose book genre: ')
b.price = input('Give sale price: ')
b.language = input('Choose language: ')
b.series = input('Give book series: ')
b.size = input('Give book size (in characters, words or pages): ')
session.add(b)
session.commit()
def search_book(): # return is a qry NOT an object!
input_isbn = input('What book are you looking for? Please give in the isbn: ')
qry = session.query(Book).filter_by(isbn=input_isbn)
return qry
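# Hedged usage sketch (not part of the original module): search_book returns a
# Query, so callers pull the row out with .first() (or .one()) before using it.
def _demo_print_book():
    qry = search_book()
    book = qry.first()
    if book is None:
        print('No book found for that isbn')
    else:
        print('Found: ' + book.title)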
def remove_book():
book = search_book()
book.delete()
session.commit()
def change_book(): # in progress
print_title('Change a book')
book_qry = search_book()
options = {1: 'change type',
2: 'change title',
3: 'change author',
4: 'change isbn',
5: 'change genre',
6: 'change price',
7: 'change language',
8: 'change series',
9: 'change size'
}
for option in options:
print('{}: {}'.format(option, options[option]))
    choice = get_input_item('What do you want to do? Give number (empty to exit): \n', 1)
if choice == 1:
print_title('Change type:')
book_type = get_input_item('Give new type: ')
book_qry.one().type = book_type
if choice == 2:
print_title('Change title:')
book_title = get_input_item('Give new title: ')
        book_qry.one().title = book_title
if choice == 3:
print_title('Change author:')
book_author = get_input_item('Give new author: ')
        book_qry.one().author = book_author
if choice == 4:
print_title('Change isbn:')
book_isbn = get_input_item('Give new isbn: ')
        book_qry.one().isbn = book_isbn
if choice == 5:
print_title('Change genre:')
book_genre = get_input_item('Give new genre: ')
        book_qry.one().genre = book_genre
if choice == 6:
print_title('Change price:')
book_price = get_input_item('Give new price: ')
        book_qry.one().price = book_price
if choice == 7:
print_title('Change language:')
book_lang = get_input_item('Give new language: ')
        book_qry.one().language = book_lang
if choice == 8:
print_title('Change series:')
book_ser = get_input_item('Give new series: ')
        book_qry.one().series = book_ser
if choice == 9:
print_title('Change size:')
book_size = get_input_item('Give new size: ')
        book_qry.one().size = book_size
session.commit()
|
py | b40b10dd46bfb2294e0c49edaed52904750419ba | from setuptools import setup, find_packages
import pkg_resources
from pathlib import Path
this_directory = Path(__file__).parent
VERSION = '0.1.42'
DESCRIPTION = 'A python package to interact with Inter-American Development Bank machine learning models to automatic label elements for iRAP certification'
LONG_DESCRIPTION = (this_directory / "README.md").read_text()
with Path('requirements.txt').open() as requirements_txt:
install_requires = [str(requirement) for requirement in pkg_resources.parse_requirements(requirements_txt)]
setup(
# the name must match the folder name 'verysimplemodule'
name="viasegura",
version=VERSION,
author="Jose Maria Marquez Blanco",
author_email="[email protected]",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
packages=find_packages(),
install_requires=install_requires,
keywords=['Machine Learning', 'safe road'],
classifiers= [
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
include_package_data=True
) |
py | b40b1199482726b1b381d89a20d04170a64c5164 | import sqlite3
import numpy as np
import json
from contextlib import contextmanager
from openmdao.utils.record_util import format_iteration_coordinate, deserialize
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.recorders.sqlite_recorder import blob_to_array, format_version
import pickle
@contextmanager
def database_cursor(filename):
"""
Context manager managing a cursor for the SQLite database with the given file name.
"""
con = sqlite3.connect(filename)
cur = con.cursor()
yield cur
con.close()
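# Hedged usage sketch (not from the original test utilities): the file name is a
# placeholder for whatever SQLite case-recorder file a test produced.
def _demo_count_driver_iterations(filename='cases.sql'):
    with database_cursor(filename) as cur:
        cur.execute("SELECT count(*) FROM driver_iterations")
        return cur.fetchone()[0]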
def get_format_version_abs2meta(db_cur):
"""
Return the format version and abs2meta dict from metadata table in the case recorder file.
"""
prom2abs = {}
conns = {}
db_cur.execute("SELECT format_version, abs2meta FROM metadata")
row = db_cur.fetchone()
f_version = row[0]
if f_version >= 11:
db_cur.execute("SELECT prom2abs, conns FROM metadata")
row2 = db_cur.fetchone()
# Auto-IVC
prom2abs = json.loads(row2[0])
conns = json.loads(row2[1])
# Need to also get abs2meta so that we can pass it to deserialize
if f_version >= 3:
abs2meta = json.loads(row[1])
elif f_version in (1, 2):
try:
abs2meta = pickle.loads(row[1]) if row[1] is not None else None
except TypeError:
# Reading in a python 2 pickle recorded pre-OpenMDAO 2.4.
abs2meta = pickle.loads(row[1].encode()) if row[1] is not None else None
return f_version, abs2meta, prom2abs, conns
def assertProblemDataRecorded(test, expected, tolerance):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
f_version, abs2meta, prom2abs, conns = get_format_version_abs2meta(db_cur)
# iterate through the cases
for case, (t0, t1), outputs_expected in expected:
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM problem_cases WHERE case_name=:case_name",
{"case_name": case})
row_actual = db_cur.fetchone()
test.assertTrue(row_actual, 'Problem table does not contain the requested '
'case name: "{}"'.format(case))
counter, global_counter, case_name, timestamp, success, msg, inputs_text, \
outputs_text, residuals_text, derivatives, abs_err, rel_err = row_actual
if f_version >= 3:
outputs_actual = deserialize(outputs_text, abs2meta, prom2abs, conns)
elif f_version in (1, 2):
outputs_actual = blob_to_array(outputs_text)
test.assertEqual(success, 1)
test.assertEqual(msg, '')
for vartype, actual, expected in (
('outputs', outputs_actual, outputs_expected),
):
if expected is None:
if f_version >= 3:
test.assertIsNone(actual)
if f_version in (1, 2):
test.assertEqual(actual, np.array(None, dtype=object))
else:
actual = actual[0]
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual), len(expected))
for key, value in expected.items():
# Check to see if the keys in the actual and expected match
test.assertTrue(key in actual.dtype.names,
'{} variable not found in actual data'
' from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[key], expected[key], tolerance)
def assertDriverIterDataRecorded(test, expected, tolerance, prefix=None):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
f_version, abs2meta, prom2abs, conns = get_format_version_abs2meta(db_cur)
# iterate through the cases
for coord, (t0, t1), outputs_expected, inputs_expected, residuals_expected in expected:
iter_coord = format_iteration_coordinate(coord, prefix=prefix)
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM driver_iterations WHERE "
"iteration_coordinate=:iteration_coordinate",
{"iteration_coordinate": iter_coord})
row_actual = db_cur.fetchone()
test.assertTrue(row_actual,
'Driver iterations table does not contain the requested '
'iteration coordinate: "{}"'.format(iter_coord))
counter, global_counter, iteration_coordinate, timestamp, success, msg,\
inputs_text, outputs_text, residuals_text = row_actual
if f_version >= 3:
inputs_actual = deserialize(inputs_text, abs2meta, prom2abs, conns)
outputs_actual = deserialize(outputs_text, abs2meta, prom2abs, conns)
residuals_actual = deserialize(residuals_text, abs2meta, prom2abs, conns)
elif f_version in (1, 2):
inputs_actual = blob_to_array(inputs_text)
outputs_actual = blob_to_array(outputs_text)
# Does the timestamp make sense?
test.assertTrue(t0 <= timestamp and timestamp <= t1)
test.assertEqual(success, 1)
test.assertEqual(msg, '')
for vartype, actual, expected in (
('outputs', outputs_actual, outputs_expected),
('inputs', inputs_actual, inputs_expected),
('residuals', residuals_actual, residuals_expected)
):
if expected is None:
if f_version >= 3:
test.assertIsNone(actual)
if f_version in (1, 2):
test.assertEqual(actual, np.array(None, dtype=object))
else:
actual = actual[0]
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual), len(expected))
for key, value in expected.items():
# ivc sources
if vartype == 'outputs' and key in prom2abs['input']:
prom_in = prom2abs['input'][key][0]
src_key = conns[prom_in]
else:
src_key = key
# Check to see if the keys in the actual and expected match
test.assertTrue(src_key in actual.dtype.names,
'{} variable not found in actual data'
' from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[src_key], expected[key], tolerance)
def assertDriverDerivDataRecorded(test, expected, tolerance, prefix=None):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
# iterate through the cases
for coord, (t0, t1), totals_expected in expected:
iter_coord = format_iteration_coordinate(coord, prefix=prefix)
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM driver_derivatives WHERE "
"iteration_coordinate=:iteration_coordinate",
{"iteration_coordinate": iter_coord})
row_actual = db_cur.fetchone()
db_cur.execute("SELECT abs2meta FROM metadata")
row_abs2meta = db_cur.fetchone()
test.assertTrue(row_actual,
'Driver iterations table does not contain the requested '
'iteration coordinate: "{}"'.format(iter_coord))
counter, global_counter, iteration_coordinate, timestamp, success, msg,\
totals_blob = row_actual
abs2meta = json.loads(row_abs2meta[0]) if row_abs2meta[0] is not None else None
test.assertTrue(isinstance(abs2meta, dict))
totals_actual = blob_to_array(totals_blob)
# Does the timestamp make sense?
test.assertTrue(t0 <= timestamp and timestamp <= t1)
test.assertEqual(success, 1)
test.assertEqual(msg, '')
if totals_expected is None:
test.assertEqual(totals_actual, np.array(None, dtype=object))
else:
actual = totals_actual[0]
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual), len(totals_expected))
for key, value in totals_expected.items():
# Check to see if the keys in the actual and expected match
test.assertTrue(key in actual.dtype.names,
'{} variable not found in actual data'
' from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[key], totals_expected[key], tolerance)
def assertProblemDerivDataRecorded(test, expected, tolerance, prefix=None):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
# iterate through the cases
for case_name, (t0, t1), totals_expected in expected:
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM problem_cases WHERE "
"case_name=:case_name",
{"case_name": case_name})
row_actual = db_cur.fetchone()
test.assertTrue(row_actual,
'Problem case table does not contain the requested '
'case name: "{}"'.format(case_name))
counter, global_counter, case_name, timestamp, success, msg, inputs, outputs, \
residuals, totals_blob, abs_err, rel_err = \
row_actual
totals_actual = blob_to_array(totals_blob)
test.assertEqual(success, 1)
test.assertEqual(msg, '')
if totals_expected is None:
test.assertEqual(totals_actual.shape, (),
msg="Expected empty array derivatives in case recorder")
else:
test.assertNotEqual(totals_actual.shape[0], 0,
msg="Expected non-empty array derivatives in case recorder")
actual = totals_actual[0]
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual), len(totals_expected))
for key, value in totals_expected.items():
# Check to see if the keys in the actual and expected match
test.assertTrue(key in actual.dtype.names,
'{} variable not found in actual data'
' from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[key], totals_expected[key], tolerance)
def assertSystemIterDataRecorded(test, expected, tolerance, prefix=None):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
f_version, abs2meta, prom2abs, conns = get_format_version_abs2meta(db_cur)
# iterate through the cases
for coord, (t0, t1), inputs_expected, outputs_expected, residuals_expected in expected:
iter_coord = format_iteration_coordinate(coord, prefix=prefix)
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM system_iterations WHERE "
"iteration_coordinate=:iteration_coordinate",
{"iteration_coordinate": iter_coord})
row_actual = db_cur.fetchone()
test.assertTrue(row_actual, 'System iterations table does not contain the requested '
'iteration coordinate: "{}"'.format(iter_coord))
counter, global_counter, iteration_coordinate, timestamp, success, msg, inputs_text, \
outputs_text, residuals_text = row_actual
if f_version >= 3:
inputs_actual = deserialize(inputs_text, abs2meta, prom2abs, conns)
outputs_actual = deserialize(outputs_text, abs2meta, prom2abs, conns)
residuals_actual = deserialize(residuals_text, abs2meta, prom2abs, conns)
elif f_version in (1, 2):
inputs_actual = blob_to_array(inputs_text)
outputs_actual = blob_to_array(outputs_text)
residuals_actual = blob_to_array(residuals_text)
# Does the timestamp make sense?
test.assertTrue(t0 <= timestamp and timestamp <= t1)
test.assertEqual(success, 1)
test.assertEqual(msg, '')
for vartype, actual, expected in (
('inputs', inputs_actual, inputs_expected),
('outputs', outputs_actual, outputs_expected),
('residuals', residuals_actual, residuals_expected),
):
if expected is None:
if f_version >= 3:
test.assertIsNone(actual)
if f_version in (1, 2):
test.assertEqual(actual, np.array(None, dtype=object))
else:
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual[0]), len(expected))
for key, value in expected.items():
# Check to see if the keys in the actual and expected match
test.assertTrue(key in actual[0].dtype.names,
'{} variable not found in actual data '
'from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[0][key], expected[key], tolerance)
def assertSolverIterDataRecorded(test, expected, tolerance, prefix=None):
"""
Expected can be from multiple cases.
"""
with database_cursor(test.filename) as db_cur:
f_version, abs2meta, prom2abs, conns = get_format_version_abs2meta(db_cur)
# iterate through the cases
for coord, (t0, t1), expected_abs_error, expected_rel_error, expected_output, \
expected_solver_residuals in expected:
iter_coord = format_iteration_coordinate(coord, prefix=prefix)
# from the database, get the actual data recorded
db_cur.execute("SELECT * FROM solver_iterations "
"WHERE iteration_coordinate=:iteration_coordinate",
{"iteration_coordinate": iter_coord})
row_actual = db_cur.fetchone()
test.assertTrue(row_actual, 'Solver iterations table does not contain the requested '
'iteration coordinate: "{}"'.format(iter_coord))
counter, global_counter, iteration_coordinate, timestamp, success, msg, \
abs_err, rel_err, input_blob, output_text, residuals_text = row_actual
if f_version >= 3:
output_actual = deserialize(output_text, abs2meta, prom2abs, conns)
residuals_actual = deserialize(residuals_text, abs2meta, prom2abs, conns)
elif f_version in (1, 2):
output_actual = blob_to_array(output_text)
residuals_actual = blob_to_array(residuals_text)
# Does the timestamp make sense?
test.assertTrue(t0 <= timestamp and timestamp <= t1,
'timestamp should be between when the model started and stopped')
test.assertEqual(success, 1)
test.assertEqual(msg, '')
if expected_abs_error:
test.assertTrue(abs_err, 'Expected absolute error but none recorded')
assert_near_equal(abs_err, expected_abs_error, tolerance)
if expected_rel_error:
test.assertTrue(rel_err, 'Expected relative error but none recorded')
assert_near_equal(rel_err, expected_rel_error, tolerance)
for vartype, actual, expected in (
('outputs', output_actual, expected_output),
('residuals', residuals_actual, expected_solver_residuals),
):
if expected is None:
if f_version >= 3:
test.assertIsNone(actual)
if f_version in (1, 2):
test.assertEqual(actual, np.array(None, dtype=object))
else:
# Check to see if the number of values in actual and expected match
test.assertEqual(len(actual[0]), len(expected))
for key, value in expected.items():
# Check to see if the keys in the actual and expected match
test.assertTrue(key in actual[0].dtype.names,
'{} variable not found in actual data '
'from recorder'.format(key))
# Check to see if the values in actual and expected match
assert_near_equal(actual[0][key], expected[key], tolerance)
def assertMetadataRecorded(test, expected_prom2abs, expected_abs2prom):
with database_cursor(test.filename) as db_cur:
db_cur.execute("SELECT format_version, prom2abs, abs2prom FROM metadata")
row = db_cur.fetchone()
format_version_actual = row[0]
format_version_expected = format_version
prom2abs = json.loads(str(row[1]))
abs2prom = json.loads(str(row[2]))
if prom2abs is None:
test.assertIsNone(expected_prom2abs)
else:
for io in ['input', 'output']:
for var in prom2abs[io]:
test.assertEqual(prom2abs[io][var].sort(), expected_prom2abs[io][var].sort())
if abs2prom is None:
test.assertIsNone(expected_abs2prom)
else:
for io in ['input', 'output']:
for var in abs2prom[io]:
test.assertEqual(abs2prom[io][var], expected_abs2prom[io][var])
# this always gets recorded
test.assertEqual(format_version_actual, format_version_expected)
def assertViewerDataRecorded(test, expected):
with database_cursor(test.filename) as db_cur:
db_cur.execute("SELECT format_version FROM metadata")
f_version = db_cur.fetchone()[0]
test.assertTrue(isinstance(f_version, int))
db_cur.execute("SELECT model_viewer_data FROM driver_metadata")
row = db_cur.fetchone()
if expected is None:
test.assertIsNone(row)
return
model_viewer_data = json.loads(row[0])
test.assertTrue(isinstance(model_viewer_data, dict))
# primary keys
if f_version >= 6:
test.assertEqual(set(model_viewer_data.keys()), {
'tree', 'sys_pathnames_list', 'connections_list',
'driver', 'design_vars', 'responses', 'declare_partials_list'
})
else:
test.assertEqual(set(model_viewer_data.keys()), {
'tree', 'sys_pathnames_list', 'connections_list', 'abs2prom',
'driver', 'design_vars', 'responses', 'declare_partials_list'
})
# system pathnames
test.assertTrue(isinstance(model_viewer_data['sys_pathnames_list'], list))
# connections
test.assertTrue(isinstance(model_viewer_data['connections_list'], list))
test.assertEqual(expected['connections_list_length'],
len(model_viewer_data['connections_list']))
cl = model_viewer_data['connections_list']
for c in cl:
test.assertTrue(set(c.keys()).issubset(set(['src', 'tgt', 'cycle_arrows'])))
# model tree
tr = model_viewer_data['tree']
test.assertEqual({'name', 'type', 'subsystem_type', 'children', 'linear_solver',
'nonlinear_solver', 'is_parallel', 'component_type', 'class',
'expressions', 'options', 'linear_solver_options',
'nonlinear_solver_options'},
set(tr.keys()))
test.assertEqual(expected['tree_children_length'],
len(model_viewer_data['tree']['children']))
if f_version < 6:
# abs2prom map
abs2prom = model_viewer_data['abs2prom']
for io in ['input', 'output']:
for var in expected['abs2prom'][io]:
test.assertEqual(abs2prom[io][var], expected['abs2prom'][io][var])
return model_viewer_data
def assertSystemMetadataIdsRecorded(test, ids):
with database_cursor(test.filename) as cur:
for id in ids:
cur.execute("SELECT * FROM system_metadata WHERE id=:id", {"id": id})
row_actual = cur.fetchone()
test.assertTrue(row_actual,
'System metadata table does not contain the '
'requested id: "{}"'.format(id))
def assertSystemIterCoordsRecorded(test, iteration_coordinates):
with database_cursor(test.filename) as cur:
for iteration_coordinate in iteration_coordinates:
cur.execute("SELECT * FROM system_iterations WHERE "
"iteration_coordinate=:iteration_coordinate",
{"iteration_coordinate": iteration_coordinate})
row_actual = cur.fetchone()
test.assertTrue(row_actual,
'System iterations table does not contain the '
'requested iteration coordinate: "{}"'.
format(iteration_coordinate))
|
py | b40b1246bb3abe0cc78185be9608f0f054ab7822 | class Solution {
public:
bool leadsToDestination(int n, vector<vector<int>>& edges, int source, int destination) {
vector<vector<int>> graph(n);
for(const vector<int> & edge : edges) {
graph[edge[0]].push_back(edge[1]);
}
if (!graph[destination].empty()) return false;
vector<int> marks(n, 0);
return Dfs(source, destination, graph, &marks);
}
private:
bool Dfs(int node, int destination, const vector<vector<int>> & graph, vector<int> * marks) {
if (graph[node].empty() && node != destination) return false;
(*marks)[node] = true;
for(int nei : graph[node]) {
if(!(*marks)[nei]) {
if (!Dfs(nei, destination, graph, marks)) return false;
} else {
return false;
}
}
(*marks)[node] = false;
return true;
}
};
|
py | b40b12c6da1e312b9d1cd318ca853032cd86cfb4 | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class VisionaryWaferVisu(Package):
"""Visionary Meta Package"""
homepage = ''
# some random tarball, to make `spack fetch --dependencies visionary-wafer-visu` work
url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz'
# This is only a dummy tarball (see difference between version numbers)
# TODO: as soon as a MetaPackage-concept has been merged, please update this package
version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz')
depends_on("emscripten")
depends_on("py-numpy")
def install(self, spec, prefix):
mkdirp(prefix.etc)
# store a copy of this package.
install(__file__, join_path(prefix.etc, 'visionary-wafer-visu.py'))
# we could create some filesystem view here?
|
py | b40b13480279f5fc62aedd04081d47832e0e825d | from Bio import SeqIO
import numpy as np
#import matplotlib.pyplot as plt
from Bio.Restriction import *
from Bio.Seq import Seq
from Bio.Alphabet.IUPAC import IUPACAmbiguousDNA
import itertools
from Bio.SeqRecord import SeqRecord
import re
import os
import sys
path = "~/Seq/UMGC_IL_030"
os.chdir(os.path.expanduser(path))
folder_name = os.getcwd()
R1_primer = "GTGCCAGCAGCCGCGGTAA" #Read 1 primer sequence
R2_primer = "GGACTACCAGGGTATCTAAT" #Read 2 primer sequence
R1_adapter = "CTGTCTCTTATACACATCTCCGAGCCCACGAGAC"
R2_adapter = "CTGTCTCTTATACACATCTGACGCTGCCGACGA"
data_file_names = os.listdir(folder_name)  # list the files in the data folder
files = []
for i in data_file_names:
if i[-12:] == "MERGED.fastq":
fx = folder_name + "/" + i
files.append(fx)
#Write out text file
save_name = folder_name + "/Primer_corrections_V4R_thresh3.txt"
save_file = open(save_name, "w")
header = ("Sample",'\t',"total reads",'\t',"no mismatch",'\t',"total corrected",'\t',"1",'\t',"2",'\t',"3",'\t',"4",'\t',"5",'\t',"6",'\t',"7",'\t',"8",'\t',"9",'\t',"10",'\t',"11",'\t',"12",'\t',"13",'\t',"14",'\t',"15",'\t',"16",'\t',"17",'\t',"18",'\t',"19",'\t',"20",'\t',"1_A",'\t',"2_A",'\t',"3_A",'\t',"4_A",'\t',"5_A",'\t',"6_A",'\t',"7_A",'\t',"8_A",'\t',"9_A",'\t',"10_A",'\t',"11_A",'\t',"12_A",'\t',"13_A",'\t',"14_A",'\t',"15_A",'\t',"16_A",'\t',"17_A",'\t',"18_A",'\t',"19_A",'\t',"20_A",'\t',"1_T",'\t',"2_T",'\t',"3_T",'\t',"4_T",'\t',"5_T",'\t',"6_T",'\t',"7_T",'\t',"8_T",'\t',"9_T",'\t',"10_T",'\t',"11_T",'\t',"12_T",'\t',"13_T",'\t',"14_T",'\t',"15_T",'\t',"16_T",'\t',"17_T",'\t',"18_T",'\t',"19_T",'\t',"20_T",'\t',"1_G",'\t',"2_G",'\t',"3_G",'\t',"4_G",'\t',"5_G",'\t',"6_G",'\t',"7_G",'\t',"8_G",'\t',"9_G",'\t',"10_G",'\t',"11_G",'\t',"12_G",'\t',"13_G",'\t',"14_G",'\t',"15_G",'\t',"16_G",'\t',"17_G",'\t',"18_G",'\t',"19_G",'\t',"20_G",'\t',"1_C",'\t',"2_C",'\t',"3_C",'\t',"4_C",'\t',"5_C",'\t',"6_C",'\t',"7_C",'\t',"8_C",'\t',"9_C",'\t',"10_C",'\t',"11_C",'\t',"12_C",'\t',"13_C",'\t',"14_C",'\t',"15_C",'\t',"16_C",'\t',"17_C",'\t',"18_C",'\t',"19_C",'\t',"20_C",'\t','\n')
save_file.write(''.join(map(str, header)))
new_tab = '\t'
new_line = '\n'
for q in files:
GG_filename = q
#Count number of records in the file
count = 0
for record in SeqIO.parse(GG_filename, "fastq"):
count += 1
GG_rec = count
print("There were " + str(GG_rec) + " records in the sample")
#Finding V4 forward primer sites
RP = []
R_match_count = 0
V4_R_pos = []
For_primer = []
mismatch_rname = []
mismatch_seq = []
for record in SeqIO.parse(GG_filename, "fastq"):
rname = record.description
rc_record = record.reverse_complement()
#R2_primer = "GGACTAC[ACT][ACG]GGGT[AT]TCTAAT"
V4_R = rc_record.seq.find("GGACTACCAGGGTATCTAAT")
if V4_R == -1:
V4_R_pos.append(V4_R)
mismatch_rname.append(rname)
mismatch_seq.append(rc_record.seq)
else:
V4_R_pos.append(V4_R)
R_match_count +=1
print("Found " + str(R_match_count) + " total records containing a perfect match to the V4_R primer out of " + str(GG_rec) + " total records")
#find positions of corrections
corr_pos = []
corr_base = []
corr_seq = []
corr_name = []
large_num_corr = []
counter = 0
for i in mismatch_seq:
c_pos_item = []
c_base = []
count = 0
for j, item in enumerate(i[0:20]):
if item != "N":
if item == "GGACTACCAGGGTATCTAAT"[j]:
pass
else:
c_pos_item.append(j)
c_base.append(item)
count += 1
if count > 0 and count < 4:
corr_pos.append(c_pos_item)
corr_base.append(c_base)
corr_seq.append(i)
corr_name.append(mismatch_rname[counter])
else:
#print "> 3 corrections"
large_num_corr.append(i)
counter += 1
import itertools
chain_pos = itertools.chain(*corr_pos)
chain_base = itertools.chain(*corr_base)
corr_positions = list(chain_pos)
corr_nucleotide = list(chain_base)
try:
print("Found " + str(len(corr_pos)) + "/" + str(GG_rec) +" total records containing a likely true mismatch (" + str((len(corr_pos))/float(GG_rec)*100) + "%)")
except ZeroDivisionError:
print("none found")
save_file.write(q.split("/")[-1])
save_file.write(new_tab)
save_file.write(str(GG_rec))
save_file.write(new_tab)
save_file.write(str(R_match_count))
save_file.write(new_tab)
save_file.write(str(len(corr_pos)))
save_file.write(new_tab)
# plt.clf()
# if len(corr_pos) > 0:
# #fig_dir = folder_name + "/Figs"
# #os.chdir(fig_dir)
# #Make position histogram - where are these sites?
# fig = plt.figure()
# plt.title("Distribution of primer corrections")
# ax = fig.add_subplot(111)
# x = corr_positions
# numBins = 19
# ax.hist(x,numBins,color='green')
# ax.set_ylabel("Number of corrections")
# ax.set_xlabel('Position')
# #ticks = range(1, (numBins+1))
# #plt.xticks(ticks)
# plt_name = GG_filename[:-6] + "_corrections.jpg"
# plt.savefig(plt_name)
#
#Write out file with correction data
corr_by_pos = []
A_list = []
T_list = []
G_list = []
C_list = []
for i in range(20):
count = 0
count_A = 0
count_T = 0
count_G = 0
count_C = 0
for j, item in enumerate(corr_positions):
if item == i:
count += 1
if corr_nucleotide[j] == 'A':
count_A +=1
elif corr_nucleotide[j] == 'T':
count_T +=1
elif corr_nucleotide[j] == 'G':
count_G +=1
elif corr_nucleotide[j] == 'C':
count_C +=1
corr_by_pos.append(count)
A_list.append(count_A)
T_list.append(count_T)
G_list.append(count_G)
C_list.append(count_C)
for i, item in enumerate(corr_by_pos):
save_file.write(str(item))
save_file.write(new_tab)
for i, item in enumerate(A_list):
save_file.write(str(item))
save_file.write(new_tab)
for i, item in enumerate(T_list):
save_file.write(str(item))
save_file.write(new_tab)
for i, item in enumerate(G_list):
save_file.write(str(item))
save_file.write(new_tab)
for i, item in enumerate(C_list):
save_file.write(str(item))
save_file.write(new_tab)
save_file.write(new_line)
save_file.close()
|
py | b40b13bdd162395b99f12772e3a541bf3af911c1 | """
Eero WiFi router device_tracker for Home Assistant
For instructions and examples, see https://github.com/jrlucier/eero_tracker
"""
import logging
import voluptuous as vol
import datetime
import time
import re
import json
import requests
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker.legacy import DeviceScanner
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.components.device_tracker.const import (
DOMAIN, CONF_SCAN_INTERVAL)
_LOGGER = logging.getLogger(__name__)
CONF_ONLY_MACS_KEY = 'only_macs'
CONF_ONLY_NETWORKS = 'only_networks'
CONF_ONLY_WIRELESS = 'only_wireless'
CONF_SESSION_FILE_NAME = 'session_file_name'
MINIMUM_SCAN_INTERVAL = 25
CACHE_EXPIRY = 3600  # cache account data for an hour
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ONLY_MACS_KEY, default=''): cv.string,
vol.Optional(CONF_ONLY_NETWORKS, default=[]): vol.All(cv.ensure_list, [cv.positive_int]),
vol.Optional(CONF_ONLY_WIRELESS, default=True): cv.boolean,
vol.Optional(CONF_SESSION_FILE_NAME, default='eero.session'): cv.string
})
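# Hypothetical configuration.yaml entry for this platform (illustrative only; the
# platform key is assumed from the repository name and every value below is a
# placeholder). The option names mirror PLATFORM_SCHEMA above, and the usual
# device_tracker options (e.g. the scan interval) can be set alongside them:
#
# device_tracker:
#   - platform: eero_tracker
#     only_macs: "11:22:33:44:55:66, aa:bb:cc:dd:ee:ff"
#     only_networks:
#       - 123456
#     only_wireless: true
#     session_file_name: eero.session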
def get_scanner(hass, config):
"""Validate the configuration and return EeroDeviceScanner."""
_LOGGER.debug(f"Initializing eero_tracker (domain {DOMAIN})")
return EeroDeviceScanner(hass, config[DOMAIN])
class EeroException(Exception):
"""A propagating error for Eero"""
def __init__(self, status, error_message):
super(EeroException, self).__init__()
self.status = status
self.error_message = error_message
class EeroDeviceScanner(DeviceScanner):
"""This class queries a Eero-based router for present devices."""
API_ENDPOINT = 'https://api-user.e2ro.com/2.2/{}'
def __init__(self, hass, config):
"""Initialize the scanner."""
self.__session_file = hass.config.path(config[CONF_SESSION_FILE_NAME])
self.__session = None
# configure any filters (macs or networks)
self.__only_macs = set([x.strip().lower() for x in config[CONF_ONLY_MACS_KEY].split(',') if x != ''])
if len(self.__only_macs) > 0:
_LOGGER.info(f"Including only MAC addresses: {self.__only_macs}")
self.__only_networks = set(config[CONF_ONLY_NETWORKS])
if len(self.__only_networks) > 0:
_LOGGER.info(f"Including only networks: {self.__only_networks}")
self.__only_wireless = config[CONF_ONLY_WIRELESS]
_LOGGER.info(f"Tracking only wireless devices = {self.__only_wireless}")
self.__last_results = []
self.__account = None
self.__account_update_timestamp = None
self.__mac_to_nickname = {}
minimum_interval = datetime.timedelta(seconds=MINIMUM_SCAN_INTERVAL)
self.__scan_interval = config.get(CONF_SCAN_INTERVAL, minimum_interval)
# Prevent users from specifying an interval faster than 25 seconds
if self.__scan_interval < minimum_interval:
_LOGGER.warning(
f"Scan interval {self.__scan_interval} MUST be >= {MINIMUM_SCAN_INTERVAL} seconds to prevent DDoS on eero's servers; limiting to {minimum_interval}.")
self.__scan_interval = minimum_interval
else:
_LOGGER.debug(f"Scan interval = {self.__scan_interval}")
# Grab the session key from the file
try:
_LOGGER.debug(f"Loading eero session key from '{self.__session_file}'")
with open(self.__session_file, 'r') as f:
self.__session = f.read().replace('\n', '')
except IOError:
_LOGGER.error(f"Could not find the eero.session file '{self.__session_file}'")
self.__session = None
def scan_devices(self):
"""Required for the API, handles returning results"""
# Return empty array if the session was never started.
if self.__session is None:
return []
self._update_info()
return self.__last_results
def get_device_name(self, mac):
"""Required for the API. None to indicate we don't know the devices true name"""
return self.__mac_to_nickname.get(mac)
def _update_info(self):
"""Retrieve the latest information from Eero for returning to HA."""
# Cache the account data for an hour. It rarely changes, and caching avoids an
# extra lookup request on every update. The cache is reset on Home Assistant
# restarts, so in an emergency a user can always restart Home Assistant to force an update.
if self.__account_update_timestamp is None or (time.time() - self.__account_update_timestamp) >= CACHE_EXPIRY:
_LOGGER.debug(f"Updating eero account information cache (expires every {CACHE_EXPIRY} seconds)")
self.__account = self._account()
self.__account_update_timestamp = time.time()
self.__mac_to_nickname = {}
self.__last_results = []
for network in self.__account['networks']['data']:
match = re.search(r'/networks/(\d+)', network['url'])
network_id = int(match.group(1))
# if specific networks should be filtered, skip any not in the filter
if len(self.__only_networks) > 0 and network_id not in self.__only_networks:
_LOGGER.debug(f"Ignoring network {network_id} devices not in only_networks: {self.__only_networks}")
continue
# load all devices for this network, but only track connected wireless devices
devices = self._devices(network['url'])
self._update_tracked_devices(network_id, devices)
return
def _update_tracked_devices(self, network_id, devices_json_obj):
for device in devices_json_obj:
# skip devices that are not connected
if not device['connected']:
continue
# if only wireless devices are tracked, then skip if not wireless
if self.__only_wireless and not device['wireless']:
continue
# if mac addressess are whitelisted with only_macs, skip if not on the list
mac = device['mac']
if len(self.__only_macs) > 0 and mac not in self.__only_macs:
continue
# create mapping of mac addresses to nicknames for lookup by device_name (if a nickname is assigned)
nickname = device['nickname']
# default nickname to host name if missing
if not nickname or nickname == 'None':
nickname = device['hostname']
if nickname:
self.__mac_to_nickname[mac] = nickname
_LOGGER.debug(f"Network {network_id} device found: nickname={nickname}; host={device['hostname']}; mac={mac}")
self.__last_results.append(mac)
@property
def _cookie_dict(self):
"""Creates a session cookie"""
return dict(s=self.__session)
def _refreshed(self, func):
"""Handles if we need to refresh the logged in session or not"""
try:
return func()
except EeroException as exception:
if exception.status == 401 and exception.error_message == 'error.session.refresh':
self._login_refresh()
return func()
else:
_LOGGER.error(f"Eero connection failure: {exception.error_message}")
def _login_refresh(self):
"""Refresh the Eero session"""
response = self._post_req('login/refresh', cookies=self._cookie_dict)
new_session = response.get('user_token')
if not new_session:
_LOGGER.error(f"Failed updating eero session key! {response}")
return
_LOGGER.debug(f"Updating {self.__session_file} with new session key")
try:
# update in-memory session first, in case there is any failure in writing to the
# session file, at least this tracker will continue working until next HA restart
self.__session = new_session
# TODO: ideally write to a temp file, and if successful, then move to overwrite
# the existing session file
with open(self.__session_file, 'w+') as f:
f.write(new_session)
except IOError:
_LOGGER.error(f"Could not update eero session key in {self.__session_file}")
def _account(self):
return self._refreshed(lambda: self._get_req('account', cookies=self._cookie_dict))
@staticmethod
def _id_from_url(id_or_url):
"""Handles grabbing the Eero ID from the URL"""
match = re.search('^[0-9]+$', id_or_url)
if match:
return match.group(0)
match = re.search(r'\/([0-9]+)$', id_or_url)
if match:
return match.group(1)
def _devices(self, network_id):
"""Gets the list of devices from Eero"""
return self._refreshed(lambda: self._get_req('networks/{}/devices'.format(self._id_from_url(network_id)),
cookies=self._cookie_dict))
@staticmethod
def _parse_response(response):
"""Basic response handler"""
data = json.loads(response.text)
response_code = data['meta']['code']
if response_code not in (200, 201):
raise EeroException(response_code, data['meta'].get('error', ""))
return data.get('data', "")
def _post_req(self, action, **kwargs):
"""POST a request"""
response = requests.post(self.API_ENDPOINT.format(action), **kwargs)
return self._parse_response(response)
def _get_req(self, action, **kwargs):
"""GET a request"""
response = requests.get(self.API_ENDPOINT.format(action), **kwargs)
return self._parse_response(response)
|
py | b40b1517b42a93475134b60b8995b590495225b1 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.compat import octets
class OctetsTestCase(BaseTestCase):
if sys.version_info[0] > 2:
def test_ints2octs(self):
assert [1, 2, 3] == list(octets.ints2octs([1, 2, 3]))
def test_ints2octs_empty(self):
assert not octets.ints2octs([])
def test_int2oct(self):
assert [12] == list(octets.int2oct(12))
def test_octs2ints(self):
assert [1, 2, 3] == list(octets.octs2ints(bytes([1, 2, 3])))
def test_octs2ints_empty(self):
assert not octets.octs2ints(bytes([]))
def test_oct2int(self):
assert 12 == octets.oct2int(bytes([12]))[0]
def test_str2octs(self):
assert bytes([1, 2, 3]) == octets.str2octs('\x01\x02\x03')
def test_str2octs_empty(self):
assert not octets.str2octs('')
def test_octs2str(self):
assert '\x01\x02\x03' == octets.octs2str(bytes([1, 2, 3]))
def test_octs2str_empty(self):
assert not octets.octs2str(bytes([]))
def test_isOctetsType(self):
assert octets.isOctetsType('abc') == False
assert octets.isOctetsType(123) == False
assert octets.isOctetsType(bytes()) == True
def test_isStringType(self):
assert octets.isStringType('abc') == True
assert octets.isStringType(123) == False
assert octets.isStringType(bytes()) == False
def test_ensureString(self):
assert 'abc'.encode() == octets.ensureString('abc'.encode())
assert bytes([1, 2, 3]) == octets.ensureString([1, 2, 3])
else:
def test_ints2octs(self):
assert '\x01\x02\x03' == octets.ints2octs([1, 2, 3])
def test_ints2octs_empty(self):
assert not octets.ints2octs([])
def test_int2oct(self):
assert '\x0c' == octets.int2oct(12)
def test_octs2ints(self):
assert [1, 2, 3] == octets.octs2ints('\x01\x02\x03')
def test_octs2ints_empty(self):
assert not octets.octs2ints('')
def test_oct2int(self):
assert 12 == octets.oct2int('\x0c')
def test_str2octs(self):
assert '\x01\x02\x03' == octets.str2octs('\x01\x02\x03')
def test_str2octs_empty(self):
assert not octets.str2octs('')
def test_octs2str(self):
assert '\x01\x02\x03' == octets.octs2str('\x01\x02\x03')
def test_octs2str_empty(self):
assert not octets.octs2str('')
def test_isOctetsType(self):
assert octets.isOctetsType('abc') == True
assert octets.isOctetsType(123) == False
assert octets.isOctetsType(unicode('abc')) == False
def test_isStringType(self):
assert octets.isStringType('abc') == True
assert octets.isStringType(123) == False
assert octets.isStringType(unicode('abc')) == True
def test_ensureString(self):
assert 'abc' == octets.ensureString('abc')
assert '123' == octets.ensureString(123)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
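# Illustrative usage of the compat helpers exercised by these tests (Python 3
# behaviour shown; the Python 2 branch above returns str instead of bytes):
#
#   from pyasn1.compat import octets
#   octets.ints2octs([1, 2, 3])          # b'\x01\x02\x03'
#   list(octets.octs2ints(b'\x01\x02'))  # [1, 2]
#   octets.str2octs('abc')               # b'abc'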
|
py | b40b152450f38be98fa1e48c869f12d022c77b4d | from office365.sharepoint.changes.change import Change
class ChangeFolder(Change):
"""Specifies a change on a folder not contained in a list or document library."""
@property
def unique_id(self):
"""Identifier of the folder that changed."""
return self.properties.get("UniqueId", None)
@property
def web_id(self):
"""Identifier of the site (web) containing the changed folder."""
return self.properties.get("WebId", None)
|
py | b40b1558a9aa57bad311c7e28bca41b05079ec96 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
import re
from azure.storage.blob import BlockBlobService
from azure.storage.file import FileService
from devtools_testutils import AzureMgmtTestCase, StorageAccountPreparer
from devtools_testutils import ResourceGroupPreparer
from msrestazure.azure_exceptions import CloudError
import azure.mgmt.batchai.models as models
from azure.mgmt.batchai import BatchAIManagementClient
from . import helpers
class JobTestCase(AzureMgmtTestCase):
def setUp(self):
super(JobTestCase, self).setUp()
self.client = helpers.create_batchai_client(self) # type: BatchAIManagementClient
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_job_creation_and_deletion(self, resource_group, location, cluster, storage_account, storage_account_key):
"""Tests simple scenario for a job - submit, check results, delete."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1,
'echo hi | tee {0}/hi.txt'.format(helpers.JOB_OUTPUT_DIRECTORY_PATH_ENV),
container=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='ubuntu'))
) # type: models.Job
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name, helpers.MINUTE),
models.ExecutionState.succeeded)
# Check standard job output
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'hi\n', u'stderr.txt': u''})
# Check job's output
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.JOB_OUTPUT_DIRECTORY_ID,
{u'hi.txt': u'hi\n'})
# Check that we can access the output files directly in storage using path segment returned by the server
helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
job.job_output_directory_path_segment + '/' + helpers.STDOUTERR_FOLDER_NAME,
'stdout.txt', u'hi\n')
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_running_job_deletion(self, resource_group, location, cluster):
"""Tests deletion of a running job."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1,
'sleep 600')
self.assertEqual(
helpers.wait_for_job_start_running(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.running)
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_running_job_termination(self, resource_group, location, cluster):
"""Tests termination of a running job."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'longrunning', 1,
'sleep 600')
self.assertEqual(
helpers.wait_for_job_start_running(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.running)
self.client.jobs.terminate(resource_group.name, job.name).result()
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name, helpers.MINUTE),
models.ExecutionState.failed)
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer(target_nodes=0, wait=False)
def test_queued_job_termination(self, resource_group, location, cluster):
"""Tests termination of a job in queued state."""
# Create a job which will be in queued state because the cluster has no compute nodes.
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1, 'true')
self.client.jobs.terminate(resource_group.name, job.name).result()
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name, helpers.MINUTE),
models.ExecutionState.failed)
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_completed_job_termination(self, resource_group, location, cluster):
"""Tests termination of completed job."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1, 'true')
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name, helpers.MINUTE),
models.ExecutionState.succeeded)
# termination of completed job is NOP and must not change the execution state.
self.client.jobs.terminate(resource_group.name, job.name).result()
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name, helpers.MINUTE),
models.ExecutionState.succeeded)
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_failed_job_reporting(self, resource_group, location, cluster):
"""Tests if job failure is reported correctly."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1,
'false')
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.failed)
job = self.client.jobs.get(resource_group.name, job.name)
self.assertEqual(job.execution_info.exit_code, 1)
self.assertEqual(len(job.execution_info.errors), 1)
self.assertEqual(job.execution_info.errors[0].code, 'JobFailed')
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_job_preparation_host(self, resource_group, location, cluster):
"""Tests job preparation execution for a job running on a host."""
# create a job with job preparation which populates input data in $AZ_BATCHAI_INPUT_INPUT/hi.txt
job = helpers.create_custom_job(
self.client, resource_group.name, location, cluster.id, 'job', 1,
'cat $AZ_BATCHAI_INPUT_INPUT/hi.txt',
'mkdir -p $AZ_BATCHAI_INPUT_INPUT && echo hello | tee $AZ_BATCHAI_INPUT_INPUT/hi.txt')
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'hello\n',
u'stderr.txt': u'',
u'stdout-job_prep.txt': u'hello\n',
u'stderr-job_prep.txt': u''})
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_job_preparation_container(self, resource_group, location, cluster):
"""Tests job preparation execution for a job running in a container."""
# create a job with job preparation which populates input data in $AZ_BATCHAI_INPUT_INPUT/hi.txt
job = helpers.create_custom_job(
self.client, resource_group.name, location, cluster.id, 'job', 1,
'cat $AZ_BATCHAI_INPUT_INPUT/hi.txt',
'mkdir -p $AZ_BATCHAI_INPUT_INPUT && echo hello | tee $AZ_BATCHAI_INPUT_INPUT/hi.txt',
container=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='ubuntu')))
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'hello\n',
u'stderr.txt': u'',
u'stdout-job_prep.txt': u'hello\n',
u'stderr-job_prep.txt': u''})
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_job_host_preparation_failure_reporting(self, resource_group, location, cluster):
"""Tests if job preparation failure is reported correctly."""
# create a job with failing job preparation
job = helpers.create_custom_job(
self.client, resource_group.name, location, cluster.id, 'job', 1, 'true', 'false')
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.failed)
job = self.client.jobs.get(resource_group.name, job.name)
self.assertEqual(job.execution_info.exit_code, 1)
self.assertEqual(len(job.execution_info.errors), 1)
self.assertEqual(job.execution_info.errors[0].code, 'JobPreparationFailed')
print(job.serialize())
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer()
def test_job_container_preparation_failure_reporting(self, resource_group, location, cluster):
"""Tests if job preparation failure is reported correctly."""
# create a job with failing job preparation
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 1, 'true',
'false',
container=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='ubuntu')))
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.failed)
job = self.client.jobs.get(resource_group.name, job.name)
self.assertEqual(job.execution_info.exit_code, 1)
self.assertEqual(len(job.execution_info.errors), 1)
self.assertEqual(job.execution_info.errors[0].code, 'JobPreparationFailed')
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer(target_nodes=2)
def test_password_less_ssh(self, resource_group, location, cluster):
"""Tests if password-less ssh is configured on hosts."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 2,
'ssh 10.0.0.4 echo done && ssh 10.0.0.5 echo done')
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
job = self.client.jobs.get(resource_group.name, job.name)
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'done\ndone\n',
u'stderr.txt': re.compile('Permanently added.*')})
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer(target_nodes=2)
def test_password_less_ssh_in_container(self, resource_group, location, cluster):
"""Tests if password-less ssh is configured in containers."""
job = helpers.create_custom_job(self.client, resource_group.name, location, cluster.id, 'job', 2,
'ssh 10.0.0.5 echo done && ssh 10.0.0.5 echo done',
container=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='ubuntu')))
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
job = self.client.jobs.get(resource_group.name, job.name)
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'done\ndone\n',
u'stderr.txt': re.compile('Permanently added.*')})
self.client.jobs.delete(resource_group.name, job.name).result()
self.assertRaises(CloudError, lambda: self.client.jobs.get(resource_group.name, job.name))
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer(target_nodes=1)
def test_job_level_mounting(self, resource_group, location, cluster, storage_account, storage_account_key):
"""Tests if it's possible to mount external file systems for a job."""
job_name = 'job'
# Create file share and container to mount on the job level
if storage_account.name != helpers.FAKE_STORAGE.name:
files = FileService(storage_account.name, storage_account_key)
files.create_share('jobshare', fail_on_exist=False)
blobs = BlockBlobService(storage_account.name, storage_account_key)
blobs.create_container('jobcontainer', fail_on_exist=False)
job = self.client.jobs.create(
resource_group.name,
job_name,
parameters=models.JobCreateParameters(
location=location,
cluster=models.ResourceId(id=cluster.id),
node_count=1,
mount_volumes=models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=storage_account.name,
azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
storage_account.name, 'jobshare'),
relative_mount_path='job_afs',
credentials=models.AzureStorageCredentialsInfo(
account_key=storage_account_key
),
)
],
azure_blob_file_systems=[
models.AzureBlobFileSystemReference(
account_name=storage_account.name,
container_name='jobcontainer',
relative_mount_path='job_bfs',
credentials=models.AzureStorageCredentialsInfo(
account_key=storage_account_key
),
)
]
),
# Put standard output on cluster level AFS to check that the job has access to it.
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
# Create two output directories on job level AFS and blobfuse.
output_directories=[
models.OutputDirectory(id='OUTPUT1', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_afs'),
models.OutputDirectory(id='OUTPUT2', path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/job_bfs')
],
# Check that the job preparation has access to job level file systems.
job_preparation=models.JobPreparation(
command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/prep_afs.txt; '
'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/prep_bfs.txt; '
'echo done'
),
# Check that the job itself has access to the job level file systems.
custom_toolkit_settings=models.CustomToolkitSettings(
command_line='echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/job_afs.txt; '
'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/job_bfs.txt; '
'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT1/afs; '
'echo afs > $AZ_BATCHAI_OUTPUT_OUTPUT1/afs/job_afs.txt; '
'mkdir $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs; '
'echo bfs > $AZ_BATCHAI_OUTPUT_OUTPUT2/bfs/job_bfs.txt; '
'echo done'
)
)
).result()
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
job = self.client.jobs.get(resource_group.name, job.name)
# Assert job and job prep standard output is populated on cluster level filesystem
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'done\n', u'stderr.txt': u'',
u'stdout-job_prep.txt': u'done\n', u'stderr-job_prep.txt': u''})
# Assert files are generated on job level AFS
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
{u'job_afs.txt': u'afs\n', u'prep_afs.txt': u'afs\n', u'afs': None})
# Assert files are generated on job level blobfuse
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
{u'job_bfs.txt': u'bfs\n', u'prep_bfs.txt': u'bfs\n', u'bfs': None})
# Assert subfolders are available via API
helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT1',
'afs', {u'job_afs.txt': u'afs\n'})
helpers.assert_job_files_in_path_are(self, self.client, resource_group.name, job.name, 'OUTPUT2',
'bfs', {u'job_bfs.txt': u'bfs\n'})
# Assert that we can access the output files created on job level mount volumes directly in storage using path
# segment returned by the server.
if storage_account.name != helpers.FAKE_STORAGE.name:
files = FileService(storage_account.name, storage_account_key)
self.assertTrue(
files.exists('jobshare', job.job_output_directory_path_segment +
'/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME, 'job_afs.txt'))
blobs = BlockBlobService(storage_account.name, storage_account_key)
self.assertTrue(
blobs.exists('jobcontainer', job.job_output_directory_path_segment +
'/' + helpers.OUTPUT_DIRECTORIES_FOLDER_NAME + '/job_bfs.txt'))
# After the job is done the filesystems should be unmounted automatically, check this by submitting a new job.
checker = self.client.jobs.create(
resource_group.name,
'checker',
parameters=models.JobCreateParameters(
location=location,
cluster=models.ResourceId(id=cluster.id),
node_count=1,
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
custom_toolkit_settings=models.CustomToolkitSettings(
command_line='echo job; df | grep -E "job_bfs|job_afs"'
)
)
).result()
# Check the job failed because there are not job level mount volumes anymore
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, checker.name,
helpers.MINUTE),
models.ExecutionState.failed)
# Check that the cluster level AFS was still mounted
helpers.assert_job_files_are(self, self.client, resource_group.name, checker.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'job\n', u'stderr.txt': u''})
@ResourceGroupPreparer(location=helpers.LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=helpers.LOCATION, playback_fake_resource=helpers.FAKE_STORAGE)
@helpers.ClusterPreparer(target_nodes=1)
def test_job_environment_variables_and_secrets(self, resource_group, location, cluster):
"""Tests if it's possible to mount external file systems for a job."""
job_name = 'job'
job = self.client.jobs.create(
resource_group.name,
job_name,
parameters=models.JobCreateParameters(
location=location,
cluster=models.ResourceId(id=cluster.id),
node_count=1,
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(helpers.AZURE_FILES_MOUNTING_PATH),
environment_variables=[
models.EnvironmentVariable(name='VARIABLE', value='VALUE')
],
secrets=[
models.EnvironmentVariableWithSecretValue(name='SECRET_VARIABLE', value='SECRET')
],
# Check that the job preparation has access to env variables and secrets.
job_preparation=models.JobPreparation(
command_line='echo $VARIABLE $SECRET_VARIABLE'
),
# Check that the job has access to env variables and secrets.
custom_toolkit_settings=models.CustomToolkitSettings(
command_line='echo $VARIABLE $SECRET_VARIABLE'
)
)
).result() # type: models.Job
self.assertEqual(
helpers.wait_for_job_completion(self.is_live, self.client, resource_group.name, job.name,
helpers.MINUTE),
models.ExecutionState.succeeded)
# Check that environment variables are reported by the server.
self.assertEqual(len(job.environment_variables), 1)
self.assertEqual(job.environment_variables[0].name, 'VARIABLE')
self.assertEqual(job.environment_variables[0].value, 'VALUE')
# Check that secrets are reported back by server, but value is not reported.
self.assertEqual(len(job.secrets), 1)
self.assertEqual(job.secrets[0].name, 'SECRET_VARIABLE')
self.assertIsNone(job.secrets[0].value)
# Check that job and job prep had access to the env variables and secrets.
helpers.assert_job_files_are(self, self.client, resource_group.name, job.name,
helpers.STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'VALUE SECRET\n', u'stderr.txt': u'',
u'stdout-job_prep.txt': u'VALUE SECRET\n', u'stderr-job_prep.txt': u''})
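# Minimal illustrative poller (not used by the tests above), in the spirit of
# helpers.wait_for_job_completion. It assumes the Job model exposes an
# `execution_state` attribute and relies only on client.jobs.get(), which the
# tests above already call.
def _wait_for_terminal_state(client, resource_group_name, job_name,
                             timeout_sec=900, poll_sec=15):
    import time
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        state = client.jobs.get(resource_group_name, job_name).execution_state
        if state in (models.ExecutionState.succeeded, models.ExecutionState.failed):
            return state
        time.sleep(poll_sec)
    raise RuntimeError('Job {0} did not reach a terminal state within {1} seconds'.format(
        job_name, timeout_sec))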
|
py | b40b159a5203c9a434edcecb8fc4a36a6f5247b4 | import sys
sys.path.insert(0, '../spymanager')
sys.path.insert(0, '../')
from tests import create_database_collection
from src.spy import SpyManager
# Database settings
DATABASE_NAME = 'spies_database'
COLLECTION_NAME = 'subscriptions'
subscriptions_collection = create_database_collection(DATABASE_NAME, COLLECTION_NAME)
# User to test
USERNAME = 'pinheirofellipe'
CHAT_ID = 123456
# Spy actions
spy_manager = SpyManager(subscriptions_collection)
# Remove the spy first if it already exists
spy_manager.remove(USERNAME)
# Adding bot user
spy_manager.add(USERNAME, CHAT_ID)
all_spies = spy_manager.all()
assert len(all_spies) == 1
# Get created spy
spy = spy_manager.get(USERNAME)
assert spy.username == USERNAME
assert spy.exists() is True
# Adding groups
new_group = 'devs'
spy.add_group(new_group)
new_group = 'sports'
spy.add_group(new_group)
assert len(spy.groups) == 2
# Adding user to group
member_mazulo = 'mazulo_'
member_pinheiro = 'pinheirofellipe'
group_to_add = 'devs'
spy.add_members_to_group([member_mazulo, member_pinheiro], group_to_add)
assert len(spy.members_from_group('devs')) == 2
# Remove group
spy.remove_group('sports')
assert len(spy.groups) == 1
# Removing member
spy.remove_member_from_group('mazulo_', 'devs')
assert len(spy.members_from_group('devs')) == 1
print('Well done!')
|