| content (string, length 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, length 7-104) | path (string, length 4-230) | size (int64, 7-928k) | lang (1 class) |
|---|---|---|---|---|---|---|---|---|
from setuptools import setup, find_packages
setup(
name='simplefb',
version='0.2.0a1',
description='A simple facebook graph api and auth Mixins',
url='https://github.com/fm100/simplefb',
author='Freddie Park',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='facebook graph api auth',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
)
| 31.736842 | 77 | 0.631012 | ["MIT"] | fm100/simplefb | setup.py | 1,206 | Python |
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Multi-threaded sample to run an RMNet & SSDMobileNet v2 that will
# detect only person, bike and vehicle (change the output parsing
# for more classes)
#
# Example usage:
# RMNet: python3.6 multi_inputs.py -n "RMNet" -l "data" -o "detection_out"
# -d 1024 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
# SSDMobileNet: python3.6 multi_inputs.py -n "SSDMobileNet" -l "image_tensor"
# -o "DetectionOutput" -d 300 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
from __future__ import print_function
from argparse import ArgumentParser, SUPPRESS
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time, sleep
import sys
import os
import cv2
import grpc
import threading
import logging as log
from tensorflow import make_tensor_proto, make_ndarray
# global data (shared between threads & main)
CLASSES = ["None", "Pedestrian", "Vehicle", "Bike", "Other"]
COLORS = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 128)]
SRC_TYPE = ["Camera", "Video"]
exit_ok = False # manage thread loop
CAM_WIDTH = 640 # camera width
CAM_HEIGHT = 480 # camera height
CAM_FPS = 30 # camera speed
CONFIDENCE_THRESHOLD = 0.75 # detection confidence
#####################################################################################
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS,
help='Show this help message and exit.')
args.add_argument('-n', '--network_name', required=True,
type=str, help='Network name')
args.add_argument('-l', '--input_layer', required=True,
type=str, help='Input layer name')
args.add_argument('-o', '--output_layer', required=True,
type=str, help='Output layer name')
args.add_argument('-d', '--frame_size', required=True,
type=int, help='Input frame width and height that matches used model')
args.add_argument('-c', '--num_cameras', help='Number of cameras to be used',
required=False, type=int, default=1)
args.add_argument('-f', '--file', help='Path to the video file',
required=False, type=str)
args.add_argument('-i', '--ip', help='ip address of the ovms', required=True)
args.add_argument('-p', '--port', help='port of the ovms', required=True)
return parser
# Decoding idea based on the link below. Not very accurate, so please implement your own.
# https://github.com/opencv/open_model_zoo/blob/master/intel_models/\
# person-vehicle-bike-detection-crossroad-0078/\
# description/person-vehicle-bike-detection-crossroad-0078.md
def parse_output(thr_id, res, frame):
for batch, data in enumerate(res):
pred = data[0]
for values in enumerate(pred):
# tuple
index = values[0]
l_pred = values[1]
# actual predictions
img_id = l_pred[0]
label = l_pred[1]
conf = l_pred[2]
x_min = l_pred[3]
y_min = l_pred[4]
x_max = l_pred[5]
y_max = l_pred[6]
# preventing any wrong array indexing (for RMNet)
if label > 4:
# Unsupported class label detected. Change to `other`.
label = 4
# Do you want confidence level to be passed from command line?
if img_id != -1 and conf >= CONFIDENCE_THRESHOLD:
# draw the bounding boxes on the frame
height, width = frame.shape[:2]
cv2.rectangle(frame, (int(width * x_min), int(height * y_min)),
(int(width * x_max), int(height * y_max)), COLORS[int(label)], 2)
cv2.putText(frame, str(CLASSES[int(label)]), (int(width * x_min)-10,
int(height * y_min)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
COLORS[int(label)], 2)
return frame
# This is common for both the camera & video files
def thread_function(thr_id, network_name, input_layer, output_layer, input_dimension,
ip, port, disp_buf, src_type, src_name):
if src_type == "Camera":
# UVC camera init - camera threads always come first and we use it
# to generate the camera indexes
cam = cv2.VideoCapture(thr_id)
if not (cam.isOpened()):
log.error("Failed to open the UVC camera {}".format(thr_id))
return
cam.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)
# not all UVC cameras honor below request
cam.set(cv2.CAP_PROP_FPS, CAM_FPS)
# If your camera sends other than MJPEG, change below
cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
elif src_type == "Video":
# Assumption: src_name will be valid
cam = cv2.VideoCapture(src_name)
# inference stats
fps = 0 # camera fps
inf_fps = 0 # inference fps
dropped_fps = 0 # dropped frame fps
cam_start_time = time()
# ovms connection
channel = grpc.insecure_channel("{}:{}".format(ip, port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
    # Note: please keep the same model name used when launching the OVMS docker container
request.model_spec.name = network_name
global exit_ok
while exit_ok == False:
ret, frame = cam.read()
if src_type == "Video":
# restart the video file when it reaches the end
if not ret:
cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
continue
# normalize the video frame dimension to that of the camera
else:
            # to maintain the frame inferencing parity with the cameras, let's sleep
# here to maintain cam_fps speed
sleep((1000 / CAM_FPS) / 1000)
# enable below line to keep video file & camera output window dimensions the same
# frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT))
fps = fps + 1
if (time() - cam_start_time) * 1000 >= 1000:
log.warning('{}{} fps: {}, Inf fps: {}, dropped fps: {}'
.format(src_type, thr_id, fps, inf_fps, dropped_fps))
fps = 0
inf_fps = 0
dropped_fps = 0
cam_start_time = time()
# resize the frame to what network input layer expects it to be
image = cv2.resize(frame, (input_dimension, input_dimension))
image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)
image = image.astype('float32')
inf_time = time()
# send the input as protobuf
request.inputs[input_layer].CopyFrom(
make_tensor_proto(image, shape=None))
try:
result = stub.Predict(request, 10.0)
except Exception as e:
log.error('Caught exception {}'.format(e))
cam.release()
return
duration = time() - inf_time
# decode the received output as protobuf
res = make_ndarray(result.outputs[output_layer])
if not res.any():
log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))
dropped_fps = dropped_fps + 1
disp_buf[thr_id] = frame
else:
log.debug('Predictions came back fine')
inf_fps = inf_fps + 1
disp_buf[thr_id] = parse_output(thr_id, res, frame)
# while exit_ok == False
cam.release()
log.warning('Exiting thread {}'.format(thr_id))
#####################################################################################
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
num_cam = args.num_cameras if (args.num_cameras) else 0
vid_src = args.file
network_name = args.network_name
input_layer = args.input_layer
output_layer = args.output_layer
input_dimension = args.frame_size
ip_addr = args.ip
port_no = args.port
if not args.file and not args.num_cameras:
log.error('Please supply either the camera or the video file. Try -f for options')
return
if not ip_addr or not port_no:
log.error('Please supply valid IP and/or port number of OVMS server')
return
video_files = []
if vid_src:
if os.path.isdir(vid_src):
for r, d, f in os.walk(vid_src):
for f_ in f:
# only mp4 files supported as of now
if '.mp4' in f_:
                        video_files.append(os.path.join(r, f_))
elif os.path.isfile(vid_src):
if '.mp4' in vid_src:
video_files.append(vid_src)
# thread management
thr = [None] * (num_cam + len(video_files))
# display buffers shared between camera threads
disp_buf = {}
# Known issue: Depending on the USB enumeration, camera nodes need not be
    # in sequence. Please pass the device node info through a file or command line
# if it happens in your system
for i in range(num_cam):
disp_buf[i] = None
thr[i] = threading.Thread(target=thread_function,
args=(i, network_name, input_layer, output_layer, input_dimension,
ip_addr, port_no, disp_buf, SRC_TYPE[0], None))
thr[i].start()
for i in range(num_cam, num_cam + len(video_files)):
disp_buf[i] = None
thr[i] = threading.Thread(target=thread_function,
args=(i, network_name, input_layer, output_layer, input_dimension,
ip_addr, port_no, disp_buf, SRC_TYPE[1], video_files[i - num_cam]))
thr[i].start()
    # For whatever reason, cv2.imshow() doesn't work from threads. Hence we shove the
    # inferred data to the main thread to display.
global exit_ok
while exit_ok == False:
for i in range(num_cam + len(video_files)):
if disp_buf[i] is not None:
cv2.imshow('Predictions {}'.format(i), disp_buf[i])
disp_buf[i] = None
# exit the program if 'q' is pressed on any window
if cv2.waitKey(1) == ord('q'):
exit_ok = True
break
# wait for all the threads to join
    for i in range(num_cam + len(video_files)):
thr[i].join()
# close all open windows
cv2.destroyAllWindows()
log.warning('Good Bye!')
if __name__ == '__main__':
sys.exit(main() or 0)
| 36.43299 | 91 | 0.658083 | ["Apache-2.0"] | BrightTux/model_server | example_client/multi_inputs.py | 10,602 | Python |
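
The per-frame gRPC round trip buried inside `thread_function` above can be hard to follow among the threading and FPS bookkeeping. Below is a minimal single-frame sketch of the same request/response flow, using only calls that already appear in the sample and the SSDMobileNet parameters from its usage comment; the endpoint, model name, and layer names are assumptions that must match your own OVMS deployment.

```python
import cv2
import grpc
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

# Assumed deployment parameters -- adjust to the model served by your OVMS instance.
MODEL_NAME = "SSDMobileNet"      # must match the name used when launching OVMS
INPUT_LAYER = "image_tensor"
OUTPUT_LAYER = "DetectionOutput"
INPUT_DIM = 300


def infer_single_frame(frame, ip="127.0.0.1", port=9001):
    """Send one BGR frame to OVMS and return the raw detection tensor."""
    channel = grpc.insecure_channel("{}:{}".format(ip, port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # NCHW float32 blob, prepared exactly as the threaded sample does it.
    blob = cv2.resize(frame, (INPUT_DIM, INPUT_DIM))
    blob = blob.transpose(2, 0, 1).reshape(1, 3, INPUT_DIM, INPUT_DIM).astype("float32")

    request = predict_pb2.PredictRequest()
    request.model_spec.name = MODEL_NAME
    request.inputs[INPUT_LAYER].CopyFrom(make_tensor_proto(blob, shape=None))

    result = stub.Predict(request, 10.0)               # 10 s timeout
    return make_ndarray(result.outputs[OUTPUT_LAYER])  # typically shaped [1, 1, N, 7]
```

The returned array is what `parse_output` above iterates over to draw bounding boxes.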
from sys import maxsize
class Contact:
    def __init__(self, firstname=None, lastname=None, homephone=None,
                 mobilephone=None, workphone=None, secondaryphone=None, id=None):
        self.firstname = firstname
        self.lastname = lastname
        self.homephone = homephone
        self.workphone = workphone
        self.mobilephone = mobilephone
        self.secondaryphone = secondaryphone
        self.id = id
def __repr__(self):
return "%s:%s %s" % (self.id, self.firstname, self.lastname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id==other.id)\
and self.firstname==other.firstname and self.lastname==other.lastname
    def id_or_max(self):
        if self.id:
            return int(self.id)
        else:
            return maxsize
| 31.230769 | 134 | 0.64532 | ["Apache-2.0"] | Valeryiar/python_training | model/contact.py | 812 | Python |
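
The `__eq__`/`id_or_max` pair above exists so test code can compare contact lists regardless of database ordering and of whether ids are assigned yet. A small illustrative sketch follows; the `assert_contact_lists_equal` helper is hypothetical, and the import assumes the repository root is on `PYTHONPATH`.

```python
from model.contact import Contact


def assert_contact_lists_equal(old_list, new_list):
    # Sort both sides by numeric id (contacts without an id sort last via maxsize),
    # then rely on Contact.__eq__, which ignores a missing id.
    assert sorted(old_list, key=Contact.id_or_max) == \
           sorted(new_list, key=Contact.id_or_max)


# Example: a just-created contact (no id yet) still matches the stored row.
assert Contact(firstname="Ada", lastname="Lovelace") == \
       Contact(id="42", firstname="Ada", lastname="Lovelace")
```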
import asyncio
import logging
import types
import typing
import enum
from dataclasses import dataclass
from ..types import ASGIApp, Message
from ..exceptions import LifespanUnsupported, LifespanFailure, UnexpectedMessage
class LifespanCycleState(enum.Enum):
"""
The state of the ASGI `lifespan` connection.
* **CONNECTING** - Initial state. The ASGI application instance will be run with
the connection scope containing the `lifespan` type.
* **STARTUP** - The lifespan startup event has been pushed to the queue to be
received by the application.
* **SHUTDOWN** - The lifespan shutdown event has been pushed to the queue to be
received by the application.
* **FAILED** - A lifespan failure has been detected, and the connection will be
closed with an error.
* **UNSUPPORTED** - An application attempted to send a message before receiving
      the lifespan startup event. If the lifespan argument is "on", then the connection
will be closed with an error.
"""
CONNECTING = enum.auto()
STARTUP = enum.auto()
SHUTDOWN = enum.auto()
FAILED = enum.auto()
UNSUPPORTED = enum.auto()
@dataclass
class LifespanCycle:
"""
Manages the application cycle for an ASGI `lifespan` connection.
* **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
specification. This will usually be an ASGI framework application instance.
* **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
and `off`. Default is `auto`.
* **state** - An enumerated `LifespanCycleState` type that indicates the state of
the ASGI connection.
* **app_queue** - An asyncio queue (FIFO) containing messages to be received by the
application.
* **startup_event** - An asyncio event object used to control the application
startup flow.
* **shutdown_event** - An asyncio event object used to control the application
shutdown flow.
* **exception** - An exception raised while handling the ASGI event. This may or
may not be raised depending on the state.
"""
app: ASGIApp
lifespan: str
state: LifespanCycleState = LifespanCycleState.CONNECTING
exception: typing.Optional[BaseException] = None
def __post_init__(self) -> None:
self.logger = logging.getLogger("mangum.lifespan")
self.loop = asyncio.get_event_loop()
self.app_queue: asyncio.Queue = asyncio.Queue()
self.startup_event: asyncio.Event = asyncio.Event()
self.shutdown_event: asyncio.Event = asyncio.Event()
def __enter__(self) -> None:
"""
Runs the event loop for application startup.
"""
self.loop.create_task(self.run())
self.loop.run_until_complete(self.startup())
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Optional[types.TracebackType],
) -> None:
"""
Runs the event loop for application shutdown.
"""
self.loop.run_until_complete(self.shutdown())
async def run(self) -> None:
"""
Calls the application with the `lifespan` connection scope.
"""
try:
await self.app({"type": "lifespan"}, self.receive, self.send)
except LifespanUnsupported:
self.logger.info("ASGI 'lifespan' protocol appears unsupported.")
except (LifespanFailure, UnexpectedMessage) as exc:
self.exception = exc
except BaseException as exc:
self.logger.error("Exception in 'lifespan' protocol.", exc_info=exc)
finally:
self.startup_event.set()
self.shutdown_event.set()
async def receive(self) -> Message:
"""
Awaited by the application to receive ASGI `lifespan` events.
"""
if self.state is LifespanCycleState.CONNECTING:
# Connection established. The next event returned by the queue will be
# `lifespan.startup` to inform the application that the connection is
            # ready to receive lifespan messages.
self.state = LifespanCycleState.STARTUP
elif self.state is LifespanCycleState.STARTUP:
# Connection shutting down. The next event returned by the queue will be
# `lifespan.shutdown` to inform the application that the connection is now
# closing so that it may perform cleanup.
self.state = LifespanCycleState.SHUTDOWN
return await self.app_queue.get()
async def send(self, message: Message) -> None:
"""
Awaited by the application to send ASGI `lifespan` events.
"""
message_type = message["type"]
self.logger.info(
"%s: '%s' event received from application.", self.state, message_type
)
if self.state is LifespanCycleState.CONNECTING:
if self.lifespan == "on":
raise LifespanFailure(
"Lifespan connection failed during startup and lifespan is 'on'."
)
# If a message is sent before the startup event is received by the
# application, then assume that lifespan is unsupported.
self.state = LifespanCycleState.UNSUPPORTED
raise LifespanUnsupported("Lifespan protocol appears unsupported.")
if message_type not in (
"lifespan.startup.complete",
"lifespan.shutdown.complete",
"lifespan.startup.failed",
"lifespan.shutdown.failed",
):
self.state = LifespanCycleState.FAILED
raise UnexpectedMessage(f"Unexpected '{message_type}' event received.")
if self.state is LifespanCycleState.STARTUP:
if message_type == "lifespan.startup.complete":
self.startup_event.set()
elif message_type == "lifespan.startup.failed":
self.state = LifespanCycleState.FAILED
self.startup_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan startup failure. {message}")
elif self.state is LifespanCycleState.SHUTDOWN:
if message_type == "lifespan.shutdown.complete":
self.shutdown_event.set()
elif message_type == "lifespan.shutdown.failed":
self.state = LifespanCycleState.FAILED
self.shutdown_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan shutdown failure. {message}")
async def startup(self) -> None:
"""
Pushes the `lifespan` startup event to application queue and handles errors.
"""
self.logger.info("Waiting for application startup.")
await self.app_queue.put({"type": "lifespan.startup"})
await self.startup_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
if not self.exception:
self.logger.info("Application startup complete.")
else:
self.logger.info("Application startup failed.")
async def shutdown(self) -> None:
"""
Pushes the `lifespan` shutdown event to application queue and handles errors.
"""
self.logger.info("Waiting for application shutdown.")
await self.app_queue.put({"type": "lifespan.shutdown"})
await self.shutdown_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
| 39.443878 | 87 | 0.644289 | ["MIT"] | IlyaSukhanov/mangum | mangum/protocols/lifespan.py | 7,731 | Python |
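
For context, the messages `LifespanCycle` sends and expects correspond to an ASGI application shaped roughly like the sketch below. This is a minimal hand-written app for illustration, not part of mangum; real frameworks (Starlette, FastAPI, etc.) implement the same handshake internally.

```python
async def app(scope, receive, send):
    # LifespanCycle.run() calls the app once with a {"type": "lifespan"} scope.
    if scope["type"] == "lifespan":
        while True:
            message = await receive()
            if message["type"] == "lifespan.startup":
                # ... open database pools, warm caches, etc. ...
                await send({"type": "lifespan.startup.complete"})
            elif message["type"] == "lifespan.shutdown":
                # ... release resources ...
                await send({"type": "lifespan.shutdown.complete"})
                return
    else:
        # HTTP/WebSocket handling would go here.
        ...
```

Sending `lifespan.startup.failed` or `lifespan.shutdown.failed` instead is what drives `LifespanCycle` into its `FAILED` state above.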
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Conf, Site, SitePermission
class ConfTestCase(TestCase):
def test_conf_created(self):
site = Site.objects.create(domain='test.site', name='Test Site')
self.assertIsInstance(site.conf, Conf)
class SitePermissionTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('Test User', '[email protected]',
'test')
Site.objects.create(domain='test1.site', name='Test Site 1')
Site.objects.create(domain='test2.site', name='Test Site 2')
def test_sitepermission_created(self):
site_permission = SitePermission.objects.create(user=self.user)
self.assertIsInstance(site_permission, SitePermission)
def test_sitepermission_sites_added(self):
site_permission = SitePermission.objects.create(user=self.user)
site_permission.sites.set(Site.objects.all())
site_permission.save()
self.assertQuerysetEqual(site_permission.sites.all(),
map(repr, Site.objects.all()))
| 35.96875 | 74 | 0.67159 | [
"BSD-2-Clause"
] | dyndeploy-test/timestrap | conf/tests.py | 1,151 | Python |
import json
import socket
def is_jsonable(obj):
try:
json.dumps(obj)
return True
except (TypeError, OverflowError, ValueError):
return False
def sanitize_meta(meta):
keys_to_sanitize = []
for key, value in meta.items():
if not is_jsonable(value):
keys_to_sanitize.append(key)
if keys_to_sanitize:
for key in keys_to_sanitize:
del meta[key]
meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(
keys_to_sanitize)
return meta
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
| 22.702703 | 74 | 0.591667 | ["MIT"] | markcurtis1970/python | logdna/utils.py | 840 | Python |
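
A quick illustration of what `sanitize_meta` does with non-JSON-serializable values; the metadata dictionary is made up, and only the helpers defined in the file above are used.

```python
from logdna.utils import sanitize_meta, get_ip

meta = {
    "user": "alice",      # JSON-serializable, kept as-is
    "socket": object(),   # not serializable, will be dropped
}
cleaned = sanitize_meta(meta)
# cleaned == {"user": "alice",
#             "__errors": "These keys have been sanitized: socket"}

print(get_ip())  # best-effort local IP; falls back to 127.0.0.1
```

Note that `sanitize_meta` mutates and returns the same dictionary rather than copying it.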
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova_powervm.virt.powervm.volume import fileio
class GPFSVolumeAdapter(fileio.FileIOVolumeAdapter):
"""Connects GPFS Cinder Volumes to PowerVM VMs."""
def _get_path(self):
return self.connection_info.get("data")['device_path']
| 35.12 | 78 | 0.730068 | [
"Apache-2.0"
] | openstack/nova-powervm | nova_powervm/virt/powervm/volume/gpfs.py | 878 | Python |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
c = get_config() # noqa: F821
c.NotebookApp.ip = "0.0.0.0"
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.Spawner.args = ['--NotebookApp.tornado_settings={"headers":{"Content-Security-Policy": "frame-ancestors * \'self\' colinjbrown.com:*"}}']
c.NotebookApp.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
c.JupyterHub.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
# Generate a self-signed certificate
if "GEN_CERT" in os.environ:
dir_name = jupyter_data_dir()
pem_file = os.path.join(dir_name, "notebook.pem")
try:
os.makedirs(dir_name)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
pass
else:
raise
# Generate an openssl.cnf file to set the distinguished name
cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
if not os.path.isfile(cnf_file):
with open(cnf_file, "w") as fh:
fh.write(
"""\
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
"""
)
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(
[
"openssl",
"req",
"-new",
"-newkey=rsa:2048",
"-days=365",
"-nodes",
"-x509",
"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
f"-keyout={pem_file}",
f"-out={pem_file}",
]
)
# Restrict access to the file
os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = pem_file
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if "NB_UMASK" in os.environ:
os.umask(int(os.environ["NB_UMASK"], 8))
| 31.422535 | 142 | 0.636934 | ["BSD-3-Clause"] | colinjbrown/dfext-dockerstack | jupyter_notebook_config.py | 2,231 | Python |
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import getopt
import glob
import logging
import os
import sys
import stack_core
import stack_libs
import subprocess
import symbol
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir,
'build', 'android'))
from pylib import constants
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir,
'tools', 'python'))
import llvm_symbolizer
DEFAULT_SYMROOT='/tmp/symbols'
# From: https://source.android.com/source/build-numbers.html
_ANDROID_M_MAJOR_VERSION=6
def PrintUsage():
"""Print usage and exit with error."""
# pylint: disable-msg=C6310
print
print " usage: " + sys.argv[0] + " [options] [FILE]"
print
print " --symbols-dir=path"
print " the path to a symbols dir, such as =/tmp/out/target/product/dream/symbols"
print
print " --chrome-symbols-dir=path"
print " the path to a Chrome symbols dir (can be absolute or relative"
print " to src), such as =out/Debug/lib.unstripped"
print
print " --output-directory=path"
print " the path to the build output directory, such as out/Debug."
print " Ignored if --chrome-symbols-dir is passed."
print
print " --packed-relocation-adjustments"
print " --no-packed-relocation-adjustments"
print " turn packed relocation adjustment on and off (default is off)"
print " If running on pre-M Android and the stack trace appears to"
print " make no sense, try turning this feature on."
print
print " --symbols-zip=path"
print " the path to a symbols zip file, such as =dream-symbols-12345.zip"
print
print " --more-info"
print " --less-info"
print " Change the level of detail in the output."
print " --more-info is slower and more verbose, but more functions will"
print " be fully qualified with namespace/classname and have full"
print " argument information. Also, the 'stack data' section will be"
print " printed."
print
print " --arch=arm|arm64|x64|x86|mips"
print " the target architecture"
print
print " --fallback-monochrome"
print " fallback to monochrome instead of chrome if fail to detect"
print " shared lib which is loaded from APK, this doesn't work for"
print " component build."
print
print " --verbose"
print " enable extra logging, particularly for debugging failed symbolization"
print
print " FILE should contain a stack trace in it somewhere"
print " the tool will find that and re-print it with"
print " source files and line numbers. If you don't"
print " pass FILE, or if file is -, it reads from"
print " stdin."
print
# pylint: enable-msg=C6310
sys.exit(1)
def UnzipSymbols(symbolfile, symdir=None):
"""Unzips a file to DEFAULT_SYMROOT and returns the unzipped location.
Args:
symbolfile: The .zip file to unzip
symdir: Optional temporary directory to use for extraction
Returns:
A tuple containing (the directory into which the zip file was unzipped,
the path to the "symbols" directory in the unzipped file). To clean
up, the caller can delete the first element of the tuple.
Raises:
SymbolDownloadException: When the unzip fails.
"""
if not symdir:
symdir = "%s/%s" % (DEFAULT_SYMROOT, hash(symbolfile))
if not os.path.exists(symdir):
os.makedirs(symdir)
print "extracting %s..." % symbolfile
saveddir = os.getcwd()
os.chdir(symdir)
try:
unzipcode = subprocess.call(["unzip", "-qq", "-o", symbolfile])
if unzipcode > 0:
os.remove(symbolfile)
raise SymbolDownloadException("failed to extract symbol files (%s)."
% symbolfile)
finally:
os.chdir(saveddir)
android_symbols = glob.glob("%s/out/target/product/*/symbols" % symdir)
if android_symbols:
return (symdir, android_symbols[0])
else:
# This is a zip of Chrome symbols, so symbol.CHROME_SYMBOLS_DIR needs to be
# updated to point here.
symbol.CHROME_SYMBOLS_DIR = symdir
return (symdir, symdir)
def main(argv):
try:
options, arguments = getopt.getopt(argv, "",
["packed-relocation-adjustments",
"no-packed-relocation-adjustments",
"more-info",
"less-info",
"chrome-symbols-dir=",
"output-directory=",
"symbols-dir=",
"symbols-zip=",
"packed-lib=",
"arch=",
"fallback-monochrome",
"verbose",
"help"])
except getopt.GetoptError, unused_error:
PrintUsage()
zip_arg = None
more_info = False
fallback_monochrome = False
arch_defined = False
packed_libs = []
for option, value in options:
if option == "--help":
PrintUsage()
elif option == "--symbols-dir":
symbol.SYMBOLS_DIR = os.path.expanduser(value)
elif option == "--symbols-zip":
zip_arg = os.path.expanduser(value)
elif option == "--arch":
symbol.ARCH = value
arch_defined = True
elif option == "--chrome-symbols-dir":
symbol.CHROME_SYMBOLS_DIR = os.path.join(constants.DIR_SOURCE_ROOT,
value)
elif option == "--output-directory":
constants.SetOutputDirectory(value)
elif option == "--packed-lib":
packed_libs.append(os.path.expanduser(value))
elif option == "--more-info":
more_info = True
elif option == "--less-info":
more_info = False
elif option == "--fallback-monochrome":
fallback_monochrome = True
elif option == "--verbose":
logging.basicConfig(level=logging.DEBUG)
elif option in (
'--packed-relocation-adjustments',
'--no-packed-relocation-adjustments'):
print ('--[no-]packed-relocation-adjustments options are deprecated. '
'Specify packed libs directory instead.')
if len(arguments) > 1:
PrintUsage()
# Do an up-front test that the output directory is known.
if not symbol.CHROME_SYMBOLS_DIR:
constants.CheckOutputDirectory()
if not arguments or arguments[0] == "-":
print "Reading native crash info from stdin"
f = sys.stdin
else:
print "Searching for native crashes in: " + os.path.realpath(arguments[0])
f = open(arguments[0], "r")
lines = f.readlines()
f.close()
rootdir = None
if zip_arg:
rootdir, symbol.SYMBOLS_DIR = UnzipSymbols(zip_arg)
version = stack_libs.GetTargetAndroidVersionNumber(lines)
if version is None:
print ("Unknown Android release, "
"consider passing --packed-lib.")
elif version < _ANDROID_M_MAJOR_VERSION and not packed_libs:
print ("Pre-M Android release detected, "
"but --packed-lib not specified. Stack symbolization may fail.")
if (version is None or version < _ANDROID_M_MAJOR_VERSION) and packed_libs:
load_vaddrs = stack_libs.GetLoadVaddrs(stripped_libs=packed_libs)
else:
load_vaddrs = {}
print ("Reading Android symbols from: "
+ os.path.normpath(symbol.SYMBOLS_DIR))
chrome_search_path = symbol.GetLibrarySearchPaths()
with llvm_symbolizer.LLVMSymbolizer() as symbolizer:
print ("Searching for Chrome symbols from within: "
+ ':'.join((os.path.normpath(d) for d in chrome_search_path)))
stack_core.ConvertTrace(lines, load_vaddrs, more_info, fallback_monochrome,
arch_defined, symbolizer)
if rootdir:
# be a good citizen and clean up...os.rmdir and os.removedirs() don't work
cmd = "rm -rf \"%s\"" % rootdir
print "\ncleaning up (%s)" % cmd
os.system(cmd)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
# vi: ts=2 sw=2
| 35.454183 | 90 | 0.627374 | ["BSD-3-Clause"] | FLOSSBoxIN/src | third_party/android_platform/development/scripts/stack.py | 8,899 | Python |
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
# message logger
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'))
)
#----------------------------
#### Event Source
#----------------------------
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
# Global tag - Condition for P5 cluster
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
#----------------------------
#### DQM Environment
#----------------------------
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'Info'
process.dqmSaver.tag = 'Info'
#-----------------------------
# Digitisation: produce the Scalers digis containing DCS bits
process.load("EventFilter.ScalersRawToDigi.ScalersRawToDigi_cfi")
# Digitisation: produce the TCDS digis containing BST record
from EventFilter.Utilities.tcdsRawToDigi_cfi import *
process.tcdsDigis = tcdsRawToDigi.clone()
# OnlineMetaDataRawToDigi will put DCSRecord to an event
process.load('EventFilter.OnlineMetaDataRawToDigi.onlineMetaDataRawToDigi_cfi')
process.onlineMetaDataDigis = cms.EDProducer('OnlineMetaDataRawToDigi')
# DQMProvInfo is the DQM module to be run
process.load("DQMServices.Components.DQMProvInfo_cfi")
# DQM Modules
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver)
process.evfDQMmodulesPath = cms.Path(
process.scalersRawToDigi*
process.tcdsDigis*
process.onlineMetaDataRawToDigi*
process.dqmProvInfo*
process.dqmmodules
)
process.schedule = cms.Schedule(process.evfDQMmodulesPath)
process.dqmProvInfo.runType = process.runType.getRunTypeName()
# Heavy Ion Specific Fed Raw Data Collection Label
if (process.runType.getRunType() == process.runType.hi_run):
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataRepacker")
else:
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataCollector")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataCollector")
# Process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
| 39.485294 | 106 | 0.664804 | ["Apache-2.0"] | 7quantumphysics/cmssw | DQM/Integration/python/clients/info_dqm_sourceclient-live_cfg.py | 2,685 | Python |
"""CategoricalMLPPolicy."""
import akro
import tensorflow as tf
from metarl.tf.distributions import Categorical
from metarl.tf.models import MLPModel
from metarl.tf.policies import StochasticPolicy
class CategoricalMLPPolicy(StochasticPolicy):
"""CategoricalMLPPolicy
    A policy that contains an MLP to make predictions based on
a categorical distribution.
It only works with akro.Discrete action space.
Args:
env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.softmax,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Discrete), (
'CategoricalMLPPolicy only works with akro.Discrete action '
'space.')
super().__init__(name, env_spec)
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.model = MLPModel(output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='MLPModel')
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, self.obs_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self.model.build(state_input)
self._f_prob = tf.compat.v1.get_default_session().make_callable(
self.model.networks['default'].outputs,
feed_list=[self.model.networks['default'].input])
@property
def vectorized(self):
"""Vectorized or not."""
return True
def dist_info_sym(self, obs_var, state_info_vars=None, name=None):
"""Symbolic graph of the distribution."""
with tf.compat.v1.variable_scope(self._variable_scope):
prob = self.model.build(obs_var, name=name)
return dict(prob=prob)
def dist_info(self, obs, state_infos=None):
"""Distribution info."""
prob = self._f_prob(obs)
return dict(prob=prob)
def get_action(self, observation):
"""Return a single action."""
flat_obs = self.observation_space.flatten(observation)
prob = self._f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
"""Return multiple actions."""
flat_obs = self.observation_space.flatten_n(observations)
probs = self._f_prob(flat_obs)
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope."""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
@property
def distribution(self):
"""Policy distribution."""
return Categorical(self.action_dim)
def __getstate__(self):
"""Object.__getstate__."""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__."""
super().__setstate__(state)
self._initialize()
| 39.71831 | 78 | 0.623227 | ["MIT"] | icml2020submission6857/metarl | src/metarl/tf/policies/categorical_mlp_policy.py | 5,640 | Python |
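
`get_action` above delegates the actual draw to akro's `Discrete.weighted_sample`. The sketch below illustrates the equivalent sampling step with plain NumPy; it is a stand-in for the idea, not the akro implementation, and the probability values are made up.

```python
import numpy as np


def weighted_sample(probs, rng=np.random):
    """Draw one action index from a categorical distribution given by `probs`."""
    probs = np.asarray(probs, dtype=np.float64)
    probs = probs / probs.sum()  # guard against tiny numerical drift in the softmax output
    return int(rng.choice(len(probs), p=probs))


# e.g. the MLP's softmax output for a 4-action space
probs = [0.1, 0.6, 0.2, 0.1]
action = weighted_sample(probs)
assert 0 <= action < 4
```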
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as lib_constants
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr_edge_ha_router as dvr_edge_ha_rtr
from neutron.agent.l3 import dvr_edge_router as dvr_edge_rtr
from neutron.agent.l3 import dvr_local_router as dvr_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.agent.l3 import ha as ha_conf
from neutron.conf import common as base_config
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
FIP_PRI = 32768
HOSTNAME = 'myhost'
class TestDvrRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestDvrRouterOperations, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf)
ha_conf.register_l3_agent_ha_opts(self.conf)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_interface_opts(self.conf)
agent_config.register_external_process_opts(self.conf)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('state_path', cfg.CONF.state_path)
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.ensure_dir = mock.patch(
'oslo_utils.fileutils.ensure_tree').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron_lib.utils.file.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
self.mock_delete_ip_rule = mock.patch.object(ip_lib,
'delete_ip_rule').start()
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _create_router(self, router=None, **kwargs):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.router_id = _uuid()
if not router:
router = mock.MagicMock()
kwargs['agent'] = agent
kwargs['router_id'] = self.router_id
kwargs['router'] = router
kwargs['agent_conf'] = self.conf
kwargs['interface_driver'] = mock.Mock()
return dvr_router.DvrLocalRouter(HOSTNAME, **kwargs)
def _set_ri_kwargs(self, agent, router_id, router):
self.ri_kwargs['agent'] = agent
self.ri_kwargs['router_id'] = router_id
self.ri_kwargs['router'] = router
def test_gw_ns_name(self):
ri = self._create_router()
self.assertEqual(ri.ns_name, ri.get_gw_ns_name())
def test_create_dvr_fip_interfaces_update(self):
ri = self._create_router()
fip_agent_port = {'subnets': []}
ri.get_floating_agent_gw_interface = mock.Mock(
return_value=fip_agent_port)
ri.get_floating_ips = mock.Mock(return_value=True)
ri.fip_ns = mock.Mock()
ri.fip_ns.subscribe.return_value = False
ri.rtr_fip_connect = True
ex_gw_port = {'network_id': 'fake_net_id'}
ri.create_dvr_external_gateway_on_agent(ex_gw_port)
ri.fip_ns.create_or_update_gateway_port.assert_called_once_with(
fip_agent_port)
def test_create_dvr_fip_interfaces_with_matching_address_scope(self):
self._setup_create_dvr_fip_interfaces_for_setting_routing_rules(
address_scopes_match=True)
def test_create_dvr_fip_interfaces_with_address_scope_mismatch(self):
self._setup_create_dvr_fip_interfaces_for_setting_routing_rules()
def _setup_create_dvr_fip_interfaces_for_setting_routing_rules(
self, address_scopes_match=False):
ri = self._create_router()
ri.get_floating_agent_gw_interface = mock.Mock()
ri.fip_ns = mock.Mock()
ri._add_interface_routing_rule_to_router_ns = mock.Mock()
ri._add_interface_route_to_fip_ns = mock.Mock()
ri.fip_ns._create_rtr_2_fip_link = mock.Mock()
ri.internal_ports = ['moke_port_1', 'moke_port_2']
if address_scopes_match:
ri._check_if_address_scopes_match = mock.Mock(
return_value=True)
else:
ri._check_if_address_scopes_match = mock.Mock(
return_value=False)
ri.rtr_fip_connect = False
ex_gw_port = {'network_id': 'fake_net_id'}
ri.create_dvr_external_gateway_on_agent(ex_gw_port)
ri._check_rtr_2_fip_connect = mock.Mock()
ri.connect_rtr_2_fip()
self.assertTrue(ri._check_if_address_scopes_match.called)
if address_scopes_match:
self.assertTrue(
ri.fip_ns.create_rtr_2_fip_link.called)
self.assertTrue(
ri._add_interface_routing_rule_to_router_ns.called)
self.assertTrue(
ri._add_interface_route_to_fip_ns.called)
else:
self.assertFalse(
ri._add_interface_routing_rule_to_router_ns.called)
self.assertFalse(
ri._add_interface_route_to_fip_ns.called)
self.assertTrue(
ri.fip_ns.create_rtr_2_fip_link.called)
def test_get_floating_ips_dvr(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fips = ri.get_floating_ips()
self.assertEqual(
[{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}], fips)
def test_floating_forward_rules_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
fip = {'id': _uuid()}
ri = self._create_router(router)
self.assertFalse(ri.floating_forward_rules(fip))
def test_floating_forward_rules(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
floating_ip = '15.1.2.3'
rtr_2_fip_name = 'fake_router'
fixed_ip = '192.168.0.1'
fip = {'id': _uuid(),
'fixed_ip_address': '192.168.0.1',
'floating_ip_address': '15.1.2.3'}
instance = mock.Mock()
instance.get_rtr_ext_device_name = mock.Mock(
return_value=rtr_2_fip_name)
ri.fip_ns = instance
dnat_from_floatingip_to_fixedip = (
'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % (
floating_ip, rtr_2_fip_name, fixed_ip))
to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)
if ri.iptables_manager.random_fully:
to_source += ' --random-fully'
snat_from_fixedip_to_floatingip = ('float-snat', to_source)
actual = ri.floating_forward_rules(fip)
expected = [dnat_from_floatingip_to_fixedip,
snat_from_fixedip_to_floatingip]
self.assertEqual(expected, actual)
def test_floating_mangle_rules_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
floating_ip = mock.Mock()
fixed_ip = mock.Mock()
internal_mark = mock.Mock()
self.assertFalse(ri.floating_mangle_rules(floating_ip, fixed_ip,
internal_mark))
def test_floating_mangle_rules(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
floating_ip = '15.1.2.3'
fixed_ip = '192.168.0.1'
internal_mark = 'fake_mark'
rtr_2_fip_name = 'fake_router'
instance = mock.Mock()
instance.get_rtr_ext_device_name = mock.Mock(
return_value=rtr_2_fip_name)
ri.fip_ns = instance
mark_traffic_to_floating_ip = (
'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % (
floating_ip, rtr_2_fip_name, internal_mark))
mark_traffic_from_fixed_ip = (
'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
actual = ri.floating_mangle_rules(floating_ip, fixed_ip, internal_mark)
expected = [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
self.assertEqual(expected, actual)
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'add_ip_rule')
def test_floating_ip_added_dist(self, mock_add_ip_rule, mIPDevice,
mock_adv_notif):
router = mock.MagicMock()
ri = self._create_router(router)
ri.ex_gw_port = ri.router['gw_port']
ext_net_id = _uuid()
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': ext_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': ext_net_id,
'port_id': _uuid()}
ri.fip_ns = mock.Mock()
ri.fip_ns.agent_gateway_port = agent_gw_port
ri.create_dvr_external_gateway_on_agent(ri.ex_gw_port)
ri._check_rtr_2_fip_connect = mock.Mock()
ri.connect_rtr_2_fip()
self.assertTrue(ri.rtr_fip_connect)
ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = subnet
ri.fip_ns.local_subnets = mock.Mock()
ri.fip_ns.local_subnets.allocate.return_value = subnet
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
ri.floating_ip_added_dist(fip, ip_cidr)
mock_add_ip_rule.assert_called_with(
namespace=ri.router_namespace.name, ip='192.168.0.1',
table=16, priority=FIP_PRI)
ri.fip_ns.local_subnets.allocate.assert_not_called()
# Validate that fip_ns.local_subnets is called when
# ri.rtr_fip_subnet is None
ri.rtr_fip_subnet = None
ri.floating_ip_added_dist(fip, ip_cidr)
mock_add_ip_rule.assert_called_with(
namespace=ri.router_namespace.name, ip='192.168.0.1',
table=16, priority=FIP_PRI)
ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id)
# TODO(mrsmith): add more asserts
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
def test_floating_ip_removed_dist(self, mIPDevice, mIPWrapper):
router = mock.MagicMock()
ri = self._create_router(router)
ri.ex_gw_port = ri.router['gw_port']
subnet_id = _uuid()
fixed_ip = '20.0.0.30'
agent_gw_port = {'fixed_ips': [{'ip_address': fixed_ip,
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
fip_cidr = '11.22.33.44/24'
ri.fip_ns = mock.Mock()
ri.fip_ns.get_name.return_value = 'fip_ns_name'
ri.floating_ips_dict['11.22.33.44'] = (fixed_ip, FIP_PRI)
ri.fip_2_rtr = '11.22.33.42'
ri.rtr_2_fip = '11.22.33.40'
ri.fip_ns.agent_gateway_port = agent_gw_port
s = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = s
ri.fip_ns.local_subnets = mock.Mock()
ri.floating_ip_removed_dist(fip_cidr)
self.mock_delete_ip_rule.assert_called_with(
ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI)
mIPDevice().route.delete_route.assert_called_with(fip_cidr,
via=str(s.ip))
ri.fip_ns.local_subnets.allocate.assert_not_called()
@mock.patch.object(ip_lib, 'add_ip_rule')
def test_floating_ip_moved_dist(self, mock_add_ip_rule):
router = mock.MagicMock()
ri = self._create_router(router)
floating_ip_address = '15.1.2.3'
fixed_ip = '192.168.0.1'
fip = {'floating_ip_address': floating_ip_address,
'fixed_ip_address': fixed_ip}
ri.floating_ips_dict['15.1.2.3'] = (fixed_ip, FIP_PRI)
ri.fip_ns = mock.Mock()
ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
ri.floating_ip_moved_dist(fip)
self.mock_delete_ip_rule.assert_called_once_with(
ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI)
ri.fip_ns.deallocate_rule_priority.assert_called_once_with(
floating_ip_address)
ri.fip_ns.allocate_rule_priority.assert_called_once_with(
floating_ip_address)
mock_add_ip_rule.assert_called_with(
namespace=ri.router_namespace.name, ip=fixed_ip,
table=16, priority=FIP_PRI)
def _test_add_floating_ip(self, ri, fip, is_failure=False):
if not is_failure:
ri.floating_ip_added_dist = mock.Mock(
return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE)
else:
ri.floating_ip_added_dist = mock.Mock(
return_value=lib_constants.FLOATINGIP_STATUS_ERROR)
result = ri.add_floating_ip(fip,
mock.sentinel.interface_name,
mock.sentinel.device)
ri.floating_ip_added_dist.assert_called_once_with(
fip, mock.ANY)
return result
def test_add_floating_ip(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result)
def test_add_floating_ip_failure(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip, True)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ERROR, result)
@mock.patch.object(router_info.RouterInfo, 'remove_floating_ip')
def test_remove_floating_ip(self, super_remove_floating_ip):
ri = self._create_router(mock.MagicMock())
ri.floating_ip_removed_dist = mock.Mock()
ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr)
self.assertFalse(super_remove_floating_ip.called)
ri.floating_ip_removed_dist.assert_called_once_with(
mock.sentinel.ip_cidr)
def test__get_internal_port(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id))
def test__get_internal_port_not_found(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertIsNone(ri._get_internal_port(mock.sentinel.subnet_id2))
def test__get_snat_idx_ipv4(self):
ip_cidr = '101.12.13.00/24'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x650C0D00 is numerical value of 101.12.13.00
self.assertEqual(0x650C0D00, snat_idx)
def test__get_snat_idx_ipv6(self):
ip_cidr = '2620:0:a03:e100::/64'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr
self.assertEqual(0x3D345705, snat_idx)
def test__get_snat_idx_ipv6_below_32768(self):
ip_cidr = 'd488::/30'
# crc32 of this ip_cidr is 0x1BD7
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
self.assertEqual(0x40001BD6, snat_idx)
def test__set_subnet_arp_info(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
subnet_id = l3_test_common.get_subnet_id(ports[0])
test_ports = [{'mac_address': '00:11:22:33:44:55',
'device_owner': lib_constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'ip_address': '1.2.3.4',
'prefixlen': 24,
'subnet_id': subnet_id}]},
{'mac_address': '11:22:33:44:55:66',
'device_owner': lib_constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [{'ip_address': '1.2.3.5',
'prefixlen': 24,
'subnet_id': subnet_id}]},
{'mac_address': '22:33:44:55:66:77',
'device_owner':
lib_constants.DEVICE_OWNER_LOADBALANCERV2,
'fixed_ips': [{'ip_address': '1.2.3.6',
'prefixlen': 24,
'subnet_id': subnet_id}]}]
self.plugin_api.get_ports_by_subnet.return_value = test_ports
# Test basic case
ports[0]['subnets'] = [{'id': subnet_id,
'cidr': '1.2.3.0/24'}]
with mock.patch.object(ri,
'_process_arp_cache_for_internal_port') as parp:
ri._set_subnet_arp_info(subnet_id)
self.assertEqual(1, parp.call_count)
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.2.3.4', '00:11:22:33:44:55')
# Test negative case
router['distributed'] = False
ri._set_subnet_arp_info(subnet_id)
self.mock_ip_dev.neigh.add.never_called()
def test_add_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
agent.add_arp_entry(None, payload)
agent.router_deleted(None, router['id'])
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent.add_arp_entry(None, payload)
def test__update_arp_entry_with_no_subnet(self):
self._set_ri_kwargs(mock.sentinel.agent,
'foo_router_id',
{'distributed': True, 'gw_port_host': HOSTNAME})
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ri.get_internal_device_name = mock.Mock()
ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
self.assertFalse(ri.get_internal_device_name.call_count)
def _setup_test_for_arp_entry_cache(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
subnet_id = l3_test_common.get_subnet_id(
ri.router[lib_constants.INTERFACE_KEY][0])
return ri, subnet_id
def test__update_arp_entry_calls_arp_cache_with_no_device(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
state = True
with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as rtrdev,\
mock.patch.object(ri, '_cache_arp_entry') as arp_cache:
rtrdev.return_value.exists.return_value = False
state = ri._update_arp_entry(
mock.ANY, mock.ANY, subnet_id, 'add')
self.assertFalse(state)
self.assertTrue(arp_cache.called)
arp_cache.assert_called_once_with(mock.ANY, mock.ANY,
subnet_id, 'add')
self.assertFalse(rtrdev.neigh.add.called)
def test__process_arp_cache_for_internal_port(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
subnet_id, 'add')
self.assertEqual(1, len(ri._pending_arp_set))
with mock.patch.object(ri, '_update_arp_entry') as update_arp:
update_arp.return_value = True
ri._process_arp_cache_for_internal_port(subnet_id)
self.assertEqual(0, len(ri._pending_arp_set))
def test__delete_arp_cache_for_internal_port(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
subnet_id, 'add')
self.assertEqual(1, len(ri._pending_arp_set))
ri._delete_arp_cache_for_internal_port(subnet_id)
self.assertEqual(0, len(ri._pending_arp_set))
def test_del_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.5.25.15',
'mac_address': '00:44:33:22:11:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
# first add the entry
agent.add_arp_entry(None, payload)
# now delete it
agent.del_arp_entry(None, payload)
self.mock_ip_dev.neigh.delete.assert_called_once_with(
'1.5.25.15', '00:44:33:22:11:55')
agent.router_deleted(None, router['id'])
def test_get_floating_agent_gw_interfaces(self):
fake_network_id = _uuid()
subnet_id = _uuid()
agent_gateway_port = (
[{'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
portbindings.HOST_ID: 'myhost',
'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW,
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
self.assertEqual(
agent_gateway_port[0],
ri.get_floating_agent_gw_interface(fake_network_id))
def test_process_router_dist_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()},
{'id': _uuid(),
'host': 'some-other-host',
'floating_ip_address': '15.1.2.4',
'fixed_ip_address': '192.168.0.10',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id)
subnet_id = _uuid()
fip_ns.agent_gateway_port = (
{'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
)
def _test_ext_gw_updated_dvr_agent_mode(self, host,
agent_mode, expected_call_count):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
# test agent mode = dvr (compute node)
router['gw_port_host'] = host
agent.conf.agent_mode = agent_mode
ri.external_gateway_updated(ex_gw_port, interface_name)
# no gateway should be added on dvr node
self.assertEqual(expected_call_count,
ri._external_gateway_added.call_count)
def test_ext_gw_updated_dvr_agent_mode(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
def test_ext_gw_updated_dvr_agent_mode_host(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME,
'dvr', 0)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.external_gateway_added(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.fip_ns = agent.get_fip_ns(external_net_id)
subnet_id = _uuid()
ri.fip_ns.agent_gateway_port = {
'fixed_ips': [{
'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id
}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': external_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))]
ri.get_router_cidrs = mock.Mock(
return_value={vm_floating_ip + '/32', '19.4.4.1/24'})
self.device_exists.return_value = True
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev,
'19.4.4.2/32')
def test_get_router_cidrs_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
device = mock.Mock()
self.assertFalse(ri.get_router_cidrs(device))
def test_get_router_cidrs_no_device_exists(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fake_fip_ns = mock.Mock(return_value=True)
fake_fip_ns.get_name = mock.Mock(return_value=None)
fake_fip_ns.get_int_device_name = mock.Mock(return_value=None)
ri.fip_ns = fake_fip_ns
device = mock.Mock()
device.exists = mock.Mock(return_value=False)
with mock.patch.object(ip_lib, 'IPDevice', return_value=device):
self.assertFalse(ri.get_router_cidrs(device))
@mock.patch.object(router_info.RouterInfo, '_add_snat_rules')
@mock.patch.object(dvr_router.DvrLocalRouter, '_handle_router_snat_rules')
def test_handle_snat_rule_for_centralized_fip(
self, _add_snat_rules, _handle_router_snat_rules):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
self.mock_driver.unplug.reset_mock()
router = l3_test_common.prepare_router_data(enable_floating_ip=True)
router['gw_port_host'] = HOSTNAME
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.snat_iptables_manager = mock.MagicMock()
ipv4_nat = ri.snat_iptables_manager.ipv4['nat']
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._handle_router_snat_rules(ex_gw_port, interface_name)
ipv4_nat.add_rule.assert_called_once_with('snat', '-j $float-snat')
@mock.patch.object(dvr_edge_rtr.DvrEdgeRouter,
'add_centralized_floatingip')
def test_add_centralized_floatingip_dvr_ha(
self,
super_add_centralized_floatingip):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(
num_internal_ports=2, enable_ha=True)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
self._set_ri_kwargs(agent, router['id'], router)
fip = {'id': _uuid()}
fip_cidr = '11.22.33.44/24'
ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri.is_router_master = mock.Mock(return_value=False)
ri._add_vip = mock.Mock()
interface_name = ri.get_snat_external_device_interface_name(
ri.get_ex_gw_port())
ri.add_centralized_floatingip(fip, fip_cidr)
ri._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_not_called()
router[lib_constants.HA_INTERFACE_KEY]['status'] = 'DOWN'
self._set_ri_kwargs(agent, router['id'], router)
ri_1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri_1.is_router_master = mock.Mock(return_value=True)
ri_1._add_vip = mock.Mock()
interface_name = ri_1.get_snat_external_device_interface_name(
ri_1.get_ex_gw_port())
ri_1.add_centralized_floatingip(fip, fip_cidr)
ri_1._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_not_called()
router[lib_constants.HA_INTERFACE_KEY]['status'] = 'ACTIVE'
self._set_ri_kwargs(agent, router['id'], router)
ri_2 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri_2.is_router_master = mock.Mock(return_value=True)
ri_2._add_vip = mock.Mock()
interface_name = ri_2.get_snat_external_device_interface_name(
ri_2.get_ex_gw_port())
ri_2.add_centralized_floatingip(fip, fip_cidr)
ri_2._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_called_once_with(fip,
fip_cidr)
@mock.patch.object(dvr_edge_rtr.DvrEdgeRouter,
'remove_centralized_floatingip')
def test_remove_centralized_floatingip(self,
super_remove_centralized_floatingip):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
self._set_ri_kwargs(agent, router['id'], router)
fip_cidr = '11.22.33.44/24'
ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri.is_router_master = mock.Mock(return_value=False)
ri._remove_vip = mock.Mock()
ri.remove_centralized_floatingip(fip_cidr)
ri._remove_vip.assert_called_once_with(fip_cidr)
super_remove_centralized_floatingip.assert_not_called()
ri1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri1.is_router_master = mock.Mock(return_value=True)
ri1._remove_vip = mock.Mock()
ri1.remove_centralized_floatingip(fip_cidr)
ri1._remove_vip.assert_called_once_with(fip_cidr)
super_remove_centralized_floatingip.assert_called_once_with(fip_cidr)
| 46.102132 | 79 | 0.616817 | [
"Apache-2.0"
] | 1pintbeer/neutron | neutron/tests/unit/agent/l3/test_dvr_local_router.py | 41,077 | Python |
import os
import pickle
import string
import time
import logging
import numpy as np
def get_logger(name=__file__, level=logging.INFO):
logger = logging.getLogger(name)
if getattr(logger, "_init_done__", None):
logger.setLevel(level)
return logger
logger._init_done__ = True
logger.propagate = False
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(0)
del logger.handlers[:]
logger.addHandler(handler)
return logger
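# Example (sketch): obtain a configured logger for this module; the logger name
# below is illustrative, any string works.
#
#     logger = get_logger("trellis.utils", level=logging.INFO)
#     logger.info("loaded %d jets", 10)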
## Utils
def load_jets():
root_dir = "data/"
filename = os.path.join(root_dir, "TruthBS_10")
with open(filename + ".pkl", "rb") as fd:
Truth10, BS10 = pickle.load(fd, encoding='latin-1')
return Truth10, BS10
def sumLogLH(jetList):
for jet in jetList:
jet["totLogLH"] = np.sum(jet["logLH"])
def getConstituents(jet, node_id, outers_list):
"""
Recursive function to get a list of the tree leaves
"""
if jet["tree"][node_id, 0] == -1:
outers_list.append(jet["content"][node_id])
else:
getConstituents(
jet,
jet["tree"][node_id, 0],
outers_list,)
getConstituents(
jet,
jet["tree"][node_id, 1],
outers_list,)
return outers_list
def get_leaves(jet):
    return getConstituents(jet, jet["root_id"], [])
| 22 | 75 | 0.63843 | [
"MIT"
] | SebastianMacaluso/ClusterTrellis | src/ClusterTrellis/utils.py | 1,452 | Python |
"""
sphinx.util.cfamily
~~~~~~~~~~~~~~~~~~~
Utility functions common to the C and C++ domains.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import warnings
from copy import deepcopy
from typing import (
Any, Callable, List, Match, Pattern, Tuple, Union
)
from docutils import nodes
from docutils.nodes import TextElement
from sphinx.config import Config
from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.util import logging
logger = logging.getLogger(__name__)
StringifyTransform = Callable[[Any], str]
_whitespace_re = re.compile(r'(?u)\s+')
anon_identifier_re = re.compile(r'(@[a-zA-Z0-9_])[a-zA-Z0-9_]*\b')
identifier_re = re.compile(r'''(?x)
( # This 'extends' _anon_identifier_re with the ordinary identifiers,
# make sure they are in sync.
(~?\b[a-zA-Z_]) # ordinary identifiers
| (@[a-zA-Z0-9_]) # our extension for names of anonymous entities
)
[a-zA-Z0-9_]*\b
''')
integer_literal_re = re.compile(r'[1-9][0-9]*')
octal_literal_re = re.compile(r'0[0-7]*')
hex_literal_re = re.compile(r'0[xX][0-9a-fA-F][0-9a-fA-F]*')
binary_literal_re = re.compile(r'0[bB][01][01]*')
float_literal_re = re.compile(r'''(?x)
[+-]?(
# decimal
([0-9]+[eE][+-]?[0-9]+)
| ([0-9]*\.[0-9]+([eE][+-]?[0-9]+)?)
| ([0-9]+\.([eE][+-]?[0-9]+)?)
# hex
| (0[xX][0-9a-fA-F]+[pP][+-]?[0-9a-fA-F]+)
| (0[xX][0-9a-fA-F]*\.[0-9a-fA-F]+([pP][+-]?[0-9a-fA-F]+)?)
| (0[xX][0-9a-fA-F]+\.([pP][+-]?[0-9a-fA-F]+)?)
)
''')
char_literal_re = re.compile(r'''(?x)
((?:u8)|u|U|L)?
'(
(?:[^\\'])
| (\\(
(?:['"?\\abfnrtv])
| (?:[0-7]{1,3})
| (?:x[0-9a-fA-F]{2})
| (?:u[0-9a-fA-F]{4})
| (?:U[0-9a-fA-F]{8})
))
)'
''')
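# Example (sketch): the literal regexes above are anchored with .match() by the
# parsers below, e.g.
#
#     float_literal_re.match('1.5e10').group()  # -> '1.5e10'
#     char_literal_re.match("u'\\n'").group()   # -> "u'\\n'"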
def verify_description_mode(mode: str) -> None:
if mode not in ('lastIsName', 'noneIsName', 'markType', 'markName', 'param'):
raise Exception("Description mode '%s' is invalid." % mode)
class NoOldIdError(Exception):
# Used to avoid implementing unneeded id generation for old id schemes.
@property
def description(self) -> str:
warnings.warn('%s.description is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class ASTBaseBase:
def __eq__(self, other: Any) -> bool:
if type(self) is not type(other):
return False
try:
for key, value in self.__dict__.items():
if value != getattr(other, key):
return False
except AttributeError:
return False
return True
__hash__ = None # type: Callable[[], int]
def clone(self) -> Any:
"""Clone a definition expression node."""
return deepcopy(self)
def _stringify(self, transform: StringifyTransform) -> str:
raise NotImplementedError(repr(self))
def __str__(self) -> str:
return self._stringify(lambda ast: str(ast))
def get_display_string(self) -> str:
return self._stringify(lambda ast: ast.get_display_string())
def __repr__(self) -> str:
return '<%s>' % self.__class__.__name__
################################################################################
# Attributes
################################################################################
class ASTAttribute(ASTBaseBase):
def describe_signature(self, signode: TextElement) -> None:
raise NotImplementedError(repr(self))
class ASTCPPAttribute(ASTAttribute):
def __init__(self, arg: str) -> None:
self.arg = arg
def _stringify(self, transform: StringifyTransform) -> str:
return "[[" + self.arg + "]]"
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTGnuAttribute(ASTBaseBase):
def __init__(self, name: str, args: Any) -> None:
self.name = name
self.args = args
def _stringify(self, transform: StringifyTransform) -> str:
res = [self.name]
if self.args:
res.append('(')
res.append(transform(self.args))
res.append(')')
return ''.join(res)
class ASTGnuAttributeList(ASTAttribute):
def __init__(self, attrs: List[ASTGnuAttribute]) -> None:
self.attrs = attrs
def _stringify(self, transform: StringifyTransform) -> str:
res = ['__attribute__((']
first = True
for attr in self.attrs:
if not first:
res.append(', ')
first = False
res.append(transform(attr))
res.append('))')
return ''.join(res)
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTIdAttribute(ASTAttribute):
"""For simple attributes defined by the user."""
def __init__(self, id: str) -> None:
self.id = id
def _stringify(self, transform: StringifyTransform) -> str:
return self.id
def describe_signature(self, signode: TextElement) -> None:
signode.append(nodes.Text(self.id, self.id))
class ASTParenAttribute(ASTAttribute):
"""For paren attributes defined by the user."""
def __init__(self, id: str, arg: str) -> None:
self.id = id
self.arg = arg
def _stringify(self, transform: StringifyTransform) -> str:
return self.id + '(' + self.arg + ')'
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
################################################################################
class UnsupportedMultiCharacterCharLiteral(Exception):
@property
def decoded(self) -> str:
warnings.warn('%s.decoded is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class DefinitionError(Exception):
@property
def description(self) -> str:
warnings.warn('%s.description is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class BaseParser:
def __init__(self, definition: str, *,
location: Union[nodes.Node, Tuple[str, int]],
config: "Config") -> None:
self.definition = definition.strip()
self.location = location # for warnings
self.config = config
self.pos = 0
self.end = len(self.definition)
self.last_match = None # type: Match
self._previous_state = (0, None) # type: Tuple[int, Match]
self.otherErrors = [] # type: List[DefinitionError]
# in our tests the following is set to False to capture bad parsing
self.allowFallbackExpressionParsing = True
def _make_multi_error(self, errors: List[Any], header: str) -> DefinitionError:
if len(errors) == 1:
if len(header) > 0:
return DefinitionError(header + '\n' + str(errors[0][0]))
else:
return DefinitionError(str(errors[0][0]))
result = [header, '\n']
for e in errors:
if len(e[1]) > 0:
indent = ' '
result.append(e[1])
result.append(':\n')
for line in str(e[0]).split('\n'):
if len(line) == 0:
continue
result.append(indent)
result.append(line)
result.append('\n')
else:
result.append(str(e[0]))
return DefinitionError(''.join(result))
@property
def language(self) -> str:
raise NotImplementedError
def status(self, msg: str) -> None:
# for debugging
indicator = '-' * self.pos + '^'
print("%s\n%s\n%s" % (msg, self.definition, indicator))
def fail(self, msg: str) -> None:
errors = []
indicator = '-' * self.pos + '^'
exMain = DefinitionError(
'Invalid %s declaration: %s [error at %d]\n %s\n %s' %
(self.language, msg, self.pos, self.definition, indicator))
errors.append((exMain, "Main error"))
for err in self.otherErrors:
errors.append((err, "Potential other error"))
self.otherErrors = []
raise self._make_multi_error(errors, '')
def warn(self, msg: str) -> None:
logger.warning(msg, location=self.location)
def match(self, regex: Pattern) -> bool:
match = regex.match(self.definition, self.pos)
if match is not None:
self._previous_state = (self.pos, self.last_match)
self.pos = match.end()
self.last_match = match
return True
return False
def skip_string(self, string: str) -> bool:
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
return True
return False
def skip_word(self, word: str) -> bool:
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self) -> bool:
return self.match(_whitespace_re)
def skip_word_and_ws(self, word: str) -> bool:
if self.skip_word(word):
self.skip_ws()
return True
return False
def skip_string_and_ws(self, string: str) -> bool:
if self.skip_string(string):
self.skip_ws()
return True
return False
@property
def eof(self) -> bool:
return self.pos >= self.end
@property
def current_char(self) -> str:
try:
return self.definition[self.pos]
except IndexError:
return 'EOF'
@property
def matched_text(self) -> str:
if self.last_match is not None:
return self.last_match.group()
else:
return None
def read_rest(self) -> str:
rv = self.definition[self.pos:]
self.pos = self.end
return rv
def assert_end(self, *, allowSemicolon: bool = False) -> None:
self.skip_ws()
if allowSemicolon:
if not self.eof and self.definition[self.pos:] != ';':
self.fail('Expected end of definition or ;.')
else:
if not self.eof:
self.fail('Expected end of definition.')
################################################################################
@property
def id_attributes(self):
raise NotImplementedError
@property
def paren_attributes(self):
raise NotImplementedError
def _parse_balanced_token_seq(self, end: List[str]) -> str:
# TODO: add handling of string literals and similar
brackets = {'(': ')', '[': ']', '{': '}'}
startPos = self.pos
symbols = [] # type: List[str]
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
elif self.current_char in ")]}":
self.fail("Unexpected '%s' in balanced-token-seq." % self.current_char)
self.pos += 1
if self.eof:
self.fail("Could not find end of balanced-token-seq starting at %d."
% startPos)
return self.definition[startPos:self.pos]
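    # Example (sketch): with self.definition positioned at "align(4)]...",
    # _parse_balanced_token_seq(end=[']']) consumes "align(4)" and leaves
    # self.pos on the closing ']' of the surrounding attribute.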
def _parse_attribute(self) -> ASTAttribute:
self.skip_ws()
# try C++11 style
startPos = self.pos
if self.skip_string_and_ws('['):
if not self.skip_string('['):
self.pos = startPos
else:
# TODO: actually implement the correct grammar
arg = self._parse_balanced_token_seq(end=[']'])
if not self.skip_string_and_ws(']'):
self.fail("Expected ']' in end of attribute.")
if not self.skip_string_and_ws(']'):
self.fail("Expected ']' in end of attribute after [[...]")
return ASTCPPAttribute(arg)
# try GNU style
if self.skip_word_and_ws('__attribute__'):
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after '__attribute__'.")
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after '__attribute__('.")
attrs = []
while 1:
if self.match(identifier_re):
name = self.matched_text
self.skip_ws()
if self.skip_string_and_ws('('):
self.fail('Parameterized GNU style attribute not yet supported.')
attrs.append(ASTGnuAttribute(name, None))
# TODO: parse arguments for the attribute
if self.skip_string_and_ws(','):
continue
elif self.skip_string_and_ws(')'):
break
else:
self.fail("Expected identifier, ')', or ',' in __attribute__.")
if not self.skip_string_and_ws(')'):
self.fail("Expected ')' after '__attribute__((...)'")
return ASTGnuAttributeList(attrs)
# try the simple id attributes defined by the user
for id in self.id_attributes:
if self.skip_word_and_ws(id):
return ASTIdAttribute(id)
# try the paren attributes defined by the user
for id in self.paren_attributes:
if not self.skip_string_and_ws(id):
continue
if not self.skip_string('('):
self.fail("Expected '(' after user-defined paren-attribute.")
arg = self._parse_balanced_token_seq(end=[')'])
if not self.skip_string(')'):
self.fail("Expected ')' to end user-defined paren-attribute.")
return ASTParenAttribute(id, arg)
return None
| 33.050228 | 91 | 0.550014 | [
"BSD-2-Clause"
] | OliverSieweke/sphinx | sphinx/util/cfamily.py | 14,476 | Python |
from app import create_app
app = create_app()
if __name__ == '__main__':
    app.run(debug=True, port=5000)
| 18.166667 | 34 | 0.706422 | [
"MIT"
] | simonwuchj/docker-nginx-uwsgi-flask-mysql | nginx-flask/webapp/run.py | 109 | Python |
#!/usr/bin/env python
"""
Generate Sequence from a pdbfile and to modify the squences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import config
__author__ = "Beat Amrein"
__email__ = "[email protected]"
logger = logging.getLogger('prep.genseqs')
# ERROR/EXIT CODES
ERR_USAGE = 1
ERR_OUTPUTFOLDER_EXISTS = 2
ERR_TOPO_GENERATION_WT = 3
ERR_QPREP5_INEXISTENT = 4
ERR_MKTOP_INEXISTENT = 5
ERR_NO_BABEL = 6
# CONSTANTS
NLC = '\n'
def genseq2(wtseq, mutations, keepdupes=False):
""" generate a sequences library based of wtseq
@param: list of tupel, [ (resid, library), (resid, library), ...]
@returns: list of sequences
"""
def estimator(mutations):
est = 1
for mut in mutations:
lib = mut[1]
est *= (len(lib)+1)
return est
logger.info('will mutate wtseq %s and create about %s mutations',
wtseq, estimator(mutations))
seqo = list(wtseq)
sequences = [seqo]
while len(mutations) > 0:
newseqs = sequences[:]
res, lib = mutations.pop()
for seqo in sequences:
res = int(res)
if res < 1:
raise ValueError('Impossible: resid < 1!', res)
pos = res - 1
for aa in lib:
if len(aa) != 1:
                    raise ValueError('Invalid 1-letter amino acid',
aa, 'in lib', lib)
seqn = seqo[:]
seqn[pos] = aa
if keepdupes or seqn not in newseqs:
newseqs.append(seqn)
sequences = newseqs
return sequences
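# Example (sketch, made-up values): build all single and double mutants of a
# short wild-type sequence at residues 2 and 5.
#
#     muts = [(2, 'ag'), (5, 'v')]
#     variants = genseq2('acdefghik', muts)
#     # 'variants' contains the wild type plus every combination drawn from the
#     # two libraries (duplicates are skipped unless keepdupes=True).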
def combine(lib, pos):
"""generate combinations of up to 7.
@param lib: library
@param pos: positions to mutate
# TODO: implement in readable (recursively)
"""
numseqs = 1
for each in lib:
numseqs *= len(each)
logger.info('Generating %s %s', numseqs, 'sequeces. Please wait.')
seqlib = []
logger.info('Library %s, Positions %s', lib, pos)
for every in lib[0]:
if len(pos) > 1:
for every2, in lib[1]:
if len(pos) > 2:
for every3, in lib[2]:
if len(pos) > 3:
for every4, in lib[3]:
if len(pos) > 4:
for every5, in lib[4]:
if len(pos) > 5:
for every6, in lib[5]:
if len(pos) > 6:
for every7 in lib[6]:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6,
every7])
else:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6])
else:
seqlib.append([every,
every2,
every3,
every4,
every5])
else:
                                    seqlib.append([every, every2,
                                                   every3, every4])
else:
seqlib.append([every, every2, every3])
else:
seqlib.append([every, every2])
else:
seqlib.append([every])
return seqlib
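# Note (sketch): the nested loops in combine() enumerate the cartesian product
# of the first len(pos) libraries; an equivalent, depth-independent formulation
# would be
#
#     import itertools
#     seqlib = [list(combo) for combo in itertools.product(*lib[:len(pos)])]
#
# It is left as a comment here so the original control flow stays untouched.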
def gen_seqlib(sequence, pos, lib):
"""
Generates sequences, mutating at pos[x] to all as in lib[x]
Generates sequences, mutating at pos[x] if len(lib)==1,
the same lib will be used for all
Return sequences
"""
# is lib a string?
if isinstance(lib, str):
lib = [lib]
# when only 1 library is given, reuse it
if len(lib) == 1:
        for _ in range(1, len(pos)):
            lib.append(lib[0])
if len(pos) != len(lib):
msg = 'Bad Input: Dimensions of pos and lib must be equal: '
msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
        raise ValueError(msg)
seqlib = combine(lib, pos)
# insert combinations into sequence
sequences_1d = {}
for i in range(0, len(seqlib)):
nfa = list(sequence)
        for j, posj in enumerate(pos):
if nfa[posj].upper() != seqlib[i][j].upper():
nfa[posj] = seqlib[i][j]
modseq = ''.join(nfa)
sequences_1d[modseq] = 1
return sequences_1d
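# Example (sketch, made-up values): saturate positions 0 and 2 of a short
# sequence with a two-letter library; the keys of the returned dict are the
# mutated sequences.
#
#     seqs = gen_seqlib('acdef', [0, 2], ['gh'])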
def get_fasta(wtpdb):
"""Return fasta code of wtpdb"""
# preparations
from pyscwrl import babel_pdb_for_scwrl
babel_pdb_for_scwrl(wtpdb)
# read fasta
fasta = ''
for line in open('proper.fasta'):
line = line[:-1]
if line[0] == '>':
# fasta-comment, ignore line
continue
for char in line:
fasta += char.lower()
return fasta
def get_sequences(wtpdb, resids, library):
"""Return list of sequences for resids, created with library"""
print(wtpdb, resids)
# Get the fasta sequence from pdbfile
fasta = get_fasta(wtpdb)
posids = []
# position - ids start from 0 (not 1), so we have to convert
for resid in resids:
posids.append(int(resid)-1)
# generate sequences:
sequences = gen_seqlib(fasta, posids, [library])
return sequences
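# Example (sketch): typical call from a prepared structure; the file name and
# residue ids below are placeholders.
#
#     sequences = get_sequences('qprep-wt.pdb', [102, 155], config.SatLibs.ALL)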
if __name__ == "__main__":
# Parse Command Line
LIB = config.SatLibs.ALL
def usage():
"""Print Usage and exit"""
print('')
print('Usage:')
print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
print('')
sys.exit(ERR_USAGE)
def get_resnumbers(args):
"""Return residue-numbers as list-of-integers"""
resids = []
for resid in args:
try:
resids.append(int(resid))
except ValueError:
print('ValueError with ', resid, ' expected: Integer')
usage()
if len(resids) > 7:
print('FATAL:')
            print('You asked me to mutate more than 7 residues at one time.')
            print('This is NOT IMPLEMENTED and probably a BAD IDEA:')
            print('the number of variants grows as LIBRARY^{#RES}!')
            print('In your case', len(LIB), '^', len(resids), '=',
                  len(LIB)**len(resids), '!')
usage()
return resids
START = time.time()
if len(sys.argv) < 3:
usage()
    if len(sys.argv[2:]) > 7:
usage()
get_sequences(os.path.abspath(sys.argv[1]),
get_resnumbers(sys.argv[2:]), LIB)
print('time', round(time.time()-START, 2), 's')
| 30.851563 | 79 | 0.443024 | [
"MIT"
] | kamerlinlab/cadee | cadee/prep/genseqs.py | 7,898 | Python |
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import torch
from models import TransformerModel, Seq2SeqTransformer, generate_square_subsequent_mask
from models import LM_NAME, MLM_NAME, MT_NAME, NLAYERS, NUM2WORD
import os
from data_preprocessing import DATA_DIR_DEV, SAVE_DATA_MT_TRAIN
from data_preprocessing import SAVE_VOCAB_SRC, SAVE_VOCAB_TRG, PAD_WORD
import pickle
from torchtext.legacy.data import Dataset, BucketIterator
import pandas as pd
from analytics_helper import MostFreqToken, GetInter, GetMI, GetInterValues
from analytics_helper import MIN_SAMPLE_SIZE_DEV, MIN_SAMPLE_SIZE_FULL
from analytics_helper import N_FREQUENT_DEV, N_FREQUENT_FULL
from analytics_helper import N_CLUSTER_DEV, N_CLUSTER_FULL
from data_preprocessing import SAVE_MODEL_PATH, DEVELOPMENT_MODE
from MT_helpers import patch_trg, create_mask
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if DEVELOPMENT_MODE:
min_sample_size=MIN_SAMPLE_SIZE_DEV
N_frequent=N_FREQUENT_DEV
N_cluster=N_CLUSTER_DEV
data_dir=DATA_DIR_DEV
else:
min_sample_size=MIN_SAMPLE_SIZE_FULL
N_frequent=N_FREQUENT_FULL
N_cluster=N_CLUSTER_FULL
data_dir=DATA_DIR_FULL
MI_results_INP={LM_NAME.split('.')[0]:[],
f"{MLM_NAME.split('.')[0]}_SAME":[],
f"{MLM_NAME.split('.')[0]}_DIFF":[],
MT_NAME.split('.')[0]:[]}
MI_results_OUT={LM_NAME.split('.')[0]:[],
MLM_NAME.split('.')[0]:[]}
MODELS_INP=[LM_NAME, MLM_NAME, MT_NAME]
vocab_pkl_src = os.path.join(data_dir, SAVE_VOCAB_SRC)
vocab_pkl_trg = os.path.join(data_dir, SAVE_VOCAB_TRG)
train_pkl = os.path.join(data_dir, SAVE_DATA_MT_TRAIN)
field_src = pickle.load(open(vocab_pkl_src, 'rb'))
field_trg = pickle.load(open(vocab_pkl_trg, 'rb'))
src_pad_idx = field_src.vocab.stoi[PAD_WORD]
trg_pad_idx = field_trg.vocab.stoi[PAD_WORD]
train_examples = pickle.load(open(train_pkl, 'rb'))
fields = {'src':field_src , 'trg':field_trg}
train = Dataset(examples=train_examples, fields=fields)
train_iter = BucketIterator(train, batch_size=1, device=device, train=True, shuffle=False)
frequent_vocab = MostFreqToken(field_src, N_frequent, min_sample_size)
# token_reps_list holds NLAYERS dicts; in the ith dict, the key is a token ID
# and the value is the list of representations collected for that ID in layer i.
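# Illustrative layout (values made up): token_reps_list[0][42] would be the list
# of hidden-state vectors gathered for token id 42 in the first layer.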
token_reps_model_INP={}
token_reps_model_OUT={}
for this_model_name in MODELS_INP:
token_reps_list=[]
for _ in range(NLAYERS):
this_token_reps={}
for this_token_id in frequent_vocab:
this_token_reps[this_token_id]=[]
token_reps_list.append(this_token_reps)
if this_model_name.startswith("MLM"):
token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]=token_reps_list
token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]=token_reps_list
token_reps_model_OUT[this_model_name.split('.')[0]]=token_reps_list
elif this_model_name.startswith("LM"):
token_reps_model_INP[this_model_name.split('.')[0]]=token_reps_list
token_reps_model_OUT[this_model_name.split('.')[0]]=token_reps_list
elif this_model_name.startswith("MT"):
token_reps_model_INP[this_model_name.split('.')[0]]=token_reps_list
sample_size_dict_INP={}
sample_size_dict_OUT={}
for this_model_name in MODELS_INP:
if this_model_name.startswith("MLM"):
this_sample_size_dict_INP_SAME={}
this_sample_size_dict_INP_DIFF={}
this_sample_size_dict_OUT={}
for this_token_id in frequent_vocab:
this_sample_size_dict_INP_SAME[this_token_id]=0
this_sample_size_dict_INP_DIFF[this_token_id]=0
this_sample_size_dict_OUT[this_token_id]=0
sample_size_dict_INP[f"{this_model_name.split('.')[0]}_SAME"]=this_sample_size_dict_INP_SAME
sample_size_dict_INP[f"{this_model_name.split('.')[0]}_DIFF"]=this_sample_size_dict_INP_DIFF
sample_size_dict_OUT[this_model_name.split('.')[0]]=this_sample_size_dict_OUT
elif this_model_name.startswith("LM"):
this_sample_size_dict_INP={}
this_sample_size_dict_OUT={}
for this_token_id in frequent_vocab:
this_sample_size_dict_INP[this_token_id]=0
this_sample_size_dict_OUT[this_token_id]=0
sample_size_dict_INP[this_model_name.split('.')[0]]=this_sample_size_dict_INP
sample_size_dict_OUT[this_model_name.split('.')[0]]=this_sample_size_dict_OUT
elif this_model_name.startswith("MT"):
this_sample_size_dict_INP={}
for this_token_id in frequent_vocab:
this_sample_size_dict_INP[this_token_id]=0
sample_size_dict_INP[this_model_name.split('.')[0]]=this_sample_size_dict_INP
for batch in train_iter:
src_seq_MT = batch.src.to(device)
target_sample_INP_MT=GetInter(src_seq_MT.detach().numpy(), frequent_vocab)
src_seq_MLM_SAME = batch.src.to(device)
target_sample_INP_MLM_SAME=GetInter(src_seq_MLM_SAME.detach().numpy(), frequent_vocab)
src_seq=batch.src.to(device)
src_seq_MLM_DIFF = src_seq.clone()
src_mask = generate_square_subsequent_mask(src_seq.size(0))
rand_value = torch.rand(src_seq.shape)
    # mask 15% of the non-padding source tokens
    rand_mask = (rand_value < 0.15) * (src_seq != src_pad_idx)
mask_idx=(rand_mask.flatten() == True).nonzero().view(-1)
src_seq_MLM_DIFF = src_seq_MLM_DIFF.flatten()
src_seq_MLM_DIFF[mask_idx] = 103
src_seq_MLM_DIFF = src_seq_MLM_DIFF.view(src_seq.size())
target_sample_INP_MLM_DIFF=GetInter(src_seq_MLM_DIFF.detach().numpy(), frequent_vocab)
src_seq_LM = batch.src[:-1]
target_sample_INP_LM=GetInter(src_seq_LM.detach().numpy(), frequent_vocab)
trg = batch.trg
trg_seq_MT, gold = map(lambda x: x.to(device), patch_trg(trg, trg_pad_idx))
trg_seq_MT = trg_seq_MT.to(device)
trg_seq_LM = src_seq[1:].to(device)
target_sample_OUT_LM=GetInter(trg_seq_LM.detach().numpy(), frequent_vocab)
trg_seq_MLM = src_seq
target_sample_OUT_MLM=GetInter(trg_seq_MLM.detach().numpy(), frequent_vocab)
for this_model_name in MODELS_INP:
this_model = torch.load(os.path.join(SAVE_MODEL_PATH,this_model_name))
this_model.eval()
if this_model_name.startswith("MT") and len(target_sample_INP_MT)>0:
src_mask, trg_mask, src_padding_mask, trg_padding_mask = create_mask(src_seq_MT, trg_seq_MT, src_pad_idx, trg_pad_idx)
_ = this_model(src=src_seq_MT,
src_mask=src_mask,
trg=trg_seq_MT,
tgt_mask=trg_mask,
src_padding_mask=src_padding_mask,
tgt_padding_mask=trg_padding_mask,
memory_key_padding_mask=src_padding_mask)
token_reps_list=token_reps_model_INP[MT_NAME.split('.')[0]]
this_sample_size_dict=sample_size_dict_INP[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_MT, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
elif this_model_name.startswith("MLM"):
if len(target_sample_INP_MLM_SAME)>0:
src_mask = generate_square_subsequent_mask(src_seq_MLM_SAME.size(0))
src_padding_mask = (src_seq_MLM_SAME == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_MLM_SAME, src_mask.to(device),src_padding_mask.to(device))
token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
this_sample_size_dict=sample_size_dict_INP[f"{this_model_name.split('.')[0]}_SAME"]
GetInterValues(this_model, target_sample_INP_MLM_SAME, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
if len(target_sample_INP_MLM_DIFF)>0 and len(target_sample_OUT_MLM)>0:
src_mask = generate_square_subsequent_mask(src_seq_MLM_DIFF.size(0))
src_padding_mask = (src_seq_MLM_DIFF == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_MLM_DIFF.to(device), src_mask.to(device),src_padding_mask.to(device))
token_reps_list_INP=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
this_sample_size_dict_INP=sample_size_dict_INP[f"{this_model_name.split('.')[0]}_DIFF"]
token_reps_list_OUT=token_reps_model_OUT[MLM_NAME.split('.')[0]]
this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_MLM_DIFF, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
GetInterValues(this_model, target_sample_OUT_MLM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
elif this_model_name.startswith("LM") and len(target_sample_INP_LM)>0 and len(target_sample_OUT_LM)>0:
src_mask = generate_square_subsequent_mask(src_seq_LM.size(0))
src_padding_mask = (src_seq_LM == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_LM, src_mask.to(device),src_padding_mask.to(device))
token_reps_list_INP=token_reps_model_INP[this_model_name.split('.')[0]]
token_reps_list_OUT=token_reps_model_OUT[this_model_name.split('.')[0]]
this_sample_size_dict_INP=sample_size_dict_INP[this_model_name.split('.')[0]]
this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_LM, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
GetInterValues(this_model, target_sample_OUT_LM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
# we only need to keep the minimum sample size that has been collected
this_min_sample_size_inp=float('inf')
this_min_sample_size_out=float('inf')
for model_name, this_sample_size_dict in sample_size_dict_INP.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_inp:
this_min_sample_size_inp=size
for model_name, this_sample_size_dict in sample_size_dict_OUT.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_out:
this_min_sample_size_out=size
is_enough=True
if this_min_sample_size_inp>=min_sample_size and this_min_sample_size_out>=min_sample_size:
for model_name, reps_dict in token_reps_model_INP.items():
if is_enough is False:
break
for this_layer in reps_dict:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
for model_name, reps_list in token_reps_model_OUT.items():
if is_enough is False:
break
            for this_layer in reps_list:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
else:
is_enough=False
if is_enough:
break
if is_enough is False:
    raise RuntimeError("We have not collected enough data!")
for this_model_name in MODELS_INP:
if this_model_name.startswith("MLM"):
token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
result_list=MI_results_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
result_list=MI_results_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
token_reps_list=token_reps_model_OUT[MLM_NAME.split('.')[0]]
result_list=MI_results_OUT[MLM_NAME.split('.')[0]]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
elif this_model_name.startswith("MT"):
token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]
result_list=MI_results_INP[this_model_name.split('.')[0]]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
elif this_model_name.startswith("LM"):
token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]
result_list=MI_results_INP[this_model_name.split('.')[0]]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        token_reps_list=token_reps_model_OUT[this_model_name.split('.')[0]]
result_list=MI_results_OUT[this_model_name.split('.')[0]]
GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
print("result",MI_results_INP)
print("result",MI_results_OUT)
| 48.347015 | 154 | 0.713668 | [
"MIT"
] | Superhzf/PaperImplementation | NLP/The_Bottom_up_Evolution_of_Representations_in_the_Transformer/analytics.py | 12,957 | Python |
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class WidgetsBundleControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_widgets_bundle_using_delete(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
def delete_widgets_bundle_using_delete_with_http_info(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['widgets_bundle_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_widgets_bundle_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'widgets_bundle_id' is set
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `delete_widgets_bundle_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
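    # Example (sketch): synchronous call pattern shared by the methods in this
    # class; the widget bundle id below is a placeholder.
    #
    #     api = WidgetsBundleControllerApi(ApiClient())
    #     api.delete_widgets_bundle_using_delete('784f394c-42b6-435a-983c-b7beff2784f9')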
def get_widgets_bundle_by_id_using_get(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
def get_widgets_bundle_by_id_using_get_with_http_info(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['widgets_bundle_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundle_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'widgets_bundle_id' is set
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `get_widgets_bundle_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get(self, **kwargs): # noqa: E501
"""Get all Widget Bundles (getWidgetsBundles) # noqa: E501
Returns an array of Widget Bundle objects that are available for current user.Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_widgets_bundles_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all Widget Bundles (getWidgetsBundles) # noqa: E501
Returns an array of Widget Bundle objects that are available for current user.Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[WidgetsBundle]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get1(self, page_size, page, **kwargs): # noqa: E501
"""Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_widgets_bundles_using_get1_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_widgets_bundles_using_get1`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_widgets_bundles_using_get1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundles{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataWidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_widgets_bundle_using_post(self, **kwargs): # noqa: E501
"""Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
        Create or update the Widget Bundle. A Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into a bundle by type or use case. When creating the bundle, the platform generates the Widget Bundle Id as a [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify an existing Widget Bundle id to update the Widget Bundle. Referencing a non-existing Widget Bundle Id will cause a 'Not Found' error. The Widget Bundle alias is unique in the scope of the tenant. The special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by a user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_widgets_bundle_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.save_widgets_bundle_using_post_with_http_info(**kwargs) # noqa: E501
return data
def save_widgets_bundle_using_post_with_http_info(self, **kwargs): # noqa: E501
"""Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
        Create or update the Widget Bundle. A Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into a bundle by type or use case. When creating the bundle, the platform generates the Widget Bundle Id as a [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify an existing Widget Bundle id to update the Widget Bundle. Referencing a non-existing Widget Bundle Id will cause a 'Not Found' error. The Widget Bundle alias is unique in the scope of the tenant. The special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by a user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_widgets_bundle_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 47.564299 | 824 | 0.655179 | [
"Apache-2.0"
] | D34DPlayer/thingsboard-python-rest-client | tb_rest_client/api/api_pe/widgets_bundle_controller_api.py | 24,781 | Python |
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
pass
if __name__ == "__main__":
in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| 42.863636 | 72 | 0.792683 | [
"BSD-3-Clause"
] | TribeMedia/sky_engine | sky/engine/build/scripts/make_element_factory.py | 1,886 | Python |
"""
Module to handle gamma matrices expressed as tensor objects.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> G(i)
GammaMatrix(i)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declared as
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> GammaMatrix(i)
GammaMatrix(i)
To access the metric tensor
>>> LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
from sympy import S, Mul, eye, trace
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
from sympy.core.compatibility import range
# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S")
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_fmt="L")
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
TensorSymmetry.no_symmetry(1), comm=None)
def extract_type_tens(expression, component):
"""
Extract from a ``TensExpr`` all tensors with `component`.
Returns two tensor expressions:
* the first contains all ``Tensor`` of having `component`.
* the second contains all remaining.
"""
if isinstance(expression, Tensor):
sp = [expression]
elif isinstance(expression, TensMul):
sp = expression.args
else:
raise ValueError('wrong type')
# Collect all gamma matrices of the same dimension
new_expr = S.One
residual_expr = S.One
for i in sp:
if isinstance(i, Tensor) and i.component == component:
new_expr *= i
else:
residual_expr *= i
return new_expr, residual_expr
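# Illustrative sketch of extract_type_tens (comment only; `p` and `i0` are
# assumed names, built as in the module docstring examples):
#
#     p = TensorHead('p', [LorentzIndex])
#     i0 = tensor_indices('i0', LorentzIndex)
#     gammas, rest = extract_type_tens(GammaMatrix(i0)*p(-i0), GammaMatrix)
#     # gammas collects the GammaMatrix factor(s), rest keeps p(-i0)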
def simplify_gamma_expression(expression):
extracted_expr, residual_expr = extract_type_tens(expression, GammaMatrix)
res_expr = _simplify_single_line(extracted_expr)
return res_expr * residual_expr
def simplify_gpgp(ex, sort=True):
"""
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, simplify_gpgp
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> simplify_gpgp(ps*qs*qs)
GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
"""
def _simplify_gpgp(ex):
components = ex.components
a = []
comp_map = []
for i, comp in enumerate(components):
comp_map.extend([i]*comp.rank)
dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
for i in range(len(components)):
if components[i] != GammaMatrix:
continue
for dx in dum:
if dx[2] == i:
p_pos1 = dx[3]
elif dx[3] == i:
p_pos1 = dx[2]
else:
continue
comp1 = components[p_pos1]
if comp1.comm == 0 and comp1.rank == 1:
a.append((i, p_pos1))
if not a:
return ex
elim = set()
tv = []
hit = True
coeff = S.One
ta = None
while hit:
hit = False
for i, ai in enumerate(a[:-1]):
if ai[0] in elim:
continue
if ai[0] != a[i + 1][0] - 1:
continue
if components[ai[1]] != components[a[i + 1][1]]:
continue
elim.add(ai[0])
elim.add(ai[1])
elim.add(a[i + 1][0])
elim.add(a[i + 1][1])
if not ta:
ta = ex.split()
mu = TensorIndex('mu', LorentzIndex)
hit = True
if i == 0:
coeff = ex.coeff
tx = components[ai[1]](mu)*components[ai[1]](-mu)
if len(a) == 2:
tx *= 4 # eye(4)
tv.append(tx)
break
if tv:
a = [x for j, x in enumerate(ta) if j not in elim]
a.extend(tv)
t = tensor_mul(*a)*coeff
# t = t.replace(lambda x: x.is_Matrix, lambda x: 1)
return t
else:
return ex
if sort:
ex = ex.sorted_components()
# this would be better off with pattern matching
while 1:
t = _simplify_gpgp(ex)
if t != ex:
ex = t
else:
return t
def gamma_trace(t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
gamma_trace, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
if isinstance(t, TensAdd):
res = TensAdd(*[_trace_single_line(x) for x in t.args])
return res
t = _simplify_single_line(t)
res = _trace_single_line(t)
return res
def _simplify_single_line(expression):
"""
Simplify single-line product of gamma matrices.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, _simplify_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
>>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
0
"""
t1, t2 = extract_type_tens(expression, GammaMatrix)
if t1 != 1:
t1 = kahane_simplify(t1)
res = t1*t2
return res
def _trace_single_line(t):
"""
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
Notes
=====
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
    indices, trace over them; otherwise traces are not implied.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, _trace_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> _trace_single_line(G(i0)*G(i1))
4*metric(i0, i1)
>>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
0
"""
def _trace_single_line1(t):
t = t.sorted_components()
components = t.components
ncomps = len(components)
g = LorentzIndex.metric
        # gamma matrices are in a[i:j]
hit = 0
for i in range(ncomps):
if components[i] == GammaMatrix:
hit = 1
break
for j in range(i + hit, ncomps):
if components[j] != GammaMatrix:
break
else:
j = ncomps
numG = j - i
if numG == 0:
tcoeff = t.coeff
return t.nocoeff if tcoeff else t
if numG % 2 == 1:
return TensMul.from_data(S.Zero, [], [], [])
elif numG > 4:
# find the open matrix indices and connect them:
a = t.split()
ind1 = a[i].get_indices()[0]
ind2 = a[i + 1].get_indices()[0]
aa = a[:i] + a[i + 2:]
t1 = tensor_mul(*aa)*g(ind1, ind2)
t1 = t1.contract_metric(g)
args = [t1]
sign = 1
for k in range(i + 2, j):
sign = -sign
ind2 = a[k].get_indices()[0]
aa = a[:i] + a[i + 1:k] + a[k + 1:]
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
t2 = t2.contract_metric(g)
t2 = simplify_gpgp(t2, False)
args.append(t2)
t3 = TensAdd(*args)
t3 = _trace_single_line(t3)
return t3
else:
a = t.split()
t1 = _gamma_trace1(*a[i:j])
a2 = a[:i] + a[j:]
t2 = tensor_mul(*a2)
t3 = t1*t2
if not t3:
return t3
t3 = t3.contract_metric(g)
return t3
t = t.expand()
if isinstance(t, TensAdd):
a = [_trace_single_line1(x)*x.coeff for x in t.args]
return TensAdd(*a)
elif isinstance(t, (Tensor, TensMul)):
r = t.coeff*_trace_single_line1(t)
return r
else:
return trace(t)
def _gamma_trace1(*a):
gctr = 4 # FIXME specific for d=4
g = LorentzIndex.metric
if not a:
return gctr
n = len(a)
if n%2 == 1:
#return TensMul.from_data(S.Zero, [], [], [])
return S.Zero
if n == 2:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
return gctr*g(ind0, ind1)
if n == 4:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
ind2 = a[2].get_indices()[0]
ind3 = a[3].get_indices()[0]
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
def kahane_simplify(expression):
r"""
This function cancels contracted elements in a product of four
dimensional gamma matrices, resulting in an expression equal to the given
one, without the contracted gamma matrices.
Parameters
==========
`expression` the tensor expression containing the gamma matrices to simplify.
Notes
=====
    If spinor indices are given, the matrices must appear in
    the same order as in the product.
Algorithm
=========
The idea behind the algorithm is to use some well-known identities,
i.e., for contractions enclosing an even number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
for an odd number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
Instead of repeatedly applying these identities to cancel out all contracted indices,
    it is possible to recognize the links that would result from such an operation;
the problem is thus reduced to a simple rearrangement of free gamma matrices.
Examples
========
When using, always remember that the original expression coefficient
has to be handled separately
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.physics.hep.gamma_matrices import kahane_simplify
>>> from sympy.tensor.tensor import tensor_indices
>>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
>>> ta = G(i0)*G(-i0)
>>> kahane_simplify(ta)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> tb = G(i0)*G(i1)*G(-i0)
>>> kahane_simplify(tb)
-2*GammaMatrix(i1)
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
If there are no contractions, the same expression is returned
>>> tc = G(i0)*G(i1)
>>> kahane_simplify(tc)
GammaMatrix(i0)*GammaMatrix(i1)
References
==========
[1] Algorithm for Reducing Contracted Products of gamma Matrices,
Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
"""
if isinstance(expression, Mul):
return expression
if isinstance(expression, TensAdd):
return TensAdd(*[kahane_simplify(arg) for arg in expression.args])
if isinstance(expression, Tensor):
return expression
assert isinstance(expression, TensMul)
gammas = expression.args
for gamma in gammas:
assert gamma.component == GammaMatrix
free = expression.free
# spinor_free = [_ for _ in expression.free_in_args if _[1] != 0]
# if len(spinor_free) == 2:
# spinor_free.sort(key=lambda x: x[2])
# assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
# assert spinor_free[0][2] == 0
# elif spinor_free:
# raise ValueError('spinor indices do not match')
dum = []
for dum_pair in expression.dum:
if expression.index_types[dum_pair[0]] == LorentzIndex:
dum.append((dum_pair[0], dum_pair[1]))
dum = sorted(dum)
if len(dum) == 0: # or GammaMatrixHead:
# no contractions in `expression`, just return it.
return expression
    # find the `first_dum_pos`, i.e. the position of the first contracted
    # gamma matrix. Kahane's algorithm, as described in his paper, requires the
    # gamma matrix expression to start with a contracted gamma matrix; this is
    # a workaround which ignores possible initial free indices and re-adds
    # them later.
first_dum_pos = min(map(min, dum))
# for p1, p2, a1, a2 in expression.dum_in_args:
# if p1 != 0 or p2 != 0:
# # only Lorentz indices, skip Dirac indices:
# continue
# first_dum_pos = min(p1, p2)
# break
total_number = len(free) + len(dum)*2
number_of_contractions = len(dum)
free_pos = [None]*total_number
for i in free:
free_pos[i[1]] = i[0]
# `index_is_free` is a list of booleans, to identify index position
# and whether that index is free or dummy.
index_is_free = [False]*total_number
for i, indx in enumerate(free):
index_is_free[indx[1]] = True
# `links` is a dictionary containing the graph described in Kahane's paper,
    # every key maps to one or two values, representing the linked indices.
# All values in `links` are integers, negative numbers are used in the case
# where it is necessary to insert gamma matrices between free indices, in
# order to make Kahane's algorithm work (see paper).
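    # Illustrative note (assuming the simple case G(i0)*G(-i0)): the two
    # contracted positions 0 and 1 get linked both ways, and a pair of
    # "virtual" negative keys is added for the consecutive dummy indices,
    # giving roughly {0: [1], 1: [0], -1: [-2], -2: [-1]}.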
links = dict()
for i in range(first_dum_pos, total_number):
links[i] = []
# `cum_sign` is a step variable to mark the sign of every index, see paper.
cum_sign = -1
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
cum_sign_list = [None]*total_number
block_free_count = 0
# multiply `resulting_coeff` by the coefficient parameter, the rest
# of the algorithm ignores a scalar coefficient.
resulting_coeff = S.One
# initialize a list of lists of indices. The outer list will contain all
# additive tensor expressions, while the inner list will contain the
# free indices (rearranged according to the algorithm).
resulting_indices = [[]]
# start to count the `connected_components`, which together with the number
# of contractions, determines a -1 or +1 factor to be multiplied.
connected_components = 1
# First loop: here we fill `cum_sign_list`, and draw the links
# among consecutive indices (they are stored in `links`). Links among
# non-consecutive indices will be drawn later.
for i, is_free in enumerate(index_is_free):
# if `expression` starts with free indices, they are ignored here;
# they are later added as they are to the beginning of all
# `resulting_indices` list of lists of indices.
if i < first_dum_pos:
continue
if is_free:
block_free_count += 1
# if previous index was free as well, draw an arch in `links`.
if block_free_count > 1:
links[i - 1].append(i)
links[i].append(i - 1)
else:
# Change the sign of the index (`cum_sign`) if the number of free
# indices preceding it is even.
cum_sign *= 1 if (block_free_count % 2) else -1
if block_free_count == 0 and i != first_dum_pos:
# check if there are two consecutive dummy indices:
# in this case create virtual indices with negative position,
# these "virtual" indices represent the insertion of two
# gamma^0 matrices to separate consecutive dummy indices, as
# Kahane's algorithm requires dummy indices to be separated by
# free indices. The product of two gamma^0 matrices is unity,
# so the new expression being examined is the same as the
# original one.
if cum_sign == -1:
links[-1-i] = [-1-i+1]
links[-1-i+1] = [-1-i]
if (i - cum_sign) in links:
if i != first_dum_pos:
links[i].append(i - cum_sign)
if block_free_count != 0:
if i - cum_sign < len(index_is_free):
if index_is_free[i - cum_sign]:
links[i - cum_sign].append(i)
block_free_count = 0
cum_sign_list[i] = cum_sign
# The previous loop has only created links between consecutive free indices,
# it is necessary to properly create links among dummy (contracted) indices,
# according to the rules described in Kahane's paper. There is only one exception
# to Kahane's rules: the negative indices, which handle the case of some
# consecutive free indices (Kahane's paper just describes dummy indices
# separated by free indices, hinting that free indices can be added without
# altering the expression result).
for i in dum:
# get the positions of the two contracted indices:
pos1 = i[0]
pos2 = i[1]
# create Kahane's upper links, i.e. the upper arcs between dummy
# (i.e. contracted) indices:
links[pos1].append(pos2)
links[pos2].append(pos1)
# create Kahane's lower links, this corresponds to the arcs below
# the line described in the paper:
# first we move `pos1` and `pos2` according to the sign of the indices:
linkpos1 = pos1 + cum_sign_list[pos1]
linkpos2 = pos2 + cum_sign_list[pos2]
# otherwise, perform some checks before creating the lower arcs:
# make sure we are not exceeding the total number of indices:
if linkpos1 >= total_number:
continue
if linkpos2 >= total_number:
continue
# make sure we are not below the first dummy index in `expression`:
if linkpos1 < first_dum_pos:
continue
if linkpos2 < first_dum_pos:
continue
# check if the previous loop created "virtual" indices between dummy
# indices, in such a case relink `linkpos1` and `linkpos2`:
if (-1-linkpos1) in links:
linkpos1 = -1-linkpos1
if (-1-linkpos2) in links:
linkpos2 = -1-linkpos2
# move only if not next to free index:
if linkpos1 >= 0 and not index_is_free[linkpos1]:
linkpos1 = pos1
        if linkpos2 >= 0 and not index_is_free[linkpos2]:
linkpos2 = pos2
# create the lower arcs:
if linkpos2 not in links[linkpos1]:
links[linkpos1].append(linkpos2)
if linkpos1 not in links[linkpos2]:
links[linkpos2].append(linkpos1)
# This loop starts from the `first_dum_pos` index (first dummy index)
# walks through the graph deleting the visited indices from `links`,
    # it adds a gamma matrix for every free index it encounters, while it
# completely ignores dummy indices and virtual indices.
pointer = first_dum_pos
previous_pointer = 0
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
else:
break
if pointer == previous_pointer:
break
        if pointer >= 0 and free_pos[pointer] is not None:
for ri in resulting_indices:
ri.append(free_pos[pointer])
# The following loop removes the remaining connected components in `links`.
# If there are free indices inside a connected component, it gives a
# contribution to the resulting expression given by the factor
    # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahane's
# paper represented as {gamma_a, gamma_b, ... , gamma_z},
# virtual indices are ignored. The variable `connected_components` is
# increased by one for every connected component this loop encounters.
# If the connected component has virtual and dummy indices only
# (no free indices), it contributes to `resulting_indices` by a factor of two.
# The multiplication by two is a result of the
# factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
# Note: curly brackets are meant as in the paper, as a generalized
# multi-element anticommutator!
while links:
connected_components += 1
pointer = min(links.keys())
previous_pointer = pointer
# the inner loop erases the visited indices from `links`, and it adds
# all free indices to `prepend_indices` list, virtual indices are
# ignored.
prepend_indices = []
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
if len(next_ones) > 1:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
if pointer >= first_dum_pos and free_pos[pointer] is not None:
prepend_indices.insert(0, free_pos[pointer])
# if `prepend_indices` is void, it means there are no free indices
# in the loop (and it can be shown that there must be a virtual index),
# loops of virtual indices only contribute by a factor of two:
if len(prepend_indices) == 0:
resulting_coeff *= 2
# otherwise, add the free indices in `prepend_indices` to
# the `resulting_indices`:
else:
expr1 = prepend_indices
expr2 = list(reversed(prepend_indices))
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
# sign correction, as described in Kahane's paper:
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
# power of two factor, as described in Kahane's paper:
resulting_coeff *= 2**(number_of_contractions)
    # If `first_dum_pos` is not zero, it means that there are leading free gamma
    # matrices in front of `expression`, so multiply by them:
for i in range(0, first_dum_pos):
[ri.insert(0, free_pos[i]) for ri in resulting_indices]
resulting_expr = S.Zero
for i in resulting_indices:
temp_expr = S.One
for j in i:
temp_expr *= GammaMatrix(j)
resulting_expr += temp_expr
t = resulting_coeff * resulting_expr
t1 = None
if isinstance(t, TensAdd):
t1 = t.args[0]
elif isinstance(t, TensMul):
t1 = t
if t1:
pass
else:
t = eye(4)*t
return t
| 33.739554 | 180 | 0.588483 | [
"MIT"
] | CatTiger/vnpy | venv/lib/python3.7/site-packages/sympy/physics/hep/gamma_matrices.py | 24,225 | Python |
"""
Test Contacts API Endpoint | Cannlytics API
Author: Keegan Skeate
Contact: <[email protected]>
Created: 7/19/2021
Updated: 7/19/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
import os
import requests
from dotenv import load_dotenv
# Test using development server.
BASE = 'http://127.0.0.1:8000/api'
# Uncomment to test with production server.
# BASE = 'https://console.cannlytics.com/api'
# Load your API key.
load_dotenv('../../.env')
API_KEY = os.getenv('CANNLYTICS_API_KEY')
# Pass your API key through the authorization header as a bearer token.
HEADERS = {
'Authorization': 'Bearer %s' % API_KEY,
'Content-type': 'application/json',
}
# Identify the organization that you are working with.
ORG_ID = 'test-company'
# Define the endpoint.
ENDPOINT = 'contacts'
#------------------------------------------------------------------------------
# Create a contact.
#------------------------------------------------------------------------------
data = {
'address': '',
'city': '',
'contact_id': 'TEST',
'county': '',
'email': '',
'latitude': '',
'longitude': '',
'organization': 'Cannlytics Test Contact',
'phone': '',
'state': '',
'street': '',
'website': '',
'zip_code': ''
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Created:', response.json()['data'])
#------------------------------------------------------------------------------
# Get contacts.
#------------------------------------------------------------------------------
organization_id = 'test-company'
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.get(url, headers=HEADERS)
assert response.status_code == 200
data = response.json()['data']
print('Found:', len(data))
#------------------------------------------------------------------------------
# Update a contact.
#------------------------------------------------------------------------------
data = {
'contact_id': 'TEST',
'city': 'Tulsa',
'state': 'OK',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Updated:', response.json()['data'])
#------------------------------------------------------------------------------
# Delete a contact.
#------------------------------------------------------------------------------
data = {
'contact_id': 'TEST',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.delete(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Deleted:', response.json()['data'])
| 29.543478 | 79 | 0.513245 | [
"MIT"
] | mathematicalmichael/cannlytics | tests/api/test_contacts_endpoint.py | 2,718 | Python |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysnmp/license.html
#
# ASN.1 source http://mibs.snmplabs.com:80/asn1/SNMPv2-TM
# Produced by pysmi-0.4.0 at Sun Feb 17 08:56:38 2019
#
# Parts of otherwise autogenerated MIB has been updated manually.
#
try:
from socket import inet_ntop, inet_pton, AF_INET
except ImportError:
from socket import inet_ntoa, inet_aton, AF_INET
inet_ntop = lambda x, y: inet_ntoa(y)
inet_pton = lambda x, y: inet_aton(y)
from pyasn1.compat.octets import int2oct
from pyasn1.compat.octets import oct2int
if 'mibBuilder' not in globals():
import sys
sys.stderr.write(__doc__)
sys.exit(1)
(Integer,
OctetString,
ObjectIdentifier) = mibBuilder.importSymbols(
"ASN1",
"Integer",
"OctetString",
"ObjectIdentifier")
(NamedValues,) = mibBuilder.importSymbols(
"ASN1-ENUMERATION",
"NamedValues")
(ConstraintsIntersection,
SingleValueConstraint,
ValueRangeConstraint,
ValueSizeConstraint,
ConstraintsUnion) = mibBuilder.importSymbols(
"ASN1-REFINEMENT",
"ConstraintsIntersection",
"SingleValueConstraint",
"ValueRangeConstraint",
"ValueSizeConstraint",
"ConstraintsUnion")
(Counter64,
iso,
NotificationType,
ObjectIdentity,
Bits,
ModuleIdentity,
TimeTicks,
Counter32,
IpAddress,
snmpProxys,
MibScalar,
MibTable,
MibTableRow,
MibTableColumn,
Gauge32,
Unsigned32,
snmpDomains,
Integer32,
MibIdentifier,
snmpModules) = mibBuilder.importSymbols(
"SNMPv2-SMI",
"Counter64",
"iso",
"NotificationType",
"ObjectIdentity",
"Bits",
"ModuleIdentity",
"TimeTicks",
"Counter32",
"IpAddress",
"snmpProxys",
"MibScalar",
"MibTable",
"MibTableRow",
"MibTableColumn",
"Gauge32",
"Unsigned32",
"snmpDomains",
"Integer32",
"MibIdentifier",
"snmpModules")
(TextualConvention,) = mibBuilder.importSymbols(
"SNMPv2-TC",
"TextualConvention")
snmpv2tm = ModuleIdentity(
(1, 3, 6, 1, 6, 3, 19)
)
snmpv2tm.setRevisions(
("2002-10-16 00:00",
"1996-01-01 00:00",
"1993-04-01 00:00")
)
snmpv2tm.setLastUpdated("200210160000Z")
if mibBuilder.loadTexts:
snmpv2tm.setOrganization("""\
IETF SNMPv3 Working Group
""")
snmpv2tm.setContactInfo("""\
WG-EMail: [email protected] Subscribe: [email protected]
Co-Chair: Russ Mundy Network Associates Laboratories postal: 15204 Omega Drive,
Suite 300 Rockville, MD 20850-4601 USA EMail: [email protected] phone: +1 301
947-7107 Co-Chair: David Harrington Enterasys Networks postal: 35 Industrial
Way P. O. Box 5005 Rochester, NH 03866-5005 USA EMail: [email protected] phone:
+1 603 337-2614 Editor: Randy Presuhn BMC Software, Inc. postal: 2141 North
First Street San Jose, CA 95131 USA EMail: [email protected] phone: +1 408
546-1006
""")
if mibBuilder.loadTexts:
snmpv2tm.setDescription("""\
The MIB module for SNMP transport mappings. Copyright (C) The Internet Society
(2002). This version of this MIB module is part of RFC 3417; see the RFC itself
for full legal notices.
""")
class SnmpUDPAddress(TextualConvention, OctetString):
status = "current"
displayHint = "1d.1d.1d.1d/2d"
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(6, 6),
)
if mibBuilder.loadTexts:
description = """\
Represents a UDP over IPv4 address: octets contents encoding 1-4 IP-address
network-byte order 5-6 UDP-port network-byte order
"""
fixedLength = 6
def prettyIn(self, value):
if isinstance(value, tuple):
# Wild hack -- need to implement TextualConvention.prettyIn
value = inet_pton(AF_INET, value[0]) + int2oct((value[1] >> 8) & 0xff) + int2oct(value[1] & 0xff)
return OctetString.prettyIn(self, value)
# Socket address syntax coercion
def __asSocketAddress(self):
if not hasattr(self, '__tuple_value'):
v = self.asOctets()
self.__tuple_value = (
inet_ntop(AF_INET, v[:4]),
oct2int(v[4]) << 8 | oct2int(v[5])
)
return self.__tuple_value
def __iter__(self):
return iter(self.__asSocketAddress())
def __getitem__(self, item):
return self.__asSocketAddress()[item]
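# Illustrative sketch (comment only, not part of the original MIB module):
# assuming this file has been loaded by mibBuilder, a UDP endpoint
# round-trips through SnmpUDPAddress like this --
#
#     addr = SnmpUDPAddress(('127.0.0.1', 161))
#     addr.asOctets()   # -> b'\x7f\x00\x00\x01\x00\xa1' (4 IP octets + 2 port octets)
#     tuple(addr)       # -> ('127.0.0.1', 161)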
class SnmpOSIAddress(TextualConvention, OctetString):
status = "current"
displayHint = "*1x:/1x:"
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(1, 1),
ValueSizeConstraint(4, 85),
)
if mibBuilder.loadTexts:
description = """\
Represents an OSI transport-address: octets contents encoding 1 length of NSAP
'n' as an unsigned-integer (either 0 or from 3 to 20) 2..(n+1) NSAP concrete
binary representation (n+2)..m TSEL string of (up to 64) octets
"""
class SnmpNBPAddress(TextualConvention, OctetString):
status = "current"
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(3, 99),
)
if mibBuilder.loadTexts:
description = """\
Represents an NBP name: octets contents encoding 1 length of object 'n' as an
unsigned integer 2..(n+1) object string of (up to 32) octets n+2 length of type
'p' as an unsigned integer (n+3)..(n+2+p) type string of (up to 32) octets
n+3+p length of zone 'q' as an unsigned integer (n+4+p)..(n+3+p+q) zone string
of (up to 32) octets For comparison purposes, strings are case-insensitive. All
strings may contain any octet other than 255 (hex ff).
"""
class SnmpIPXAddress(TextualConvention, OctetString):
status = "current"
displayHint = "4x.1x:1x:1x:1x:1x:1x.2d"
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(12, 12),
)
if mibBuilder.loadTexts:
description = """\
Represents an IPX address: octets contents encoding 1-4 network-number network-
byte order 5-10 physical-address network-byte order 11-12 socket-number
network-byte order
"""
fixedLength = 12
_SnmpUDPDomain_ObjectIdentity = ObjectIdentity
snmpUDPDomain = _SnmpUDPDomain_ObjectIdentity(
(1, 3, 6, 1, 6, 1, 1)
)
if mibBuilder.loadTexts:
snmpUDPDomain.setStatus("current")
if mibBuilder.loadTexts:
snmpUDPDomain.setDescription("""\
The SNMP over UDP over IPv4 transport domain. The corresponding transport
address is of type SnmpUDPAddress.
""")
_SnmpCLNSDomain_ObjectIdentity = ObjectIdentity
snmpCLNSDomain = _SnmpCLNSDomain_ObjectIdentity(
(1, 3, 6, 1, 6, 1, 2)
)
if mibBuilder.loadTexts:
snmpCLNSDomain.setStatus("current")
if mibBuilder.loadTexts:
snmpCLNSDomain.setDescription("""\
The SNMP over CLNS transport domain. The corresponding transport address is of
type SnmpOSIAddress.
""")
_SnmpCONSDomain_ObjectIdentity = ObjectIdentity
snmpCONSDomain = _SnmpCONSDomain_ObjectIdentity(
(1, 3, 6, 1, 6, 1, 3)
)
if mibBuilder.loadTexts:
snmpCONSDomain.setStatus("current")
if mibBuilder.loadTexts:
snmpCONSDomain.setDescription("""\
The SNMP over CONS transport domain. The corresponding transport address is of
type SnmpOSIAddress.
""")
_SnmpDDPDomain_ObjectIdentity = ObjectIdentity
snmpDDPDomain = _SnmpDDPDomain_ObjectIdentity(
(1, 3, 6, 1, 6, 1, 4)
)
if mibBuilder.loadTexts:
snmpDDPDomain.setStatus("current")
if mibBuilder.loadTexts:
snmpDDPDomain.setDescription("""\
The SNMP over DDP transport domain. The corresponding transport address is of
type SnmpNBPAddress.
""")
_SnmpIPXDomain_ObjectIdentity = ObjectIdentity
snmpIPXDomain = _SnmpIPXDomain_ObjectIdentity(
(1, 3, 6, 1, 6, 1, 5)
)
if mibBuilder.loadTexts:
snmpIPXDomain.setStatus("current")
if mibBuilder.loadTexts:
snmpIPXDomain.setDescription("""\
The SNMP over IPX transport domain. The corresponding transport address is of
type SnmpIPXAddress.
""")
_Rfc1157Proxy_ObjectIdentity = ObjectIdentity
rfc1157Proxy = _Rfc1157Proxy_ObjectIdentity(
(1, 3, 6, 1, 6, 2, 1)
)
_Rfc1157Domain_ObjectIdentity = ObjectIdentity
rfc1157Domain = _Rfc1157Domain_ObjectIdentity(
(1, 3, 6, 1, 6, 2, 1, 1)
)
if mibBuilder.loadTexts:
rfc1157Domain.setStatus("deprecated")
if mibBuilder.loadTexts:
rfc1157Domain.setDescription("""\
The transport domain for SNMPv1 over UDP over IPv4. The corresponding transport
address is of type SnmpUDPAddress.
""")
mibBuilder.exportSymbols(
"SNMPv2-TM",
**{"SnmpUDPAddress": SnmpUDPAddress,
"SnmpOSIAddress": SnmpOSIAddress,
"SnmpNBPAddress": SnmpNBPAddress,
"SnmpIPXAddress": SnmpIPXAddress,
"snmpUDPDomain": snmpUDPDomain,
"snmpCLNSDomain": snmpCLNSDomain,
"snmpCONSDomain": snmpCONSDomain,
"snmpDDPDomain": snmpDDPDomain,
"snmpIPXDomain": snmpIPXDomain,
"rfc1157Proxy": rfc1157Proxy,
"rfc1157Domain": rfc1157Domain,
"snmpv2tm": snmpv2tm}
)
| 28.92233 | 109 | 0.71176 | [
"BSD-2-Clause"
] | BurgundyWillow/pysnmp | pysnmp/smi/mibs/SNMPv2-TM.py | 8,937 | Python |
from .mail import (MSGraphMailSource, MSGraphMailAccountHandle,
MSGraphMailAccountResource, MSGraphMailAccountSource,
MSGraphMailMessageResource, MSGraphMailMessageHandle) # noqa
from . import files # noqa
| 44.8 | 69 | 0.794643 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | bellcom/os2datascanner | src/os2datascanner/engine2/model/msgraph/__init__.py | 224 | Python |
# coding=utf-8
"""Tests for certbot._internal.main."""
# pylint: disable=too-many-lines
import datetime
from importlib import reload as reload_module
import io
import itertools
import json
import shutil
import sys
import tempfile
import traceback
import unittest
from typing import List
import josepy as jose
import pytz
from certbot import crypto_util
from certbot import errors
from certbot import interfaces # pylint: disable=unused-import
from certbot import util
from certbot._internal import account
from certbot._internal import cli
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import main
from certbot._internal import updater
from certbot._internal.plugins import disco
from certbot._internal.plugins import manual
from certbot._internal.plugins import null
from certbot.compat import filesystem
from certbot.compat import os
from certbot.plugins import enhancements
import certbot.tests.util as test_util
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
CERT_PATH = test_util.vector_path('cert_512.pem')
CERT = test_util.vector_path('cert_512.pem')
CSR = test_util.vector_path('csr_512.der')
KEY = test_util.vector_path('rsa256_key.pem')
JWK = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
RSA2048_KEY_PATH = test_util.vector_path('rsa2048_key.pem')
SS_CERT_PATH = test_util.vector_path('cert_2048.pem')
class TestHandleCerts(unittest.TestCase):
"""Test for certbot._internal.main._handle_* methods"""
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_identical_cert_request_pending(self, mock_handle_migration):
mock_lineage = mock.Mock()
mock_lineage.ensure_deployed.return_value = False
# pylint: disable=protected-access
ret = main._handle_identical_cert_request(mock.Mock(), mock_lineage)
self.assertEqual(ret, ("reinstall", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_subset_cert_request(self, mock_handle_migration):
mock_config = mock.Mock()
mock_config.expand = True
mock_lineage = mock.Mock()
mock_lineage.names.return_value = ["dummy1", "dummy2"]
ret = main._handle_subset_cert_request(mock_config, ["dummy1"], mock_lineage)
self.assertEqual(ret, ("renew", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main.cli.set_by_cli")
def test_handle_unexpected_key_type_migration(self, mock_set):
config = mock.Mock()
config.key_type = "rsa"
cert = mock.Mock()
cert.private_key_type = "ecdsa"
mock_set.return_value = True
main._handle_unexpected_key_type_migration(config, cert)
mock_set.return_value = False
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "certname"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "key_type"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
class RunTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.run."""
def setUp(self):
super().setUp()
self.domain = 'example.org'
patches = [
mock.patch('certbot._internal.main._get_and_save_cert'),
mock.patch('certbot._internal.main.display_ops.success_installation'),
mock.patch('certbot._internal.main.display_ops.success_renewal'),
mock.patch('certbot._internal.main._init_le_client'),
mock.patch('certbot._internal.main._suggest_donation_if_appropriate'),
mock.patch('certbot._internal.main._report_new_cert'),
mock.patch('certbot._internal.main._find_cert'),
mock.patch('certbot._internal.eff.handle_subscription'),
]
self.mock_auth = patches[0].start()
self.mock_success_installation = patches[1].start()
self.mock_success_renewal = patches[2].start()
self.mock_init = patches[3].start()
self.mock_suggest_donation = patches[4].start()
self.mock_report_cert = patches[5].start()
self.mock_find_cert = patches[6].start()
self.mock_subscription = patches[7].start()
for patch in patches:
self.addCleanup(patch.stop)
def _call(self):
args = '-a webroot -i null -d {0}'.format(self.domain).split()
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import run
run(config, plugins)
def test_newcert_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, None
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_reinstall_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = False, mock.Mock()
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_renewal_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, mock.Mock()
self._call()
self.mock_success_renewal.assert_called_once_with([self.domain])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
def test_run_enhancement_not_supported(self, mock_choose):
mock_choose.return_value = (null.Installer(self.config, "null"), None)
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.assertRaises(errors.NotSupportedError,
main.run,
self.config, plugins)
class CertonlyTest(unittest.TestCase):
"""Tests for certbot._internal.main.certonly."""
def setUp(self):
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
with mock.patch('certbot._internal.main._suggest_donation_if_appropriate'):
with mock.patch('certbot._internal.eff.handle_subscription'):
main.certonly(config, plugins)
return mock_init() # returns the client
@mock.patch('certbot._internal.main._find_cert')
@mock.patch('certbot._internal.main._get_and_save_cert')
@mock.patch('certbot._internal.main._report_new_cert')
def test_no_reinstall_text_pause(self, unused_report, mock_auth,
mock_find_cert):
mock_notification = self.mock_get_utility().notification
mock_notification.side_effect = self._assert_no_pause
mock_auth.return_value = mock.Mock()
mock_find_cert.return_value = False, None
self._call('certonly --webroot -d example.com'.split())
def _assert_no_pause(self, message, pause=True): # pylint: disable=unused-argument
self.assertFalse(pause)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot._internal.renewal.renew_cert')
@mock.patch('certbot._internal.main._handle_unexpected_key_type_migration')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_and_certname(self, mock_report_cert,
mock_handle_type, mock_renew_cert, mock_domains, mock_lineage):
domains = ['example.com', 'test.org']
mock_domains.return_value = domains
mock_lineage.names.return_value = domains
self._call(('certonly --webroot -d example.com -d test.org '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_domains.call_count, 1)
self.assertEqual(mock_renew_cert.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
self.assertEqual(mock_handle_type.call_count, 1)
# user confirms updating lineage with new domains
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 2)
self.assertEqual(mock_domains.call_count, 2)
self.assertEqual(mock_renew_cert.call_count, 2)
self.assertEqual(mock_report_cert.call_count, 2)
self.assertEqual(mock_handle_type.call_count, 2)
# error in _ask_user_to_confirm_new_names
self.mock_get_utility().yesno.return_value = False
self.assertRaises(errors.ConfigurationError, self._call,
'certonly --webroot -d example.com -d test.com --cert-name example.com'.split())
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot.display.ops.choose_names')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_new_certname(self, mock_report_cert,
mock_lineage, mock_choose_names, mock_domains_for_certname):
mock_lineage.return_value = None
# no lineage with this name but we specified domains so create a new cert
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
# no lineage with this name and we didn't give domains
mock_choose_names.return_value = ["somename"]
mock_domains_for_certname.return_value = None
self._call(('certonly --webroot --cert-name example.com').split())
self.assertIs(mock_choose_names.called, True)
class FindDomainsOrCertnameTest(unittest.TestCase):
"""Tests for certbot._internal.main._find_domains_or_certname."""
@mock.patch('certbot.display.ops.choose_names')
def test_display_ops(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = "domainname"
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
("domainname", None))
@mock.patch('certbot.display.ops.choose_names')
def test_no_results(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = []
# pylint: disable=protected-access
self.assertRaises(errors.Error, main._find_domains_or_certname, mock_config, None)
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
def test_grab_domains(self, mock_domains):
mock_config = mock.Mock(domains=None, certname="one.com")
mock_domains.return_value = ["one.com", "two.com"]
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
(["one.com", "two.com"], "one.com"))
class RevokeTest(test_util.TempDirTestCase):
"""Tests for certbot._internal.main.revoke."""
def setUp(self):
super().setUp()
shutil.copy(CERT_PATH, self.tempdir)
self.tmp_cert_path = os.path.abspath(os.path.join(self.tempdir, 'cert_512.pem'))
patches = [
mock.patch('acme.client.BackwardsCompatibleClientV2'),
mock.patch('certbot._internal.client.Client'),
mock.patch('certbot._internal.main._determine_account'),
mock.patch('certbot._internal.main.display_ops.success_revocation')
]
self.mock_acme_client = patches[0].start()
patches[1].start()
self.mock_determine_account = patches[2].start()
self.mock_success_revoke = patches[3].start()
for patch in patches:
self.addCleanup(patch.stop)
from certbot._internal.account import Account
self.regr = mock.MagicMock()
self.meta = Account.Meta(
creation_host="test.certbot.org",
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
self.acc = Account(self.regr, JWK, self.meta)
self.mock_determine_account.return_value = (self.acc, None)
def _call(self, args=None):
if not args:
args = 'revoke --cert-path={0} '
args = args.format(self.tmp_cert_path).split()
cli.set_by_cli.detector = None # required to reset set_by_cli state
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import revoke
revoke(config, plugins)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_reason(self, mock_acme_client,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
expected = []
for reason, code in constants.REVOCATION_REASONS.items():
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path, reason).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path,
reason.upper()).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
self.assertEqual(expected, mock_revoke.call_args_list)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://acme.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_and_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
"""Revoking with --server should use the server from the CLI"""
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com --server https://other.example'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://other.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_empty_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
"""Revoking with --cert-name where the lineage server is empty shouldn't crash """
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path, server=None)
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(
mock.ANY, mock.ANY, constants.CLI_DEFAULTS['server'])
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
def test_revocation_success(self, mock_delete_if_appropriate):
self._call()
mock_delete_if_appropriate.return_value = False
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
def test_revocation_error(self):
from acme import errors as acme_errors
self.mock_acme_client.side_effect = acme_errors.ClientError()
self.assertRaises(acme_errors.ClientError, self._call)
self.mock_success_revoke.assert_not_called()
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_revocation_with_prompt(self, mock_get_utility,
mock_delete, mock_delete_if_appropriate):
mock_get_utility().yesno.return_value = False
mock_delete_if_appropriate.return_value = False
self._call()
self.assertFalse(mock_delete.called)
class DeleteIfAppropriateTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main._delete_if_appropriate """
def _call(self, mock_config):
from certbot._internal.main import _delete_if_appropriate
_delete_if_appropriate(mock_config)
def _test_delete_opt_out_common(self):
with mock.patch('certbot._internal.cert_manager.delete') as mock_delete:
self._call(self.config)
mock_delete.assert_not_called()
@test_util.patch_get_utility()
def test_delete_flag_opt_out(self, unused_mock_get_utility):
self.config.delete_after_revoke = False
self._test_delete_opt_out_common()
@test_util.patch_get_utility()
def test_delete_prompt_opt_out(self, mock_get_utility):
util_mock = mock_get_utility()
util_mock.yesno.return_value = False
self._test_delete_opt_out_common()
@mock.patch("certbot._internal.main.logger.warning")
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_overlapping_archive_dirs(self, mock_get_utility,
mock_cert_path_to_lineage, mock_archive,
mock_match_and_check_overlaps, mock_delete,
mock_renewal_file_for_certname, mock_warning):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_match_and_check_overlaps.side_effect = errors.OverlappingMatchFound()
self._call(config)
mock_delete.assert_not_called()
self.assertEqual(mock_warning.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_cert_path_only(self, mock_get_utility,
mock_cert_path_to_lineage, mock_delete, mock_archive,
mock_overlapping_archive_dirs, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_overlapping_archive_dirs.return_value = False
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_noninteractive_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.noninteractive_mode = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_opt_in_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.delete_after_revoke = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
self.assertFalse(mock_get_utility().yesno.called)
class DetermineAccountTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main._determine_account."""
def setUp(self):
super().setUp()
self.config.account = None
self.config.email = None
self.config.register_unsafely_without_email = False
self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
self.account_storage = account.AccountMemoryStorage()
# For use in saving accounts: fake out the new_authz URL.
self.mock_client = mock.MagicMock()
self.mock_client.directory.new_authz = "hi"
def _call(self):
# pylint: disable=protected-access
from certbot._internal.main import _determine_account
with mock.patch('certbot._internal.main.account.AccountFileStorage') as mock_storage, \
test_util.patch_get_utility():
mock_storage.return_value = self.account_storage
return _determine_account(self.config)
def test_args_account_set(self):
self.account_storage.save(self.accs[1], self.mock_client)
self.config.account = self.accs[1].id
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
def test_single_account(self):
self.account_storage.save(self.accs[0], self.mock_client)
self.assertEqual((self.accs[0], None), self._call())
self.assertEqual(self.accs[0].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.choose_account')
def test_multiple_accounts(self, mock_choose_accounts):
for acc in self.accs:
self.account_storage.save(acc, self.mock_client)
mock_choose_accounts.return_value = self.accs[1]
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(
set(mock_choose_accounts.call_args[0][0]), set(self.accs))
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.get_email')
@mock.patch('certbot._internal.main.display_util.notify')
def test_no_accounts_no_email(self, mock_notify, mock_get_email):
mock_get_email.return_value = '[email protected]'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (
self.accs[0], mock.sentinel.acme)
self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
client.register.assert_called_once_with(
self.config, self.account_storage, tos_cb=mock.ANY)
self.assertEqual(self.accs[0].id, self.config.account)
self.assertEqual('[email protected]', self.config.email)
mock_notify.assert_called_once_with('Account registered.')
def test_no_accounts_email(self):
self.config.email = 'other email'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (self.accs[1], mock.sentinel.acme)
self._call()
self.assertEqual(self.accs[1].id, self.config.account)
self.assertEqual('other email', self.config.email)
class MainTest(test_util.ConfigTestCase):
"""Tests for different commands."""
def setUp(self):
super().setUp()
filesystem.mkdir(self.config.logs_dir)
self.standard_args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text']
self.mock_sleep = mock.patch('time.sleep').start()
def tearDown(self):
# Reset globals in cli
reload_module(cli)
super().tearDown()
def _call(self, args, stdout=None, mockisfile=False):
"""Run the cli with output streams, actual client and optionally
os.path.isfile() mocked out"""
if mockisfile:
orig_open = os.path.isfile
def mock_isfile(fn, *args, **kwargs): # pylint: disable=unused-argument
"""Mock os.path.isfile()"""
if (fn.endswith("cert") or
fn.endswith("chain") or
fn.endswith("privkey")):
return True
return orig_open(fn)
with mock.patch("certbot.compat.os.path.isfile") as mock_if:
mock_if.side_effect = mock_isfile
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
else:
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
def _call_no_clientmock(self, args, stdout=None):
"Run the client with output streams mocked out"
args = self.standard_args + args
toy_stdout = stdout if stdout else io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_stdout):
with mock.patch('certbot._internal.main.sys.stderr') as stderr:
with mock.patch("certbot.util.atexit"):
ret = main.main(args[:]) # NOTE: parser can alter its args!
return ret, toy_stdout, stderr
def test_no_flags(self):
with mock.patch('certbot._internal.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
def test_version_string_program_name(self):
toy_out = io.StringIO()
toy_err = io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_out):
with mock.patch('certbot._internal.main.sys.stderr', new=toy_err):
try:
main.main(["--version"])
except SystemExit:
pass
finally:
output = toy_out.getvalue() or toy_err.getvalue()
self.assertTrue("certbot" in output, "Output is {0}".format(output))
def _cli_missing_flag(self, args, message):
"Ensure that a particular error raises a missing cli flag error containing message"
exc = None
try:
with mock.patch('certbot._internal.main.sys.stderr'):
main.main(self.standard_args + args[:]) # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as exc_:
exc = exc_
self.assertTrue(message in str(exc))
self.assertTrue(exc is not None)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_noninteractive(self, _):
args = ['-n', 'certonly']
self._cli_missing_flag(args, "specify a plugin")
args.extend(['--standalone', '-d', 'eg.is'])
self._cli_missing_flag(args, "register before running")
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot._internal.main.client.acme_client.Client')
@mock.patch('certbot._internal.main._determine_account')
@mock.patch('certbot._internal.main.client.Client.obtain_and_enroll_certificate')
@mock.patch('certbot._internal.main._get_and_save_cert')
def test_user_agent(self, gsc, _obt, det, _client, _, __, ___):
# Normally the client is totally mocked out, but here we need more
# arguments to automate it...
args = ["--standalone", "certonly", "-m", "[email protected]",
"-d", "example.com", '--agree-tos'] + self.standard_args
det.return_value = mock.MagicMock(), None
gsc.return_value = mock.MagicMock()
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
self._call_no_clientmock(args)
os_ver = util.get_os_info_ua()
ua = acme_net.call_args[1]["user_agent"]
self.assertTrue(os_ver in ua)
import platform
plat = platform.platform()
if "linux" in plat.lower():
self.assertTrue(util.get_os_info_ua() in ua)
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
ua = "bandersnatch"
args += ["--user-agent", ua]
self._call_no_clientmock(args)
acme_net.assert_called_once_with(mock.ANY, account=mock.ANY, verify_ssl=True,
user_agent=ua)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_selection(self, mock_pick_installer, _rec):
self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
'--key-path', 'privkey', '--chain-path', 'chain'], mockisfile=True)
self.assertEqual(mock_pick_installer.call_count, 1)
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_certname(self, _inst, _rec, mock_install):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever'], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_override(self, _inst, _rec, mock_install, _):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever',
'--key-path', test_util.temp_join('overriding_privkey')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.chain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('overriding_privkey'))
            mock_install.reset_mock()
self._call(['install', '--cert-name', 'whatever',
'--cert-path', test_util.temp_join('overriding_cert')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('overriding_cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_error(self, _inst, _rec):
self.assertRaises(errors.ConfigurationError,
self._call,
['install', '--cert-name', 'notfound',
'--key-path', 'invalid'])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.cert_manager.get_certnames')
@mock.patch('certbot._internal.main._install_cert')
def test_installer_select_cert(self, mock_inst, mock_getcert, _inst, _rec):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install'], mockisfile=True)
self.assertTrue(mock_getcert.called)
self.assertTrue(mock_inst.called)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot.util.exe_exists')
def test_configurator_selection(self, mock_exe_exists, _, __, ___):
mock_exe_exists.return_value = True
real_plugins = disco.PluginsRegistry.find_all()
args = ['--apache', '--authenticator', 'standalone']
# This needed two calls to find_all(), which we're avoiding for now
# because of possible side effects:
# https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855
# with mock.patch('certbot._internal.cli.plugins_testable') as plugins:
# plugins.return_value = {"apache": True, "nginx": True}
# ret, _, _, _ = self._call(args)
# self.assertTrue("Too many flags setting" in ret)
args = ["install", "--nginx", "--cert-path",
test_util.temp_join('blah'), "--key-path", test_util.temp_join('blah'),
"--nginx-server-root", "/nonexistent/thing", "-d",
"example.com", "--debug"]
if "nginx" in real_plugins:
# Sending nginx a non-existent conf dir will simulate misconfiguration
# (we can only do that if certbot-nginx is actually present)
ret, _, _, _ = self._call(args)
self.assertTrue("The nginx plugin is not working" in ret)
self.assertTrue("MisconfigurationError" in ret)
self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
with mock.patch("certbot._internal.main._init_le_client") as mock_init:
with mock.patch("certbot._internal.main._get_and_save_cert") as mock_gsc:
mock_gsc.return_value = mock.MagicMock()
self._call(["certonly", "--manual", "-d", "foo.bar"])
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_rollback(self, _):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
@mock.patch('certbot._internal.cert_manager.update_live_symlinks')
def test_update_symlinks(self, mock_cert_manager):
self._call_no_clientmock(['update_symlinks'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.certificates')
def test_certificates(self, mock_cert_manager):
self._call_no_clientmock(['certificates'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.delete')
def test_delete(self, mock_cert_manager):
self._call_no_clientmock(['delete'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_plugins(self, _, _det, mock_disco):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in range(len(flags)))):
self._call(['plugins'] + list(args))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args_unprivileged(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
def throw_error(directory, mode, strict):
"""Raises error.Error."""
_, _, _ = directory, mode, strict
raise errors.Error()
stdout = io.StringIO()
with mock.patch('certbot.util.set_up_core_dir') as mock_set_up_core_dir:
with test_util.patch_get_utility_with_stdout(stdout=stdout):
mock_set_up_core_dir.side_effect = throw_error
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_init(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
self.assertEqual(stdout.getvalue().strip(), str(verified))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_prepare(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init', '--prepare'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
verified.prepare.assert_called_once_with()
verified.available.assert_called_once_with()
available = verified.available()
self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
config, unused_plugins = mock_certonly.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in str(e))
def test_check_config_sanity_domain(self):
# FQDN
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'a' * 64])
# FQDN 2
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', (('a' * 50) + '.') * 10])
# Bare IP address (this is actually a different error message now)
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0} --allow-subset-of-names'.format(CSR).split())
def test_run_with_csr(self):
# This is an error because you can only use --csr with certonly
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def test_csr_with_no_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0}'.format(
test_util.vector_path('csr-nonames_512.pem')).split())
def test_csr_with_inconsistent_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly -d example.org --csr {0}'.format(CSR).split())
def _certonly_new_request_common(self, mock_client, args=None):
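        # Helper: pretend no matching lineage exists ("newcert") and inject the
        # given mock client, then run certonly for foo.bar with the standalone
        # authenticator plus any extra CLI args.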
with mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname') \
as mock_renewal:
mock_renewal.return_value = ("newcert", None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
if args is None:
args = []
args += '-d foo.bar -a standalone certonly'.split()
self._call(args)
@test_util.patch_get_utility()
def test_certonly_dry_run_new_request_success(self, mock_get_utility):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = None
self._certonly_new_request_common(mock_client, ['--dry-run'])
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
# Asserts we don't suggest donating after a successful dry run
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot.crypto_util.notAfter')
@test_util.patch_get_utility()
def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter, mock_subscription):
cert_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/foo.bar'))
key_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/baz.qux'))
date = '1970-01-01'
mock_notAfter().date.return_value = date
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path,
fullchain_path=cert_path, key_path=key_path)
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
self._certonly_new_request_common(mock_client)
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue(cert_path in cert_msg)
self.assertTrue(date in cert_msg)
self.assertTrue(key_path in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_new_request_failure(self, mock_subscription):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
self.assertFalse(mock_subscription.called)
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False,
quiet_mode=False, expiry_date=datetime.datetime.now(),
reuse_key=False):
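        # Shared renewal harness: fakes an existing lineage (due or not due for
        # renewal per `due_for_renewal`), runs the given CLI args (default:
        # standalone certonly for isnot.org) and asserts whether a certificate
        # was requested. `log_out`, when given, must appear in letsencrypt.log;
        # `error_expected` flips the success check; `reuse_key` additionally
        # checks that the previous privkey.pem path is passed to
        # obtain_certificate.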
cert_path = test_util.vector_path('cert_512.pem')
chain_path = os.path.normpath(os.path.join(self.config.config_dir,
'live/foo.bar/fullchain.pem'))
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path,
cert_path=cert_path, fullchain_path=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_lineage.has_pending_deployment.return_value = False
mock_lineage.names.return_value = ['isnot.org']
mock_lineage.private_key_type = 'RSA'
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = io.StringIO()
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
def write_msg(message, *args, **kwargs): # pylint: disable=unused-argument
"""Write message to stdout."""
stdout.write(message)
try:
with mock.patch('certbot._internal.cert_manager.find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
if not quiet_mode:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot._internal.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Artificial pretend"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('certbot._internal.main.renewal.crypto_util') \
as mock_crypto_util:
mock_crypto_util.notAfter.return_value = expiry_date
with mock.patch('certbot._internal.eff.handle_subscription'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args, stdout)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
if reuse_key:
# The location of the previous live privkey.pem is passed
# to obtain_certificate
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'],
os.path.normpath(os.path.join(
self.config.config_dir, "live/sample-renewal/privkey.pem")))
else:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'], None)
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.config.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal(self, _):
lineage, get_utility, _ = self._test_renewal_common(True, [])
self.assertEqual(lineage.save_successor.call_count, 1)
lineage.update_all_links_to.assert_called_once_with(
lineage.latest_common_version())
cert_msg = get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.log.logging.handlers.RotatingFileHandler.doRollover')
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal_triggers(self, _, __):
# --dry-run should force renewal
_, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
log_out="simulating renewal")
self.assertEqual(get_utility().add_message.call_count, 1)
self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
self._test_renewal_common(False, ['--renew-by-default', '-tvv', '--debug'],
log_out="Auto-renewal forced")
self.assertEqual(get_utility().add_message.call_count, 1)
self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
log_out="not yet due", should_renew=False)
def _dump_log(self):
print("Logs:")
log_path = os.path.join(self.config.logs_dir, "letsencrypt.log")
if os.path.exists(log_path):
with open(log_path) as lf:
print(lf.read())
def test_renew_verb(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_reuse_key(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('certbot._internal.storage.RenewableCert.save_successor')
def test_reuse_key_no_dry_run(self, unused_save_successor):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('sys.stdin')
def test_noninteractive_renewal_delay(self, stdin):
stdin.isatty.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 1)
# in main.py:
# sleep_time = random.randint(1, 60*8)
sleep_call_arg = self.mock_sleep.call_args[0][0]
self.assertTrue(1 <= sleep_call_arg <= 60*8)
@mock.patch('sys.stdin')
def test_interactive_no_renewal_delay(self, stdin):
stdin.isatty.return_value = True
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 0)
@mock.patch('certbot._internal.renewal.should_renew')
def test_renew_skips_recent_certs(self, should_renew):
should_renew.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
expiry = datetime.datetime.now() + datetime.timedelta(days=90)
_, _, stdout = self._test_renewal_common(False, extra_args=None, should_renew=False,
args=['renew'], expiry_date=expiry)
self.assertTrue('No renewals were attempted.' in stdout.getvalue())
self.assertTrue('The following certificates are not due for renewal yet:' in stdout.getvalue())
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_quiet_renew(self, _):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args,
should_renew=True, quiet_mode=True)
out = stdout.getvalue()
self.assertEqual("", out)
def test_renew_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command"]
self._test_renewal_common(True, [], args=args, should_renew=False,
error_expected=True)
def test_renew_no_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command",
"--disable-hook-validation"]
with mock.patch("certbot._internal.hooks.post_hook"):
self._test_renewal_common(True, [], args=args, should_renew=True,
error_expected=False)
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config.config_dir, 'renewal')
if not os.path.exists(rd):
filesystem.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def test_renew_with_certname(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
self._test_renewal_common(True, [], should_renew=True,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'])
def test_renew_with_bad_certname(self):
self._test_renewal_common(True, [], should_renew=False,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'],
error_expected=True)
def _make_dummy_renewal_config(self):
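        # Drop a placeholder renewal config file so the "renew" verb has a
        # lineage file to iterate over (its contents are never parsed because
        # RenewableCert is mocked in these tests).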
renewer_configs_dir = os.path.join(self.config.config_dir, 'renewal')
filesystem.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
def _test_renew_common(self, renewalparams=None, names=None,
assert_oc_called=None, **kwargs):
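        # Helper for the "renew" verb: writes a dummy renewal config, mocks the
        # RenewableCert lineage (optionally with renewalparams and names) and,
        # via assert_oc_called, checks whether renew_cert() ended up running.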
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somepath/fullchain.pem"
if renewalparams is not None:
mock_lineage.configuration = {'renewalparams': renewalparams}
if names is not None:
mock_lineage.names.return_value = names
mock_rc.return_value = mock_lineage
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
kwargs.setdefault('args', ['renew'])
self._test_renewal_common(True, None, should_renew=False, **kwargs)
if assert_oc_called is not None:
if assert_oc_called:
self.assertTrue(mock_renew_cert.called)
else:
self.assertFalse(mock_renew_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['uniçodé.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
def test_renew_with_configurator(self, mock_sel):
mock_sel.return_value = (mock.MagicMock(), mock.MagicMock())
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_webroot_map(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args=['renew', '--webroot-map', json.dumps({'example.com': tempfile.gettempdir()})])
def test_renew_reconstitute_error(self):
# pylint: disable=protected-access
with mock.patch('certbot._internal.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_obtain_cert_error(self):
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somewhere/fullchain.pem"
mock_rc.return_value = mock_lineage
mock_lineage.configuration = {
'renewalparams': {'authenticator': 'webroot'}}
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
mock_renew_cert.side_effect = Exception
self._test_renewal_common(True, None, error_expected=True,
args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
def test_no_renewal_with_hooks(self):
_, _, stdout = self._test_renewal_common(
due_for_renewal=False, extra_args=None, should_renew=False,
args=['renew', '--post-hook',
'{0} -c "print(\'hello world\');"'
.format(sys.executable)])
self.assertTrue('No hooks were run.' in stdout.getvalue())
@test_util.patch_get_utility()
@mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname')
@mock.patch('certbot._internal.main._init_le_client')
@mock.patch('certbot._internal.main._report_new_cert')
def test_certonly_reinstall(self, mock_report_new_cert, mock_init,
mock_renewal, mock_get_utility):
mock_renewal.return_value = ('reinstall', mock.MagicMock())
mock_init.return_value = mock_client = mock.MagicMock()
self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
self.assertFalse(mock_client.obtain_certificate.called)
self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
self.assertEqual(mock_get_utility().add_message.call_count, 0)
mock_report_new_cert.assert_not_called()
#self.assertTrue('donate' not in mock_get_utility().add_message.call_args[0][0])
def _test_certonly_csr_common(self, extra_args=None):
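        # Helper for --csr runs: the mocked client returns fixed cert/chain
        # objects and save paths; runs certonly --csr with explicit
        # --cert-path/--chain-path/--fullchain-path plus any extra_args and
        # verifies save_certificate() usage (skipped under --dry-run).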
certr = 'certr'
chain = 'chain'
mock_client = mock.MagicMock()
mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
cert_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/cert_512.pem'))
full_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/fullchain.pem'))
mock_client.save_certificate.return_value = cert_path, None, full_path
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
chain_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/chain.pem'))
args = ('-a standalone certonly --csr {0} --cert-path {1} '
'--chain-path {2} --fullchain-path {3}').format(
CSR, cert_path, chain_path, full_path).split()
if extra_args:
args += extra_args
with mock.patch('certbot._internal.main.crypto_util'):
self._call(args)
if '--dry-run' in args:
self.assertFalse(mock_client.save_certificate.called)
else:
mock_client.save_certificate.assert_called_once_with(
certr, chain, cert_path, chain_path, full_path)
return mock_get_utility
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_csr(self, mock_subscription):
mock_get_utility = self._test_certonly_csr_common()
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertFalse('Your key file has been saved at' in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_key(self, mock_acme_client,
mock_delete_if_appropriate):
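        # Revoking with an explicit --key-path should build the ACME client from
        # that key and call revoke() on the certificate loaded from --cert-path.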
mock_delete_if_appropriate.return_value = False
server = 'foo.bar'
self._call_no_clientmock(['--cert-path', SS_CERT_PATH, '--key-path', RSA2048_KEY_PATH,
'--server', server, 'revoke'])
with open(RSA2048_KEY_PATH, 'rb') as f:
mock_acme_client.BackwardsCompatibleClientV2.assert_called_once_with(
mock.ANY, jose.JWK.load(f.read()), server)
with open(SS_CERT_PATH, 'rb') as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
def test_revoke_with_key_mismatch(self):
server = 'foo.bar'
self.assertRaises(errors.Error, self._call_no_clientmock,
['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main._determine_account')
def test_revoke_without_key(self, mock_determine_account,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_determine_account.return_value = (mock.MagicMock(), None)
_, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = client.acme_from_config_key().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_register(self, _):
with mock.patch('certbot._internal.main.client') as mocked_client:
acc = mock.MagicMock()
acc.id = "imaginary_account"
mocked_client.register.return_value = (acc, "worked")
self._call_no_clientmock(["register", "--email", "[email protected]"])
# TODO: It would be more correct to explicitly check that
# _determine_account() gets called in the above case,
# but coverage statistics should also show that it did.
with mock.patch('certbot._internal.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
x = self._call_no_clientmock(["register", "--email", "[email protected]"])
self.assertTrue("There is an existing account" in x[0])
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
@mock.patch('certbot._internal.updater._run_updaters')
def test_plugin_selection_error(self, mock_run, mock_choose):
mock_choose.side_effect = errors.PluginSelectionError
self.assertRaises(errors.PluginSelectionError, main.renew_cert,
None, None, None)
self.config.dry_run = False
updater.run_generic_updaters(self.config, None, None)
        # Make sure run_generic_updaters returns early (None) and hence does not
        # try to run the updaters without an installer
self.assertFalse(mock_run.called)
class UnregisterTest(unittest.TestCase):
def setUp(self):
self.patchers = {
'_determine_account': mock.patch('certbot._internal.main._determine_account'),
'account': mock.patch('certbot._internal.main.account'),
'client': mock.patch('certbot._internal.main.client'),
'get_utility': test_util.patch_get_utility()}
self.mocks = {k: v.start() for k, v in self.patchers.items()}
def tearDown(self):
for patch in self.patchers.values():
patch.stop()
def test_abort_unregister(self):
self.mocks['account'].AccountFileStorage.return_value = mock.Mock()
util_mock = self.mocks['get_utility']()
util_mock.yesno.return_value = False
config = mock.Mock()
unused_plugins = mock.Mock()
res = main.unregister(config, unused_plugins)
self.assertEqual(res, "Deactivation aborted.")
@mock.patch("certbot._internal.main.display_util.notify")
def test_unregister(self, mock_notify):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = ["an account"]
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
self.mocks['_determine_account'].return_value = (mock.MagicMock(), "foo")
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
self.assertTrue(res is None)
mock_notify.assert_called_once_with("Account deactivated.")
def test_unregister_no_account(self):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
m = "Could not find existing account to deactivate."
self.assertEqual(res, m)
self.assertFalse(cb_client.acme.deactivate_registration.called)
class MakeOrVerifyNeededDirs(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.make_or_verify_needed_dirs."""
@mock.patch("certbot._internal.main.util")
def test_it(self, mock_util):
main.make_or_verify_needed_dirs(self.config)
for core_dir in (self.config.config_dir, self.config.work_dir,):
mock_util.set_up_core_dir.assert_any_call(
core_dir, constants.CONFIG_DIRS_MODE,
self.config.strict_permissions
)
hook_dirs = (self.config.renewal_pre_hooks_dir,
self.config.renewal_deploy_hooks_dir,
self.config.renewal_post_hooks_dir,)
for hook_dir in hook_dirs:
# default mode of 755 is used
mock_util.make_or_verify_dir.assert_any_call(
hook_dir, strict=self.config.strict_permissions)
class EnhanceTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.enhance."""
def setUp(self):
super().setUp()
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
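        # Parse args with the real CLI parser, then run main.enhance() with cert
        # name/domain lookups and client initialization mocked out; the mock
        # client is returned so tests can inspect enhance_config() calls.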
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.cert_manager.get_certnames') as mock_certs:
mock_certs.return_value = ['example.com']
with mock.patch('certbot._internal.cert_manager.domains_for_certname') as mock_dom:
mock_dom.return_value = ['example.com']
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_client = mock.MagicMock()
mock_client.config = config
mock_init.return_value = mock_client
main.enhance(config, plugins)
return mock_client # returns the client
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_question(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ['example.com']
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer') as mock_pick:
self._call(['enhance', '--redirect'])
self.assertTrue(mock_pick.called)
# Check that the message includes "enhancements"
self.assertTrue("enhancements" in mock_pick.call_args[0][3])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_auth_warning(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
with mock.patch('certbot._internal.main.plug_sel.logger.warning') as mock_log:
mock_client = self._call(['enhance', '-a', 'webroot', '--redirect'])
self.assertTrue(mock_log.called)
self.assertTrue("make sense" in mock_log.call_args[0][0])
self.assertTrue(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_config_call(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect', '--hsts'])
req_enh = ["redirect", "hsts"]
not_req_enh = ["uir"]
self.assertTrue(mock_client.enhance_config.called)
self.assertTrue(
all(getattr(mock_client.config, e) for e in req_enh))
self.assertFalse(
any(getattr(mock_client.config, e) for e in not_req_enh))
self.assertTrue(
"example.com" in mock_client.enhance_config.call_args[0][0])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_noninteractive(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(
chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect',
'--hsts', '--non-interactive'])
self.assertTrue(mock_client.enhance_config.called)
self.assertFalse(mock_choose.called)
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_user_abort_domains(self, _rec, mock_choose):
mock_choose.return_value = []
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
self.assertRaises(errors.Error,
self._call,
['enhance', '--redirect', '--hsts'])
def test_no_enhancements_defined(self):
self.assertRaises(errors.MisconfigurationError,
self._call, ['enhance', '-a', 'null'])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_plugin_selection_error(self, _rec, mock_choose, mock_pick):
mock_choose.return_value = ["example.com"]
mock_pick.return_value = (None, None)
mock_pick.side_effect = errors.PluginSelectionError()
mock_client = self._call(['enhance', '--hsts'])
self.assertFalse(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = self.mockinstaller
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self._call(['enhance', '--auto-hsts'])
self.assertTrue(self.mockinstaller.enable_autohsts.called)
self.assertEqual(self.mockinstaller.enable_autohsts.call_args[0][1],
["example.com", "another.tld"])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable_not_supported(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = null.Installer(self.config, "null")
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self.assertRaises(
errors.NotSupportedError,
self._call, ['enhance', '--auto-hsts'])
def test_enhancement_enable_conflict(self):
self.assertRaises(
errors.Error,
self._call, ['enhance', '--auto-hsts', '--hsts'])
class InstallTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.install."""
def setUp(self):
super().setUp()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_not_supported(self, mock_inst, _rec):
mock_inst.return_value = null.Installer(self.config, "null")
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = "nonexistent"
self.assertRaises(errors.NotSupportedError,
main.install,
self.config, plugins)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_no_certname(self, mock_inst, _rec):
mock_inst.return_value = self.mockinstaller
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = None
self.config.key_path = "/tmp/nonexistent"
self.config.cert_path = "/tmp/nonexistent"
self.assertRaises(errors.ConfigurationError,
main.install,
self.config, plugins)
class UpdateAccountTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.update_account"""
def setUp(self):
patches = {
'account': mock.patch('certbot._internal.main.account'),
'atexit': mock.patch('certbot.util.atexit'),
'client': mock.patch('certbot._internal.main.client'),
'determine_account': mock.patch('certbot._internal.main._determine_account'),
'notify': mock.patch('certbot._internal.main.display_util.notify'),
'prepare_sub': mock.patch('certbot._internal.eff.prepare_subscription'),
'util': test_util.patch_get_utility()
}
        self.mocks = {k: patches[k].start() for k in patches}
for patch in patches.values():
self.addCleanup(patch.stop)
return super().setUp()
def _call(self, args):
with mock.patch('certbot._internal.main.sys.stdout'), \
mock.patch('certbot._internal.main.sys.stderr'):
args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text'] + args
return main.main(args[:]) # NOTE: parser can alter its args!
def _prepare_mock_account(self):
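        # Wire a single fake account into AccountFileStorage and
        # _determine_account so tests can inspect the regr body passed to the
        # server and the update persisted to storage.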
mock_storage = mock.MagicMock()
mock_account = mock.MagicMock()
mock_regr = mock.MagicMock()
mock_storage.find_all.return_value = [mock_account]
self.mocks['account'].AccountFileStorage.return_value = mock_storage
mock_account.regr.body = mock_regr.body
self.mocks['determine_account'].return_value = (mock_account, mock.MagicMock())
return (mock_account, mock_storage, mock_regr)
def _test_update_no_contact(self, args):
"""Utility to assert that email removal is handled correctly"""
(_, mock_storage, mock_regr) = self._prepare_mock_account()
result = self._call(args)
# When update succeeds, the return value of update_account() is None
self.assertIsNone(result)
# We submitted a registration to the server
self.assertEqual(self.mocks['client'].Client().acme.update_registration.call_count, 1)
mock_regr.body.update.assert_called_with(contact=())
# We got an update from the server and persisted it
self.assertEqual(mock_storage.update_regr.call_count, 1)
# We should have notified the user
self.mocks['notify'].assert_called_with(
'Any contact information associated with this account has been removed.'
)
# We should not have called subscription because there's no email
self.mocks['prepare_sub'].assert_not_called()
def test_no_existing_accounts(self):
"""Test that no existing account is handled correctly"""
mock_storage = mock.MagicMock()
mock_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mock_storage
self.assertEqual(self._call(['update_account', '--email', '[email protected]']),
'Could not find an existing account to update.')
def test_update_account_remove_email(self):
"""Test that --register-unsafely-without-email is handled as no email"""
self._test_update_no_contact(['update_account', '--register-unsafely-without-email'])
def test_update_account_empty_email(self):
"""Test that providing an empty email is handled as no email"""
self._test_update_no_contact(['update_account', '-m', ''])
@mock.patch('certbot._internal.main.display_ops.get_email')
def test_update_account_with_email(self, mock_email):
"""Test that updating with a singular email is handled correctly"""
mock_email.return_value = '[email protected]'
(_, mock_storage, _) = self._prepare_mock_account()
mock_client = mock.MagicMock()
self.mocks['client'].Client.return_value = mock_client
result = self._call(['update_account'])
# None if registration succeeds
self.assertIsNone(result)
# We should have updated the server
self.assertEqual(mock_client.acme.update_registration.call_count, 1)
# We should have updated the account on disk
self.assertEqual(mock_storage.update_regr.call_count, 1)
# Subscription should have been prompted
self.assertEqual(self.mocks['prepare_sub'].call_count, 1)
# Should have printed the email
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to [email protected].')
def test_update_account_with_multiple_emails(self):
"""Test that multiple email addresses are handled correctly"""
(_, mock_storage, mock_regr) = self._prepare_mock_account()
self.assertIsNone(
self._call(['update_account', '-m', '[email protected],[email protected]'])
)
mock_regr.body.update.assert_called_with(
contact=['mailto:[email protected]', 'mailto:[email protected]']
)
self.assertEqual(mock_storage.update_regr.call_count, 1)
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to [email protected],[email protected].')
if __name__ == '__main__':
unittest.main() # pragma: no cover
| 48.66326 | 103 | 0.659459 | [
"Apache-2.0"
] | I-Cat/certbot | certbot/tests/main_test.py | 90,467 | Python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""conv"""
import numpy as np
from mindspore import log as logger
from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._checkparam import ParamValidator as validator, Rel
from mindspore._checkparam import Validator
from mindspore._checkparam import check_bool, twice, check_int_positive
from mindspore._extends import cell_attr_register
from ..cell import Cell
__all__ = ['Conv2d', 'Conv2dTranspose', 'DepthwiseConv2d', 'Conv1d', 'Conv1dTranspose']
class _Conv(Cell):
"""
Applies a N-D convolution over an input signal composed of several input planes.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=False):
super(_Conv, self).__init__()
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
self.kernel_size = kernel_size
self.stride = stride
self.pad_mode = pad_mode
self.weight_init = weight_init
self.bias_init = bias_init
if isinstance(padding, int):
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
self.padding = padding
elif isinstance(padding, tuple):
for pad in padding:
Validator.check_integer('padding item', pad, 0, Rel.GE, self.cls_name)
self.padding = padding
else:
raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
self.dilation = dilation
self.group = check_int_positive(group)
self.has_bias = has_bias
if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
kernel_size[0] < 1 or kernel_size[1] < 1:
raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
                             + str(self.kernel_size) + ", should be an int or a tuple and equal to or greater than 1.")
if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
                             + str(self.stride) + ", should be an int or a tuple and equal to or greater than 1.")
if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
                             + str(self.dilation) + ", should be an int or a tuple and equal to or greater than 1.")
if in_channels % group != 0:
raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if out_channels % group != 0:
raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if transposed:
shape = [in_channels, out_channels // group, *kernel_size]
else:
shape = [out_channels, in_channels // group, *kernel_size]
self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
else:
if self.bias_init != 'zeros':
logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
self.bias = None
def construct(self, *inputs):
"""Must be overridden by all subclasses."""
raise NotImplementedError
class Conv2d(_Conv):
r"""
2D convolution layer.
Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
    where :math:`N` is batch size, :math:`C_{in}` is channel number, and :math:`H_{in}, W_{in}` are the height and width.
For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater or equal to 1 and bounded by the height and width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 1024, 640)
"""
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
super(Conv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
def construct(self, x):
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
return output
def extend_repr(self):
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
@constexpr
def _check_input_3d(input_shape):
if len(input_shape) != 3:
raise ValueError(f"Input should be 3d, but got shape {input_shape}")
class Conv1d(_Conv):
r"""
1D convolution layer.
Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, W_{in})`,
where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
:math:`(C_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_w})`, where :math:`\text{ks_w}` is the width of the convolution kernel.
The full kernel has shape :math:`(C_{out}, C_{in} // \text{group}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output width will be
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (int): The data type is int. Specifies the
width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The output width will be the same as the input.
The total number of padding will be calculated in the horizontal
direction and evenly distributed to left and right if possible. Otherwise, the
            last extra padding will be done on the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest width of the output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater or equal to 1 and bounded by the width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): An initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 640)
"""
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
super(Conv1d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.padding = (0, 0, padding, padding)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
self.shape = P.Shape()
def construct(self, x):
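        """Expand the 3D input to 4D, apply a 2D convolution with a 1xK kernel, then squeeze back to 3D."""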
x_shape = self.shape(x)
_check_input_3d(x_shape)
x = self.expand_dims(x, 2)
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv2dTranspose(_Conv):
r"""
2D transposed convolution layer.
Compute a 2D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (Union[int, tuple]): int or a tuple of 2 integers, which specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Its value should be equal to or greater than 1.
Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
- same: Adopted the way of completion.
- valid: Adopted the way of discarding.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This is not supported on Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
>>> net(input)
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
# out_channels and in_channels swap.
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
super(Conv2dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
if isinstance(self.padding, int):
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
else:
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
def set_strategy(self, strategy):
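        """Set the parallel strategy on the underlying Conv2DBackpropInput primitive."""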
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
"""Calculate the width and height of output."""
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding_top + self.padding_bottom)
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding_left + self.padding_right)
if self.has_bias:
return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
self.bias)
return self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
def extend_repr(self):
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv1dTranspose(_Conv):
r"""
1D transposed convolution layer.
Compute a 1D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (int): int, which specifies the width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
- same: Adopted the way of completion.
- valid: Adopted the way of discarding.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater or equal to 1 and bounded by the width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This is not supported on Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
>>> net(input)
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
# out_channels and in_channels swap.
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
super(Conv1dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.padding = (0, 0, padding, padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=self.padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def set_strategy(self, strategy):
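        """Set the parallel strategy on the underlying Conv2DBackpropInput primitive."""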
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
"""Calculate the width and height of output."""
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
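        """Expand the 3D input to 4D, apply the 2D transposed convolution, then squeeze back to 3D."""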
x_shape = self.shape(x)
_check_input_3d(x_shape)
x = self.expand_dims(x, 2)
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding[0] + self.padding[1])
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding[2] + self.padding[3])
output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class DepthwiseConv2d(Cell):
r"""
2D depthwise convolution layer.
    Applies a 2D depthwise convolution over an input tensor which is typically of shape
    :math:`(N, C_{in}, H_{in}, W_{in})`, where :math:`N` is batch size and :math:`C_{in}` is channel number.
    For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. If `group` is None, it will be set to the value of `in_channels`.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
        >>> net = nn.DepthwiseConv2d(120, 120, 4, group=120, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
        >>> net(input).shape
        (1, 120, 1024, 640)
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
group,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
super(DepthwiseConv2d, self).__init__()
self.kernel_size = twice(kernel_size)
self.stride = twice(stride)
self.dilation = twice(dilation)
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
if group is None:
group = in_channels
validator.check_integer('group', group, in_channels, Rel.EQ)
validator.check_integer('group', group, out_channels, Rel.EQ)
validator.check_integer('group', group, 1, Rel.GE)
self.pad_mode = pad_mode
self.dilation = dilation
self.group = group
self.has_bias = has_bias
self.weight_init = weight_init
self.bias_init = bias_init
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
self.padding = padding
self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
kernel_size=self.kernel_size,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation)
self.bias_add = P.BiasAdd()
weight_shape = [1, in_channels, *self.kernel_size]
self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
else:
if bias_init != 'zeros':
logger.warning("value of `has_bias` is False, value of `bias_init` will be ignore.")
self.bias = None
def construct(self, x):
out = self.conv(x, self.weight)
if self.has_bias:
out = self.bias_add(out, self.bias)
return out
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={}, stride={}, ' \
'pad_mode={}, padding={}, dilation={}, group={}, ' \
'has_bias={}, weight_init={}, bias_init={}'.format(
self.in_channels, self.out_channels, self.kernel_size, self.stride,
self.pad_mode, self.padding, self.dilation, self.group,
self.has_bias, self.weight_init, self.bias_init)
if self.has_bias:
s += ', bias={}'.format(self.bias)
return s
| 51.592284 | 119 | 0.581462 | [
"Apache-2.0"
] | Rossil2012/mindspore | mindspore/nn/layer/conv.py | 49,477 | Python |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: encodings.cp1026
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table) | 48.878049 | 767 | 0.749501 | [
"Apache-2.0"
] | theclashingfritz/Cog-Invasion-Online-Dump | encodings/cp1026.py | 2,004 | Python |
"""
WSGI config for lacuna project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 39.692308 | 79 | 0.805556 | [
"BSD-3-Clause"
] | Murithi/lacuna | config/wsgi.py | 1,548 | Python |
# Undirected Graph from demo represented as Adjacency List
graph = {
"a": [("b", 7), ("c", 9), ("f", 14)],
"b": [("a", 7), ("c", 10), ("d", 15)],
"c": [("a", 9), ("b", 10), ("d", 11), ("f", 2)],
"d": [("b", 15), ("c", 11), ("e", 6)],
"e": [("d", 6), ("f", 9)],
"f": [("a", 14), ("c", 2), ("e", 9)],
}
def find_vertices():
return graph.keys()
def find_edges():
edges = []
for v in graph:
for e in graph[v]:
edges.append((v, e[0], e[1]))
return edges
print("Vertices: {}".format(find_vertices()))
print("Edges: {}".format(find_edges())) | 24.958333 | 58 | 0.435726 | [
"MIT"
] | PacktPublishing/Python-Data-Structures-and-Algorithms-v- | Section4/graph_adj_list.py | 599 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import Model
from tests.utils import CustomTestCase
class Layer_BatchNorm_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
        x_1_input_shape = [None, 100, 1]
        x_2_input_shape = [None, 100, 100, 3]
        x_3_input_shape = [None, 100, 100, 100, 3]
batchsize = 2
cls.x1 = tf.random.normal([batchsize] + x_1_input_shape[1:])
cls.x2 = tf.random.normal([batchsize] + x_2_input_shape[1:])
cls.x3 = tf.random.normal([batchsize] + x_3_input_shape[1:])
## Base
ni_1 = Input(x_1_input_shape, name='test_ni1')
nn_1 = Conv1d(
n_filter=32, filter_size=5, stride=2, name='test_conv1d'
)(ni_1)
n1_b = BatchNorm(name='test_conv')(nn_1)
cls.n1_b = n1_b
cls.base_1d = Model(inputs=ni_1, outputs=n1_b, name='test_base_1d')
ni_2 = Input(x_2_input_shape, name='test_ni2')
nn_2 = Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
)(ni_2)
n2_b = BatchNorm2d(name='test_bn2d')(nn_2)
cls.n2_b = n2_b
cls.base_2d = Model(inputs=ni_2, outputs=n2_b, name='test_base_2d')
ni_3 = Input(x_3_input_shape, name='test_ni2')
nn_3 = Conv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
)(ni_3)
n3_b = BatchNorm3d(name='test_bn3d')(nn_3)
cls.n3_b = n3_b
cls.base_3d = Model(inputs=ni_3, outputs=n3_b, name='test_base_3d')
## 1D ========================================================================
nin_1 = Input(x_1_input_shape, name='test_in1')
n1 = Conv1d(
n_filter=32, filter_size=5, stride=2, name='test_conv1d'
)(nin_1)
n1 = BatchNorm1d(name='test_bn1d')(n1)
cls.n1 = n1
cls.static_1d = Model(inputs=nin_1, outputs=n1)
class bn_1d_model(Model):
def __init__(self):
super(bn_1d_model, self).__init__(name='test_bn_1d_model')
self.conv = Conv1d(n_filter=32, filter_size=5, stride=2, name='test_conv1d', in_channels=1)
self.bn = BatchNorm1d(num_features=32, name='test_bn1d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_1d = bn_1d_model()
print("Printing BatchNorm1d")
print(cls.static_1d)
print(cls.dynamic_1d)
## 2D ========================================================================
nin_2 = Input(x_2_input_shape, name='test_in2')
n2 = Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
)(nin_2)
n2 = BatchNorm2d(name='test_bn2d')(n2)
cls.n2 = n2
cls.static_2d = Model(inputs=nin_2, outputs=n2)
class bn_2d_model(Model):
def __init__(self):
super(bn_2d_model, self).__init__(name='test_bn_2d_model')
self.conv = Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d', in_channels=3)
self.bn = BatchNorm2d(num_features=32, name='test_bn2d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_2d = bn_2d_model()
print("Printing BatchNorm1d")
print(cls.static_2d)
print(cls.dynamic_2d)
## 3D ========================================================================
nin_3 = Input(x_3_input_shape, name='test_in3')
n3 = Conv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
)(nin_3)
n3 = BatchNorm3d(name='test_bn3d', act=tf.nn.relu)(n3)
cls.n3 = n3
cls.static_3d = Model(inputs=nin_3, outputs=n3)
class bn_3d_model(Model):
def __init__(self):
super(bn_3d_model, self).__init__(name='test_bn_3d_model')
self.conv = Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d', in_channels=3)
self.bn = BatchNorm3d(num_features=32, name='test_bn3d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_3d = bn_3d_model()
print("Printing BatchNorm1d")
print(cls.static_3d)
print(cls.dynamic_3d)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_BatchNorm(self):
self.assertEqual(self.n1_b.shape[1:], (50, 32))
out = self.base_1d(self.x1, is_train=True)
self.assertEqual(self.n2_b.shape[1:], (50, 50, 32))
out = self.base_2d(self.x2, is_train=True)
self.assertEqual(self.n3_b.shape[1:], (50, 50, 50, 32))
out = self.base_3d(self.x3, is_train=True)
def test_BatchNorm1d(self):
self.assertEqual(self.n1.shape[1:], (50, 32))
out = self.static_1d(self.x1, is_train=True)
out = self.dynamic_1d(self.x1, is_train=True)
def test_BatchNorm2d(self):
self.assertEqual(self.n2.shape[1:], (50, 50, 32))
out = self.static_2d(self.x2, is_train=True)
out = self.dynamic_2d(self.x2, is_train=True)
out = self.dynamic_2d(self.x2, is_train=False)
def test_BatchNorm3d(self):
self.assertEqual(self.n3.shape[1:], (50, 50, 50, 32))
out = self.static_3d(self.x3, is_train=True)
out = self.dynamic_3d(self.x3, is_train=True)
def test_dataformat(self):
bn1d = BatchNorm1d(data_format='channels_first', num_features=32)
bn2d = BatchNorm2d(data_format='channels_first', num_features=32)
bn3d = BatchNorm3d(data_format='channels_first', num_features=32)
bn = BatchNorm(data_format='channels_first')
try:
bn_fail = BatchNorm1d(data_format='xyz', num_features=32)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
def test_exception(self):
try:
bn = BatchNorm(num_features=32)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
try:
ni = Input([None, 100, 1], name='test_ni1')
bn = BatchNorm(decay=1.5)(ni)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
if __name__ == '__main__':
tl.logging.set_verbosity(tl.logging.DEBUG)
unittest.main()
| 32.736585 | 124 | 0.573387 | [
"Apache-2.0"
] | JingqingZ/tensorlayer2 | tests/layers/test_layers_normalization.py | 6,711 | Python |
#!/usr/bin/env python3
#author [email protected]
"""RabbitMQ helper class.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
"""
IBM-Review-Requirement: Art30.3 - DO NOT TRANSFER OR EXCLUSIVELY LICENSE THE FOLLOWING CODE UNTIL 30/11/2025!
Please note that the following code was developed for the project MUSKETEER in DRL funded by the European Union
under the Horizon 2020 Program.
The project started on 01/12/2018 and was completed on 30/11/2021. Thus, in accordance with article 30.3 of the
Multi-Beneficiary General Model Grant Agreement of the Program, the above limitations are in force until 30/11/2025.
"""
import pytest
import json
def pytest_addoption(parser):
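    """Register the command-line options (credentials file and queue names) used by these tests."""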
parser.addoption("--credentials", required=True)
parser.addoption("--feed_queue", required=False)
parser.addoption("--reply_queue", required=False)
@pytest.fixture
def credentials(request):
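    """Load the JSON credentials file passed via --credentials and attach it to the requesting test class."""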
value = request.config.getoption('credentials')
if request.cls:
with open(value) as json_file:
request.cls.credentials = json.load(json_file)
return value
@pytest.fixture
def feed_queue(request):
value = request.config.getoption('feed_queue')
if request.cls:
request.cls.feed_queue = value
return value
@pytest.fixture
def reply_queue(request):
value = request.config.getoption('reply_queue')
if request.cls:
request.cls.reply_queue = value
return value
| 35 | 116 | 0.74424 | [
"Apache-2.0"
] | IBM/castor-messaging | tests/conftest.py | 2,170 | Python |
from unittest.mock import call
from sls.completion.complete import Completion
from sls.completion.context import CompletionContext
from sls.document import Document
import sls.sentry as sentry
def test_complete(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
cache = magic()
c = Completion(plugins=[], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
def test_complete_plugin(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
my_plugin = magic()
i1 = {"label": "i1"}
i2 = {"label": "i2"}
cache = magic()
my_plugin.complete.return_value = [i1, i2]
c = Completion(plugins=[my_plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(my_plugin.complete.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [i1, i2],
}
def test_complete_exec(magic, patch):
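    """A plugin that raises must not break completion: an empty result is returned and the error is reported to sentry."""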
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
patch.object(sentry, "handle_exception")
cache = magic()
plugin = magic()
ex = Exception("e")
plugin.complete.side_effect = ex
c = Completion(plugins=[plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
assert sentry.handle_exception.call_args == call(ex)
| 29.880597 | 76 | 0.653846 | [
"Apache-2.0"
] | wilzbach/storyscript-sls | tests/unittests/completion/complete.py | 2,002 | Python |
import json
import numpy as np
from tune._utils import normalize_hp
def test_normalize_hp():
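    """normalize_hp should convert numpy scalar types into plain, JSON-serializable Python values."""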
assert isinstance(np.int64(10), np.int64)
assert 10 == normalize_hp(np.int64(10))
assert not isinstance(normalize_hp(np.int64(10)), np.int64)
assert json.dumps(dict(a=[0, 1], b=1.1, c="x")) == json.dumps(
normalize_hp(dict(a=[np.int64(0), 1], b=np.float64(1.1), c="x"))
)
| 26.466667 | 72 | 0.657431 | [
"Apache-2.0"
] | fugue-project/tune | tests/tune/_utils/test_values.py | 397 | Python |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_stack_hci.generated._client_factory import cf_cluster
stack_hci_cluster = CliCommandType(
operations_tmpl='azext_stack_hci.vendored_sdks.azurestackhci.operations._cluster_operations#ClusterOperations.{}',
client_factory=cf_cluster)
with self.command_group('stack-hci cluster', stack_hci_cluster, client_factory=cf_cluster) as g:
g.custom_command('list', 'stack_hci_cluster_list')
g.custom_show_command('show', 'stack_hci_cluster_show')
g.custom_command('create', 'stack_hci_cluster_create')
g.custom_command('update', 'stack_hci_cluster_update')
g.custom_command('delete', 'stack_hci_cluster_delete', confirmation=True)
| 45.642857 | 122 | 0.676056 | [
"MIT"
] | 00Kai0/azure-cli-extensions | src/stack-hci/azext_stack_hci/generated/commands.py | 1,278 | Python |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
return HttpResponse("Check URL => /admin") | 27.666667 | 46 | 0.771084 | [
"MIT"
] | pradyneel/xtreme-weather | emailautomate/views.py | 166 | Python |
# -*- coding: utf-8 -*-
#
# This document is free and open-source software, subject to the OSI-approved
# BSD license below.
#
# Copyright (c) 2011 - 2013 Alexis Petrounias <www.petrounias.org>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Django CTE Trees - an experimental PostgreSQL Common Table Expressions (CTE)
implementation of Adjacency-Linked trees.
"""
VERSION = (0, 2, 2)
__version__ = ".".join(map(str, VERSION))
| 46.925 | 80 | 0.773042 | [
"BSD-3-Clause"
] | kordian-kowalski/django-cte-forest | cte_forest/__init__.py | 1,877 | Python |
"""CveException Class"""
import cloudpassage.sanity as sanity
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class CveExceptions(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
Keyword args:
endpoint_version (int): Endpoint version override.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
def create(self, package_name, package_version, scope="all", scope_id=''):
"""This method allows user to create CVE exceptions.
Args:
package_name (str): The name of the vulnerable
package to be excepted.
package_version (str): The version number of the
vulnerable package.
scope (str): Possible values are server, group and all.
scope_id (str): If you pass the value server as scope, this field
will include server ID. If you pass the value group as scope,
this field will include group ID.
Returns:
str: ID of the newly-created cve exception
"""
body_ref = {
"server": "server_id",
"group": "group_id"
}
params = {
"package_name": package_name,
"package_version": package_version,
"scope": scope
}
endpoint = self.endpoint()
if scope != "all":
sanity.validate_cve_exception_scope_id(scope_id)
scope_key = body_ref[scope]
params[scope_key] = scope_id
body = {"cve_exception": params}
request = HttpHelper(self.session)
response = request.post(endpoint, body)
return response["cve_exception"]["id"]
def update(self, exception_id, **kwargs):
""" Update CVE Exceptions.
Args:
exception_id (str): Identifier for the CVE exception.
Keyword Args:
scope (str): Possible values are server, group and all.
group_id (str): The ID of the server group containing the server to
which this exception applies.
server_id (str): The ID of the server to which this exception
applies.
cve_entries : List of CVEs
Returns:
True if successful, throws exception otherwise.
"""
endpoint = "{}/{}".format(self.endpoint(), exception_id)
body = {"cve_exception": kwargs}
request = HttpHelper(self.session)
response = request.put(endpoint, body)
return response
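# Example (sketch): typical CveExceptions usage, assuming `import cloudpassage`.
# The session credentials, package version, and IDs below are placeholders, not real values.
#
#   session = cloudpassage.HaloSession("api_key_id", "api_secret_key")
#   cve = CveExceptions(session)
#   exception_id = cve.create("openssl", "1.0.2k", scope="group", scope_id="abc123")
#   cve.update(exception_id, cve_entries=["CVE-2016-0800"])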
# The following class needs to live on only in name, and should absorb the
# functionality of the current CveExceptions class.
class CveException(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
| 32.366412 | 79 | 0.619811 | [
"BSD-3-Clause"
] | cloudpassage/cloudpassage-halo-python-sdk | cloudpassage/cve_exception.py | 4,240 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAccountBindingSyncModel(object):
def __init__(self):
self._alipay_user_id = None
self._create_time = None
self._data_version = None
self._havana_user_id = None
self._modify_time = None
self._realm = None
self._status = None
@property
def alipay_user_id(self):
return self._alipay_user_id
@alipay_user_id.setter
def alipay_user_id(self, value):
self._alipay_user_id = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def data_version(self):
return self._data_version
@data_version.setter
def data_version(self, value):
self._data_version = value
@property
def havana_user_id(self):
return self._havana_user_id
@havana_user_id.setter
def havana_user_id(self, value):
self._havana_user_id = value
@property
def modify_time(self):
return self._modify_time
@modify_time.setter
def modify_time(self, value):
self._modify_time = value
@property
def realm(self):
return self._realm
@realm.setter
def realm(self, value):
self._realm = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.alipay_user_id:
if hasattr(self.alipay_user_id, 'to_alipay_dict'):
params['alipay_user_id'] = self.alipay_user_id.to_alipay_dict()
else:
params['alipay_user_id'] = self.alipay_user_id
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.data_version:
if hasattr(self.data_version, 'to_alipay_dict'):
params['data_version'] = self.data_version.to_alipay_dict()
else:
params['data_version'] = self.data_version
if self.havana_user_id:
if hasattr(self.havana_user_id, 'to_alipay_dict'):
params['havana_user_id'] = self.havana_user_id.to_alipay_dict()
else:
params['havana_user_id'] = self.havana_user_id
if self.modify_time:
if hasattr(self.modify_time, 'to_alipay_dict'):
params['modify_time'] = self.modify_time.to_alipay_dict()
else:
params['modify_time'] = self.modify_time
if self.realm:
if hasattr(self.realm, 'to_alipay_dict'):
params['realm'] = self.realm.to_alipay_dict()
else:
params['realm'] = self.realm
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserAccountBindingSyncModel()
if 'alipay_user_id' in d:
o.alipay_user_id = d['alipay_user_id']
if 'create_time' in d:
o.create_time = d['create_time']
if 'data_version' in d:
o.data_version = d['data_version']
if 'havana_user_id' in d:
o.havana_user_id = d['havana_user_id']
if 'modify_time' in d:
o.modify_time = d['modify_time']
if 'realm' in d:
o.realm = d['realm']
if 'status' in d:
o.status = d['status']
return o
| 30.244275 | 79 | 0.59364 | [
"Apache-2.0"
] | Anning01/alipay-sdk-python-all | alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py | 3,962 | Python |
h = input('Type something: ')
print(type(h))
print('Is it alphanumeric?', h.isalnum())
print('Is it a decimal?', h.isdecimal())
print('Is it uppercase?', h.isupper())
print('Is it lowercase?', h.islower())
print('Is it printable?', h.isprintable())
| 24.555556 | 38 | 0.674208 | [
"MIT"
] | miguelsndc/Exercicios-Python | primeiros-exercicios/lpc002.py | 230 | Python |
"""
JunOSLikeDevice Class is abstract class for using in Juniper JunOS like devices
Connection Method are based upon AsyncSSH and should be running in asyncio loop
"""
import re
from netdev.logger import logger
from netdev.vendors.base import BaseDevice
class JunOSLikeDevice(BaseDevice):
"""
JunOSLikeDevice Class for working with Juniper JunOS like devices
Juniper JunOS like devices having several concepts:
    * shell mode (csh). This is the csh shell for FreeBSD. This mode is not covered by this class.
    * cli mode (specific shell). The entire configuration is usually done in this shell:
        * operation mode. This mode is used for getting information from the device
        * configuration mode. This mode is used for configuring the system
"""
_delimiter_list = ["%", ">", "#"]
"""All this characters will stop reading from buffer. It mean the end of device prompt"""
_pattern = r"\w+(\@[\-\w]*)?[{delimiters}]"
"""Pattern for using in reading buffer. When it found processing ends"""
_disable_paging_command = "set cli screen-length 0"
"""Command for disabling paging"""
_config_enter = "configure"
"""Command for entering to configuration mode"""
_config_exit = "exit configuration-mode"
"""Command for existing from configuration mode to privilege exec"""
_config_check = "#"
"""Checking string in prompt. If it's exist im prompt - we are in configuration mode"""
_commit_command = "commit"
"""Command for committing changes"""
_commit_comment_command = "commit comment {}"
"""Command for committing changes with comment"""
async def _set_base_prompt(self):
"""
Setting two important vars
base_prompt - textual prompt in CLI (usually username or hostname)
        base_pattern - regexp for finding the end of a command. It's a platform-specific parameter
        For JunOS devices base_pattern is "user(@[hostname])?[>|#]"
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
prompt = prompt[:-1]
# Strip off trailing terminator
if "@" in prompt:
prompt = prompt.split("@")[1]
self._base_prompt = prompt
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def check_config_mode(self):
"""Check if are in configuration mode. Return boolean"""
logger.info("Host {}: Checking configuration mode".format(self._host))
check_string = type(self)._config_check
self._stdin.write(self._normalize_cmd("\n"))
output = await self._read_until_prompt()
return check_string in output
async def config_mode(self):
"""Enter to configuration mode"""
logger.info("Host {}: Entering to configuration mode".format(self._host))
output = ""
config_enter = type(self)._config_enter
if not await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_enter))
output += await self._read_until_prompt()
if not await self.check_config_mode():
raise ValueError("Failed to enter to configuration mode")
return output
async def exit_config_mode(self):
"""Exit from configuration mode"""
logger.info("Host {}: Exiting from configuration mode".format(self._host))
output = ""
config_exit = type(self)._config_exit
if await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_exit))
output += await self._read_until_prompt()
if await self.check_config_mode():
raise ValueError("Failed to exit from configuration mode")
return output
async def send_config_set(
self,
config_commands=None,
with_commit=True,
commit_comment="",
exit_config_mode=True,
):
"""
Sending configuration commands to device
By default automatically exits/enters configuration mode.
        :param list config_commands: iterable string list with commands to apply to the network device in configuration mode
        :param bool with_commit: if true it commits all changes after applying all config_commands
:param string commit_comment: message for configuration commit
:param bool exit_config_mode: If true it will quit from configuration mode automatically
:return: The output of these commands
"""
if config_commands is None:
return ""
# Send config commands
output = await self.config_mode()
output += await super().send_config_set(config_commands=config_commands)
if with_commit:
commit = type(self)._commit_command
if commit_comment:
commit = type(self)._commit_comment_command.format(commit_comment)
self._stdin.write(self._normalize_cmd(commit))
output += await self._read_until_prompt()
if exit_config_mode:
output += await self.exit_config_mode()
output = self._normalize_linefeeds(output)
logger.debug(
"Host {}: Config commands output: {}".format(self._host, repr(output))
)
return output
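# Example (sketch): pushing configuration with send_config_set. Assumes `device` is an
# already-connected instance of a JunOS-like subclass, awaited inside an asyncio coroutine;
# the config lines below are illustrative only.
#
#   output = await device.send_config_set(
#       ["set system host-name lab-router"],
#       with_commit=True,
#       commit_comment="netdev example",
#   )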
| 40.10274 | 119 | 0.644236 | [
"Apache-2.0"
] | ColinSix/netdev | netdev/vendors/junos_like.py | 5,855 | Python |
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
"""Check that currency is in supported set."""
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
"""Check that world bank country is in supported set."""
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
"""Check that year is in world bank dataset years."""
if year < 1960:
raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""Deprecated function."""
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, `EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling on `os.getenv("EXCHANGE_RATE_APIKEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
"""
This function takes in a stock symbol as a parameter,
    queries an API for the company's full name and returns
    it.
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
"""
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
def get_symbol(symbol: str):
"""
    This is a helper function to get a company's full
name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
    :param symbol: The stock symbol used
        to query the API for the company's full name.
:return: Company full name
"""
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
| 29.384718 | 107 | 0.57356 | [
"MIT"
] | aliavni/pyjanitor | janitor/finance.py | 21,921 | Python |
from __future__ import annotations
from uuid import uuid4
import pytest
from protean import BaseCommandHandler, BaseEvent, BaseEventSourcedAggregate, handle
from protean.core.command import BaseCommand
from protean.core.event_sourced_aggregate import apply
from protean.fields import Identifier, String
from protean.globals import current_domain
from protean.utils import fqn
class Register(BaseCommand):
id = Identifier()
email = String()
name = String()
password_hash = String()
class Registered(BaseEvent):
id = Identifier()
email = String()
name = String()
password_hash = String()
class User(BaseEventSourcedAggregate):
email = String()
name = String()
password_hash = String()
@classmethod
def register(cls, command: Register) -> User:
user = cls(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
user.raise_(
Registered(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
)
current_domain.repository_for(User).add(user)
return user
@apply(Registered)
def registered(self, _: Registered) -> None:
pass
class UserCommandHandler(BaseCommandHandler):
@handle(Register)
def register_user(self, command: Register) -> None:
User.register(command)
@pytest.fixture(autouse=True)
def register_elements(test_domain):
test_domain.register(User)
test_domain.register(UserCommandHandler, aggregate_cls=User)
@pytest.mark.eventstore
def test_that_events_can_be_raised_from_within_aggregates(test_domain):
identifier = str(uuid4())
UserCommandHandler().register_user(
Register(
id=identifier,
email="[email protected]",
name="John Doe",
password_hash="hash",
)
)
messages = test_domain.event_store.store._read("user")
assert len(messages) == 1
assert messages[0]["stream_name"] == f"user-{identifier}"
assert messages[0]["type"] == f"{fqn(Registered)}"
| 24.988764 | 84 | 0.657824 | [
"BSD-3-Clause"
] | mpsiva89/protean | tests/event_sourced_aggregates/test_raising_events_from_within_aggregates.py | 2,224 | Python |
from datetime import datetime
import json
from unittest import TestCase
from celery.schedules import schedule, crontab
try: # celery 3.x
from celery.utils.timeutils import timezone
except ImportError: # celery 4.x
from celery.utils.time import timezone
from redbeat.decoder import RedBeatJSONDecoder, RedBeatJSONEncoder
from redbeat.schedules import rrule
class JSONTestCase(TestCase):
def dumps(self, d):
return json.dumps(d, cls=RedBeatJSONEncoder)
def loads(self, d):
return json.loads(d, cls=RedBeatJSONDecoder)
def datetime(self, **kwargs):
d = {
'__type__': 'datetime',
'year': 2015,
'month': 12,
'day': 30,
'hour': 12,
'minute': 59,
'second': 22,
'microsecond': 333,
}
d.update(kwargs)
return d
def schedule(self, **kwargs):
d = {
'__type__': 'interval',
'every': 60.0,
'relative': False,
}
d.update(kwargs)
return d
def crontab(self, **kwargs):
d = {
'__type__': 'crontab',
'minute': '*',
'hour': '*',
'day_of_week': '*',
'day_of_month': '*',
'month_of_year': '*',
}
d.update(kwargs)
return d
def rrule(self, **kwargs):
d = {
'__type__': 'rrule',
'freq': 5,
'dtstart': 1451480362,
'interval': 1,
'wkst': None,
'count': 1,
'until': None,
'bysetpos': None,
'bymonth': None,
'bymonthday': None,
'byyearday': None,
'byeaster': None,
'byweekno': None,
'byweekday': None,
'byhour': None,
'byminute': None,
'bysecond': None,
}
d.update(kwargs)
return d
class RedBeatJSONEncoderTestCase(JSONTestCase):
def test_datetime(self):
dt = datetime.now()
result = self.dumps(dt)
expected = self.datetime()
for key in (k for k in expected if hasattr(dt, k)):
expected[key] = getattr(dt, key)
self.assertEqual(result, json.dumps(expected))
def test_schedule(self):
s = schedule(run_every=60.0)
result = self.dumps(s)
self.assertEqual(result, json.dumps(self.schedule(every=60.0)))
def test_crontab(self):
c = crontab()
result = self.dumps(c)
self.assertEqual(result, json.dumps(self.crontab()))
def test_rrule(self):
r = rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1)
result = self.dumps(r)
self.assertEqual(result, json.dumps(self.rrule()))
def test_rrule_timezone(self):
tz = timezone.get_timezone('US/Eastern')
start1 = datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc)
start2 = start1.astimezone(tz)
r1 = rrule('MINUTELY', dtstart=start1, count=1)
r2 = rrule('MINUTELY', dtstart=start2, count=1)
self.assertEqual(self.dumps(r1), self.dumps(r2))
class RedBeatJSONDecoderTestCase(JSONTestCase):
def test_datetime(self):
d = self.datetime()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, datetime(tzinfo=timezone.utc, **d))
def test_schedule(self):
d = self.schedule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, schedule(run_every=60))
def test_crontab(self):
d = self.crontab()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, crontab())
def test_rrule(self):
d = self.rrule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(
result,
rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1),
)
| 26.006369 | 104 | 0.546657 | [
"Apache-2.0"
] | NextChance/redbeat | tests/test_json.py | 4,083 | Python |
from typing import Optional, Any, Dict, List, Text, Tuple
from collections import defaultdict
SUBJECT_WITH_BRANCH_TEMPLATE = u'{repo} / {branch}'
SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE = u'{repo} / {type} #{id} {title}'
EMPTY_SHA = '0000000000000000000000000000000000000000'
COMMITS_LIMIT = 20
COMMIT_ROW_TEMPLATE = u'* {commit_msg} ([{commit_short_sha}]({commit_url}))\n'
COMMITS_MORE_THAN_LIMIT_TEMPLATE = u"[and {commits_number} more commit(s)]"
COMMIT_OR_COMMITS = u"commit{}"
PUSH_PUSHED_TEXT_WITH_URL = u"[pushed]({compare_url}) {number_of_commits} {commit_or_commits}"
PUSH_PUSHED_TEXT_WITHOUT_URL = u"pushed {number_of_commits} {commit_or_commits}"
PUSH_COMMITS_MESSAGE_TEMPLATE_WITH_COMMITTERS = u"""{user_name} {pushed_text} to branch {branch_name}. {committers_details}.
{commits_data}
"""
PUSH_COMMITS_MESSAGE_TEMPLATE_WITHOUT_COMMITTERS = u"""{user_name} {pushed_text} to branch {branch_name}.
{commits_data}
"""
PUSH_COMMITS_MESSAGE_EXTENSION = u"Commits by {}"
PUSH_COMMITTERS_LIMIT_INFO = 3
FORCE_PUSH_COMMITS_MESSAGE_TEMPLATE = u"{user_name} [force pushed]({url}) to branch {branch_name}. Head is now {head}"
CREATE_BRANCH_MESSAGE_TEMPLATE = u"{user_name} created [{branch_name}]({url}) branch"
REMOVE_BRANCH_MESSAGE_TEMPLATE = u"{user_name} deleted branch {branch_name}"
PULL_REQUEST_OR_ISSUE_MESSAGE_TEMPLATE = u"{user_name} {action} [{type}{id}]({url})"
PULL_REQUEST_OR_ISSUE_ASSIGNEE_INFO_TEMPLATE = u"(assigned to {assignee})"
PULL_REQUEST_BRANCH_INFO_TEMPLATE = u"\nfrom `{target}` to `{base}`"
SETUP_MESSAGE_TEMPLATE = u"{integration} webhook has been successfully configured"
SETUP_MESSAGE_USER_PART = u" by {user_name}"
CONTENT_MESSAGE_TEMPLATE = u"\n~~~ quote\n{message}\n~~~"
COMMITS_COMMENT_MESSAGE_TEMPLATE = u"{user_name} {action} on [{sha}]({url})"
PUSH_TAGS_MESSAGE_TEMPLATE = u"""{user_name} {action} tag {tag}"""
TAG_WITH_URL_TEMPLATE = u"[{tag_name}]({tag_url})"
TAG_WITHOUT_URL_TEMPLATE = u"{tag_name}"
def get_push_commits_event_message(user_name, compare_url, branch_name, commits_data, is_truncated=False):
# type: (Text, Optional[Text], Text, List[Dict[str, Any]], Optional[bool]) -> Text
pushed_message_template = PUSH_PUSHED_TEXT_WITH_URL if compare_url else PUSH_PUSHED_TEXT_WITHOUT_URL
pushed_text_message = pushed_message_template.format(
compare_url=compare_url,
number_of_commits=len(commits_data),
commit_or_commits=COMMIT_OR_COMMITS.format(u's' if len(commits_data) > 1 else u''))
committers_items = get_all_committers(commits_data) # type: List[Tuple[str, int]]
if len(committers_items) == 1 and user_name == committers_items[0][0]:
return PUSH_COMMITS_MESSAGE_TEMPLATE_WITHOUT_COMMITTERS.format(
user_name=user_name,
pushed_text=pushed_text_message,
branch_name=branch_name,
commits_data=get_commits_content(commits_data, is_truncated),
).rstrip()
else:
committers_details = "{} ({})".format(*committers_items[0])
for name, number_of_commits in committers_items[1:-1]:
committers_details = "{}, {} ({})".format(committers_details, name, number_of_commits)
if len(committers_items) > 1:
committers_details = "{} and {} ({})".format(committers_details, *committers_items[-1])
return PUSH_COMMITS_MESSAGE_TEMPLATE_WITH_COMMITTERS.format(
user_name=user_name,
pushed_text=pushed_text_message,
branch_name=branch_name,
committers_details=PUSH_COMMITS_MESSAGE_EXTENSION.format(committers_details),
commits_data=get_commits_content(commits_data, is_truncated),
).rstrip()
def get_force_push_commits_event_message(user_name, url, branch_name, head):
# type: (Text, Text, Text, Text) -> Text
return FORCE_PUSH_COMMITS_MESSAGE_TEMPLATE.format(
user_name=user_name,
url=url,
branch_name=branch_name,
head=head
)
def get_create_branch_event_message(user_name, url, branch_name):
# type: (Text, Text, Text) -> Text
return CREATE_BRANCH_MESSAGE_TEMPLATE.format(
user_name=user_name,
url=url,
branch_name=branch_name,
)
def get_remove_branch_event_message(user_name, branch_name):
# type: (Text, Text) -> Text
return REMOVE_BRANCH_MESSAGE_TEMPLATE.format(
user_name=user_name,
branch_name=branch_name,
)
def get_pull_request_event_message(
user_name, action, url, number=None,
target_branch=None, base_branch=None,
message=None, assignee=None, type='PR'
):
# type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[Text]) -> Text
main_message = PULL_REQUEST_OR_ISSUE_MESSAGE_TEMPLATE.format(
user_name=user_name,
action=action,
type=type,
url=url,
id=" #{}".format(number) if number is not None else ''
)
if assignee:
main_message += PULL_REQUEST_OR_ISSUE_ASSIGNEE_INFO_TEMPLATE.format(assignee=assignee)
if target_branch and base_branch:
main_message += PULL_REQUEST_BRANCH_INFO_TEMPLATE.format(
target=target_branch,
base=base_branch
)
if message:
main_message += '\n' + CONTENT_MESSAGE_TEMPLATE.format(message=message)
return main_message.rstrip()
def get_setup_webhook_message(integration, user_name=None):
# type: (Text, Optional[Text]) -> Text
content = SETUP_MESSAGE_TEMPLATE.format(integration=integration)
if user_name:
content += SETUP_MESSAGE_USER_PART.format(user_name=user_name)
return content
def get_issue_event_message(user_name, action, url, number=None, message=None, assignee=None):
# type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text]) -> Text
return get_pull_request_event_message(
user_name,
action,
url,
number,
message=message,
assignee=assignee,
type='Issue'
)
def get_push_tag_event_message(user_name, tag_name, tag_url=None, action='pushed'):
# type: (Text, Text, Optional[Text], Optional[Text]) -> Text
if tag_url:
tag_part = TAG_WITH_URL_TEMPLATE.format(tag_name=tag_name, tag_url=tag_url)
else:
tag_part = TAG_WITHOUT_URL_TEMPLATE.format(tag_name=tag_name)
return PUSH_TAGS_MESSAGE_TEMPLATE.format(
user_name=user_name,
action=action,
tag=tag_part
)
def get_commits_comment_action_message(user_name, action, commit_url, sha, message=None):
# type: (Text, Text, Text, Text, Optional[Text]) -> Text
content = COMMITS_COMMENT_MESSAGE_TEMPLATE.format(
user_name=user_name,
action=action,
sha=get_short_sha(sha),
url=commit_url
)
if message is not None:
content += CONTENT_MESSAGE_TEMPLATE.format(
message=message
)
return content
def get_commits_content(commits_data, is_truncated=False):
# type: (List[Dict[str, Any]], Optional[bool]) -> Text
commits_content = u''
for commit in commits_data[:COMMITS_LIMIT]:
commits_content += COMMIT_ROW_TEMPLATE.format(
commit_short_sha=get_short_sha(commit.get('sha')),
commit_url=commit.get('url'),
commit_msg=commit.get('message').partition('\n')[0]
)
if len(commits_data) > COMMITS_LIMIT:
commits_content += COMMITS_MORE_THAN_LIMIT_TEMPLATE.format(
commits_number=len(commits_data) - COMMITS_LIMIT
)
elif is_truncated:
commits_content += COMMITS_MORE_THAN_LIMIT_TEMPLATE.format(
commits_number=''
).replace(' ', ' ')
return commits_content.rstrip()
def get_short_sha(sha):
# type: (Text) -> Text
return sha[:7]
def get_all_committers(commits_data):
# type: (List[Dict[str, Any]]) -> List[Tuple[str, int]]
committers = defaultdict(int) # type: Dict[str, int]
for commit in commits_data:
committers[commit['name']] += 1
# Sort by commit count, breaking ties alphabetically.
committers_items = sorted(list(committers.items()),
key=lambda item: (-item[1], item[0])) # type: List[Tuple[str, int]]
committers_values = [c_i[1] for c_i in committers_items] # type: List[int]
if len(committers) > PUSH_COMMITTERS_LIMIT_INFO:
others_number_of_commits = sum(committers_values[PUSH_COMMITTERS_LIMIT_INFO:])
committers_items = committers_items[:PUSH_COMMITTERS_LIMIT_INFO]
committers_items.append(('others', others_number_of_commits))
return committers_items
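if __name__ == "__main__":
    # Minimal demonstration of the push-message builder with made-up commit data
    # (not a real webhook payload); the URLs and SHAs are placeholders.
    sample_commits = [
        {"name": "Alice", "sha": "a1b2c3d4e5f60708", "url": "https://example.com/c/a1b2c3d", "message": "Fix typo"},
        {"name": "Bob", "sha": "0f9e8d7c6b5a4032", "url": "https://example.com/c/0f9e8d7", "message": "Add feature"},
    ]
    print(get_push_commits_event_message("Alice", "https://example.com/compare", "main", sample_commits))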
| 39.930556 | 133 | 0.704464 | [
"Apache-2.0"
] | roberthoenig/zulip | zerver/lib/webhooks/git.py | 8,625 | Python |
import random
import unittest
from typing import Tuple
import torch
import numpy as np
from src.utilities import set_random_seed
_RANDOM_SEED: int = random.randint(0, 100)
_TEST_ARRAY_SIZE: Tuple[int, int] = (2, 2)
_TEST_TENSOR_SIZE: Tuple[int, int] = (2, 2)
def _set_random_seed():
set_random_seed(
random_seed=_RANDOM_SEED,
)
class TestSetRandomSeed(unittest.TestCase):
"""Unit test class for ``set_random_seed`` function.
The test checks the random seed function for Python random,
NumPy, and PyTorch by asserting the first random number, array,
or tensor is always the same after seeding.
"""
def test_random(self):
_set_random_seed()
_random = random.random()
_set_random_seed()
assert _random == random.random()
def test_numpy(self):
_set_random_seed()
_array = np.random.random(size=_TEST_ARRAY_SIZE)
_set_random_seed()
assert (_array == np.random.random(size=_TEST_ARRAY_SIZE)).all()
def test_torch(self):
_set_random_seed()
_tensor = torch.rand(size=_TEST_TENSOR_SIZE)
_set_random_seed()
assert (_tensor == torch.rand(size=_TEST_TENSOR_SIZE)).all()
if __name__ == '__main__':
unittest.main()
| 24.764706 | 72 | 0.684086 | [
"MIT"
] | iblamedom/kuenstliche-intelligenz | tests/test_set_random_seed.py | 1,263 | Python |
import re
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
class CommandRouter:
def __init__(self, subrouters: List["CommandRouter"] = []) -> None:
self.command_handlers: Dict[str, Callable[..., Awaitable[Any]]] = dict()
for subrouter in subrouters:
self.command_handlers.update(subrouter.command_handlers)
def register_command(self, regex: str) -> Callable[[Callable], Callable]:
def decorator(
function: Callable[..., Awaitable[Any]]
) -> Callable[..., Awaitable[Any]]:
self.command_handlers[regex] = function
return function
return decorator
def find_commands(self, body: str) -> List[str]:
"""Find all commands in a comment."""
commands = []
for regex in self.command_handlers.keys():
for _ in re.findall(regex, body):
commands.append(regex)
return commands
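if __name__ == "__main__":
    # Small self-contained demo; the "@marvin build" pattern is illustrative, not a real command.
    router = CommandRouter()

    @router.register_command(r"@marvin build")
    async def handle_build(**kwargs: Any) -> None:
        """Hypothetical handler; find_commands only matches patterns and never awaits it."""

    print(router.find_commands("please @marvin build this PR"))  # -> ['@marvin build']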
| 32.451613 | 80 | 0.637177 | [
"MIT"
] | asymmetric/marvin-mk2 | marvin/command_router.py | 1,006 | Python |
# coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
import tensorflow as tf
if sys.version_info[0] > 2:
is_py3 = True
else:
# reload(sys)
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
"""如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码"""
if not is_py3:
return word.encode(encoding)
else:
return word
def native_content(content):
if not is_py3:
return content.decode('utf-8')
else:
return content
def open_file(filename, mode='r'):
"""
    Common file-open helper that works under both Python 2 and Python 3.
mode: 'r' or 'w' for read or write
"""
if is_py3:
return open(filename, mode, encoding='utf-8', errors='ignore')
else:
return open(filename, mode)
def read_file(filename):
"""读取文件数据"""
contents, labels = [], []
with open_file(filename) as f:
for line in f:
# while True:
# line = f.readline()
try:
label, content = line.strip().split('\t')
contents.append(content)
if content:
# contents.append(list(native_content(content)))
labels.append(native_content(label))
except:
pass
# if not line:
# break
return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
"""根据训练集构建词汇表,存储, x, y"""
data_train, _ = read_file(train_dir)
all_data = []
for content in data_train:
all_data.extend(content)
counter = Counter(all_data)
count_pairs = counter.most_common(vocab_size - 1)
words, _ = list(zip(*count_pairs))
    # Add a <PAD> token so that all texts can be padded to the same length
words = ['<PAD>'] + list(words)
open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
"""读取词汇表"""
# words = open_file(vocab_dir).read().strip().split('\n')
with open_file(vocab_dir) as fp:
        # Under Python 2, convert every value to unicode
words = [native_content(_.strip()) for _ in fp.readlines()]
word_to_id = dict(zip(words, range(len(words))))
return words, word_to_id
def read_category():
"""读取分类目录,固定"""
categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
categories = [native_content(x) for x in categories]
cat_to_id = dict(zip(categories, range(len(categories))))
return categories, cat_to_id
def to_words(content, words):
"""将id表示的内容转换为文字"""
return ''.join(words[x] for x in content)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
"""将文件转换为id表示"""
contents, labels = read_file(filename)
# np.save('./train_x.npy', contents)
# np.savetxt('./train_x.txt', contents, fmt='%s')
data_id, label_id = [], []
for i in range(len(contents)):
# data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
label_id.append(cat_to_id[labels[i]])
    # Use the pad_sequences helper provided by keras to pad texts to a fixed length
# x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # convert labels to one-hot representation
return contents, y_pad
def batch_iter(x, y, batch_size=64):
"""生成批次数据"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
    # Note: np.random.shuffle reorders the original array in place and returns nothing,
    # while np.random.permutation leaves the original array untouched and returns a new shuffled copy.
indices = np.random.permutation(np.arange(data_len))
x_shuffle = np.array(x)[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
# yield x[start_id:end_id], y[start_id:end_id]
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
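# Example (sketch, dummy data): iterating over shuffled mini-batches with batch_iter.
#   x = ['sample text a', 'sample text b', 'sample text c']
#   y = kr.utils.to_categorical([0, 1, 0], num_classes=2)
#   for x_batch, y_batch in batch_iter(x, y, batch_size=2):
#       print(len(x_batch), y_batch.shape)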
def attention(inputs, attention_size, l2_reg_lambda):
"""
Attention mechanism layer.
:param inputs: outputs of RNN/Bi-RNN layer (not final state)
:param attention_size: linear size of attention weights
:return: outputs of the passed RNN/Bi-RNN reduced with attention vector
"""
# In case of Bi-RNN input we need to concatenate outputs of its forward and backward parts
if isinstance(inputs, tuple):
        inputs = tf.concat(inputs, 2)  # TF 1.x signature: concat(values, axis)
sequence_length = inputs.get_shape()[1].value # the length of sequences processed in the antecedent RNN layer
hidden_size = inputs.get_shape()[2].value # hidden size of the RNN layer
    # Attention mechanism: W and b apply a non-linear transform to the RNN outputs; the result is then dotted with u
W_omega = tf.get_variable("W_omega", initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))
b_omega = tf.get_variable("b_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.get_variable("u_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))
vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
exps = tf.reshape(tf.exp(vu), [-1, sequence_length])
alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])
# Output of Bi-RNN is reduced with attention vector
output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)
#if l2_reg_lambda > 0:
# l2_loss += tf.nn.l2_loss(W_omega)
# l2_loss += tf.nn.l2_loss(b_omega)
# l2_loss += tf.nn.l2_loss(u_omega)
# tf.add_to_collection('losses', l2_loss)
return output
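# Example (sketch): pooling Bi-RNN outputs of shape [batch, seq_len, hidden] into one
# vector per sample; `rnn_outputs` is a placeholder name, not defined in this module.
#   with tf.variable_scope('attention'):
#       pooled = attention(rnn_outputs, attention_size=128, l2_reg_lambda=0.0)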
| 32.739645 | 114 | 0.646665 | [
"MIT"
] | a414351664/Bert-THUCNews | data/cnews_loader_bert.py | 6,073 | Python |
import os
import pandas as pd
COMPETITION_NAME = "tabular-playground-series-sep-2021"
SUBMISSION_DIR = "."
SUBMISSION_FILE = "sub_blending_1_my_rank_004-2o-lightgbm-colsample_81830_my_ranking_81790_0926_1918.csv"
SUBMISSION_MESSAGE = '"004-2o-lightgbm-colsample-tps-sep-2021 + stacking_lgb_xbg_cat_imputer_no_imputer"'
df = pd.read_csv(f"{SUBMISSION_DIR}/{SUBMISSION_FILE}")
print(df.head())
submission_string = f"kaggle competitions submit {COMPETITION_NAME} -f {SUBMISSION_DIR}/{SUBMISSION_FILE} -m {SUBMISSION_MESSAGE}"
print(submission_string)
os.system(submission_string)
| 29.3 | 130 | 0.812287 | [
"Apache-2.0"
] | arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold | submissions/submissions_22.py | 586 | Python |
from dataclasses import field, dataclass
from pathlib import Path
from typing import Any
from .anki_deck_archiver import AnkiDeckArchiver
from .archiver import AllDeckArchiver
from .dulwich_repo import DulwichAnkiRepo
from ..anki.adapters.deck_manager import AnkiStaticDeckManager, DeckManager
from ..anki.ui.utils import progress_indicator
from ..config.config_settings import ConfigSettings
from ..export.anki_exporter import AnkiJsonExporter
from ..utils.notifier import Notifier, AnkiTooltipNotifier
@dataclass
class ArchiverVendor:
window: Any
config: ConfigSettings
notifier: Notifier = field(default_factory=AnkiTooltipNotifier)
@property
def deck_manager(self) -> DeckManager:
return AnkiStaticDeckManager(self.window.col.decks)
def all_deck_archiver(self):
return AllDeckArchiver(
self.deck_manager,
lambda deck: AnkiDeckArchiver(deck,
self.config.full_snapshot_path,
AnkiJsonExporter(self.window.col, self.config),
DulwichAnkiRepo))
def snapshot_path(self):
return Path(self.config.snapshot_path)
def do_manual_snapshot(self):
self.do_snapshot('CrowdAnki: Manual snapshot')
def snapshot_on_sync(self):
if self.config.automated_snapshot:
self.do_snapshot('CrowdAnki: Snapshot on sync')
def do_snapshot(self, reason):
with progress_indicator(self.window, 'Taking CrowdAnki snapshot of all decks'):
import datetime
print(f"{datetime.datetime.now()} Starting snapshot for {self.config.full_snapshot_path}...")
self.all_deck_archiver().archive(overrides=self.overrides(),
reason=reason)
print(f"{datetime.datetime.now()} Finished snapshot for {self.config.full_snapshot_path}...")
self.notifier.info("Snapshot successful",
f"The CrowdAnki snapshot to {str(self.config.full_snapshot_path)} successfully completed")
def overrides(self):
return self.deck_manager.for_names(self.config.snapshot_root_decks)
| 40.290909 | 121 | 0.678249 | [
"MIT"
] | evandroforks/CrowdAnki | history/archiver_vendor.py | 2,216 | Python |
import hiplot
import lwa_antpos
def get_exp(uri):
df = lwa_antpos.lwa_df.reset_index()
df.drop(0, inplace=True) # remove antnum=0
df.antname = df.antname.apply(lambda x: int(x.split('-')[1]))
df.rename(columns={'antname': 'antnum'}, inplace=True)
df = df[['antnum', 'pola_fee', 'polb_fee', 'arx_address', 'pola_arx_channel', 'polb_arx_channel', 'snap2_hostname',
'pola_digitizer_channel', 'polb_digitizer_channel']]
return hiplot.Experiment.from_dataframe(df)
| 38.461538 | 119 | 0.69 | [
"BSD-3-Clause"
] | jaycedowell/mnc_python | mnc/lwa_hiplot.py | 500 | Python |
#
# This file is part of Orchid and related technologies.
#
# Copyright (c) 2017-2021 Reveal Energy Services. All Rights Reserved.
#
# LEGAL NOTICE:
# Orchid contains trade secrets and otherwise confidential information
# owned by Reveal Energy Services. Access to and use of this information is
# strictly limited and controlled by the Company. This file may not be copied,
# distributed, or otherwise disclosed outside of the Company's facilities
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
import pathlib
def _stem_names():
"""Returns the sequence of example stem names."""
example_stems = ['completion_analysis', 'plot_time_series', 'plot_trajectories',
'plot_treatment', 'search_data_frames', 'volume_2_first_response']
return example_stems
def notebook_names():
"""Returns the sequence of example notebook names."""
result = [str(pathlib.Path(s).with_suffix('.ipynb')) for s in _stem_names()]
return result
def ordered_script_names():
script_name_pairs = [
('plot_trajectories.py', 0),
('plot_treatment.py', 1),
('plot_time_series.py', 2),
('completion_analysis.py', 3),
('volume_2_first_response.py', 4),
('search_data_frames.py', 5),
]
ordered_pairs = sorted(script_name_pairs, key=lambda op: op[1])
ordered_names = [op[0] for op in ordered_pairs]
difference = set(script_names()).difference(set(ordered_names))
assert len(difference) == 0, f'Ordered set, {ordered_names},' \
f' differs from, set {script_names()}' \
f' by, {difference}.'
return ordered_names
def script_names():
"""Returns the sequence of example script names."""
result = [str(pathlib.Path(s).with_suffix('.py')) for s in _stem_names()]
return result
| 36.811321 | 87 | 0.678626 | [
"Apache-2.0"
] | Reveal-Energy-Services/orchid-python-api | examples.py | 1,951 | Python |
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file designed in a way that is independent of Django
# in order to be easy (but changes are required) to be used
# outside Django in the future
# That's why is using jinja2 as a template language instead of
# Django's template language.
#
# Example of use:
# Make sure to have jinja2 template language:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: str with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF or None if the conversion failed.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
        raise RuntimeError(f'Command {command_line} exited with non-zero return code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
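# Usage sketch for the PDF path (assumes `pandoc` is installed and on PATH; file names are illustrative):
#   import json
#   datapackage = json.load(open("datapackage.json"))
#   pdf_bytes = datapackage_to_pdf(datapackage)
#   with open("datapackage.pdf", "wb") as out:
#       out.write(pdf_bytes)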
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset license
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
| 27.355932 | 108 | 0.656753 | [
"MIT"
] | Swiss-Polar-Institute/schema-collaboration-arctic-century | SchemaCollaboration/datapackage_to_documentation/main.py | 3,228 | Python |
# ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
from ..instruction_parent_class import LogicalInstruction
from ...circuits.quantum_circuit import QuantumCircuit
from ..helper_functions import pos2qudit
class InstrSynExtraction(LogicalInstruction):
"""
Instruction for a round of syndrome extraction.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
qecc_init_ticks = qecc.qecc_params.get('init_ticks', 0)
qecc_meas_ticks = qecc.qecc_params.get('meas_ticks', 7)
qecc_x_ticks = qecc.qecc_params.get('x_ticks', [2, 4, 3, 5])
qecc_z_ticks = qecc.qecc_params.get('z_ticks', [2, 4, 3, 5])
self.init_ticks = gate_params.get('init_ticks', qecc_init_ticks)
self.meas_ticks = gate_params.get('meas_ticks', qecc_meas_ticks)
self.x_ticks = gate_params.get('x_ticks', qecc_x_ticks)
self.z_ticks = gate_params.get('z_ticks', qecc_z_ticks)
self.abstract_circuit = QuantumCircuit(**gate_params)
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
self.ancilla_x_check = set([])
self.ancilla_z_check = set([])
        # Go through the ancillas and grab the data qubits on either side of each one.
layout = qecc.layout # qudit_id => (x, y)
self.pos2qudit = pos2qudit(layout)
for q, (x, y) in layout.items():
if x % 2 == 0 and y % 2 == 0:
# Ancilla
if x % 4 == y % 4:
# X check
self._create_x_check(q, x, y)
else:
# Z check
self._create_z_check(q, x, y)
# Determine the logical operations
# --------------------------------
z_qudits = set(qecc.sides['top'])
x_qudits = set(qecc.sides['left'])
logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.initial_logical_ops = logical_ops
logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.final_logical_ops = logical_ops
self.logical_signs = None
self.logical_stabilizers = None
# Must be called at the end of initiation.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
def _create_x_check(self, ancilla, x, y):
"""
Creates X-checks for circuit_extended.
"""
# register the x syndrome ancillas
self.ancilla_x_check.add(ancilla)
        # get the positions where the data qubits should sit relative to the ancilla
data_pos = self._data_pos_x_check(x, y)
# Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.x_ticks)
# Now add the check to the extended circuit
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('X check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
def _create_z_check(self, ancilla, x, y):
"""
Creates Z-checks for circuit_extended.
"""
# register the z syndrome ancillas
self.ancilla_z_check.add(ancilla)
        # get the positions where the data qubits should sit relative to the ancilla
data_pos = self._data_pos_z_check(x, y)
# Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.z_ticks)
# Now add the check to the extended circuit
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('Z check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
@staticmethod
def _find_data(position_to_qudit, positions, ticks):
"""
From the positions given for possible data qudits, add the qudits and their corresponding ticks for each qudit
that does exist.
:param position_to_qudit:
:param positions:
:param ticks:
:return:
"""
data_list = []
tick_list = []
for i, p in enumerate(positions):
data = position_to_qudit.get(p, None)
if data is not None:
data_list.append(data)
tick_list.append(ticks[i])
return data_list, tick_list
@staticmethod
def _data_pos_z_check(x, y):
"""
Determines the position of data qudits in a Z check in order of ticks.
Check direction: 1 | 2
|
---+---
|
3 | 4
"""
data_pos = [
(x - 1, y + 1),
(x + 1, y + 1),
(x - 1, y - 1),
(x + 1, y - 1)
]
return data_pos
@staticmethod
def _data_pos_x_check(x, y):
"""
        Determines the position of data qudits in an X check in order of ticks.
Check direction: 1 | 3
|
---+---
|
2 | 4
"""
data_pos = [
(x - 1, y + 1),
(x - 1, y - 1),
(x + 1, y + 1),
(x + 1, y - 1)
]
return data_pos
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
if self.qecc.height != self.qecc.width:
raise Exception('This currently only works for square code blocks.')
# instr = self.instruction('instr_syn_extract')
instr = self
stabs_row_x = []
stabs_row_z = []
destabs_row_x = []
destabs_row_z = []
for a in self.ancilla_qudit_set:
stabs_row_z.append({a})
stabs_row_x.append(set([]))
destabs_row_x.append({a})
destabs_row_z.append(set([]))
xdestabs = self.generate_xdestabs()
zdestabs = self.generate_zdestabs()
# Creating stabilizers
for check_type, _, params in instr.abstract_circuit.items():
if check_type == 'X check':
# Ancillas initialized in |0>
# Pauli X-type stabilizers
stabs_row_x.append(set(params['datas']))
stabs_row_z.append(set([]))
destabs_row_x.append(set([]))
destabs_row_z.append(zdestabs[params['ancillas']])
else:
# Ancillas initialized in |0>
# Pauli Z-type stabilizers
stabs_row_z.append(set(params['datas']))
stabs_row_x.append(set([]))
destabs_row_z.append(set([]))
destabs_row_x.append(xdestabs[params['ancillas']])
output_dict = {
'stabs_x': stabs_row_x,
'stabs_z': stabs_row_z,
'destabs_x': destabs_row_x,
'destabs_z': destabs_row_z,
}
self._stabs_destabs = output_dict
return output_dict
def generate_xdestabs(self):
distance = self.qecc.distance
# x-type destabilizers
xdestabs_temp = []
        # going along the bottom
if distance % 2 == 0:
b = 1
else:
b = 2
for x in range(b, distance, 2):
temp = []
y = distance - 1
for j in range(0, distance):
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] > distance - 1:
break
temp.append(new_point)
xdestabs_temp.append(temp)
# ----------------
xdestabs = []
for ds in xdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
# print('-', i, j)
temp.append(ds[j])
xdestabs.append(temp)
# -----------------
# ladder climb
ladder = []
x = 0
for y in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
xdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range((distance + 1) % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
xdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in xdestabs:
row = set([])
# Find the associated ancilla location
x, y = d[-1]
a = relayout[(2 * x + 1 + 1, 2 * y + 1 - 1)]
if a in self.ancilla_x_check:
a = relayout[(2 * x - 1 + 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = set(row)
return set_destabs
def generate_zdestabs(self):
distance = self.qecc.distance
        # z-type destabilizers
        zdestabs_temp = []
        # going along the bottom
if distance % 2 == 0:
b = 2
else:
b = 1
for y in range(b, distance, 2):
temp = []
x = distance - 1
for j in range(0, distance):
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] > distance - 1:
break
temp.append(new_point)
# print(x, y)
zdestabs_temp.append(temp)
# ----------------
zdestabs = []
for ds in zdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
# print('-', i, j)
temp.append(ds[j])
zdestabs.append(temp)
# -----------------
# ladder climb
ladder = []
y = 0
for x in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
zdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range(distance % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
zdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in zdestabs:
row = set([])
# Find the associated ancilla location
x, y = d[-1]
a = relayout[(2 * x + 1 - 1, 2 * y + 1 + 1)]
if a in self.ancilla_z_check:
a = relayout[(2 * x + 1 - 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = row
return set_destabs
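# Worked example (a sketch of the parity rules used in InstrSynExtraction.__init__):
# a layout site at (x, y) = (2, 2) has x % 2 == y % 2 == 0, so it is an ancilla, and since
# x % 4 == y % 4 (both equal 2) it seeds an X check whose data qudits sit at
# (1, 3), (1, 1), (3, 3), (3, 1) according to _data_pos_x_check. A site at (4, 2) has
# x % 4 == 0 != 2 == y % 4, so it seeds a Z check instead.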
class InstrInitZero(LogicalInstruction):
"""
Instruction for initializing a logical zero.
It is just like syndrome extraction except the data qubits are initialized in the zero state at tick = 0.
`ideal_meas` == True will cause the measurements to be replace with ideal measurements.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_zero'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
# This is basically syndrome extraction round where all the data qubits are initialized to zero.
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
# Make a shallow copy of the abstract circuits.
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.initial_logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': None, 'Z': None}, # None => can be anything
]
# Special for state initialization:
# ---------------------------------
# list of tuples of logical check and delogical stabilizer for each logical qudit.
self.final_logical_ops = [
{'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}]), 'X': QuantumCircuit([{'X': set(qecc.sides['left'])}])}
]
        # List of corresponding logical sign. (The logical sign if the instruction is performed ideally.)
self.logical_signs = [0]
self.logical_stabilizers = ['Z']
# ---------------------------------
# Must be called at the end of initiation.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
# |0> -> logical Z is a stabilizer
self._stabs_destabs['stabs_z'].append(set(self.qecc.sides['top']))
self._stabs_destabs['stabs_x'].append(set([]))
self._stabs_destabs['destabs_x'].append(set(self.qecc.sides['left']))
self._stabs_destabs['destabs_z'].append(set([]))
return self._stabs_destabs
class InstrInitPlus(LogicalInstruction):
"""
Instruction for initializing a logical plus.
It is just like syndrome extraction except the data qubits are initialized in the plus state at tick = 0.
    `ideal_meas` == True will cause the measurements to be replaced with ideal measurements.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_plus'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
# This is basically syndrome extraction round where all the data qubits are initialized to plus.
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
# Make a shallow copy of the abstract circuits.
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
# self.abstract_circuit.append('init |+>', qudits=data_qudits, tick=0)
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.abstract_circuit.append('H', locations=data_qudits, tick=1)
self.initial_logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': None, 'Z': None}, # None => can be anything
]
# Special for state initialization:
# ---------------------------------
# list of tuples of logical check and delogical stabilizer for each logical qudit.
self.final_logical_ops = [
{'X': QuantumCircuit([{'X': set(qecc.sides['left'])}]), 'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}])}
]
        # List of corresponding logical sign. (The logical sign if the instruction is performed ideally.)
self.logical_signs = [0]
self.logical_stabilizers = ['X']
# ---------------------------------
# Must be called at the end of initiation.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
        # |+> -> logical X is a stabilizer
        self._stabs_destabs['stabs_x'].append(set(self.qecc.sides['left']))
        self._stabs_destabs['stabs_z'].append(set([]))
        self._stabs_destabs['destabs_z'].append(set(self.qecc.sides['top']))
        self._stabs_destabs['destabs_x'].append(set([]))
return self._stabs_destabs
| 32.293638 | 121 | 0.547181 | [
"Apache-2.0"
] | DaveDRoberts/PECOS | pecos/qeccs/surface_medial_4444/instructions.py | 19,796 | Python |
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import unified_planning as up
import unified_planning.engines as engines
from unified_planning.plans import Plan
from unified_planning.model import ProblemKind
from unified_planning.exceptions import UPUsageError
from unified_planning.engines.results import LogLevel, PlanGenerationResultStatus, Result, ValidationResult, PlanGenerationResult
from typing import IO, Callable, Dict, List, Optional, Tuple, Type, cast
from fractions import Fraction
from multiprocessing import Process, Queue
class Parallel(engines.engine.Engine,
engines.mixins.OneshotPlannerMixin,
engines.mixins.PlanValidatorMixin):
"""Create a parallel instance of multiple Engines."""
def __init__(self, engines: List[Tuple[Type[engines.engine.Engine], Dict[str, str]]]):
self.engines = engines
@property
def name(self) -> str:
return 'Parallel'
@staticmethod
def supports(problem_kind: 'ProblemKind') -> bool:
# The supported features depends on its actual engines
return True
def _run_parallel(self, fname, *args) -> List[Result]:
signaling_queue: Queue = Queue()
processes = []
for idx, (engine_class, opts) in enumerate(self.engines):
options = opts
_p = Process(name=str(idx),
target=_run,
args=(idx, engine_class, options,
signaling_queue, fname, *args))
processes.append(_p)
_p.start()
processes_alive = len(processes)
results: List[Result] = []
definitive_result_found: bool = False
while True:
if processes_alive == 0: # Every planner gave a result
break
(idx, res) = signaling_queue.get(block=True)
processes_alive -= 1
if isinstance(res, BaseException):
raise res
else:
assert isinstance(res, Result)
# If the planner is sure about the result (optimality of the result or impossibility of the problem or the problem does not need optimality) exit the loop
if res.is_definitive_result(*args):
definitive_result_found = True
break
else:
results.append(res)
for p in processes:
p.terminate()
if definitive_result_found: # A planner found a definitive result
return [res]
return results
def _solve(self, problem: 'up.model.AbstractProblem',
callback: Optional[Callable[['up.engines.results.PlanGenerationResult'], None]] = None,
timeout: Optional[float] = None,
output_stream: Optional[IO[str]] = None) -> 'up.engines.results.PlanGenerationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.OneshotPlannerMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot solve this kind of problem!')
if callback is not None:
warnings.warn('Parallel engines do not support the callback system.', UserWarning)
if output_stream is not None:
warnings.warn('Parallel engines do not support the output stream system.', UserWarning)
final_reports = self._run_parallel('solve', problem, None, timeout, None)
result_order: List[PlanGenerationResultStatus] = [
PlanGenerationResultStatus.SOLVED_OPTIMALLY, # List containing the results in the order we prefer them
PlanGenerationResultStatus.UNSOLVABLE_PROVEN,
PlanGenerationResultStatus.SOLVED_SATISFICING,
PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
PlanGenerationResultStatus.TIMEOUT,
PlanGenerationResultStatus.MEMOUT,
PlanGenerationResultStatus.INTERNAL_ERROR,
PlanGenerationResultStatus.UNSUPPORTED_PROBLEM]
final_result: Optional[PlanGenerationResult] = None
result_found: bool = False
for ro in result_order:
if result_found:
break
for r in final_reports:
pgr = cast(PlanGenerationResult, r)
if pgr.status == ro:
result_found = True
final_result = pgr
break
logs = [up.engines.LogMessage(LogLevel.INFO, str(fr)) for fr in final_reports]
# if no results are given by the planner, we create a default one
if final_result is None:
return up.engines.PlanGenerationResult(PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
None, self.name, log_messages=logs)
new_plan = problem.normalize_plan(final_result.plan) if final_result.plan is not None else None
if final_result.log_messages is not None:
logs = final_result.log_messages + logs
return up.engines.results.PlanGenerationResult(
final_result.status,
new_plan,
final_result.engine_name,
final_result.metrics,
logs
)
def _validate(self, problem: 'up.model.AbstractProblem',
plan: Plan) -> 'up.engines.results.ValidationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.PlanValidatorMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot validate this kind of problem!')
return cast(ValidationResult, self._run_parallel('validate', problem, plan)[0])
def _run(idx: int, EngineClass: type, options: Dict[str, str], signaling_queue: Queue, fname: str, *args):
with EngineClass(**options) as s:
try:
local_res = getattr(s, fname)(*args)
except Exception as ex:
signaling_queue.put((idx, ex))
return
signaling_queue.put((idx, local_res))
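# Usage sketch (the engine classes and options below are hypothetical placeholders):
#   parallel = Parallel([(EngineA, {}), (EngineB, {'heuristic': 'hadd'})])
#   result = parallel.solve(problem)  # public `solve` is expected to come from OneshotPlannerMixin, wrapping _solve
# Each engine runs in its own process; the first definitive result (see Result.is_definitive_result)
# terminates the remaining processes.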
| 44.885906 | 170 | 0.638906 | [
"Apache-2.0"
] | aiplan4eu/unified-planning | unified_planning/engines/parallel.py | 6,688 | Python |
# coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HardwareConnectorResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[HardwareConnector]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
HardwareConnectorResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:return: The pagination_info of this HardwareConnectorResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this HardwareConnectorResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this HardwareConnectorResponse.
a list of hardware connectors
:return: The items of this HardwareConnectorResponse.
:rtype: list[HardwareConnector]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this HardwareConnectorResponse.
a list of hardware connectors
:param items: The items of this HardwareConnectorResponse.
:type: list[HardwareConnector]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HardwareConnectorResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
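# Usage sketch (illustrative values only):
#   resp = HardwareConnectorResponse(items=[])
#   resp.to_dict()   # -> {'pagination_info': None, 'items': []}
#   print(resp)      # pretty-printed via pformat(), see to_str()/__repr__()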
| 27.701299 | 197 | 0.585795 | [
"Apache-2.0"
] | unixtreme/purity_fb_python_client | purity_fb/purity_fb_1dot5/models/hardware_connector_response.py | 4,266 | Python |
#####################
# IMPORT DEPENDENCIES
######################
# flask (server)
from flask import(
Flask,
render_template,
jsonify,
request,
redirect)
#######################
# FLASK SET-UP
#######################
app = Flask(__name__)
#######################
# FLASK ROUTES
#######################
@app.route("/")
def index():
return render_template("index.html")
# @app.route("/outcomes")
# def charts():
# return render_template("outcomes.html")
if __name__ == "__main__":
    app.run(debug=True)
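# Run sketch: `python app.py` starts Flask's development server (by default on http://127.0.0.1:5000/)
# and serves templates/index.html at the root route.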
"MIT"
] | risatino/seadogz | app.py | 542 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <[email protected]>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
import numpy as np
class qa_dqpsk_soft_demapper_cc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
pi=np.pi
data_in = [0, pi/2, pi, -pi/2, pi/2, -pi/2, -pi/2, 0, 0, pi, pi/2, pi/2]
data_in = [np.exp(1j*i) for i in data_in]
data_in = [i*np.exp(1j*pi/4) for i in data_in]
self.src = blocks.vector_source_c(data_in)
self.dqpsk = ieee802_15_4.dqpsk_soft_demapper_cc(framelen=6)
self.snk = blocks.vector_sink_c(1)
self.tb.connect(self.src, self.dqpsk, self.snk)
self.tb.run ()
# check data
data_out = self.snk.data()
ref = [0, pi/2, pi, -pi/2, pi/2, pi, -pi/2, 0, 0, pi, pi, pi/2]
ref = np.array([np.exp(1j*i) for i in ref])
print "angle in:", np.angle(data_in)/pi*180
print "angle out:", np.angle(data_out)/pi*180
print "angle ref:", np.angle(ref)/pi*180
self.assertFloatTuplesAlmostEqual(ref, data_out, 5)
if __name__ == '__main__':
gr_unittest.run(qa_dqpsk_soft_demapper_cc)
| 37.666667 | 140 | 0.670703 | [
"BSD-2-Clause"
] | xueyuecanfeng/C-LQI | gr-ieee802-15-4/python/qa_dqpsk_soft_demapper_cc.py | 2,147 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
from snowflake.connector.util_text import construct_hostname
def test_construct_hostname_basic():
assert (
construct_hostname("eu-central-1", "account1")
== "account1.eu-central-1.snowflakecomputing.com"
)
assert construct_hostname("", "account1") == "account1.snowflakecomputing.com"
assert construct_hostname(None, "account1") == "account1.snowflakecomputing.com"
assert (
construct_hostname("as-east-3", "account1")
== "account1.as-east-3.snowflakecomputing.com"
)
assert (
construct_hostname("as-east-3", "account1.eu-central-1")
== "account1.as-east-3.snowflakecomputing.com"
)
assert (
construct_hostname("", "account1.eu-central-1")
== "account1.eu-central-1.snowflakecomputing.com"
)
assert (
construct_hostname(None, "account1.eu-central-1")
== "account1.eu-central-1.snowflakecomputing.com"
)
assert (
construct_hostname(None, "account1-jkabfvdjisoa778wqfgeruishafeuw89q.global")
== "account1-jkabfvdjisoa778wqfgeruishafeuw89q.global.snowflakecomputing.com"
)
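# In short (as exercised by the assertions above): an explicit region argument overrides any region
# already embedded in the account name, while an empty or None region falls back to
# "<account>[.<embedded-region>].snowflakecomputing.com".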
| 28.477273 | 85 | 0.672785 | [
"Apache-2.0"
] | Fayel-cyber/snowflake-connector-python | test/unit/test_construct_hostname.py | 1,253 | Python |
#
# -*- coding: utf-8 -*-
#
import unittest
import os
import shutil
import yaml
import tensorflow as tf
from neural_compressor.experimental import model_conversion
tf.compat.v1.enable_eager_execution()
from tensorflow import keras
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
device: cpu
model_conversion:
source: qat
destination: default
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def prepare_dataset():
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
return train_images, train_labels
def prepare_model(model_out_path, train_images, train_labels):
# Define the model architecture.
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
model.save(model_out_path)
def prepare_qat_model(model_in_path, model_out_path, train_images, train_labels):
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
    # q_aware stands for quantization aware.
model = tf.keras.models.load_model(model_in_path)
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
q_aware_model.save(model_out_path)
@unittest.skipIf(tf.version.VERSION < '2.4.0', "Only supports tf 2.4.0 or above")
class TestModelConversion(unittest.TestCase):
@classmethod
def setUpClass(self):
self._baseline_temp_path = './temp_baseline'
self._qat_temp_path = './temp_qat'
self._quantized_temp_path = './temp_quantized'
build_fake_yaml()
train_images, train_labels = prepare_dataset()
prepare_model(self._baseline_temp_path, train_images, train_labels)
prepare_qat_model(self._baseline_temp_path, self._qat_temp_path, train_images, train_labels)
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
shutil.rmtree(self._qat_temp_path, ignore_errors=True)
shutil.rmtree(self._baseline_temp_path, ignore_errors=True)
shutil.rmtree(self._quantized_temp_path, ignore_errors=True)
def test_model_conversion(self):
from neural_compressor.experimental import ModelConversion, common
from neural_compressor.conf.config import Conf
conversion = ModelConversion()
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
q_model.save(self._quantized_temp_path)
conf = Conf('fake_yaml.yaml')
conversion = ModelConversion(conf)
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
conversion = ModelConversion('fake_yaml.yaml')
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
graph = tf.compat.v1.Graph()
with graph.as_default():
with tf.compat.v1.Session() as sess:
meta_graph=tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], self._quantized_temp_path)
print(meta_graph.graph_def.node)
for i in meta_graph.graph_def.node:
if 'MatMul' in i.op:
self.assertTrue('QuantizedMatMul' in i.op)
if 'MaxPool' in i.op:
self.assertTrue('QuantizedMaxPool' in i.op)
if 'Conv2D' in i.op:
self.assertTrue('QuantizedConv2D' in i.op)
if __name__ == "__main__":
unittest.main()
| 35.808219 | 146 | 0.666985 | [
"Apache-2.0"
] | huggingface/neural-compressor | test/test_model_conversion.py | 5,228 | Python |
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
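# For example (sketch): is_template_string("{{ states('sun.sun') }}") is True because the string
# contains a Jinja delimiter, while is_template_string("plain text") is False.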
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
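    # Illustrative behaviour of the literal_eval based parsing above (a sketch, not exhaustive):
    #   "1"      -> 1        (simple numeric string, passes the _IS_NUMERIC check)
    #   "True"   -> True     (bool inherits from int and is let through)
    #   "on"     -> "on"     (not a Python literal, so the original render is kept)
    #   "[1, 2]" -> a wrapped list whose str() is still the original "[1, 2]"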
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
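    # Illustrative template usage (sketch):
    #   states('sensor.temp')  -> the state string, or 'unknown' if the entity is missing
    #   states.sensor.temp     -> a TemplateState wrapper (or None if the entity is missing)
    #   states.sensor | list   -> every sensor state, registering the domain for re-render tracking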
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
        # Only need to collect when the state is None; otherwise collection happens on the
        # first actual access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: State) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
    Take a datetime and return its "age" as a string.
    The age can be in seconds, minutes, hours, days, months or years. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.
    A datetime in the future is returned unmodified rather than aged.
    If the input is not a datetime object it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as a context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
| 31.853752 | 269 | 0.627608 | [
"Apache-2.0"
] | apapadopoulou/core | homeassistant/helpers/template.py | 49,660 | Python |
import threading, queue
import time
import random
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-9s) %(message)s',)
NUMBER_OF_THREADS = 4
TIMEOUT_SECONDS = 5
class SampleThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, id=None, kwargs=None):
super().__init__(group=group, target=target, name=name)
self.id = id
self.kwargs = kwargs
self.queue = kwargs['queue']
return
def run(self):
# do some work here
        logging.debug(f'Running thread id={self.id}')
r = random.uniform(0, 5)
time.sleep(r)
self.queue.put(f'Thread id={self.id} finished running in {r} seconds')
if __name__ == '__main__':
print('Starting threads')
# create a list to hold running SampleThread object instances
threads = list()
# build a single queue to send to all thread objects
q = queue.Queue()
for i in range(NUMBER_OF_THREADS):
t = SampleThread(id = i, kwargs={'queue':q})
t.start()
threads.append(t)
# wait until all threads are finished
logging.debug('Waiting for all threads to finish running')
    for t in threads:
        t.join()
logging.debug('All processes are finished running')
logging.debug('Results')
while not q.empty():
logging.debug(q.get())
| 27.918367 | 82 | 0.645468 | [
"MIT"
] | guneykayim/python-examples | multithreading/multithreading_simple.py | 1,368 | Python |
'''
Class Name: File
Purpose: The purpose of this class is represent data of a particular file
in a file system.
'''
class File:
def __init__(self, name = None, directory = None, date = None, fId = None, folderId = None, extension = ""):
self.__name = name
self.__directory = directory
self.__date = date
self.__id = fId
self.__folderId = folderId
self.__mimeType = extension
def __repr__(self):
        return str(self.getName)
'''
Name: getName
Purpose: A getter method for the name of the file.
return: private attribute __name
'''
@property
def getName(self):
return self.__name
'''
Name: getDir
Purpose: a getter method for the name of the directory the file is in.
return: private attribute __directory
'''
@property
def getDir(self):
return self.__directory
'''
Name: getLastModified
Purpose: a getter method for the date that the file was last modified at
return: private attribute __date
'''
@property
def getLastModified(self):
return self.__date
'''
Name: getDetails
Purpose: Returns the full file address of a file object.
return: a string representing the full file details
'''
def getDetails(self):
return self.getDir + self.getName
@property
def getFileId(self):
return self.__id
@property
def getFolderId(self):
return self.__folderId
@property
def getMimeType(self):
        return self.__mimeType
| 23 | 109 | 0.717391 | [
"MIT"
] | tanvirtin/Cloud-Backup | scripts/File.py | 1,380 | Python |
import csv
import numpy as np
import os
import sys
import time
import jismesh.utils as ju
import pandas as pd
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from common.datastructure.Point import Point
from common.datastructure.Mesh import Mesh
# meshTokyo = Mesh('tokyo','500m')
# GRIDNUMBER = meshTokyo.lonNum * meshTokyo.latNum
# print(meshTokyo.size, GRIDNUMBER)
# InterpolatedStep = 12
def getTimestamps(fileName):
last_tid = ''
D = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
timestamp = line[1]
D.append(timestamp)
last_tid = tid
return D
def getMesh(mesh, readFileName, writeFileName):
cnt = 0
wf = open(writeFileName, 'w')
with open(readFileName, 'r') as rf:
for line in csv.reader(rf):
if cnt % 1000000 == 0:
print(cnt)
tid = line[0]
timestamp = line[1]
p = Point(float(line[2]), float(line[3]))
meshid = mesh.inWhichGrid(p)
wf.write(','.join([tid, timestamp, str(meshid)])+'\n')
cnt += 1
wf.close()
def genMeshDynamic(mesh, fileName, meshFileName):
MD = {}
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
timestamp = line[1]
meshid = line[2]
key = (timestamp, meshid)
if key in MD:
MD[key].add(tid)
else:
                MD[key] = {tid}
wf = open(meshFileName, 'w')
Timestamps = getTimestamps(fileName)
for ts in Timestamps:
for meshid in range(mesh.lonNum * mesh.latNum):
key = (ts, str(meshid))
if key in MD:
value = len(MD[key])
else:
value = 0
wf.write(','.join([key[0], key[1], str(value)]) + '\n')
wf.close()
def getGrids(fileName):
last_tid = ''
G = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
grid = line[1]
G.append(grid)
last_tid = tid
return G
def getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):
df1 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df1.iloc[:,2] = np.log10(df1.iloc[:,2]+1) * 100
df2 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df2.iloc[:, 2] = np.log(df2.iloc[:,2]+1) * 100
with open(dynamicFileName1, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
with open(dynamicFileName2, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
df1.to_csv(dynamicFileName1, header=False, index=False, mode='a')
df2.to_csv(dynamicFileName2, header=False, index=False, mode='a')
def getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
            if line[0] in ('@dynamic-mesh', '@use-mesh-code'):
                wf.write(','.join(line) + '\n')
            elif line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):
wf = open(mobmapFile, 'w')
wf.write('@static-mesh' + '\n')
wf.write(','.join([str(x) for x in
[mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]]) + '\n')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if timestamp != line[0]:
continue
else:
meshid = line[1]
number = line[2]
xi, yi = mesh.Index[int(meshid)]
wf.write(','.join([str(item) for item in [yi, xi, number]]) + '\n')
wf.close()
def loadGTrajectory(fileName):
print('loadTrajectory Started : ', time.ctime())
TDB = {}
with open(fileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
# timestamp = line[1]
meshid = line[2]
if tid in TDB:
TDB[tid].append(meshid)
else:
TDB[tid] = [meshid]
print('loadTrajectory Ended : ', time.ctime())
return TDB
def getINDEX(mesh, gTrajFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('getTrajectoryINDEX Started : ', time.ctime())
Timestamps = getTimestamps(gTrajFileName)
print('timestamps...', len(Timestamps))
TDB = loadGTrajectory(gTrajFileName)
INDEX = []
for i in range(len(Timestamps)):
INDEX.append([])
for G in range(GRIDNUMBER):
INDEX[i].append(set()) # set().add
# print(np.array(INDEX).shape)
for tid in TDB:
traj = TDB[tid]
for i in range(len(traj)):
HH = i
if traj[i] == 'None':
pass
else:
gid = int(traj[i])
INDEX[HH][gid].add(tid) # set().add
return INDEX
def getGridImageIndex(mesh, window=15):
GRIDNUMBER = mesh.lonNum * mesh.latNum
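    # Returns IMG: for each grid g, a window x window array of neighbouring grid ids
    # centred on g, oriented with north at the top (hence the final R[::-1, :] flip);
    # cells that fall outside the mesh are marked -1.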
IMG = []
for g in range(GRIDNUMBER):
R = np.zeros((window, window), dtype='int32')
current_x, current_y = mesh.Index[g]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
IMG.append(R)
return IMG
def genGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
for i in range(len(Timestamps) - 1):
for j in range(GRIDNUMBER):
cur_time = i
next_time = i + 1
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
trajfirst = INDEX[cur_time][cur_grid]
trajsecond = INDEX[next_time][next_grid]
transit_num = len(trajfirst & trajsecond)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
# This grid transit version works on the 1-minute trajectory (more accurate), not the 5-minute one.
# Note: it expects 1-minute trajectory data as input.
# TT is 288, not 289, because transits are counted over intervals between timestamps.
def genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
# Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
TT, SPAN = 24 * 12, 5
for i in range(TT):
for j in range(GRIDNUMBER):
cur_time = i
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
cur_time_start = cur_time * SPAN
cur_time_end = (cur_time + 1) * SPAN + 1
SS = set()
for pp in range(cur_time_start, cur_time_end):
trajfirst = INDEX[pp][cur_grid]
for qq in range(pp, cur_time_end):
trajsecond = INDEX[qq][next_grid]
SS.update(trajfirst & trajsecond)
transit_num = len(SS)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
def getGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps) - 1 # -1 is because of transit
print('getGridTransit Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def getGridPop(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 80, 80, 1
return R
def getGridPopPartition(R, M, K):
# Original 8*8 matrix N = 8 = M*K
# M = 4 # M*M sub matrix
# K = 2 # each sub matrix has the size of K * K
P = []
for i in range(M):
for j in range(M):
P.append(R[:, i*K:i*K+K, j*K:j*K+K, :])
return np.array(P)
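def _partition_example():
    # Hedged sketch (not original code): partition a (T, 8, 8, 1) population grid
    # with M=4, K=2 into M*M = 16 sub-grids, each of shape (T, K, K, 1).
    R_demo = np.zeros((10, 8, 8, 1), dtype='int32')
    P_demo = getGridPopPartition(R_demo, M=4, K=2)
    assert P_demo.shape == (16, 10, 2, 2, 1)
    return P_demo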
def getGridPop2DNumpy(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
return R
def getGridPopTimeInterval(mesh, popFileName):
print('getGridPop', popFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = []
lastTimestamp = ''
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = line[0]
if timestamp != lastTimestamp:
Timestamps.append(timestamp)
lastTimestamp = timestamp
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 75, 80, 1
return R
def getGridTransitTimeInterval(mesh, transitFileName):
print('getGridTransit Started : ', transitFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
# Timestamps = []
# lastTimestamp = ''
# with open(transitFileName, 'r') as rf:
# tansistReader = csv.reader(rf)
# for line in tansistReader:
# timestamp = line[0]
# if timestamp != lastTimestamp:
# Timestamps.append(timestamp)
# lastTimestamp = timestamp
# TIMENUMBER = len(Timestamps)
TIMENUMBER = 24 * 12
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):
TIMESTEP = InterpolatedStep * 2
Sequence = []
for i in range(R.shape[0] - TIMESTEP):
Sequence.append(R[i:i+TIMESTEP, :, :, :])
Sequence = np.array(Sequence, dtype='int32')
INDEX = list(range(len(Sequence)))
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate))]
testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
train = Sequence[trainINDEX]
test = Sequence[testINDEX]
np.save(path + 'train_' + fileName, train)
np.save(path + 'test_' + fileName, test)
print(train.shape, test.shape)
# trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate - validateRate))]
# validateINDEX = INDEX[int(len(INDEX) * (1 - testRate - validateRate)):int(len(INDEX) * (1 - testRate))]
# testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
# train = Sequence[trainINDEX]
# validate = Sequence[validateINDEX]
# test = Sequence[testINDEX]
# np.save(path + 'train_' + fileName, train)
# np.save(path + 'validate_' + fileName, validate)
# np.save(path + 'test_' + fileName, test)
# print(train.shape, validate.shape, test.shape)
# or directly return not save to file because just too big.
# return train, validate, test
def getShuffledTrainTest(path, fileName, TrainTest):
return np.load(path + TrainTest + '_' + fileName + '.npy')
def testcode(mesh):
GRIDNUMBER = mesh.lonNum * mesh.latNum
window = 5
R = np.zeros((window, window), dtype='int32')
center = mesh.ReverseIndex[(2,2)]
current_x, current_y = mesh.Index[center]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
print(R)
for i in range(len(R)):
print(R[i])
for i in range(len(R)):
print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])
T = R.reshape(-1)
print(T.tolist())
P = T.reshape(window, window)
print(P)
print(R.shape)
print(R[54][4178])
print(np.max(R) == 3369)
print(mesh.Index[3369])
x, y = mesh.Index[3369]
lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \
mesh.minLat + (y + 0.5) * mesh.dLat
print(lon, lat)
print(mesh.lonNum, mesh.latNum)
T = np.array(range(GRIDNUMBER))
T = T.reshape(mesh.lonNum, mesh.latNum)
T = np.swapaxes(T, 1, 0)
T = T[::-1, :]
print(T)
print(T.shape)
def run5min201802(mesh, dataPATH, dates):
    print('Now is getting training XS and YS...', dates)
# timestamp = '2011-10-20 09:00:00'
# filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \
# + timestamp[11:13] + timestamp[14:16] + timestamp[17:19]
# print(filenameTime)
for date in dates:
# first step: from trajectory point to mesh
        getMesh(mesh, dataPATH + date + 'tokyo_interpo5min.csv',
                dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')
        # second step: calculate mesh population at each timestamp
        genMeshDynamic(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')
        # fourth step: mesh transit between two consecutive timestamps
        genGridTransit(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')
def getHHTransit(HH):
assert HH <= 22, 'Hour should not be over 22.'
dataPATH = '../interpo_data/'
date = '20111020'
R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')
# (144, 72, 80, 225)
R = R[HH*6:HH*6+6, :, :, :]
# (6, 72, 80, 225)
R = R.reshape(R.shape[0], -1, R.shape[-1])
# (6, 5760, 225)
R = R.transpose(1, 0, 2)
# (5760, 6, 225)
R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), int(R.shape[2]**0.5), 1)
return R
def runCrowdDensity():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217","20110218","20110219","20110220", "20110221",
"20110222","20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv')
genMeshDynamic(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')
# def runCrowdFlow_from5min():
# from common.dataparam.Param import alldates
# dataPATH = '../interpo_data/'
# meshTokyo = Mesh('tokyo', '500m')
# #meshcode_level = 4
#
# for date in alldates:
# print('this is date', date)
# genGridTransit(meshTokyo,
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')
# Note: in the paper, crowd flow is computed from the 1-minute trajectories.
def runCrowdFlow():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217", "20110218", "20110219", "20110220", "20110221",
"20110222", "20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv')
genGridTransit_5minutes_from_1minute(meshTokyo,
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')
def main():
runCrowdDensity()
if __name__ == '__main__':
    main()
| 36.542135 | 110 | 0.547506 | [
"MIT"
] | deepkashiwa/DeepUrbanEvent | meshdynamic/meshDynamic-Density.py | 26,018 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test the quoted APOGEE uncertainties from individual (rebinned) spectra. """
__author__ = "Andy Casey <[email protected]>"
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from glob import glob
from itertools import combinations
def get_differences(apStar_filename):
image = fits.open(apStar_filename)
N_visits = image[0].header["NVISITS"]
data_index = 1
error_index = 2
mask_index = 3
# Generate all permutations.
differences = []
for i, j in combinations(range(N_visits), 2):
di = image[data_index].data[i + 2, :]
dj = image[data_index].data[j + 2, :]
sigma = np.sqrt(image[error_index].data[i + 2, :]**2 \
+ image[error_index].data[j + 2, :]**2)
ok = (di > 0) * (dj > 0) * np.isfinite(di * dj * sigma) \
* (image[mask_index].data[i + 2, :] == 0) \
* (image[mask_index].data[j + 2, :] == 0)
differences.extend(((di - dj)/sigma)[ok])
differences = np.array(differences).flatten()
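    # If the quoted uncertainties are realistic, these normalized flux differences
    # should follow a standard normal distribution (mu ~ 0, sigma ~ 1), which
    # plot_differences overlays as the red curve.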
return differences
def plot_differences(differences):
fig, ax = plt.subplots(1)
y_bin, x_bin, _ = ax.hist(differences, bins=100, facecolor="#666666")
x = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
y = np.exp(-0.5*x**2)/np.sqrt(2*np.pi)
ax.plot(x, y*np.trapz(y_bin, x=x_bin[1:])/np.sqrt(2*np.pi), lw=2, c="r")
ax.set_title("mu = {0:.1f}, sigma(|d|) = {1:.1f}".format(
np.median(differences), np.std(np.abs(differences))))
ax.set_xlabel("(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)")
return fig
if __name__ == "__main__":
filenames = glob("APOGEE/*.fits")
all_differences = []
for filename in filenames:
differences = get_differences(filename)
if len(differences) > 0:
fig = plot_differences(differences)
fig.savefig("APOGEE/{0}.png".format(filename.split("/")[-1].split(".")[0]))
plt.close("all")
print(filename)
all_differences.extend(differences)
fig = plot_differences(np.array(all_differences))
fig.savefig("APOGEE/all.png")
| 28.142857 | 87 | 0.606368 | [
"MIT"
] | andycasey/luminosity-cannon | data/check_apogee_spectra.py | 2,167 | Python |
import praw
c_id='34kxuaxc4yWiKw'
c_secret='8bJqHqNHFdB6NKV9sHzFbo4_Dl4'
ua='my user agent'
un='the_ugly_bot'
pwd='whatever930'
def login():
r = praw.Reddit(client_id=c_id,
client_secret=c_secret,
user_agent=ua,
username=un,
password=pwd)
    return r
| 22.4 | 44 | 0.58631 | [
"MIT"
] | MrTsRex/Reddit_bot | obot.py | 336 | Python |
from __future__ import annotations
import abc
from dataclasses import asdict as asdict_, fields, is_dataclass
from pathlib import Path
from typing import Dict, Union, Tuple
from pprint import pformat
from covid_shared import ihme_deps
import numpy as np
import pandas as pd
import yaml
class YamlIOMixin:
"""Mixin for reading and writing yaml files."""
@staticmethod
def _coerce_path(path: Union[str, Path]) -> Path:
path = Path(path)
if path.suffix not in ['.yaml', '.yml']:
raise ValueError('Path must point to a yaml file. '
f'You provided {str(path)}')
return path
@classmethod
def _load(cls, path: Union[str, Path]):
path = cls._coerce_path(path)
with path.open() as f:
data = yaml.full_load(f)
return data
@classmethod
def _dump(cls, data, path: Union[str, Path]) -> None:
path = cls._coerce_path(path)
with path.open('w') as f:
yaml.dump(data, f, sort_keys=False)
class Specification(YamlIOMixin):
"""Generic class for pipeline stage specifications."""
@classmethod
def from_path(cls, specification_path: Union[str, Path]) -> Specification:
"""Builds the specification from a file path."""
spec_dict = cls._load(specification_path)
return cls.from_dict(spec_dict)
@classmethod
def from_dict(cls, spec_dict: Dict) -> Specification:
"""Builds the specification from a dictionary."""
args = cls.parse_spec_dict(spec_dict)
return cls(*args)
@classmethod
@abc.abstractmethod
def parse_spec_dict(cls, specification: Dict) -> Tuple:
"""Parses a dict representation of the specification into init args."""
raise NotImplementedError
@abc.abstractmethod
def to_dict(self) -> Dict:
"""Coerce the specification to a dict."""
raise NotImplementedError
def dump(self, path: Union[str, Path]) -> None:
"""Writes this specification to a file."""
data = self.to_dict()
self._dump(data, path)
def __repr__(self):
return f'{self.__class__.__name__}(\n{pformat(self.to_dict())}\n)'
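# --- Illustrative sketch, not part of the original pipeline ---
# A minimal concrete Specification showing how parse_spec_dict/to_dict pair up
# with from_path/from_dict/dump; the field names ('n_draws', 'seed') are invented.
class _ExampleSpecification(Specification):
    def __init__(self, n_draws: int = 100, seed: int = 42):
        self.n_draws = n_draws
        self.seed = seed
    @classmethod
    def parse_spec_dict(cls, spec_dict: Dict) -> Tuple:
        return spec_dict.get('n_draws', 100), spec_dict.get('seed', 42)
    def to_dict(self) -> Dict:
        return {'n_draws': self.n_draws, 'seed': self.seed}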
def asdict(data_class) -> Dict:
"""Type coerce items for easy serialization"""
data = asdict_(data_class)
out = {}
for k, v in data.items():
if isinstance(v, tuple):
out[k] = list(v)
elif isinstance(v, np.ndarray):
out[k] = v.tolist()
else:
out[k] = v
return out
def filter_to_spec_fields(spec_dict: dict, specification):
if is_dataclass(specification):
return {
k: v for k, v in spec_dict.items()
if k in [f.name for f in fields(specification)]
}
else:
return spec_dict
def load_location_hierarchy(location_set_version_id: int = None,
location_file: Path = None, **kwargs):
assert ((location_set_version_id and not location_file)
or (not location_set_version_id and location_file))
if location_set_version_id:
return ihme_deps.get_location_hierarchy_by_version(
location_set_version_id=location_set_version_id,
)
else:
return pd.read_csv(location_file)
| 29.612613 | 79 | 0.634621 | [
"BSD-3-Clause"
] | ihmeuw/covid-model-seiir-pipeline | src/covid_model_seiir_pipeline/lib/utilities.py | 3,287 | Python |
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Distance weighted sampling.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
# Arguments:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
a_indices: indices of anchors.
anchors: sampled anchor embeddings.
positives: sampled positive embeddings.
negatives: sampled negative embeddings.
"""
if not isinstance(neg_multiplier, int):
raise ValueError("`neg_multiplier` must be an integer.")
n = tf.size(labels)
if not isinstance(embeddings, tf.Tensor):
embeddings = tf.convert_to_tensor(embeddings)
d = embeddings.shape[1].value
distances = pairwise_distance(embeddings, squared=False)
    # cut off to avoid high variance.
distances = tf.maximum(distances, high_var_threshold)
# subtract max(log(distance)) for stability
log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
weights = tf.exp(log_weights - tf.reduce_max(log_weights))
# sample only negative examples by setting weights of the same class examples to 0.
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
adjacency = tf.equal(labels, tf.transpose(labels))
adjacency_not = tf.logical_not(adjacency)
mask = tf.cast(adjacency_not, tf.float32)
# number of negative/positive samples to sampling per sample.
# For imbalanced data, this sampling method can be a sample weighted method.
adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
m = tf.reduce_sum(adjacency_ex, axis=1)
if tf.reduce_min(m) == 0:
m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))
adjacency_ex += m
k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier
pos_weights = tf.cast(adjacency_ex, tf.float32)
weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
# anchors indices
a_indices = tf.reshape(tf.range(n), (-1,1))
a_indices = tf.tile(a_indices, [1, k])
a_indices = tf.reshape(a_indices, (-1,))
# negative sampling
def neg_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
n_indices = tf.reshape(n_indices, (-1,))
    # positive sampling
def pos_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
p_indices = tf.reshape(p_indices, (-1,))
anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
positives = tf.gather(embeddings, p_indices, name='gather_pos')
negatives = tf.gather(embeddings, n_indices, name='gather_neg')
return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Computes the margin base loss.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
margin: Float, margin term in the loss function.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
margin_based_Loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
if isinstance(beta_in, (float,int)):
beta = beta_in
beta_reg_loss = 0.0
else:
if isinstance(beta_in, tf.Tensor):
assert tf.shape(beta_in).shape == 1
k = tf.size(a_indices) / tf.size(labels)
k = tf.cast(k, tf.int32)
beta = tf.reshape(beta_in, (-1, 1))
beta = tf.tile(beta, [1, k])
beta = tf.reshape(beta, (-1,))
beta_reg_loss = tf.reduce_sum(beta) * nu
else:
raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
pos_loss = tf.maximum(margin + d_ap - beta, 0)
neg_loss = tf.maximum(margin + beta - d_an, 0)
pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
# normalize based on the number of pairs
loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
return loss
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
        high_var_threshold: float. cutoff for high gradient variance.
        nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
if not squared:
        d_ap = tf.sqrt(d_ap + 1e-16)
        d_an = tf.sqrt(d_an + 1e-16)
loss = tf.maximum(d_ap - d_an + margin, 0)
loss = tf.reduce_mean(loss)
return loss
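# --- Usage sketch (not part of the original module; assumes TF 1.x graph mode) ---
# Wires the distance-weighted sampler into the margin-based loss on a toy batch.
# `demo_labels` / `demo_embeddings` are illustrative placeholders; the embeddings
# are l2-normalized as the functions above require.
if __name__ == '__main__':
    demo_labels = tf.constant([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=tf.int32)
    demo_embeddings = tf.nn.l2_normalize(tf.random_normal([9, 64]), axis=1)
    demo_loss = margin_based_loss(demo_labels, demo_embeddings, beta_in=1.2)
    with tf.Session() as sess:
        print('margin-based loss on toy batch:', sess.run(demo_loss))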
| 45.948864 | 128 | 0.632497 | [
"Apache-2.0"
] | miroozyx/Magin-Based-loss | loss.py | 8,087 | Python |
import tensorflow as tf
import numpy as np
class WindowGenerator():
def __init__(self, input_width, label_width, shift,
train_df, val_df, test_df, label_columns=None):
# Store the raw data.
self.train_df = train_df
self.val_df = val_df
self.test_df = test_df
# Work out the label column indices.
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in
enumerate(label_columns)}
self.column_indices = {name: i for i, name in
enumerate(train_df.columns)}
# Work out the window parameters.
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def split_window(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack(
[labels[:, :, self.column_indices[name]] for name in self.label_columns],
axis=-1)
# Slicing doesn't preserve static shape information, so set the shapes
# manually. This way the `tf.data.Datasets` are easier to inspect.
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.preprocessing.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,)
ds = ds.map(self.split_window)
return ds
@property
def train(self):
return self.make_dataset(self.train_df)
@property
def val(self):
return self.make_dataset(self.val_df)
@property
def test(self):
return self.make_dataset(self.test_df)
@property
def example(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_example', None)
if result is None:
# No example batch was found, so get one from the `.train` dataset
result = next(iter(self.train))
# And cache it for next time
self._example = result
return result
def __repr__(self):
return '\n'.join([
f'Total window size: {self.total_window_size}',
f'Input indices: {self.input_indices}',
f'Label indices: {self.label_indices}',
f'Label column name(s): {self.label_columns}'])
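# --- Usage sketch (not from the original repo; assumes TF >= 2.3 and pandas) ---
# Builds a 24-step input / 1-step label window over a toy single-column frame.
# The 'T (degC)' column and the 70/20/10 split below are placeholders.
if __name__ == '__main__':
    import pandas as pd
    toy = pd.DataFrame({'T (degC)': np.random.randn(1000).cumsum()})
    n = len(toy)
    w = WindowGenerator(input_width=24, label_width=1, shift=1,
                        train_df=toy[:int(0.7 * n)],
                        val_df=toy[int(0.7 * n):int(0.9 * n)],
                        test_df=toy[int(0.9 * n):],
                        label_columns=['T (degC)'])
    print(w)
    example_inputs, example_labels = w.example
    print('inputs:', example_inputs.shape, 'labels:', example_labels.shape)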
| 31.387097 | 83 | 0.666667 | [
"MPL-2.0"
] | EFR-AI/AIBSIF | src/data_cleaning/window_generator.py | 2,919 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-10 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('studies', '0015_auto_20170707_1820'),
]
operations = [
migrations.AlterModelOptions(
name='study',
options={'ordering': ['name'], 'permissions': (('can_view_study', 'Can View Study'), ('can_create_study', 'Can Create Study'), ('can_edit_study', 'Can Edit Study'), ('can_remove_study', 'Can Remove Study'), ('can_activate_study', 'Can Activate Study'), ('can_deactivate_study', 'Can Deactivate Study'), ('can_pause_study', 'Can Pause Study'), ('can_resume_study', 'Can Resume Study'), ('can_approve_study', 'Can Approve Study'), ('can_submit_study', 'Can Submit Study'), ('can_retract_study', 'Can Retract Study'), ('can_resubmit_study', 'Can Resubmit Study'), ('can_edit_study_permissions', 'Can Edit Study Permissions'), ('can_view_study_permissions', 'Can View Study Permissions'), ('can_view_study_responses', 'Can View Study Responses'), ('can_view_study_video_responses', 'Can View Study Video Responses'), ('can_view_study_demographics', 'Can View Study Demographics'))},
),
]
| 62.8 | 890 | 0.700637 | [
"MIT"
] | enrobyn/lookit-api | studies/migrations/0016_auto_20170710_1438.py | 1,256 | Python |
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
生成各种训练需要的数据集
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
        self.label_dict = {'1': '旅游与运动',  # travel & sports
                           '2': '爱情与家庭',  # love & family
                           '3': '学习与工作',  # study & work
                           '4': '广告',  # advertisements
                           '5': '生活日常',  # daily life
                           '6': '其他',  # other
                           '7': '人生感悟'}  # reflections on life
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
"""
利用谷歌nima模型对图片进行评分
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
计算每条说说中图片的平均分
对于没有图片的按均值进行填充
:return:
"""
        # File containing the NIMA model's prediction results
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def calculate_send_time(self):
"""
计算每条说说的发送时间
分为以下五种类型:
0.午夜:0点-4点
1.凌晨:4点-8点
2.上午:8点-12点
3.下午:12点-16点
4.傍晚:16点-20点
5.晚上:20点-24点
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
        # four-hour bucket width, in seconds
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
        # Using apply here would exceed the sentiment API's QPS quota
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
导出待分类待的数据
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
        data_df = data_df.loc[:, columns]  # .ix is removed in modern pandas; .loc selects the same columns
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
                i += 1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
    def change_vector_to_int(self, vector):
        # Collapse the image-object indicator vector string into a single integer code.
        digits = re.findall(re.compile('[0-9]'), vector)
        str_vector = "".join(digits)
        total = 0
        length = len(str_vector)
        for i in range(length):
            total += int(str_vector[i]) ** (length - 1)
        return total
def remove_waste_emoji(text):
text = re.subn(re.compile('\[em\].*?\[\/em\]'), '', text)[0]
text = re.subn(re.compile('@\{.*?\}'), '', text)[0]
return text
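# Illustrative behaviour of remove_waste_emoji (added comment; both sample
# strings are made up): the first pattern strips QZone emoji markup such as
# "[em]e400846[/em]", the second strips @-mention payloads such as
# "@{uin:123,nick:Alice}".
#   remove_waste_emoji("nice day[em]e400846[/em] @{uin:123,nick:Alice}")
#   # -> "nice day "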
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
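    # Worked example of the time_state bucketing in calculate_send_time (added
    # note): seconds since midnight are divided by a four-hour step, giving six
    # buckets 0-5. A post at 14:30 is 14.5 * 3600 = 52200 seconds into the day,
    # and 52200 // (60 * 60 * 4) = 3, i.e. the afternoon bucket.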
| 43.269231 | 141 | 0.627232 | [
"MIT"
] | 343695222/QQZoneMood | src/analysis/TrainMood.py | 12,717 | Python |
from __future__ import absolute_import, unicode_literals
from django import forms
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.classes import ModelProperty
from mayan.apps.common.forms import FilteredSelectionForm
from mayan.apps.documents.models import Document
from .models import Index, IndexTemplateNode
from .permissions import permission_document_indexing_rebuild
class IndexTemplateFilteredForm(FilteredSelectionForm):
class Meta:
allow_multiple = True
field_name = 'index_templates'
help_text = _('Index templates to be queued for rebuilding.')
label = _('Index templates')
queryset = Index.objects.filter(enabled=True)
permission = permission_document_indexing_rebuild
widget_attributes = {'class': 'select2'}
class IndexTemplateNodeForm(forms.ModelForm):
"""
A standard model form to allow users to create a new index template node
"""
def __init__(self, *args, **kwargs):
super(IndexTemplateNodeForm, self).__init__(*args, **kwargs)
self.fields['index'].widget = forms.widgets.HiddenInput()
self.fields['parent'].widget = forms.widgets.HiddenInput()
self.fields['expression'].help_text = ' '.join(
[
force_text(self.fields['expression'].help_text),
'<br>',
ModelProperty.get_help_text_for(
model=Document, show_name=True
).replace('\n', '<br>')
]
)
class Meta:
fields = ('parent', 'index', 'expression', 'enabled', 'link_documents')
model = IndexTemplateNode
| 36.191489 | 79 | 0.678424 | [
"Apache-2.0"
] | BajacDev/Mayan-EDMS | mayan/apps/document_indexing/forms.py | 1,701 | Python |
import io
import json
import os
from django.conf import settings
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class DocumentationMenu(CMSAttachMenu):
name = _("Documentation Menu") # give the menu a name this is required.
def get_nodes(self, request):
"""
This method is used to build the menu tree.
"""
nodes = []
docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json')
if not os.path.exists(docsmap_file):
return nodes
        with io.open(docsmap_file, encoding='utf-8') as fh:
            docs_map = json.load(fh)
for counter, items in enumerate(docs_map.items(), 1):
bits = items[0].split('/')
if len(bits) == 1 and bits[0] == 'index' or len(bits) == 2 and bits[1] != 'index':
continue
node = NavigationNode(
title=items[1],
url=reverse_lazy('sphinx-documentation', args=(bits[0],)),
id=counter,
)
nodes.append(node)
return nodes
menu_pool.register_menu(DocumentationMenu)
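# Illustrative shape of docsmap.json as consumed by get_nodes() above (an
# assumption inferred from the parsing code, not taken from project docs):
# keys are Sphinx document paths, values are page titles. The top-level
# "index" document and second-level pages other than "<section>/index" are
# skipped; every other entry becomes a menu node.
#
#   {
#       "index": "Documentation home",        # skipped
#       "tutorial/index": "Tutorial",         # becomes a node
#       "tutorial/install": "Installation"    # skipped (not an index page)
#   }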
| 32.589744 | 94 | 0.623131 | [
"MIT"
] | beeduino/djangocms-cascade | cmsplugin_cascade/sphinx/cms_menus.py | 1,271 | Python |
"""Constants for Airly integration."""
from __future__ import annotations
from typing import Final
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEVICE_CLASS_AQI,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PM1,
DEVICE_CLASS_PM10,
DEVICE_CLASS_PM25,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from .model import AirlySensorEntityDescription
ATTR_API_ADVICE: Final = "ADVICE"
ATTR_API_CAQI: Final = "CAQI"
ATTR_API_CAQI_DESCRIPTION: Final = "DESCRIPTION"
ATTR_API_CAQI_LEVEL: Final = "LEVEL"
ATTR_API_HUMIDITY: Final = "HUMIDITY"
ATTR_API_PM10: Final = "PM10"
ATTR_API_PM1: Final = "PM1"
ATTR_API_PM25: Final = "PM25"
ATTR_API_PRESSURE: Final = "PRESSURE"
ATTR_API_TEMPERATURE: Final = "TEMPERATURE"
ATTR_ADVICE: Final = "advice"
ATTR_DESCRIPTION: Final = "description"
ATTR_LEVEL: Final = "level"
ATTR_LIMIT: Final = "limit"
ATTR_PERCENT: Final = "percent"
SUFFIX_PERCENT: Final = "PERCENT"
SUFFIX_LIMIT: Final = "LIMIT"
ATTRIBUTION: Final = "Data provided by Airly"
CONF_USE_NEAREST: Final = "use_nearest"
DEFAULT_NAME: Final = "Airly"
DOMAIN: Final = "airly"
LABEL_ADVICE: Final = "advice"
MANUFACTURER: Final = "Airly sp. z o.o."
MAX_UPDATE_INTERVAL: Final = 90
MIN_UPDATE_INTERVAL: Final = 5
NO_AIRLY_SENSORS: Final = "There are no Airly sensors in this area yet."
SENSOR_TYPES: tuple[AirlySensorEntityDescription, ...] = (
AirlySensorEntityDescription(
key=ATTR_API_CAQI,
device_class=DEVICE_CLASS_AQI,
name=ATTR_API_CAQI,
native_unit_of_measurement="CAQI",
),
AirlySensorEntityDescription(
key=ATTR_API_PM1,
device_class=DEVICE_CLASS_PM1,
name=ATTR_API_PM1,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM25,
device_class=DEVICE_CLASS_PM25,
name="PM2.5",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_PM10,
device_class=DEVICE_CLASS_PM10,
name=ATTR_API_PM10,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_HUMIDITY,
device_class=DEVICE_CLASS_HUMIDITY,
name=ATTR_API_HUMIDITY.capitalize(),
native_unit_of_measurement=PERCENTAGE,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
AirlySensorEntityDescription(
key=ATTR_API_PRESSURE,
device_class=DEVICE_CLASS_PRESSURE,
name=ATTR_API_PRESSURE.capitalize(),
native_unit_of_measurement=PRESSURE_HPA,
state_class=STATE_CLASS_MEASUREMENT,
),
AirlySensorEntityDescription(
key=ATTR_API_TEMPERATURE,
device_class=DEVICE_CLASS_TEMPERATURE,
name=ATTR_API_TEMPERATURE.capitalize(),
native_unit_of_measurement=TEMP_CELSIUS,
state_class=STATE_CLASS_MEASUREMENT,
value=lambda value: round(value, 1),
),
)
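# Illustrative lookup (added sketch, not part of the integration): fetch the
# entity description for the CAQI sensor from the tuple above.
#   caqi = next(d for d in SENSOR_TYPES if d.key == ATTR_API_CAQI)
#   # caqi.native_unit_of_measurement == "CAQI"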
| 31.666667 | 76 | 0.73985 | [
"Apache-2.0"
] | Airzzz0801/core | homeassistant/components/airly/const.py | 3,325 | Python |
#!/usr/bin/env python3
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
# A script to pack EC binary into SPI flash image for MEC17xx
# Based on MEC170x_ROM_Description.pdf DS00002225C (07-28-17).
import argparse
import hashlib
import os
import struct
import subprocess
import tempfile
import zlib # CRC32
# MEC1701 has 256KB SRAM from 0xE0000 - 0x120000
# SRAM is divided into contiguous CODE & DATA
# CODE at [0xE0000, 0x117FFF] DATA at [0x118000, 0x11FFFF]
# SPI flash size for board is 512KB
# Boot-ROM TAG is located at SPI offset 0 (two 4-byte tags)
#
LFW_SIZE = 0x1000
LOAD_ADDR = 0x0E0000
LOAD_ADDR_RW = 0xE1000
HEADER_SIZE = 0x40
SPI_CLOCK_LIST = [48, 24, 16, 12]
SPI_READ_CMD_LIST = [0x3, 0xb, 0x3b, 0x6b]
CRC_TABLE = [0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d]
def mock_print(*args, **kwargs):
pass
debug_print = mock_print
def Crc8(crc, data):
"""Update CRC8 value."""
for v in data:
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v >> 4)])
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v & 0xf)])
return crc ^ 0x55
def GetEntryPoint(payload_file):
"""Read entry point from payload EC image."""
with open(payload_file, 'rb') as f:
f.seek(4)
s = f.read(4)
return struct.unpack('<I', s)[0]
def GetPayloadFromOffset(payload_file, offset):
"""Read payload and pad it to 64-byte aligned."""
with open(payload_file, 'rb') as f:
f.seek(offset)
payload = bytearray(f.read())
rem_len = len(payload) % 64
if rem_len:
payload += b'\0' * (64 - rem_len)
return payload
def GetPayload(payload_file):
"""Read payload and pad it to 64-byte aligned."""
return GetPayloadFromOffset(payload_file, 0)
def GetPublicKey(pem_file):
"""Extract public exponent and modulus from PEM file."""
result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text',
'-noout'], stdout=subprocess.PIPE, encoding='utf-8')
modulus_raw = []
in_modulus = False
for line in result.stdout.splitlines():
if line.startswith('modulus'):
in_modulus = True
elif not line.startswith(' '):
in_modulus = False
elif in_modulus:
modulus_raw.extend(line.strip().strip(':').split(':'))
if line.startswith('publicExponent'):
exp = int(line.split(' ')[1], 10)
modulus_raw.reverse()
modulus = bytearray((int(x, 16) for x in modulus_raw[:256]))
return struct.pack('<Q', exp), modulus
def GetSpiClockParameter(args):
assert args.spi_clock in SPI_CLOCK_LIST, \
"Unsupported SPI clock speed %d MHz" % args.spi_clock
return SPI_CLOCK_LIST.index(args.spi_clock)
def GetSpiReadCmdParameter(args):
assert args.spi_read_cmd in SPI_READ_CMD_LIST, \
"Unsupported SPI read command 0x%x" % args.spi_read_cmd
return SPI_READ_CMD_LIST.index(args.spi_read_cmd)
def PadZeroTo(data, size):
data.extend(b'\0' * (size - len(data)))
def BuildHeader(args, payload_len, load_addr, rorofile):
# Identifier and header version
header = bytearray(b'PHCM\0')
# byte[5]
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
# byte[6]
b = 0
header.append(b)
# byte[7]
header.append(GetSpiReadCmdParameter(args))
# bytes 0x08 - 0x0b
header.extend(struct.pack('<I', load_addr))
# bytes 0x0c - 0x0f
header.extend(struct.pack('<I', GetEntryPoint(rorofile)))
# bytes 0x10 - 0x13
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
PadZeroTo(header, 0x14)
# bytes 0x14 - 0x17
header.extend(struct.pack('<I', args.payload_offset))
  # bytes 0x18 - 0x3F all 0
PadZeroTo(header, 0x40)
# header signature is appended by the caller
return header
def BuildHeader2(args, payload_len, load_addr, payload_entry):
# Identifier and header version
header = bytearray(b'PHCM\0')
# byte[5]
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
# byte[6]
b = 0
header.append(b)
# byte[7]
header.append(GetSpiReadCmdParameter(args))
# bytes 0x08 - 0x0b
header.extend(struct.pack('<I', load_addr))
# bytes 0x0c - 0x0f
header.extend(struct.pack('<I', payload_entry))
# bytes 0x10 - 0x13
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
PadZeroTo(header, 0x14)
# bytes 0x14 - 0x17
header.extend(struct.pack('<I', args.payload_offset))
  # bytes 0x18 - 0x3F all 0
PadZeroTo(header, 0x40)
# header signature is appended by the caller
return header
#
# Compute SHA-256 of data and return digest
# as a bytearray
#
def HashByteArray(data):
hasher = hashlib.sha256()
hasher.update(data)
h = hasher.digest()
bah = bytearray(h)
return bah
#
# Return 64-byte signature of byte array data.
# Signature is SHA256 of data with 32 0 bytes appended
#
def SignByteArray(data):
debug_print("Signature is SHA-256 of data")
sigb = HashByteArray(data)
sigb.extend(b'\0' * 32)
return sigb
# MEC1701H supports two 32-bit Tags located at offsets 0x0 and 0x4
# in the SPI flash.
# Tag format:
# bits[23:0] correspond to bits[31:8] of the Header SPI address
# Header is always on a 256-byte boundary.
# bits[31:24] = CRC8-ITU of bits[23:0].
# Notice there is no chip-select field in the Tag both Tag's point
# to the same flash part.
#
def BuildTag(args):
tag = bytearray([(args.header_loc >> 8) & 0xff,
(args.header_loc >> 16) & 0xff,
(args.header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
def BuildTagFromHdrAddr(header_loc):
tag = bytearray([(header_loc >> 8) & 0xff,
(header_loc >> 16) & 0xff,
(header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
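#
# Worked example (added for illustration): with the default header location of
# 0x1000, bits[31:8] of the header address are 0x000010, so the tag body is
# [0x10, 0x00, 0x00] and its CRC8-ITU (per Crc8 above) is 0xF7:
#   BuildTagFromHdrAddr(0x1000) == bytearray([0x10, 0x00, 0x00, 0xF7])
#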
#
# Creates temporary file for read/write
# Reads binary file containing LFW image_size (loader_file)
# Writes LFW image to temporary file
# Reads RO image at beginning of rorw_file up to image_size
# (assumes RO/RW images have been padded with 0xFF
# Returns temporary file name
#
def PacklfwRoImage(rorw_file, loader_file, image_size):
"""Create a temp file with the
first image_size bytes from the loader file and append bytes
from the rorw file.
return the filename"""
fo=tempfile.NamedTemporaryFile(delete=False) # Need to keep file around
with open(loader_file,'rb') as fin1: # read 4KB loader file
pro = fin1.read()
fo.write(pro) # write 4KB loader data to temp file
with open(rorw_file, 'rb') as fin:
ro = fin.read(image_size)
fo.write(ro)
fo.close()
return fo.name
#
# Generate a test EC_RW image of same size
# as original.
# Preserve image_data structure and fill all
# other bytes with 0xA5.
# useful for testing SPI read and EC build
# process hash generation.
#
def gen_test_ecrw(pldrw):
debug_print("gen_test_ecrw: pldrw type =", type(pldrw))
debug_print("len pldrw =", len(pldrw), " = ", hex(len(pldrw)))
cookie1_pos = pldrw.find(b'\x99\x88\x77\xce')
cookie2_pos = pldrw.find(b'\xdd\xbb\xaa\xce', cookie1_pos+4)
t = struct.unpack("<L", pldrw[cookie1_pos+0x24:cookie1_pos+0x28])
size = t[0]
debug_print("EC_RW size =", size, " = ", hex(size))
debug_print("Found cookie1 at ", hex(cookie1_pos))
debug_print("Found cookie2 at ", hex(cookie2_pos))
if cookie1_pos > 0 and cookie2_pos > cookie1_pos:
for i in range(0, cookie1_pos):
pldrw[i] = 0xA5
for i in range(cookie2_pos+4, len(pldrw)):
pldrw[i] = 0xA5
with open("ec_RW_test.bin", "wb") as fecrw:
fecrw.write(pldrw[:size])
def parseargs():
rpath = os.path.dirname(os.path.relpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input",
help="EC binary to pack, usually ec.bin or ec.RO.flat.",
metavar="EC_BIN", default="ec.bin")
parser.add_argument("-o", "--output",
help="Output flash binary file",
metavar="EC_SPI_FLASH", default="ec.packed.bin")
parser.add_argument("--loader_file",
help="EC loader binary",
default="ecloader.bin")
parser.add_argument("-s", "--spi_size", type=int,
help="Size of the SPI flash in KB",
default=512)
parser.add_argument("-l", "--header_loc", type=int,
help="Location of header in SPI flash",
default=0x1000)
parser.add_argument("-p", "--payload_offset", type=int,
help="The offset of payload from the start of header",
default=0x80)
parser.add_argument("-r", "--rw_loc", type=int,
help="Start offset of EC_RW. Default is -1 meaning 1/2 flash size",
default=-1)
parser.add_argument("--spi_clock", type=int,
help="SPI clock speed. 8, 12, 24, or 48 MHz.",
default=24)
parser.add_argument("--spi_read_cmd", type=int,
help="SPI read command. 0x3, 0xB, or 0x3B.",
default=0xb)
parser.add_argument("--image_size", type=int,
help="Size of a single image. Default 220KB",
default=(220 * 1024))
parser.add_argument("--test_spi", action='store_true',
help="Test SPI data integrity by adding CRC32 in last 4-bytes of RO/RW binaries",
default=False)
parser.add_argument("--test_ecrw", action='store_true',
help="Use fixed pattern for EC_RW but preserve image_data",
default=False)
parser.add_argument("--verbose", action='store_true',
help="Enable verbose output",
default=False)
return parser.parse_args()
# Debug helper routine
def dumpsects(spi_list):
debug_print("spi_list has {0} entries".format(len(spi_list)))
for s in spi_list:
debug_print("0x{0:x} 0x{1:x} {2:s}".format(s[0],len(s[1]),s[2]))
def printByteArrayAsHex(ba, title):
debug_print(title,"= ")
count = 0
for b in ba:
count = count + 1
debug_print("0x{0:02x}, ".format(b),end="")
if (count % 8) == 0:
debug_print("")
debug_print("\n")
def print_args(args):
debug_print("parsed arguments:")
debug_print(".input = ", args.input)
debug_print(".output = ", args.output)
debug_print(".loader_file = ", args.loader_file)
debug_print(".spi_size (KB) = ", hex(args.spi_size))
debug_print(".image_size = ", hex(args.image_size))
debug_print(".header_loc = ", hex(args.header_loc))
debug_print(".payload_offset = ", hex(args.payload_offset))
if args.rw_loc < 0:
debug_print(".rw_loc = ", args.rw_loc)
else:
debug_print(".rw_loc = ", hex(args.rw_loc))
debug_print(".spi_clock = ", args.spi_clock)
debug_print(".spi_read_cmd = ", args.spi_read_cmd)
debug_print(".test_spi = ", args.test_spi)
debug_print(".verbose = ", args.verbose)
#
# Handle quiet mode build from Makefile
# Quiet mode when V is unset or V=0
# Verbose mode when V=1
#
def main():
global debug_print
args = parseargs()
if args.verbose:
debug_print = print
debug_print("Begin MEC17xx pack_ec.py script")
# MEC17xx maximum 192KB each for RO & RW
# mec1701 chip Makefile sets args.spi_size = 512
# Tags at offset 0
#
print_args(args)
spi_size = args.spi_size * 1024
debug_print("SPI Flash image size in bytes =", hex(spi_size))
# !!! IMPORTANT !!!
# These values MUST match chip/mec1701/config_flash_layout.h
# defines.
# MEC17xx Boot-ROM TAGs are at offset 0 and 4.
# lfw + EC_RO starts at beginning of second 4KB sector
# EC_RW starts at offset 0x40000 (256KB)
spi_list = []
debug_print("args.input = ",args.input)
debug_print("args.loader_file = ",args.loader_file)
debug_print("args.image_size = ",hex(args.image_size))
rorofile=PacklfwRoImage(args.input, args.loader_file, args.image_size)
payload = GetPayload(rorofile)
payload_len = len(payload)
# debug
debug_print("EC_LFW + EC_RO length = ",hex(payload_len))
# SPI image integrity test
# compute CRC32 of EC_RO except for last 4 bytes
# skip over 4KB LFW
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload[LFW_SIZE:(payload_len - 4)]))
crc_ofs = payload_len - 4
debug_print("EC_RO CRC32 = 0x{0:08x} @ 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload[crc_ofs + i] = crc & 0xff
crc = crc >> 8
# Chromebooks are not using MEC BootROM ECDSA.
# We implemented the ECDSA disabled case where
# the 64-byte signature contains a SHA-256 of the binary plus
# 32 zeros bytes.
payload_signature = SignByteArray(payload)
# debug
printByteArrayAsHex(payload_signature, "LFW + EC_RO payload_signature")
# MEC17xx Header is 0x80 bytes with an 64 byte signature
# (32 byte SHA256 + 32 zero bytes)
header = BuildHeader(args, payload_len, LOAD_ADDR, rorofile)
# debug
printByteArrayAsHex(header, "Header LFW + EC_RO")
# MEC17xx payload ECDSA not used, 64 byte signature is
# SHA256 + 32 zero bytes
header_signature = SignByteArray(header)
# debug
printByteArrayAsHex(header_signature, "header_signature")
tag = BuildTag(args)
# MEC17xx truncate RW length to args.image_size to not overwrite LFW
# offset may be different due to Header size and other changes
# MCHP we want to append a SHA-256 to the end of the actual payload
# to test SPI read routines.
debug_print("Call to GetPayloadFromOffset")
debug_print("args.input = ", args.input)
debug_print("args.image_size = ", hex(args.image_size))
payload_rw = GetPayloadFromOffset(args.input, args.image_size)
debug_print("type(payload_rw) is ", type(payload_rw))
debug_print("len(payload_rw) is ", hex(len(payload_rw)))
# truncate to args.image_size
rw_len = args.image_size
payload_rw = payload_rw[:rw_len]
payload_rw_len = len(payload_rw)
debug_print("Truncated size of EC_RW = ", hex(payload_rw_len))
payload_entry_tuple = struct.unpack_from('<I', payload_rw, 4)
debug_print("payload_entry_tuple = ", payload_entry_tuple)
payload_entry = payload_entry_tuple[0]
debug_print("payload_entry = ", hex(payload_entry))
# Note: payload_rw is a bytearray therefore is mutable
if args.test_ecrw:
gen_test_ecrw(payload_rw)
# SPI image integrity test
# compute CRC32 of EC_RW except for last 4 bytes
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload_rw[:(payload_rw_len - 32)]))
crc_ofs = payload_rw_len - 4
debug_print("EC_RW CRC32 = 0x{0:08x} at offset 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload_rw[crc_ofs + i] = crc & 0xff
crc = crc >> 8
payload_rw_sig = SignByteArray(payload_rw)
# debug
printByteArrayAsHex(payload_rw_sig, "payload_rw_sig")
os.remove(rorofile) # clean up the temp file
# MEC170x Boot-ROM Tags are located at SPI offset 0
spi_list.append((0, tag, "tag"))
spi_list.append((args.header_loc, header, "header(lwf + ro)"))
spi_list.append((args.header_loc + HEADER_SIZE, header_signature,
"header(lwf + ro) signature"))
spi_list.append((args.header_loc + args.payload_offset, payload,
"payload(lfw + ro)"))
offset = args.header_loc + args.payload_offset + payload_len
# No SPI Header for EC_RW as its not loaded by BootROM
spi_list.append((offset, payload_signature,
"payload(lfw_ro) signature"))
# EC_RW location
rw_offset = int(spi_size // 2)
if args.rw_loc >= 0:
rw_offset = args.rw_loc
debug_print("rw_offset = 0x{0:08x}".format(rw_offset))
if rw_offset < offset + len(payload_signature):
print("ERROR: EC_RW overlaps EC_RO")
spi_list.append((rw_offset, payload_rw, "payload(rw)"))
# don't add to EC_RW. We don't know if Google will process
# EC SPI flash binary with other tools during build of
# coreboot and OS.
#offset = rw_offset + payload_rw_len
#spi_list.append((offset, payload_rw_sig, "payload(rw) signature"))
spi_list = sorted(spi_list)
dumpsects(spi_list)
#
# MEC17xx Boot-ROM locates TAG at SPI offset 0 instead of end of SPI.
#
with open(args.output, 'wb') as f:
debug_print("Write spi list to file", args.output)
addr = 0
for s in spi_list:
if addr < s[0]:
debug_print("Offset ",hex(addr)," Length", hex(s[0]-addr),
"fill with 0xff")
f.write(b'\xff' * (s[0] - addr))
addr = s[0]
debug_print("Offset ",hex(addr), " Length", hex(len(s[1])), "write data")
f.write(s[1])
addr += len(s[1])
if addr < spi_size:
debug_print("Offset ",hex(addr), " Length", hex(spi_size - addr),
"fill with 0xff")
f.write(b'\xff' * (spi_size - addr))
f.flush()
if __name__ == '__main__':
main()
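#
# Example invocation (illustrative; the file names are placeholders and the
# numeric values are the documented defaults):
#   python pack_ec.py -i ec.bin -o ec.packed.bin --loader_file ecloader.bin \
#       --spi_size 512 --image_size 225280 --verbose
#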
| 31.832402 | 103 | 0.663449 | [
"BSD-3-Clause"
] | DHowett/fw-ectool | chip/mchp/util/pack_ec.py | 17,094 | Python |
#!/usr/bin/env python
"""Configuration parameters for the client."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.rdfvalues import crypto
# General Client options.
config_lib.DEFINE_string("Client.name", "GRR",
"The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [], "Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string("Client.deploy_time", "Unknown",
"The time the client was deployed.")
config_lib.DEFINE_string("Client.build_environment", None,
"The output of Uname.FromCurrentSystem.signature() "
"on the system the client was built on.")
config_lib.DEFINE_integer("Client.rsa_key_length", 2048,
"The key length of the client keys in bits.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(Source.version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_string(
name="Client.component_path",
default=r"%(Client.install_path)/components",
help="Where the client components are installed on the client.")
config_lib.DEFINE_string(
name="Client.component_url_stem",
default="%(Frontend.static_url_path_prefix)components/",
help="A URL path where components will be served from.")
config_lib.DEFINE_semantic(
rdfvalue.RDFURN,
"Client.component_aff4_stem",
default="%(Frontend.static_aff4_prefix)/components/",
description="A common AFF4 stem where components will be served from.")
config_lib.DEFINE_string(
name="Client.rekall_profile_cache_path",
default=r"%(Client.install_path)\\rekall_profiles",
help="Where GRR stores cached Rekall profiles needed for memory analysis")
config_lib.DEFINE_list(name="Client.server_urls",
default=[],
help="Base URL for client control.")
# Deprecated. Remove when all installations switch to Client.server_urls.
config_lib.DEFINE_list("Client.control_urls", ["http://localhost:8080/control"],
"List of URLs of the controlling server.")
config_lib.DEFINE_integer("Client.http_timeout", 100,
"Timeout for HTTP requests.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/com.google.code.grrd.plist",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", None,
"Filename of launchctl plist.")
config_lib.DEFINE_string("Client.plist_label", None,
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", None,
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 5,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float("Client.error_poll_min", 15,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_float("Client.poll_slew", 1.15, "Slew of poll time.")
config_lib.DEFINE_integer("Client.connection_error_limit", 60 * 24,
"If the client encounters this many connection "
"errors, it exits and restarts. Retries are one "
"minute apart.")
config_lib.DEFINE_integer("Client.retry_error_limit", 10,
"If the client encounters this many connection "
"errors, it searches for a new proxy/server url "
"combination.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 40000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 51200000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer("Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float("Client.rss_max", 1000,
"Maximum memory footprint in MB (soft limit). "
"Exceeding this will result in an orderly shutdown.")
config_lib.DEFINE_float("Client.rss_max_hard", 2000,
"Maximum memory footprint in MB (hard limit). "
"Exceeding this will result in aborting the current "
"client action and restarting.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_list(
name="Client.tempdir_roots",
help="List of temporary directories to use on the client.",
default=["/var/tmp/"])
config_lib.DEFINE_string(
name="Client.grr_tempdir",
help="Default subdirectory in the temp directory to use for GRR.",
default="%(Client.name)")
config_lib.DEFINE_list(
name="Client.vfs_virtualroots",
help=("If this is set for a VFS type, client VFS operations will always be"
" relative to the given root. Format is os:/mount/disk."),
default=[])
# Windows client specific options.
config_lib.DEFINE_string("Client.config_hive",
r"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string("Client.config_key",
r"Software\\GRR",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options. Here we define defaults for key values.
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"Client.private_key",
description="Client private key in pem format. If not provided this "
"will be generated by the enrollment process.",)
config_lib.DEFINE_semantic(
crypto.RDFX509Cert,
"CA.certificate",
description="Trusted CA certificate in X509 pem format",)
config_lib.DEFINE_semantic(
crypto.RSAPublicKey,
"Client.executable_signing_public_key",
description="public key for verifying executable signing.")
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"PrivateKeys.executable_signing_private_key",
description="Private keys for signing executables. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_semantic(
crypto.RSAPublicKey,
"Client.driver_signing_public_key",
description="public key for verifying driver signing.")
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"PrivateKeys.driver_signing_private_key",
description="Private keys for signing drivers. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_integer("Client.server_serial_number", 0,
"Minimal serial number we accept for server cert.")
config_lib.DEFINE_integer(
"Client.gc_frequency", 10,
"Defines how often the client calls garbage collection (seconds).")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string("Nanny.child_binary",
"GRR.exe",
help="The location to the client binary.")
config_lib.DEFINE_string("Nanny.child_command_line",
"%(Nanny.child_binary)",
help="The command line to launch the client binary.")
config_lib.DEFINE_string("Nanny.logfile", "%(Logging.path)/nanny.log",
"The file where we write the nanny transaction log.")
config_lib.DEFINE_string("Nanny.service_name",
"GRR Service",
help="The name of the nanny.")
config_lib.DEFINE_string("Nanny.service_description",
"GRR Service",
help="The description of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key",
r"%(Client.config_key)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key_hive",
r"%(Client.config_hive)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.statusfile", "%(Logging.path)/nanny.status",
"The file where we write the nanny status.")
config_lib.DEFINE_string("Nanny.status", "",
"The regkey where we write the nanny status.")
config_lib.DEFINE_string("Nanny.binary",
r"%(Client.install_path)\\%(service_binary_name)",
help="The full location to the nanny binary.")
config_lib.DEFINE_string("Nanny.service_binary_name",
"%(Client.name)service.exe",
help="The executable name of the nanny binary.")
config_lib.DEFINE_integer("Nanny.unresponsive_kill_period", 60,
"The time in seconds after which the nanny kills us.")
config_lib.DEFINE_integer("Network.api", 3,
"The version of the network protocol the client "
"uses.")
config_lib.DEFINE_string("Network.compression",
default="ZCOMPRESS",
help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Installer options.
config_lib.DEFINE_string(
name="Installer.logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help=("A specific log file which is used for logging the "
"installation process."))
config_lib.DEFINE_list("Installer.old_key_map", [
"HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate->Client.private_key",
"HKEY_LOCAL_MACHINE\\Software\\GRR\\server_serial_number"
"->Client.server_serial_number",
], """
A mapping of old registry values which will be copied to new values. The old
value location must start with a valid hive name, followed by a key name, and
end with the value name. The source location must be separated from the new
parameter name by a -> symbol.
This setting allows settings from obsolete client installations to be carried
over to newer versions of the client, which may store the same information in
other locations.
For example:
HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate -> Client.private_key
""")
| 40.506757 | 80 | 0.643453 | [
"Apache-2.0"
] | theGreenJedi/grr | grr/config/client.py | 11,990 | Python |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="inference-tools",
version="0.5.2",
author="Chris Bowman",
author_email="[email protected]",
description="A collection of python tools for Bayesian data analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/C-bowman/inference-tools",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | 31.761905 | 74 | 0.677661 | [
"MIT"
] | Shimwell/inference-tools | setup.py | 667 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
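    # Usage sketch (added comment, not generated code): a V1Job body is usually
    # built from the client's generated model classes before calling
    # create_namespaced_job. The model names below follow the Kubernetes Python
    # client and are assumptions here, not taken from this file:
    #   container = V1Container(name="pi", image="perl",
    #                           command=["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"])
    #   spec = V1JobSpec(template=V1PodTemplateSpec(
    #       spec=V1PodSpec(containers=[container], restart_policy="Never")))
    #   job = V1Job(metadata=V1ObjectMeta(name="pi"), spec=spec)
    #   api.create_namespaced_job(namespace="default", body=job)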
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
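    # Usage sketch (illustrative, not part of the generated client): with a
    # configured `client.BatchV1Api()` instance, e.g. `batch_v1`, every Job in
    # a namespace matching a (hypothetical) label selector can be removed in a
    # single collection delete:
    #
    #   batch_v1.delete_collection_namespaced_job(
    #       namespace="default", label_selector="app=nightly-report")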
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
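    # Usage sketch (illustrative): assuming the standard `kubernetes` package
    # entry points, deleting one Job together with its dependent Pods might
    # look like this; the Job name is a placeholder:
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   batch_v1 = client.BatchV1Api()
    #   batch_v1.delete_namespaced_job(
    #       name="example-job", namespace="default",
    #       body=client.V1DeleteOptions(propagation_policy="Background"))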
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
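    # Usage sketch (illustrative): listing what the batch/v1 group serves,
    # e.g. to confirm that the Job resource is available on the cluster:
    #
    #   resources = batch_v1.get_api_resources()
    #   print([r.name for r in resources.resources])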
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
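    # Usage sketch (illustrative): chunked, cluster-wide listing using the
    # `limit`/`_continue` parameters described in the docstring above:
    #
    #   jobs = batch_v1.list_job_for_all_namespaces(limit=50)
    #   while True:
    #       for job in jobs.items:
    #           print(job.metadata.namespace, job.metadata.name)
    #       cont = jobs.metadata._continue
    #       if not cont:
    #           break
    #       jobs = batch_v1.list_job_for_all_namespaces(limit=50, _continue=cont)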
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
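    # Usage sketch (illustrative): the `watch` parameter is usually consumed
    # through the helper module shipped with this client rather than by
    # calling the method directly with watch=True:
    #
    #   from kubernetes import watch
    #   w = watch.Watch()
    #   for event in w.stream(batch_v1.list_namespaced_job,
    #                         namespace="default", timeout_seconds=60):
    #       print(event["type"], event["object"].metadata.name)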
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
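    # Usage sketch (illustrative): for patch calls `body` is a plain object,
    # so a strategic-merge patch can be passed as a dict; the label key/value
    # here are placeholders:
    #
    #   patch = {"metadata": {"labels": {"reviewed": "true"}}}
    #   batch_v1.patch_namespaced_job(
    #       name="example-job", namespace="default", body=patch)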
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
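    # Usage sketch (illustrative): the status subresource is normally written
    # by the Job controller, but a patch against it has the same shape as the
    # call above, targeting the `status` stanza instead of `metadata`/`spec`:
    #
    #   batch_v1.patch_namespaced_job_status(
    #       name="example-job", namespace="default",
    #       body={"status": {"active": 0}})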
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
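    # Usage sketch (illustrative): reading a Job and inspecting its spec:
    #
    #   job = batch_v1.read_namespaced_job(name="example-job",
    #                                      namespace="default")
    #   print(job.spec.completions, job.spec.parallelism)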
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
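    # Usage sketch (illustrative): polling the status subresource until the
    # Job reports a successful completion:
    #
    #   import time
    #   while True:
    #       status = batch_v1.read_namespaced_job_status(
    #           name="example-job", namespace="default").status
    #       if status.succeeded:
    #           break
    #       time.sleep(5)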
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
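    # Usage sketch (illustrative): replace expects a full V1Job whose
    # metadata.resource_version matches the stored object, so the usual flow
    # is read-modify-replace (here changing the mutable `parallelism` field):
    #
    #   job = batch_v1.read_namespaced_job(name="example-job",
    #                                      namespace="default")
    #   job.spec.parallelism = 2
    #   batch_v1.replace_namespaced_job(
    #       name="example-job", namespace="default", body=job)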
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
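# Usage sketch: assumes a reachable cluster, a kubeconfig on disk, and an
# existing Job named "demo-job" in namespace "default" (those names are
# illustrative only). read_namespaced_job/replace_namespaced_job are the
# generated client calls shown above.
if __name__ == "__main__":
    from kubernetes import client as k8s_client, config as k8s_config

    k8s_config.load_kube_config()
    batch_v1 = k8s_client.BatchV1Api()
    # Read the current Job, adjust a label, and PUT the full object back.
    job = batch_v1.read_namespaced_job(name="demo-job", namespace="default")
    job.metadata.labels = dict(job.metadata.labels or {}, reviewed="true")
    updated = batch_v1.replace_namespaced_job(name="demo-job",
                                              namespace="default", body=job)
    print(updated.metadata.resource_version)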
| 62.94069 | 1,390 | 0.645665 | [
"Apache-2.0"
] | MiaoRachelYu/python | kubernetes/client/apis/batch_v1_api.py | 91,264 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-12-06 16:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('roster', '0021_auto_20180825_1843'),
]
operations = [
migrations.AlterField(
model_name='student',
name='track',
field=models.CharField(choices=[('A', 'Weekly'), ('B', 'Biweekly'), ('C', 'Correspondence'), ('E', 'External'), ('N', 'Not applicable')], max_length=5),
),
]
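# Applied with the standard Django workflow (command shown for illustration):
#   python manage.py migrate roster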
| 26.857143 | 164 | 0.597518 | [
"MIT"
] | AmoleR/otis-web | roster/migrations/0022_auto_20181206_1148.py | 564 | Python |
import argparse
import logging
import json
import os
def submission(origin_file, topics, runtag, output_file):
with open(output_file, 'a') as fout, open(origin_file, 'r') as fin:
for line in fin:
data = line.strip().split(' ')
if data[0] in topics:
continue
data[-1] = runtag
fout.write(' '.join(data) + '\n')
def ensemble(folder, ratio, clf_list, runtag, output):
ensemble_dict = {}
for clf in clf_list:
with open('{}/{}/rerank_{}.txt'.format(folder, clf, ratio), 'r') as f:
for line in f:
data = line.split()
topic, docid, score = data[0], data[2], float(data[4])
if topic not in ensemble_dict:
ensemble_dict[topic] = {}
if docid not in ensemble_dict[topic]:
ensemble_dict[topic][docid] = 0
ensemble_dict[topic][docid] += score
with open(output, 'w') as f:
for topic in ensemble_dict:
for rank, (docid, score) in enumerate(sorted(ensemble_dict[topic].items(),
key=lambda x: -x[1])):
f.write('{} Q0 {} {} {} {}\n'.format(topic, docid, rank + 1, score, runtag))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, help='config file', required=True)
args = parser.parse_args()
config_file = args.config
# Load configuration
with open(config_file) as f:
config = json.load(f)
model_directory = os.path.join(config['working_directory'], 'models')
assert os.path.isdir(model_directory)
for run in config['runs']:
runtag = run['runtag']
weight = run['weight']
output = os.path.join(config['working_directory'], run['output'])
logging.info(f'Preparing run for {runtag}')
ensemble(model_directory, weight, run['classifiers'], runtag, output)
submission(config['target']['run'], config['topics'], runtag, output)
| 36.016667 | 92 | 0.574734 | [
"Apache-2.0"
] | AceZhan/anserini | src/main/python/ecir2019_ccrf/generate_runs.py | 2,161 | Python |
# author : chenxi
# encoding:utf-8
import time
import run
if __name__ == "__main__":
mutex = 1
user = 0
users = []
    while True:
        if mutex == 1:
            # take the flag, try to start a new Score server, then release it
            mutex = mutex - 1
            try:
                users.append(user)
                users[user] = run.Score()
                users[user].run_server()
                user = user + 1
                mutex = mutex + 1
                print mutex
            except Exception:
                # startup failed: wait a bit, release the flag and retry
                time.sleep(3)
                mutex = mutex + 1
                print mutex
                continue
        else:
            print mutex
"MPL-2.0"
] | atomchan/CCUScore | main.py | 455 | Python |
# -*- coding: utf-8 -*-
#
# Submittable API Client documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 15:21:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
dirname = os.path.dirname
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(dirname(dirname(dirname(os.path.abspath(__file__)))), 'submittable_api_client'))
print sys.path
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Submittable API Client'
copyright = u'2014, Shawn Rider'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SubmittableAPIClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SubmittableAPIClient.tex', u'Submittable API Client Documentation',
u'Shawn Rider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'submittableapiclient', u'Submittable API Client Documentation',
[u'Shawn Rider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SubmittableAPIClient', u'Submittable API Client Documentation',
u'Shawn Rider', 'SubmittableAPIClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.961977 | 112 | 0.721865 | [
"MIT"
] | shawnr/submittable-api-client | docs/source/conf.py | 8,406 | Python |
"""Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .karb //as a reply to any text message
Thanks to @r4v4n4 for vars,,, Random RGB feature by @PhycoNinja13b"""
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
import random
from userbot.utils import admin_cmd
#@borg.on(events.NewMessage(pattern=r"\.karb ", outgoing=True))
@borg.on(admin_cmd(pattern="karb"))
async def carbon_api(e):
RED = random.randint(0,256)
GREEN = random.randint(0,256)
BLUE = random.randint(0,256)
THEME= [ "3024-night",
"a11y-dark",
"blackboard",
"base16-dark",
"base16-light",
"cobalt",
"dracula",
"duotone-dark",
"hopscotch",
"lucario",
"material",
"monokai",
"night-owl",
"nord",
"oceanic-next",
"one-light",
"one-dark",
"panda-syntax",
"paraiso-dark",
"seti",
"shades-of-purple",
"solarized",
"solarized%20light",
"synthwave-84",
"twilight",
"verminal",
"vscode",
"yeti",
"zenburn",
]
CUNTHE = random.randint(0, len(THEME) - 1)
The = THEME[CUNTHE]
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
""" A Wrapper for carbon.now.sh """
await e.edit("⬜⬜⬜⬜⬜")
        CARBON = 'https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C{B}%2C1)&t={T}&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
if pcode[6:]:
pcode = str(pcode[6:])
elif textx:
            pcode = str(textx.message)  # fall back to the replied-to message text
code = quote_plus(pcode) # Converting to urlencoded
url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, T=The, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await e.edit("⬛⬛⬜⬜⬜")
driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5) # this might take a bit.
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await e.edit("⬛⬛⬛⬜⬜")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5) #Waiting for downloading
await e.edit("⬛⬛⬛⬛⬛")
file = './carbon.png'
await e.edit("✅RGB Karbon Completed, Uploading RGB Karbon✅")
await e.client.send_file(
e.chat_id,
file,
caption="Carbonised by [TeleBot](https://t.me/TeleBotHelp)",
force_document=False,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
# Removing carbon.png after uploading
await e.delete() # Deleting msg
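# Deployment note (a sketch): this plugin expects Config.GOOGLE_CHROME_BIN and
# Config.CHROME_DRIVER to point at a headless Chrome binary and a matching
# chromedriver, e.g.
#   GOOGLE_CHROME_BIN=/usr/bin/google-chrome
#   CHROME_DRIVER=/usr/bin/chromedriver
# (paths are illustrative; they depend on the host or buildpack in use.)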
| 20.615764 | 235 | 0.590203 | [
"MIT"
] | Fregiant16/fregiantuserbot | userbot/plugins/carbonRGB (2).py | 4,229 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Andreas Bader'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%HN%% -> Hostname of (actual) db vm
# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)
# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)
# %%HNall%% -> give String with Hostname of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. prerun_slaves/postrun_slaves/check_slaves, 4. prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
def getDict():
dbConfig={}
dbConfig["db_folders"]=["/tmp/druid/indexCache", "/tmp/persistent/zk_druid", "/tmp/persistent/task/", "/tmp/druid/localStorage", "/var/lib/mysql"]
dbConfig["db_client"]="druid"
dbConfig["db_args"]="-p zookeeperip=%%IP0%% -p queryip=%%IP1%% -p zookeeperport=2181 -p queryport=8090 -p replicants=1"
dbConfig["db_name"]="druid_cl5_rf1"
dbConfig["db_desc"]="Druid (Broker,Coordinator,Historical,MiddleManager,Overlord) on 5 VMs with Replication Factor 1. Ingest via Tranquility/Finagle, Query via REST."
dbConfig["jvm_args"]="-jvm-args='-Xmx4096m'"
dbConfig["prerun_once"]= []
dbConfig["postrun_once"]= []
dbConfig["prerun"]= ["%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/_common/common.runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP1%%|g\" /home/vagrant/config/broker/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/coordinator/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP2%%|g\" /home/vagrant/config/historical/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP3%%|g\" /home/vagrant/config/middleManager/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP4%%|g\" /home/vagrant/config/overlord/runtime.properties'"]
dbConfig["postrun"]= []
dbConfig["prerun_master"]= []
dbConfig["postrun_master"]= []
dbConfig["prerun_slaves"]= []
dbConfig["postrun_slaves"]= []
dbConfig["prerun_dict"]= {
0 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_coordinator.service'"],
1 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_broker.service'"],
2 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_historical.service'"],
3 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_middlemanager.service'"],
4 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_overlord.service'",
"bash -c 'sleep 180'"]
}
dbConfig["postrun_dict"]= {}
dbConfig["check"]= []
dbConfig["check_master"]= []
dbConfig["check_slaves"]= []
dbConfig["check_dict"]= {
0 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_coordinator.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_coordinator.service | grep -c \"active (running)\")-1))'"],
1 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_broker.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_broker.service | grep -c \"active (running)\")-1))'"],
2 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_historical.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_historical.service | grep -c \"active (running)\")-1))'"],
3 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_middlemanager.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_middlemanager.service | grep -c \"active (running)\")-1))'"],
4 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_overlord.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_overlord.service | grep -c \"active (running)\")-1))'"]
}
dbConfig["basic"]= False
dbConfig["sequence"]=[0,1,2,3,4]
dbConfig["include"] = []
return dbConfig | 83.816327 | 230 | 0.660701 | [
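# Sketch of how the %%...%% placeholders described above could be expanded for
# a concrete deployment. The IP list below is made up and the real TSDBBench
# harness performs its own substitution, so this block is illustrative only.
if __name__ == "__main__":
    ips = ["10.0.0.10", "10.0.0.11", "10.0.0.12", "10.0.0.13", "10.0.0.14"]
    cfg = getDict()
    expanded = []
    for cmd in cfg["prerun"]:
        # replace %%IPn%% with the n-th VM address, drop the %%SSH%% prefix
        for idx, ip in enumerate(ips):
            cmd = cmd.replace("%%IP{}%%".format(idx), ip)
        expanded.append(cmd.replace("%%SSH%%", ""))
    for cmd in expanded:
        print(cmd)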
"Apache-2.0"
] | TSDBBench/Overlord | vagrant_files/generator/files/databases/druid_cl5_rf1.py | 8,214 | Python |
"""Tensorflow trainer class."""
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
model: TFPreTrainedModel
args: TFTrainingArguments
# something similar to a PT Dataset.
# This is just temporary before to have
# a framework-agnostic approach for datasets.
train_dataset: Optional[tf.data.Dataset]
eval_dataset: Optional[tf.data.Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.gradient_accumulator = GradientAccumulator()
self._setup_training()
def _setup_training(self) -> None:
"""
Setup the different steps to train a model:
- check if all the data are given
- create the proper strategy
- create the features
- prepare the model settings
"""
self._prepare_dataset()
with self.args.strategy.scope():
self._create_optimizer()
_ = self.optimizer.iterations
self._set_loss_and_metric()
self._create_checkpoint_manager()
self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
"""
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
def _create_summary_writer(self) -> None:
"""
Create a summary writer to be able to read the logs in Tensorboard.
"""
self.writer = tf.summary.create_file_writer(self.args.logging_dir)
def _prepare_dataset(self) -> None:
"""
Prepare the training, validation and test data.
"""
if self.train_dataset is not None:
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
if self.args.max_steps > 0:
self.train_steps = self.args.max_steps
else:
self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
self.train_dataset = (
self.train_dataset.cache()
.shuffle(self.num_train_examples)
.batch(self.args.train_batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
if self.args.max_steps > 0:
self.train_dataset = self.train_dataset.repeat(-1)
self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
else:
self.train_steps = 0
if self.eval_dataset is not None:
self.eval_dataset = (
self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
)
self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
"""
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
# This is for the case where the optimizer is not Adam-like such as SGD
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.args.optimizer_name))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
"""
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
"""
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
"""
One step evaluation across replica.
Args:
per_replica_features: the batched features.
per_replica_labels: the batched labels.
Returns:
The loss corresponding to the given batch.
"""
per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
self._run_model, args=(per_replica_features, per_replica_labels, False)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss, per_replica_logits
def _prediction_loop(
self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
logger.info("***** Running %s *****", description)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
step: int = 1
for features, labels in dataset:
step = tf.convert_to_tensor(step, dtype=tf.int64)
loss, logits = self._evaluate_steps(features, labels)
loss = tf.reduce_mean(loss)
if not prediction_loss_only:
if self.args.n_gpu > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
step += 1
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = loss.numpy()
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
"""
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
def train(self) -> None:
"""
Train method to train the model.
"""
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
iterations = self.optimizer.iterations
if iterations.numpy() > 0:
logger.info("Start the training from the last checkpoint")
start_epoch = (iterations.numpy() // self.train_steps) + 1
else:
start_epoch = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Total optimization steps = %d", self.train_steps)
for epoch in range(start_epoch, int(epochs + 1)):
for training_loss in self._training_steps():
step = iterations.numpy()
if self.args.debug:
with self.writer.as_default():
tf.summary.scalar("loss", training_loss, step=step)
if step == 1 and self.args.debug:
with self.writer.as_default():
tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
logs = {}
results = self.evaluate()
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
if callable(self.optimizer.learning_rate):
logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
else:
logs["learning_rate"] = self.optimizer.learning_rate.numpy()
logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
with self.writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=step)
if step % self.args.logging_steps == 0:
logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
if step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
if step % self.train_steps == 0:
break
def _training_steps(self):
"""
Returns a generator over training steps (i.e. parameters update).
"""
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
@tf.function
def _apply_gradients(self):
"""Applies the gradients (cross-replica)."""
self.args.strategy.experimental_run_v2(self._step)
def _step(self):
"""Applies gradients and resets accumulation."""
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
def _accumulate_next_gradients(self):
"""Accumulates the gradients from the next element in dataset."""
iterator = iter(self.train_dataset)
@tf.function
def _accumulate_next():
per_replica_features, per_replica_labels = next(iterator)
return self._accumulate_gradients(per_replica_features, per_replica_labels)
while True:
try:
yield _accumulate_next()
except tf.errors.OutOfRangeError:
break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
"""Accumulates the gradients across all the replica."""
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
"""Forwards a training example and accumulates the gradients."""
per_example_loss, _ = self._run_model(features, labels, True)
gradients = tf.gradients(per_example_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
self.gradient_accumulator(gradients)
return per_example_loss
def _run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Args:
features: the batched features.
labels: the batched labels.
training: run the model in training mode or not
"""
if self.args.mode == "text-classification" or self.args.mode == "token-classification":
logits = self.model(features, training=training)[0]
else:
logits = self.model(features, training=training)
if self.args.mode == "token-classification":
active_loss = tf.reshape(labels, (-1,)) != -1
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
loss = self.loss(labels, reduced_logits)
elif self.args.mode == "question-answering":
start_loss = self.loss(labels["start_position"], logits[0])
end_loss = self.loss(labels["end_position"], logits[1])
loss = (start_loss + end_loss) / 2.0
else:
loss = self.loss(labels, logits)
loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
temporary before to have a framework-agnostic approach for datasets.
"""
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
"""
Save the pretrained model and create a Tensorflow saved model.
"""
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
| 39.844394 | 119 | 0.608086 | [
"Apache-2.0"
] | 52Pig/transformers | src/transformers/trainer_tf.py | 17,412 | Python |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Unselected
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
Returns
-------
Unselected
"""
super(Unselected, self).__init__('unselected')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Unselected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Unselected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (unselected as v_unselected)
# Initialize validators
# ---------------------
self._validators['marker'] = v_unselected.MarkerValidator()
self._validators['textfont'] = v_unselected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
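# Usage sketch (illustrative values): the selected/unselected blocks defined in
# this module are passed on a Scattergeo trace, e.g.
#
#   import plotly.graph_objs as go
#
#   trace = go.Scattergeo(
#       lon=[-73.6, 2.35], lat=[45.5, 48.86], mode="markers+text",
#       text=["Montreal", "Paris"],
#       selected=dict(marker=dict(color="red", size=12)),
#       unselected=dict(marker=dict(opacity=0.3), textfont=dict(color="grey")),
#   )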
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__('textfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (textfont as v_textfont)
# Initialize validators
# ---------------------
self._validators['color'] = v_textfont.ColorValidator()
self._validators['colorsrc'] = v_textfont.ColorsrcValidator()
self._validators['family'] = v_textfont.FamilyValidator()
self._validators['familysrc'] = v_textfont.FamilysrcValidator()
self._validators['size'] = v_textfont.SizeValidator()
self._validators['sizesrc'] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self['maxpoints']
@maxpoints.setter
def maxpoints(self, val):
self['maxpoints'] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['token']
@token.setter
def token(self, val):
self['token'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__('stream')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Stream
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (stream as v_stream)
# Initialize validators
# ---------------------
self._validators['maxpoints'] = v_stream.MaxpointsValidator()
self._validators['token'] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('maxpoints', None)
self['maxpoints'] = maxpoints if maxpoints is not None else _v
_v = arg.pop('token', None)
self['token'] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Selected
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__('selected')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Selected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Selected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (selected as v_selected)
# Initialize validators
# ---------------------
self._validators['marker'] = v_selected.MarkerValidator()
self._validators['textfont'] = v_selected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
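# Illustrative usage sketch (editor-added comment, not part of the generated
# module): Selected controls how points look while selected, e.g. via box or
# lasso select. The colors and sizes below are arbitrary example values.
#
#     import plotly.graph_objs as go
#
#     trace = go.Scattergeo(
#         lon=[-73.57, 2.35], lat=[45.50, 48.86], mode='markers+text',
#         selected=go.scattergeo.Selected(
#             marker=dict(color='crimson', size=12, opacity=1.0),
#             textfont=dict(color='crimson'),
#         ),
#     )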
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['cauto']
@cauto.setter
def cauto(self, val):
self['cauto'] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmax']
@cmax.setter
def cmax(self, val):
self['cmax'] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmid']
@cmid.setter
def cmid(self, val):
self['cmid'] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmin']
@cmin.setter
def cmin(self, val):
self['cmin'] = val
# color
# -----
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A number that will be interpreted as a color
according to scattergeo.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self['coloraxis']
@coloraxis.setter
def coloraxis(self, val):
self['coloraxis'] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
plotly.graph_objs.scattergeo.marker.colorbar.Ti
ckformatstop instance or dict with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattergeo.marker.colorbar.tickformatstopdefa
ults), sets the default property values to use
for elements of
scattergeo.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objs.scattergeo.marker.colorbar.Ti
tle instance or dict with compatible properties
titlefont
Deprecated: Please use
scattergeo.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattergeo.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattergeo.marker.ColorBar
"""
return self['colorbar']
@colorbar.setter
def colorbar(self, val):
self['colorbar'] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
Returns
-------
str
"""
return self['colorscale']
@colorscale.setter
def colorscale(self, val):
self['colorscale'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# gradient
# --------
@property
def gradient(self):
"""
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Gradient
- A dict of string/value properties that will be passed
to the Gradient constructor
Supported dict properties:
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical.
colorsrc
Sets the source reference on plot.ly for color
.
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on plot.ly for type
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Gradient
"""
return self['gradient']
@gradient.setter
def gradient(self, val):
self['gradient'] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax`. Has
an effect only if `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if
`marker.line.color` is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use `marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Line
"""
return self['line']
@line.setter
def line(self, val):
self['line'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on plot.ly for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['opacitysrc']
@opacitysrc.setter
def opacitysrc(self, val):
self['opacitysrc'] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['reversescale']
@reversescale.setter
def reversescale(self, val):
self['reversescale'] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showscale']
@showscale.setter
def showscale(self, val):
self['showscale'] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['sizemin']
@sizemin.setter
def sizemin(self, val):
self['sizemin'] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self['sizemode']
@sizemode.setter
def sizemode(self, val):
self['sizemode'] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['sizeref']
@sizeref.setter
def sizeref(self, val):
self['sizeref'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['symbol']
@symbol.setter
def symbol(self, val):
self['symbol'] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on plot.ly for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['symbolsrc']
@symbolsrc.setter
def symbolsrc(self, val):
self['symbolsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__('marker')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Marker
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (marker as v_marker)
# Initialize validators
# ---------------------
self._validators['autocolorscale'] = v_marker.AutocolorscaleValidator()
self._validators['cauto'] = v_marker.CautoValidator()
self._validators['cmax'] = v_marker.CmaxValidator()
self._validators['cmid'] = v_marker.CmidValidator()
self._validators['cmin'] = v_marker.CminValidator()
self._validators['color'] = v_marker.ColorValidator()
self._validators['coloraxis'] = v_marker.ColoraxisValidator()
self._validators['colorbar'] = v_marker.ColorBarValidator()
self._validators['colorscale'] = v_marker.ColorscaleValidator()
self._validators['colorsrc'] = v_marker.ColorsrcValidator()
self._validators['gradient'] = v_marker.GradientValidator()
self._validators['line'] = v_marker.LineValidator()
self._validators['opacity'] = v_marker.OpacityValidator()
self._validators['opacitysrc'] = v_marker.OpacitysrcValidator()
self._validators['reversescale'] = v_marker.ReversescaleValidator()
self._validators['showscale'] = v_marker.ShowscaleValidator()
self._validators['size'] = v_marker.SizeValidator()
self._validators['sizemin'] = v_marker.SizeminValidator()
self._validators['sizemode'] = v_marker.SizemodeValidator()
self._validators['sizeref'] = v_marker.SizerefValidator()
self._validators['sizesrc'] = v_marker.SizesrcValidator()
self._validators['symbol'] = v_marker.SymbolValidator()
self._validators['symbolsrc'] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('autocolorscale', None)
self['autocolorscale'
] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop('cauto', None)
self['cauto'] = cauto if cauto is not None else _v
_v = arg.pop('cmax', None)
self['cmax'] = cmax if cmax is not None else _v
_v = arg.pop('cmid', None)
self['cmid'] = cmid if cmid is not None else _v
_v = arg.pop('cmin', None)
self['cmin'] = cmin if cmin is not None else _v
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('coloraxis', None)
self['coloraxis'] = coloraxis if coloraxis is not None else _v
_v = arg.pop('colorbar', None)
self['colorbar'] = colorbar if colorbar is not None else _v
_v = arg.pop('colorscale', None)
self['colorscale'] = colorscale if colorscale is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('gradient', None)
self['gradient'] = gradient if gradient is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('opacitysrc', None)
self['opacitysrc'] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop('reversescale', None)
self['reversescale'] = reversescale if reversescale is not None else _v
_v = arg.pop('showscale', None)
self['showscale'] = showscale if showscale is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizemin', None)
self['sizemin'] = sizemin if sizemin is not None else _v
_v = arg.pop('sizemode', None)
self['sizemode'] = sizemode if sizemode is not None else _v
_v = arg.pop('sizeref', None)
self['sizeref'] = sizeref if sizeref is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
_v = arg.pop('symbol', None)
self['symbol'] = symbol if symbol is not None else _v
_v = arg.pop('symbolsrc', None)
self['symbolsrc'] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
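# Illustrative usage sketch (editor-added comment, not part of the generated
# module): mapping a numerical array onto the marker colorscale and showing a
# colorbar. The coordinates, data values and colorscale name are arbitrary
# example values.
#
#     import plotly.graph_objs as go
#
#     trace = go.Scattergeo(
#         lon=[-73.57, 2.35, 139.69], lat=[45.50, 48.86, 35.69], mode='markers',
#         marker=go.scattergeo.Marker(
#             size=14,
#             color=[10, 20, 30],          # numerical array mapped to the colorscale
#             colorscale='Viridis',
#             cmin=0, cmax=30,
#             showscale=True,
#             line=dict(width=1, color='black'),
#         ),
#     )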
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self['dash']
@dash.setter
def dash(self, val):
self['dash'] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Line
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (line as v_line)
# Initialize validators
# ---------------------
self._validators['color'] = v_line.ColorValidator()
self._validators['dash'] = v_line.DashValidator()
self._validators['width'] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('dash', None)
self['dash'] = dash if dash is not None else _v
_v = arg.pop('width', None)
self['width'] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
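# Illustrative usage sketch (editor-added comment, not part of the generated
# module): Line styles the connecting segments when the trace mode includes
# 'lines'. The color, width and dash values are arbitrary examples.
#
#     import plotly.graph_objs as go
#
#     trace = go.Scattergeo(
#         lon=[-73.57, 2.35], lat=[45.50, 48.86], mode='lines',
#         line=go.scattergeo.Line(color='royalblue', width=2, dash='dash'),
#     )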
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['align']
@align.setter
def align(self, val):
self['align'] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['alignsrc']
@alignsrc.setter
def alignsrc(self, val):
self['alignsrc'] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bgcolorsrc']
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self['bgcolorsrc'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bordercolorsrc']
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self['bordercolorsrc'] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scattergeo.hoverlabel.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self['namelength']
@namelength.setter
def namelength(self, val):
self['namelength'] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['namelengthsrc']
@namelengthsrc.setter
def namelengthsrc(self, val):
self['namelengthsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the hover
label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Hoverlabel
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__('hoverlabel')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (hoverlabel as v_hoverlabel)
# Initialize validators
# ---------------------
self._validators['align'] = v_hoverlabel.AlignValidator()
self._validators['alignsrc'] = v_hoverlabel.AlignsrcValidator()
self._validators['bgcolor'] = v_hoverlabel.BgcolorValidator()
self._validators['bgcolorsrc'] = v_hoverlabel.BgcolorsrcValidator()
self._validators['bordercolor'] = v_hoverlabel.BordercolorValidator()
self._validators['bordercolorsrc'
] = v_hoverlabel.BordercolorsrcValidator()
self._validators['font'] = v_hoverlabel.FontValidator()
self._validators['namelength'] = v_hoverlabel.NamelengthValidator()
self._validators['namelengthsrc'
] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('align', None)
self['align'] = align if align is not None else _v
_v = arg.pop('alignsrc', None)
self['alignsrc'] = alignsrc if alignsrc is not None else _v
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bgcolorsrc', None)
self['bgcolorsrc'] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('bordercolorsrc', None)
self['bordercolorsrc'
] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('namelength', None)
self['namelength'] = namelength if namelength is not None else _v
_v = arg.pop('namelengthsrc', None)
self['namelengthsrc'
] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.scattergeo import unselected
from plotly.graph_objs.scattergeo import selected
from plotly.graph_objs.scattergeo import marker
from plotly.graph_objs.scattergeo import hoverlabel
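# Illustrative usage sketch: how the Hoverlabel properties documented above are
# typically set on a trace. The data values and field choices below are
# assumptions for illustration only; the helper is defined but never called, so
# importing this module is unaffected.
def _hoverlabel_usage_sketch():
    import plotly.graph_objs as go
    return go.Scattergeo(
        lon=[-73.6, 2.35],
        lat=[45.5, 48.86],
        hoverlabel=go.scattergeo.Hoverlabel(
            bgcolor='white',
            font=dict(family='Open Sans', size=12),
            # names longer than 10 characters get truncated with an ellipsis
            namelength=10,
        ),
    )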
| 37.224723 | 85 | 0.552 | [
"MIT"
] | Jonathan-MW/plotly.py | plotly/graph_objs/scattergeo/__init__.py | 104,192 | Python |
"""MxShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
import xadmin
from MxShop.settings import MEDIA_ROOT
from django.views.static import serve
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from goods.views import GoodsListViewSet, CategoryViewset, HotSearchsViewset, BannerViewset
from goods.views import IndexCategoryViewset
from users.views import SmsCodeViewset, UserViewset
from user_operation.views import UserFavViewset, LeavingMessageViewset, AddressViewset
from trade.views import ShoppingCartViewset, OrderViewset
router = DefaultRouter()
# goods URLs
router.register(r'goods', GoodsListViewSet, base_name="goods")
# category URLs
router.register(r'categorys', CategoryViewset, base_name="categorys")
router.register(r'codes', SmsCodeViewset, base_name="codes")
router.register(r'hotsearchs', HotSearchsViewset, base_name="hotsearchs")
router.register(r'users', UserViewset, base_name="users")
# user favorites
router.register(r'userfavs', UserFavViewset, base_name="userfavs")
# leaving messages
router.register(r'messages', LeavingMessageViewset, base_name="messages")
# shipping addresses
router.register(r'address', AddressViewset, base_name="address")
# shopping cart URLs
router.register(r'shopcarts', ShoppingCartViewset, base_name="shopcarts")
# order-related URLs
router.register(r'orders', OrderViewset, base_name="orders")
# carousel banner URLs
router.register(r'banners', BannerViewset, base_name="banners")
# homepage goods category data
router.register(r'indexgoods', IndexCategoryViewset, base_name="indexgoods")
goods_list = GoodsListViewSet.as_view({
'get': 'list',
})
from trade.views import AlipayView
from django.views.generic import TemplateView
urlpatterns = [
url(r'^xadmin/', xadmin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
url('', include('social_django.urls', namespace='social')),
url(r'^', include(router.urls)),
url(r'^index/', TemplateView.as_view(template_name="index.html"), name="index"),
url(r'docs/', include_docs_urls(title="慕学生鲜")),
# DRF's built-in token authentication
url(r'^api-token-auth/', views.obtain_auth_token),
# JWT authentication endpoint
url(r'^login/', obtain_jwt_token),
url(r'^alipay/return/', AlipayView.as_view(), name="alipay"),
    url(r'^ueditor/', include('DjangoUeditor.urls')),
]
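# Note on the router registrations above: DefaultRouter generates the standard
# list/detail routes for every registered viewset, and url(r'^', include(router.urls))
# exposes them at the site root. For example, the "goods" registration roughly maps
# GET /goods/ to GoodsListViewSet.list and GET /goods/<pk>/ to GoodsListViewSet.retrieve
# (exact patterns depend on the installed DRF version). A hypothetical debugging helper,
# never called by the project:
def _print_router_routes():
    for pattern in router.urls:
        print(pattern)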
| 31.938776 | 91 | 0.755911 | [
"BSD-2-Clause"
] | ScorpioDoctor/DjangoVueShop | MxShop/MxShop/urls.py | 3,226 | Python |
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {0} for {addon} deleted.')
editor_format = _(u'{user} deleted {0} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class MAKE_PREMIUM(_LOG):
id = 50
format = _(u'{addon} changed to premium.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class REFUND_REQUESTED(_LOG):
id = 56
format = _(u'Refund requested for {addon}')
class REFUND_DECLINED(_LOG):
id = 57
format = _(u'Refund declined for {addon} for {0}.')
class REFUND_GRANTED(_LOG):
id = 58
format = _(u'Refund granted for {addon} for {0}.')
class REFUND_INSTANT(_LOG):
id = 59
format = _(u'Instant refund granted for {addon}.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class RECEIPT_CHECKED(_LOG):
id = 65
format = _(u'Valid receipt was checked for {addon}.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATED_HIGH_REFUNDS(_LOG):
id = 69
format = _(u'{addon} escalated because of high number of refund requests.')
short = _(u'High Refund Requests')
keep = True
review_queue = True
class REREVIEW_MANIFEST_CHANGE(_LOG):
id = 70
format = _(u'{addon} re-reviewed because of manifest change.')
short = _(u'Manifest Change')
keep = True
review_queue = True
class REREVIEW_PREMIUM_TYPE_UPGRADE(_LOG):
id = 71
format = _(u'{addon} re-reviewed because app upgraded premium type.')
short = _(u'Premium Type Upgrade')
keep = True
review_queue = True
class REREVIEW_CLEARED(_LOG):
id = 72
format = _(u'Re-review cleared for {addon}.')
short = _(u'Re-review cleared')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation..
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class WEBAPP_RESUBMIT(_LOG):
id = 77
format = _(u'{addon} resubmitted for review.')
short = _(u'App Resubmission')
keep = True
review_queue = True
class ESCALATION_VIP_APP(_LOG):
id = 78
    format = _(u'{addon} auto-escalated because it is a VIP app.')
short = _(u'VIP auto-escalation')
keep = True
review_queue = True
class REREVIEW_MANIFEST_URL_CHANGE(_LOG):
id = 79
format = _(u'{addon} re-reviewed because of manifest URL change.')
short = _(u'Manifest URL Change')
keep = True
review_queue = True
class ESCALATION_PRERELEASE_APP(_LOG):
id = 80
    format = _(u'{addon} auto-escalated because it is a prerelease app.')
short = _(u'Prerelease auto-escalation')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class PRIORITY_REVIEW_REQUESTED(_LOG):
id = 128
format = _(u'Priority review requested for {addon}.')
short = _(u'Priority Review')
keep = True
review_queue = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False)
or l.id in LOG_ADMINS)]
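# The registry built above supports simple lookups; for example (values taken
# directly from the class definitions earlier in this file):
#
#   LOG.APPROVE_VERSION.id                      -> 21
#   LOG_BY_ID[21] is LOG.APPROVE_VERSION        -> True
#   LOG.APPROVE_VERSION.id in LOG_REVIEW_QUEUE  -> True  (it sets review_queue)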
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from amo import get_user, logger_log
from mkt.developers.models import (ActivityLog, ActivityLogAttachment,
AppLog, CommentLog, GroupLog, UserLog,
VersionLog)
from mkt.access.models import Group
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
from mkt.versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
if 'attachments' in kw:
formset = kw['attachments']
storage = get_storage_class()()
for form in formset:
data = form.cleaned_data
if 'attachment' in data:
attachment = data['attachment']
storage.save('%s/%s' % (settings.REVIEWER_ATTACHMENTS_PATH,
attachment.name), attachment)
ActivityLogAttachment(activity_log=al,
description=data['description'],
mimetype=attachment.content_type,
filepath=attachment.name).save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Webapp:
AppLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
# Webapp first since Webapp subclasses Addon.
if isinstance(arg, Webapp):
AppLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
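# Illustrative call (a sketch, assuming an add-on, version and reviewer object
# are in scope; the comment text is a placeholder):
#
#   amo.log(amo.LOG.APPROVE_VERSION, addon, version, user=reviewer,
#           details={'comments': 'Looks good'})
#
# Passing details={'comments': ...} also creates a CommentLog row, as handled above.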
| 23.909209 | 79 | 0.628241 | [
"BSD-3-Clause"
] | muffinresearch/zamboni | apps/amo/log.py | 18,434 | Python |
# -*- coding: latin-1 -*-
#
# Turn off logging in extensions (too loud!)
from vb2py.test.testframework import *
import vb2py.extensions
import vb2py.utils
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
#
# Set some config options which are appropriate for testing
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
tests = []
# String methods
tests.extend([
'a = "hello".Length',
'a = ("hello").Length',
'a = ("hello" + "world").Length',
'a = ("hello" + "world").Length + 2',
])
# Expression calls
tests.extend([
'a = (a + b).Truncate(2)',
'(a + b).SendToDestination("email.com")',
'(a + b).SendToDestination',
])
tests.append(
"""
Function B()
Return 12
End Function
"""
)
tests.append((
"""
Public Class MyObject
Public Property A As Integer
Get
Return 10
End Get
Set(Value as Integer)
X = Value
End Set
End Property
End Class
"""
))
# VB.NET
tests.append("""
Class MyClass
A = 1
End Class
""")
# Decorated Class
tests.append("""
<Decorator.Thing()> Class MyClass
A = 1
End Class
""")
tests.append("""
<Decorator.Thing()> _
Class MyClass
A = 1
End Class
""")
# Handlers
tests.append("""
Class MyClass
Public Sub DoIt() Handles Button.Click
End Sub
End Class
""")
# Shared methods
tests.append("""
Class MyClass
Public Shared Sub DoIt()
End Sub
Public Shared Function DoIt()
End Function
End Class
""")
tests.append("""
Module Digests
Public Const a = ""
End Module
""")
class ParsingTest(unittest.TestCase):
"""Holder class which gets built into a whole test case"""
def getTestMethod(vb):
"""Create a test method"""
def testMethod(self):
try:
buildParseTree(vb, dialect='vb.net')
except VBParserError:
raise Exception("Unable to parse ...\n%s" % vb)
return testMethod
# Add tests to main test class
for idx in range(len(tests)):
setattr(ParsingTest, "test%d" % idx, getTestMethod(tests[idx]))
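# The loop above attaches one generated method per VB snippet, so ParsingTest ends
# up with methods named test0 .. test<N-1>, one for each entry in `tests`; running
# this module therefore tries to parse every snippet with the vb.net dialect.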
if __name__ == "__main__":
main()
| 16.757576 | 67 | 0.623418 | [
"BSD-3-Clause"
] | ceprio/xl_vb2py | vb2py/test/testdotnet.py | 2,212 | Python |
# -*- coding: utf-8 -*-
"""
pyvisa-py.protocols.usbtmc
~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements Session to control USBTMC instruments
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and
Measurement class) devices.
by Noboru Yamamot, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2018 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import enum
from pyvisa.compat import struct
import time
from collections import namedtuple
import warnings
import usb
from .usbutil import (find_devices, find_interfaces, find_endpoint,
usb_find_desc)
import sys
if sys.version_info < (3, 2):
def array_to_bytes(arr):
return arr.tostring()
else:
def array_to_bytes(arr):
return arr.tobytes()
class MsgID(enum.IntEnum):
"""From USB-TMC table2
"""
dev_dep_msg_out = 1
request_dev_dep_msg_in = 2
dev_dep_msg_in = 2
vendor_specific_out = 126
request_vendor_specific_in = 127
vendor_specific_in = 127
trigger = 128 # for USB488
class Request(enum.IntEnum):
initiate_abort_bulk_out = 1
check_abort_bulk_out_status = 2
initiate_abort_bulk_in = 3
check_abort_bulk_in_status = 4
initiate_clear = 5
check_clear_status = 6
get_capabilities = 7
indicator_pulse = 64
class UsbTmcStatus(enum.IntEnum):
success = 1
pending = 2
failed = 0x80
transfer_not_in_progress = 0x81
split_not_in_progress = 0x82
split_in_progress = 0x83
def find_tmc_devices(vendor=None, product=None, serial_number=None,
custom_match=None, **kwargs):
"""Find connected USBTMC devices. See usbutil.find_devices for more info.
"""
def is_usbtmc(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xfe,
bInterfaceSubClass=3))
return find_devices(vendor, product, serial_number, is_usbtmc, **kwargs)
class BulkOutMessage(object):
"""The Host uses the Bulk-OUT endpoint to send USBTMC command messages to
the device.
"""
@staticmethod
def build_array(btag, eom, chunk):
size = len(chunk)
return (struct.pack('BBBx', MsgID.dev_dep_msg_out, btag,
~btag & 0xFF) +
struct.pack("<LBxxx", size, eom) +
chunk +
b'\0' * ((4 - size) % 4))
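# For reference, the Bulk-OUT payload produced above for btag=1, eom=True,
# chunk=b'*IDN?\n' is (grouped for readability):
#
#   01 01 FE 00               MsgID=DEV_DEP_MSG_OUT, bTag, bTagInverse, reserved
#   06 00 00 00 01 00 00 00   TransferSize=6 (little-endian), EOM flag, reserved
#   2A 49 44 4E 3F 0A 00 00   "*IDN?\n" padded to a 4-byte boundary
#
# i.e. BulkOutMessage.build_array(1, True, b'*IDN?\n').hex() ==
# '0101fe0006000000010000002a49444e3f0a0000'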
class BulkInMessage(namedtuple('BulkInMessage', 'msgid btag btaginverse '
'transfer_size transfer_attributes data')):
"""The Host uses the Bulk-IN endpoint to read USBTMC response messages from
the device.
The Host must first send a USBTMC command message that expects a response
before attempting to read a USBTMC response message.
"""
@classmethod
def from_bytes(cls, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
if msgid != MsgID.dev_dep_msg_in:
warnings.warn('Unexpected MsgID format. Consider updating the device\'s firmware. See https://github.com/pyvisa/pyvisa-py/issues/20')
return BulkInMessage.from_quirky(data)
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
# Truncate data to the specified length (discard padding).
data = data[12:12+transfer_size]
return cls(msgid, btag, btaginverse, transfer_size,
transfer_attributes, data)
@classmethod
def from_quirky(cls, data):
"""Constructs a correct response for quirky devices"""
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
data = data.rstrip(b'\x00')
# check whether it contains a ';' and if throw away the first 12 bytes
if ';' in str(data):
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
data = data[12:]
else:
transfer_size = 0
transfer_attributes = 1
return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, data)
@staticmethod
def build_array(btag, transfer_size, term_char=None):
"""
:param transfer_size:
:param btag:
:param term_char:
:return:
"""
if term_char is None:
transfer_attributes = 0
term_char = 0
else:
transfer_attributes = 2
return (struct.pack('BBBx', MsgID.request_dev_dep_msg_in, btag,
~btag & 0xFF) +
struct.pack("<LBBxx", transfer_size, transfer_attributes,
term_char))
class USBRaw(object):
"""Base class for drivers that communicate with instruments
via usb port using pyUSB
"""
#: Configuration number to be used. If None, the default will be used.
CONFIGURATION = None
#: Interface index it be used
INTERFACE = (0, 0)
#: Receive and Send endpoints to be used. If None the first IN (or OUT)
#: BULK endpoint will be used.
ENDPOINTS = (None, None)
find_devices = staticmethod(find_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
device_filters=None, timeout=None, **kwargs):
super(USBRaw, self).__init__()
# Timeout expressed in ms as an integer and limited to 2**32-1
# If left to None pyusb will use its default value
self.timeout = timeout
device_filters = device_filters or {}
devices = list(self.find_devices(vendor, product, serial_number, None,
**device_filters))
if not devices:
raise ValueError('No device found.')
elif len(devices) > 1:
desc = '\n'.join(str(dev) for dev in devices)
raise ValueError('{} devices found:\n{}\nPlease narrow the search'
' criteria'.format(len(devices), desc))
self.usb_dev = devices[0]
try:
if self.usb_dev.is_kernel_driver_active(0):
self.usb_dev.detach_kernel_driver(0)
except (usb.core.USBError, NotImplementedError) as e:
pass
try:
self.usb_dev.set_configuration()
except usb.core.USBError as e:
raise Exception('failed to set configuration\n %s' % e)
try:
self.usb_dev.set_interface_altsetting()
except usb.core.USBError as e:
pass
self.usb_intf = self._find_interface(self.usb_dev, self.INTERFACE)
self.usb_recv_ep, self.usb_send_ep =\
self._find_endpoints(self.usb_intf, self.ENDPOINTS)
def _find_interface(self, dev, setting):
return self.usb_dev.get_active_configuration()[self.INTERFACE]
def _find_endpoints(self, interface, setting):
recv, send = setting
if recv is None:
recv = find_endpoint(interface, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_BULK)
else:
recv = usb_find_desc(interface, bEndpointAddress=recv)
if send is None:
send = find_endpoint(interface, usb.ENDPOINT_OUT,
usb.ENDPOINT_TYPE_BULK)
else:
send = usb_find_desc(interface, bEndpointAddress=send)
return recv, send
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
try:
return self.usb_send_ep.write(data)
except usb.core.USBError as e:
raise ValueError(str(e))
def read(self, size):
"""Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
"""
if size <= 0:
size = 1
data = array_to_bytes(self.usb_recv_ep.read(size, self.timeout))
return data
def close(self):
return usb.util.dispose_resources(self.usb_dev)
class USBTMC(USBRaw):
# Maximum number of bytes per transfer (for sending and receiving).
RECV_CHUNK = 1024 ** 2
find_devices = staticmethod(find_tmc_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
**kwargs):
super(USBTMC, self).__init__(vendor, product, serial_number, **kwargs)
self.usb_intr_in = find_endpoint(self.usb_intf, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_INTERRUPT)
self.usb_dev.reset()
self.usb_dev.set_configuration()
time.sleep(0.01)
self._get_capabilities()
self._btag = 0
if not (self.usb_recv_ep and self.usb_send_ep):
msg = "TMC device must have both Bulk-In and Bulk-out endpoints."
raise ValueError(msg)
def _get_capabilities(self):
self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE),
Request.get_capabilities,
0x0000,
self.usb_intf.index,
0x0018,
timeout=self.timeout)
def _find_interface(self, dev, setting):
interfaces = find_interfaces(dev, bInterfaceClass=0xFE,
bInterfaceSubClass=3)
if not interfaces:
raise ValueError('USB TMC interface not found.')
elif len(interfaces) > 1:
pass
return interfaces[0]
def _abort_bulk_in(self, btag):
"""Request that the device abort a pending Bulk-IN operation."""
abort_timeout_ms = 5000
# Send INITIATE_ABORT_BULK_IN.
# According to USBTMC 1.00 4.2.1.4:
# wValue = bTag value of transfer to be aborted
# wIndex = Bulk-IN endpoint
# wLength = 0x0002 (length of device response)
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.initiate_abort_bulk_in,
btag,
self.usb_recv_ep.bEndpointAddress,
0x0002,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.success:
# Abort Bulk-IN failed. Ignore it.
return
# Read remaining data from Bulk-IN endpoint.
self.usb_recv_ep.read(self.RECV_CHUNK, abort_timeout_ms)
# Send CHECK_ABORT_BULK_IN_STATUS until it completes.
# According to USBTMC 1.00 4.2.1.5:
# wValue = 0x0000
# wIndex = Bulk-IN endpoint
# wLength = 0x0008 (length of device response)
for retry in range(100):
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.check_abort_bulk_in_status,
0x0000,
self.usb_recv_ep.bEndpointAddress,
0x0008,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.pending:
break
time.sleep(0.05)
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBTMC, self).write
# Send all data via one or more Bulk-OUT transfers.
# Set the EOM flag on the last transfer only.
# Send at least one transfer (possibly empty).
while (end == 0) or (end < size):
begin, end = end, begin + self.RECV_CHUNK
self._btag = (self._btag % 255) + 1
eom = (end >= size)
data = BulkOutMessage.build_array(self._btag, eom, data[begin:end])
bytes_sent += raw_write(data)
return size
def read(self, size):
recv_chunk = self.RECV_CHUNK
if size > 0 and size < recv_chunk:
recv_chunk = size
header_size = 12
max_padding = 511
eom = False
raw_read = super(USBTMC, self).read
raw_write = super(USBTMC, self).write
received = bytearray()
while not eom:
self._btag = (self._btag % 255) + 1
req = BulkInMessage.build_array(self._btag, recv_chunk, None)
raw_write(req)
try:
resp = raw_read(recv_chunk + header_size + max_padding)
response = BulkInMessage.from_bytes(resp)
except (usb.core.USBError, ValueError):
# Abort failed Bulk-IN operation.
self._abort_bulk_in(self._btag)
raise
received.extend(response.data)
# Detect EOM only when device sends all expected bytes.
if len(response.data) >= response.transfer_size:
eom = response.transfer_attributes & 1
return bytes(received)
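# A minimal usage sketch (the vendor/product IDs, the SCPI command and the read
# size are placeholders, not values from this module); the helper is never called:
def _example_idn_query():
    inst = USBTMC(vendor=0x1234, product=0x5678)
    try:
        inst.write(b'*IDN?\n')   # goes out as a DEV_DEP_MSG_OUT transfer
        return inst.read(1024)   # issues REQUEST_DEV_DEP_MSG_IN and collects the reply
    finally:
        inst.close()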
| 31.393519 | 145 | 0.595045 | [
"MIT"
] | circuitfox/pyvisa-py | pyvisa-py/protocols/usbtmc.py | 13,564 | Python |
print('-'*20)
print('CADASTRE UMA PESSOA')
print('-'*20)
total = totalm = totalf = 0
while True:
idade = int(input('Idade: '))
if idade >= 18:
total += 1
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sexo: [M/F]')).strip().upper()[0]
        # note!
if sexo == 'M':
totalm += 1
if sexo == 'F' and idade < 20:
        totalf += 1
resposta = ' '
while resposta not in 'SN':
resposta = str(input('Quer continuar? [S/N]')).upper().strip()[0]
if resposta == 'N':
break
print('===== FIM DO PROGRAMA =====')
print(f'Total de pessoas com mais de 18 anos: {total}')
print(f'Ao todo temos {totalm} homens cadastrados')
print(f'E temos {totalf} mulher com menos de 20 anos') | 25.9 | 73 | 0.537967 | [
"MIT"
] | GabrielSantos25/Python | Exercicios em python/ex69.py | 779 | Python |
import os
import moderngl
import numpy as np
from objloader import Obj
from PIL import Image
from pyrr import Matrix44
import data
from window import Example, run_example
class CrateExample(Example):
title = "Crate"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_norm;
in vec2 in_text;
out vec3 v_vert;
out vec3 v_norm;
out vec2 v_text;
void main() {
gl_Position = Mvp * vec4(in_vert, 1.0);
v_vert = in_vert;
v_norm = in_norm;
v_text = in_text;
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
uniform sampler2D Texture;
in vec3 v_vert;
in vec3 v_norm;
in vec2 v_text;
out vec4 f_color;
void main() {
float lum = clamp(dot(normalize(Light - v_vert), normalize(v_norm)), 0.0, 1.0) * 0.8 + 0.2;
f_color = vec4(texture(Texture, v_text).rgb * lum, 1.0);
}
''',
)
self.mvp = self.prog['Mvp']
self.light = self.prog['Light']
obj = Obj.open(data.find('crate.obj'))
img = Image.open(data.find('crate.png')).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
self.texture = self.ctx.texture(img.size, 3, img.tobytes())
self.texture.use()
self.vbo = self.ctx.buffer(obj.pack('vx vy vz nx ny nz tx ty'))
self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert', 'in_norm', 'in_text')
def render(self, time, frame_time):
angle = time
self.ctx.clear(1.0, 1.0, 1.0)
self.ctx.enable(moderngl.DEPTH_TEST)
camera_pos = (np.cos(angle) * 5.0, np.sin(angle) * 5.0, 2.0)
proj = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0)
lookat = Matrix44.look_at(
camera_pos,
(0.0, 0.0, 0.5),
(0.0, 0.0, 1.0),
)
self.mvp.write((proj * lookat).astype('f4').tobytes())
self.light.value = camera_pos
self.vao.render()
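# In render() above, the matrix written to the "Mvp" uniform is the product
# proj * lookat; because camera_pos is parameterised by angle = time, the camera
# orbits the crate while the light follows the camera position.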
if __name__ == '__main__':
run_example(CrateExample)
| 27.604396 | 111 | 0.513137 | [
"MIT"
] | einarf/ModernGL | examples/crate.py | 2,512 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
class TestV1ScaleIOVolumeSource(unittest.TestCase):
""" V1ScaleIOVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOVolumeSource(self):
"""
Test V1ScaleIOVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_scale_io_volume_source.V1ScaleIOVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| 22.355556 | 105 | 0.72167 | [
"Apache-2.0"
] | MiaoRachelYu/python | kubernetes/test/test_v1_scale_io_volume_source.py | 1,006 | Python |
import datetime
import json
import os
import re
import time
import uuid
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union
from urllib.parse import urljoin
# if simplejson is installed, `requests` defaults to using it instead of json
# this allows the client to gracefully handle either json or simplejson
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
import pendulum
import toml
from slugify import slugify
import prefect
from prefect.utilities.exceptions import (
AuthorizationError,
ClientError,
VersionLockError,
)
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
parse_graphql,
with_args,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
from prefect.core import Flow
import requests
JSONLike = Union[bool, dict, list, str, int, float, None]
# type definitions for GraphQL results
TaskRunInfoResult = NamedTuple(
"TaskRunInfoResult",
[
("id", str),
("task_id", str),
("task_slug", str),
("version", int),
("state", "prefect.engine.state.State"),
],
)
FlowRunInfoResult = NamedTuple(
"FlowRunInfoResult",
[
("id", str),
("name", str),
("flow_id", str),
("parameters", Dict[str, Any]),
("context", Dict[str, Any]),
("version", int),
("scheduled_start_time", datetime.datetime),
("state", "prefect.engine.state.State"),
("task_runs", List[TaskRunInfoResult]),
],
)
class Client:
"""
Client for communication with Prefect Cloud
    If the arguments aren't specified, the client initialization first checks the Prefect
    configuration, and if the server is not set there, it checks the current context. The
token will only be present in the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests
to; if not provided, will be pulled from `cloud.graphql` config var
- api_token (str, optional): a Prefect Cloud API token, taken from
`config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
be used to log in to any tenant that the user is a member of. In that case,
ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
will be used as authorization.
"""
def __init__(self, api_server: str = None, api_token: str = None):
self._access_token = None
self._refresh_token = None
self._access_token_expires_at = pendulum.now()
self._active_tenant_id = None
self._attached_headers = {} # type: Dict[str, str]
self.logger = create_diagnostic_logger("Diagnostics")
# store api server
self.api_server = api_server or prefect.context.config.cloud.get("graphql")
# store api token
self._api_token = api_token or prefect.context.config.cloud.get(
"auth_token", None
)
if prefect.config.backend == "cloud":
if not self._api_token:
# if no api token was passed, attempt to load state from local storage
settings = self._load_local_settings()
self._api_token = settings.get("api_token")
if self._api_token:
self._active_tenant_id = settings.get("active_tenant_id")
if self._active_tenant_id:
try:
self.login_to_tenant(tenant_id=self._active_tenant_id)
except AuthorizationError:
# if an authorization error is raised, then the token is invalid and should
# be cleared
self.logout_from_tenant()
else:
            # TODO: Separate out this functionality and clean up initial tenant access handling
if not self._active_tenant_id:
tenant_info = self.graphql({"query": {"tenant": {"id"}}})
if tenant_info.data.tenant:
self._active_tenant_id = tenant_info.data.tenant[0].id
def create_tenant(self, name: str, slug: str = None) -> str:
"""
Creates a new tenant.
Note this route only works when run against Prefect Server.
Args:
- name (str): the name of the tenant to create
- slug (str, optional): the slug of the tenant to create; defaults to name
Returns:
- str: the ID of the newly created tenant, or the ID of the currently active tenant
Raises:
- ValueError: if run against Prefect Cloud
"""
if prefect.config.backend != "server":
msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
raise ValueError(msg)
if slug is None:
slug = slugify(name)
tenant_info = self.graphql(
{
"mutation($input: create_tenant_input!)": {
"create_tenant(input: $input)": {"id"}
}
},
variables=dict(input=dict(name=name, slug=slug)),
)
return tenant_info.data.create_tenant.id
# -------------------------------------------------------------------------
# Utilities
def get(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the GET request to;
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="GET",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def post(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the POST request to;
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="POST",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def graphql(
self,
query: Any,
raise_on_error: bool = True,
headers: Dict[str, str] = None,
variables: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> GraphQLResult:
"""
Convenience function for running queries against the Prefect GraphQL API
Args:
- query (Any): A representation of a graphql query to be executed. It will be
parsed by prefect.utilities.graphql.parse_graphql().
- raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL
returns any `errors`.
- headers (dict): any additional headers that should be passed as part of the
request
- variables (dict): Variables to be filled into a query with the key being
equivalent to the variables that are accepted by the query
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Data returned from the GraphQL query
Raises:
- ClientError if there are errors raised by the GraphQL mutation
"""
result = self.post(
path="",
server=self.api_server,
headers=headers,
params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
token=token,
retry_on_api_error=retry_on_api_error,
)
if raise_on_error and "errors" in result:
if "UNAUTHENTICATED" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif "Malformed Authorization header" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif (
result["errors"][0].get("extensions", {}).get("code")
== "VERSION_LOCKING_ERROR"
):
raise VersionLockError(result["errors"])
raise ClientError(result["errors"])
else:
return GraphQLResult(result) # type: ignore
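    # A typical call (sketch; the query shape and fields are assumptions that
    # depend on the backend's GraphQL schema):
    #
    #   client = Client()
    #   result = client.graphql({"query": {"flow(limit: 5)": {"id", "name"}}})
    #   names = [f.name for f in result.data.flow]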
def _send_request(
self,
session: "requests.Session",
method: str,
url: str,
params: Dict[str, JSONLike] = None,
headers: dict = None,
) -> "requests.models.Response":
if prefect.context.config.cloud.get("diagnostics") is True:
self.logger.debug(f"Preparing request to {url}")
clean_headers = {
head: re.sub("Bearer .*", "Bearer XXXX", val)
for head, val in headers.items() # type: ignore
}
self.logger.debug(f"Headers: {clean_headers}")
self.logger.debug(f"Request: {params}")
start_time = time.time()
if method == "GET":
response = session.get(url, headers=headers, params=params, timeout=30)
elif method == "POST":
response = session.post(url, headers=headers, json=params, timeout=30)
elif method == "DELETE":
response = session.delete(url, headers=headers, timeout=30)
else:
raise ValueError("Invalid method: {}".format(method))
if prefect.context.config.cloud.get("diagnostics") is True:
end_time = time.time()
self.logger.debug(f"Response: {response.json()}")
self.logger.debug(
f"Request duration: {round(end_time - start_time, 4)} seconds"
)
# Check if request returned a successful status
response.raise_for_status()
return response
def _request(
self,
method: str,
path: str,
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
token: str = None,
retry_on_api_error: bool = True,
) -> "requests.models.Response":
"""
Runs any specified request (GET, POST, DELETE) against the server
Args:
- method (str): The type of request to be made (GET, POST, DELETE)
- path (str): Path of the API URL
- params (dict, optional): Parameters used for the request
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- requests.models.Response: The response returned from the request
Raises:
- ClientError: if the client token is not in the context (due to not being logged in)
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
"""
if server is None:
server = self.api_server
assert isinstance(server, str) # mypy assert
if token is None:
token = self.get_auth_token()
# 'import requests' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import requests
url = urljoin(server, path.lstrip("/")).rstrip("/")
params = params or {}
headers = headers or {}
if token:
headers["Authorization"] = "Bearer {}".format(token)
headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__)
if self._attached_headers:
headers.update(self._attached_headers)
session = requests.Session()
retry_total = 6 if prefect.config.backend == "cloud" else 1
retries = requests.packages.urllib3.util.retry.Retry(
total=retry_total,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=["DELETE", "GET", "POST"],
)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries))
response = self._send_request(
session=session, method=method, url=url, params=params, headers=headers
)
# parse the response
try:
json_resp = response.json()
except JSONDecodeError as exc:
if prefect.config.backend == "cloud" and "Authorization" not in headers:
raise ClientError(
"Malformed response received from Cloud - please ensure that you "
"have an API token properly configured."
) from exc
else:
raise ClientError("Malformed response received from API.") from exc
# check if there was an API_ERROR code in the response
if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error:
success, retry_count = False, 0
# retry up to six times
while success is False and retry_count < 6:
response = self._send_request(
session=session,
method=method,
url=url,
params=params,
headers=headers,
)
if "API_ERROR" in str(response.json().get("errors")):
retry_count += 1
time.sleep(0.25 * (2 ** (retry_count - 1)))
else:
success = True
return response
def attach_headers(self, headers: dict) -> None:
"""
Set headers to be attached to this Client
Args:
- headers (dict): A dictionary of headers to attach to this client. These headers
get added on to the existing dictionary of headers.
"""
self._attached_headers.update(headers)
# -------------------------------------------------------------------------
# Auth
# -------------------------------------------------------------------------
@property
def _local_settings_path(self) -> Path:
"""
        Returns the path of the local settings file corresponding to the current API server
"""
path = "{home}/client/{server}".format(
home=prefect.context.config.home_dir,
server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
)
return Path(os.path.expanduser(path)) / "settings.toml"
def _save_local_settings(self, settings: dict) -> None:
"""
Writes settings to local storage
"""
self._local_settings_path.parent.mkdir(exist_ok=True, parents=True)
with self._local_settings_path.open("w+") as f:
toml.dump(settings, f)
def _load_local_settings(self) -> dict:
"""
Loads settings from local storage
"""
if self._local_settings_path.exists():
with self._local_settings_path.open("r") as f:
return toml.load(f) # type: ignore
return {}
def save_api_token(self) -> None:
"""
Saves the API token in local storage.
"""
settings = self._load_local_settings()
settings["api_token"] = self._api_token
self._save_local_settings(settings)
def get_auth_token(self) -> str:
"""
Returns an auth token:
- if no explicit access token is stored, returns the api token
- if there is an access token:
- if there's a refresh token and the access token expires in the next 30 seconds,
then we refresh the access token and store the result
- return the access token
Returns:
- str: the access token
"""
if not self._access_token:
return self._api_token
expiration = self._access_token_expires_at or pendulum.now()
if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
self._refresh_access_token()
return self._access_token
def get_available_tenants(self) -> List[Dict]:
"""
Returns a list of available tenants.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- List[Dict]: a list of dictionaries containing the id, slug, and name of
available tenants
"""
result = self.graphql(
{"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
# use the API token to see all available tenants
token=self._api_token,
) # type: ignore
return result.data.tenant # type: ignore
def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
"""
Log in to a specific tenant
NOTE: this should only be called by users who have provided a USER-scoped API token.
Args:
- tenant_slug (str): the tenant's slug
- tenant_id (str): the tenant's id
Returns:
- bool: True if the login was successful
Raises:
- ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided
- ValueError: if the `tenant_id` is not a valid UUID
- ValueError: if no matching tenants are found
"""
if tenant_slug is None and tenant_id is None:
raise ValueError(
"At least one of `tenant_slug` or `tenant_id` must be provided."
)
elif tenant_id:
try:
uuid.UUID(tenant_id)
except ValueError as exc:
raise ValueError("The `tenant_id` must be a valid UUID.") from exc
tenant = self.graphql(
{
"query($slug: String, $id: uuid)": {
"tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
}
},
variables=dict(slug=tenant_slug, id=tenant_id),
# use the API token to query the tenant
token=self._api_token,
) # type: ignore
if not tenant.data.tenant: # type: ignore
raise ValueError("No matching tenants found.")
tenant_id = tenant.data.tenant[0].id # type: ignore
if prefect.config.backend == "cloud":
payload = self.graphql(
{
"mutation($input: switch_tenant_input!)": {
"switch_tenant(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(tenant_id=tenant_id)),
# Use the API token to switch tenants
token=self._api_token,
) # type: ignore
self._access_token = payload.data.switch_tenant.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.switch_tenant.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore
self._active_tenant_id = tenant_id
# save the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = self._active_tenant_id
self._save_local_settings(settings)
return True
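    # Sketch of typical tenant selection with a USER-scoped API token (the token
    # value and the choice of the first available tenant are placeholders):
    #
    #   client = Client(api_token="<USER-scoped token>")
    #   tenants = client.get_available_tenants()
    #   client.login_to_tenant(tenant_slug=tenants[0]["slug"])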
def logout_from_tenant(self) -> None:
self._access_token = None
self._refresh_token = None
self._active_tenant_id = None
# remove the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = None
self._save_local_settings(settings)
def _refresh_access_token(self) -> bool:
"""
Refresh the client's JWT access token.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- bool: True if the refresh succeeds
"""
payload = self.graphql(
{
"mutation($input: refresh_token_input!)": {
"refresh_token(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(access_token=self._access_token)),
# pass the refresh token as the auth header
token=self._refresh_token,
) # type: ignore
self._access_token = payload.data.refresh_token.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.refresh_token.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore
return True
# -------------------------------------------------------------------------
# Actions
# -------------------------------------------------------------------------
def register(
self,
flow: "Flow",
project_name: str = None,
build: bool = True,
set_schedule_active: bool = True,
version_group_id: str = None,
compressed: bool = True,
no_url: bool = False,
) -> str:
"""
Push a new flow to Prefect Cloud
Args:
- flow (Flow): a flow to register
- project_name (str, optional): the project that should contain this flow.
- build (bool, optional): if `True`, the flow's environment is built
prior to serialization; defaults to `True`
- set_schedule_active (bool, optional): if `False`, will set the schedule to
inactive in the database to prevent auto-scheduling runs (if the Flow has a
schedule). Defaults to `True`. This can be changed later.
- version_group_id (str, optional): the UUID version group ID to use for versioning
this Flow in Cloud; if not provided, the version group ID associated with this
Flow's project and name will be used.
            - compressed (bool, optional): if `True`, the serialized flow will be
                compressed; defaults to `True`
- no_url (bool, optional): if `True`, the stdout from this function will not
contain the URL link to the newly-registered flow in the Cloud UI
Returns:
- str: the ID of the newly-registered flow
Raises:
- ClientError: if the register failed
"""
required_parameters = {p for p in flow.parameters() if p.required}
if flow.schedule is not None and required_parameters:
required_names = {p.name for p in required_parameters}
if not all(
[
required_names <= set(c.parameter_defaults.keys())
for c in flow.schedule.clocks
]
):
raise ClientError(
"Flows with required parameters can not be scheduled automatically."
)
if any(e.key for e in flow.edges) and flow.result is None:
warnings.warn(
"No result handler was specified on your Flow. Cloud features such as "
"input caching and resuming task runs from failure may not work properly.",
stacklevel=2,
)
if compressed:
create_mutation = {
"mutation($input: create_flow_from_compressed_string_input!)": {
"create_flow_from_compressed_string(input: $input)": {"id"}
}
}
else:
create_mutation = {
"mutation($input: create_flow_input!)": {
"create_flow(input: $input)": {"id"}
}
}
project = None
if project_name is None:
raise TypeError(
"'project_name' is a required field when registering a flow."
)
query_project = {
"query": {
with_args("project", {"where": {"name": {"_eq": project_name}}}): {
"id": True
}
}
}
project = self.graphql(query_project).data.project # type: ignore
if not project:
raise ValueError(
"Project {} not found. Run `prefect create project '{}'` to create it.".format(
project_name, project_name
)
)
serialized_flow = flow.serialize(build=build) # type: Any
# Set Docker storage image in environment metadata if provided
if isinstance(flow.storage, prefect.environments.storage.Docker):
flow.environment.metadata["image"] = flow.storage.name
serialized_flow = flow.serialize(build=False)
# If no image ever set, default metadata to all_extras image on current version
if not flow.environment.metadata.get("image"):
version = prefect.__version__.split("+")[0]
flow.environment.metadata[
"image"
] = f"prefecthq/prefect:all_extras-{version}"
serialized_flow = flow.serialize(build=False)
# verify that the serialized flow can be deserialized
try:
prefect.serialization.flow.FlowSchema().load(serialized_flow)
except Exception as exc:
raise ValueError(
"Flow could not be deserialized successfully. Error was: {}".format(
repr(exc)
)
) from exc
if compressed:
serialized_flow = compress(serialized_flow)
res = self.graphql(
create_mutation,
variables=dict(
input=dict(
project_id=(project[0].id if project else None),
serialized_flow=serialized_flow,
set_schedule_active=set_schedule_active,
version_group_id=version_group_id,
)
),
retry_on_api_error=False,
) # type: Any
flow_id = (
res.data.create_flow_from_compressed_string.id
if compressed
else res.data.create_flow.id
)
if not no_url:
# Generate direct link to Cloud flow
flow_url = self.get_cloud_url("flow", flow_id)
prefix = "└── "
print("Flow URL: {}".format(flow_url))
# Extra information to improve visibility
msg = (
f" {prefix}ID: {flow_id}\n"
f" {prefix}Project: {project_name}\n"
f" {prefix}Labels: {list(flow.environment.labels)}"
)
print(msg)
return flow_id
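    # Added annotation (illustrative sketch, not part of the original source):
    # with an existing project, registration is typically a single call; the
    # project name here is a placeholder.
    #
    #     flow_id = client.register(flow, project_name="my-project")
    #
    # The project must already exist (see the ValueError above), and the flow's
    # storage is built unless `build=False` is passed.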
def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
"""
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory
and ID
Example:
```python
from prefect import Client
client = Client()
client.get_cloud_url("flow-run", "424242-ca-94611-111-55")
# returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55"
```
"""
# Generate direct link to UI
if prefect.config.backend == "cloud":
tenant_slug = self.get_default_tenant_slug(as_user=as_user)
else:
tenant_slug = ""
base_url = (
re.sub("api-", "", prefect.config.cloud.api)
if re.search("api-", prefect.config.cloud.api)
else re.sub("api", "cloud", prefect.config.cloud.api)
)
full_url = prefect.config.cloud.api
if tenant_slug:
full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
elif prefect.config.backend == "server":
full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id])
return full_url
def get_default_tenant_slug(self, as_user: bool = True) -> str:
"""
Get the default tenant slug for the currently authenticated user
Args:
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the slug of the current default tenant for this user
"""
if as_user:
query = {
"query": {"user": {"default_membership": {"tenant": "slug"}}}
} # type: dict
else:
query = {"query": {"tenant": {"slug"}}}
res = self.graphql(query)
if as_user:
user = res.get("data").user[0]
slug = user.default_membership.tenant.slug
else:
slug = res.get("data").tenant[0].slug
return slug
def create_project(self, project_name: str, project_description: str = None) -> str:
"""
Create a new Project
Args:
- project_name (str): the project that should contain this flow
- project_description (str, optional): the project description
Returns:
- str: the ID of the newly-created project
Raises:
- ClientError: if the project creation failed
"""
project_mutation = {
"mutation($input: create_project_input!)": {
"create_project(input: $input)": {"id"}
}
}
res = self.graphql(
project_mutation,
variables=dict(
input=dict(
name=project_name,
description=project_description,
tenant_id=self._active_tenant_id,
)
),
) # type: Any
return res.data.create_project.id
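    # Added annotation (illustrative sketch): the project referenced by
    # `register` can be created once up front; both arguments are placeholders.
    #
    #     project_id = client.create_project("my-project", "demo flows")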
def create_flow_run(
self,
flow_id: str = None,
context: dict = None,
parameters: dict = None,
scheduled_start_time: datetime.datetime = None,
idempotency_key: str = None,
run_name: str = None,
version_group_id: str = None,
) -> str:
"""
        Create a new flow run for the given flow id. If `scheduled_start_time` is not provided, the flow
run will be scheduled to start immediately. If both `flow_id` and `version_group_id`
are provided, only the `flow_id` will be used.
Args:
- flow_id (str, optional): the id of the Flow you wish to schedule
- context (dict, optional): the run context
- parameters (dict, optional): a dictionary of parameter values to pass to the flow run
- scheduled_start_time (datetime, optional): the time to schedule the execution
for; if not provided, defaults to now
- idempotency_key (str, optional): an idempotency key; if provided, this run will
be cached for 24 hours. Any subsequent attempts to create a run with the same
idempotency key will return the ID of the originally created run (no new run
will be created after the first). An error will be raised if parameters or
context are provided and don't match the original. Each subsequent request
will reset the TTL for 24 hours.
- run_name (str, optional): The name assigned to this flow run
- version_group_id (str, optional): if provided, the unique unarchived flow within
this version group will be scheduled to run. This input can be used as a
stable API for running flows which are regularly updated.
Returns:
- str: the ID of the newly-created flow run
Raises:
- ClientError: if the GraphQL query is bad for any reason
"""
create_mutation = {
"mutation($input: create_flow_run_input!)": {
"create_flow_run(input: $input)": {"id": True}
}
}
if not flow_id and not version_group_id:
raise ValueError("One of flow_id or version_group_id must be provided")
if flow_id:
inputs = dict(flow_id=flow_id)
else:
inputs = dict(version_group_id=version_group_id) # type: ignore
if parameters is not None:
inputs.update(parameters=parameters) # type: ignore
if context is not None:
inputs.update(context=context) # type: ignore
if idempotency_key is not None:
inputs.update(idempotency_key=idempotency_key) # type: ignore
if scheduled_start_time is not None:
inputs.update(
scheduled_start_time=scheduled_start_time.isoformat()
) # type: ignore
if run_name is not None:
inputs.update(flow_run_name=run_name) # type: ignore
res = self.graphql(create_mutation, variables=dict(input=inputs))
return res.data.create_flow_run.id # type: ignore
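    # Added annotation (illustrative sketch): scheduling a run with an
    # idempotency key; the flow ID, parameters, and key are placeholders.
    #
    #     run_id = client.create_flow_run(
    #         flow_id="<flow-id>",
    #         parameters={"n": 10},
    #         idempotency_key="nightly-2020-06-01",
    #     )
    #
    # Per the docstring above, re-sending the same idempotency key within 24
    # hours returns the originally created run ID instead of creating a new run.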
def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult:
"""
Retrieves version and current state information for the given flow run.
Args:
- flow_run_id (str): the id of the flow run to get information for
Returns:
            - FlowRunInfoResult: an object representing information about the flow run
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"id": True,
"name": True,
"flow_id": True,
"parameters": True,
"context": True,
"version": True,
"scheduled_start_time": True,
"serialized_state": True,
# load all task runs except dynamic task runs
with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
"id": True,
"task": {"id": True, "slug": True},
"version": True,
"serialized_state": True,
},
}
}
}
result = self.graphql(query).data.flow_run_by_pk # type: ignore
if result is None:
raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))
# convert scheduled_start_time from string to datetime
result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)
# create "state" attribute from serialized_state
result.state = prefect.engine.state.State.deserialize(
result.pop("serialized_state")
)
# reformat task_runs
task_runs = []
for tr in result.task_runs:
tr.state = prefect.engine.state.State.deserialize(
tr.pop("serialized_state")
)
task_info = tr.pop("task")
tr.task_id = task_info["id"]
tr.task_slug = task_info["slug"]
task_runs.append(TaskRunInfoResult(**tr))
result.task_runs = task_runs
result.context = (
result.context.to_dict() if result.context is not None else None
)
result.parameters = (
result.parameters.to_dict() if result.parameters is not None else None
)
return FlowRunInfoResult(**result)
def update_flow_run_heartbeat(self, flow_run_id: str) -> None:
"""
Convenience method for heartbeating a flow run.
Does NOT raise an error if the update fails.
Args:
- flow_run_id (str): the flow run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def update_task_run_heartbeat(self, task_run_id: str) -> None:
"""
Convenience method for heartbeating a task run.
Does NOT raise an error if the update fails.
Args:
- task_run_id (str): the task run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a flow run.
Args:
- flow_run_id (str): the id for this flow run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"serialized_state": True,
}
}
}
flow_run = self.graphql(query).data.flow_run_by_pk
return prefect.engine.state.State.deserialize(flow_run.serialized_state)
def set_flow_run_state(
self,
flow_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a flow run in the database.
Args:
- flow_run_id (str): the id of the flow run to set state for
- state (State): the new state for this flow run
- version (int, optional): the current version of the flow run state. This is optional
but it can be supplied to enforce version-locking.
Returns:
- State: the state the current flow run should be considered in
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
mutation = {
"mutation($input: set_flow_run_states_input!)": {
"set_flow_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
flow_run_id=flow_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_flow_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
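    # Added annotation (illustrative sketch): callers typically check whether
    # the returned state is Queued and retry after its `start_time`. The names
    # below come from `prefect.engine.state`; the retry loop is an assumption
    # about the caller, not behavior of this method.
    #
    #     new_state = client.set_flow_run_state(flow_run_id, state=Running())
    #     if new_state.is_queued():
    #         ...  # wait until new_state.start_time, then try again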
def get_latest_cached_states(
self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime
) -> List["prefect.engine.state.State"]:
"""
Pulls all Cached states for the given task that were created after the provided date.
Args:
- task_id (str): the task id for this task run
- cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the
task id alone will be used
- created_after (datetime.datetime): the earliest date the state should have been
created at
Returns:
- List[State]: a list of Cached states created after the given date
"""
args = {
"where": {
"state": {"_eq": "Cached"},
"state_timestamp": {"_gte": created_after.isoformat()},
},
"order_by": {"state_timestamp": EnumValue("desc")},
"limit": 100,
} # type: Dict[str, Any]
# if a cache key was provided, match it against all tasks
if cache_key is not None:
args["where"].update({"cache_key": {"_eq": cache_key}})
# otherwise match against only this task, across all cache keys
else:
args["where"].update({"task_id": {"_eq": task_id}})
query = {"query": {with_args("task_run", args): "serialized_state"}}
result = self.graphql(query) # type: Any
deserializer = prefect.engine.state.State.deserialize
valid_states = [
deserializer(res.serialized_state) for res in result.data.task_run
]
return valid_states
def get_task_run_info(
self, flow_run_id: str, task_id: str, map_index: Optional[int] = None
) -> TaskRunInfoResult:
"""
Retrieves version and current state information for the given task run.
Args:
- flow_run_id (str): the id of the flow run that this task run lives in
- task_id (str): the task id for this task run
- map_index (int, optional): the mapping index for this task run; if
`None`, it is assumed this task is _not_ mapped
Returns:
            - NamedTuple: a tuple containing `id, task_id, task_slug, version, state`
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
mutation = {
"mutation": {
with_args(
"get_or_create_task_run",
{
"input": {
"flow_run_id": flow_run_id,
"task_id": task_id,
"map_index": -1 if map_index is None else map_index,
}
},
): {
"id": True,
}
}
}
result = self.graphql(mutation) # type: Any
if result is None:
raise ClientError("Failed to create task run.")
task_run_id = result.data.get_or_create_task_run.id
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"version": True,
"serialized_state": True,
"task": {"slug": True},
}
}
}
task_run = self.graphql(query).data.task_run_by_pk # type: ignore
if task_run is None:
raise ClientError('Task run ID not found: "{}"'.format(task_run_id))
state = prefect.engine.state.State.deserialize(task_run.serialized_state)
return TaskRunInfoResult(
id=task_run_id,
task_id=task_id,
task_slug=task_run.task.slug,
version=task_run.version,
state=state,
)
def set_task_run_name(self, task_run_id: str, name: str) -> bool:
"""
Set the name of a task run
Args:
- task_run_id (str): the id of a task run
- name (str): a name for this task run
Returns:
- bool: whether or not the task run name was updated
"""
mutation = {
"mutation($input: set_task_run_name_input!)": {
"set_task_run_name(input: $input)": {
"success": True,
}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name))
)
return result.data.set_task_run_name.success
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a task run.
Args:
- task_run_id (str): the id for this task run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"serialized_state": True,
}
}
}
task_run = self.graphql(query).data.task_run_by_pk
return prefect.engine.state.State.deserialize(task_run.serialized_state)
def set_task_run_state(
self,
task_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a task run.
Args:
- task_run_id (str): the id of the task run to set state for
- state (State): the new state for this task run
- version (int, optional): the current version of the task run state. This is optional
but it can be supplied to enforce version-locking.
- cache_for (timedelta, optional): how long to store the result of this task for,
using the serializer set in config; if not provided, no caching occurs
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Returns:
- State: the state the current task run should be considered in
"""
mutation = {
"mutation($input: set_task_run_states_input!)": {
"set_task_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
task_run_id=task_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_task_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def set_secret(self, name: str, value: Any) -> None:
"""
Set a secret with the given name and value.
Args:
- name (str): the name of the secret; used for retrieving the secret
during task runs
- value (Any): the value of the secret
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the secret-setting was unsuccessful
"""
mutation = {
"mutation($input: set_secret_input!)": {
"set_secret(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(name=name, value=value))
) # type: Any
if not result.data.set_secret.success:
raise ValueError("Setting secret failed.")
def get_task_tag_limit(self, tag: str) -> Optional[int]:
"""
Retrieve the current task tag concurrency limit for a given tag.
Args:
- tag (str): the tag to update
Raises:
- ClientError: if the GraphQL query fails
"""
query = {
"query": {
with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): {
"limit": True
}
}
}
result = self.graphql(query) # type: Any
if result.data.task_tag_limit:
return result.data.task_tag_limit[0].limit
else:
return None
def update_task_tag_limit(self, tag: str, limit: int) -> None:
"""
Update the task tag concurrency limit for a given tag; requires tenant admin permissions.
Args:
- tag (str): the tag to update
- limit (int): the concurrency limit to enforce on the tag; should be a value >= 0
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided
"""
if limit < 0:
raise ValueError("Concurrency limits must be >= 0")
mutation = {
"mutation($input: update_task_tag_limit_input!)": {
"update_task_tag_limit(input: $input)": {"id"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(tag=tag, limit=limit))
) # type: Any
if not result.data.update_task_tag_limit.id:
raise ValueError("Updating the task tag concurrency limit failed.")
def delete_task_tag_limit(self, limit_id: str) -> None:
"""
Deletes a given task tag concurrency limit; requires tenant admin permissions.
Args:
- limit_id (str): the ID of the tag to delete
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided
"""
mutation = {
"mutation($input: delete_task_tag_limit_input!)": {
"delete_task_tag_limit(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(limit_id=limit_id))
) # type: Any
if not result.data.delete_task_tag_limit.success:
raise ValueError("Deleting the task tag concurrency limit failed.")
def write_run_logs(self, logs: List[Dict]) -> None:
"""
Uploads a collection of logs to Cloud.
Args:
- logs (List[Dict]): a list of log entries to add
Raises:
- ValueError: if uploading the logs fail
"""
mutation = {
"mutation($input: write_run_logs_input!)": {
"write_run_logs(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(logs=logs))
) # type: Any
if not result.data.write_run_logs.success:
raise ValueError("Writing logs failed.")
def register_agent(
self,
agent_type: str,
name: str = None,
labels: List[str] = None,
agent_config_id: str = None,
) -> str:
"""
Register an agent with a backend API
Args:
- agent_type (str): The type of agent being registered
- name: (str, optional): The name of the agent being registered
- labels (List[str], optional): A list of any present labels on the agent
being registered
- agent_config_id (str, optional): The ID of an agent configuration to register with
Returns:
- The agent ID as a string
"""
mutation = {
"mutation($input: register_agent_input!)": {
"register_agent(input: $input)": {"id"}
}
}
result = self.graphql(
mutation,
variables=dict(
input=dict(
type=agent_type,
name=name,
labels=labels or [],
tenant_id=self._active_tenant_id,
agent_config_id=agent_config_id,
)
),
)
if not result.data.register_agent.id:
raise ValueError("Error registering agent")
return result.data.register_agent.id
def get_agent_config(self, agent_config_id: str) -> dict:
"""
Get agent config settings
Args:
- agent_config_id (str): The ID of an agent configuration to retrieve
Returns:
- dict: the agent configuration's `settings`
"""
query = {
"query": {
with_args(
"agent_config", {"where": {"id": {"_eq": agent_config_id}}}
): {"settings": True}
}
}
result = self.graphql(query) # type: Any
return result.data.agent_config[0].settings
| 35.985834 | 101 | 0.55071 | [
"Apache-2.0"
] | zmac12/prefect | src/prefect/client/client.py | 55,892 | Python |
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
for _ in range(int(input())):
try:
re.compile(input())
print (True)
except re.error:
print (False)
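# Added annotation (illustrative example): for the input below the script
# prints True for the first pattern and False for the second, since
# re.compile(".*+") raises re.error ("multiple repeat") on the Python
# versions this kind of challenge assumes (before 3.11).
#
#   2
#   .*\+
#   .*+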
| 24.666667 | 69 | 0.581081 | [
"MIT"
] | 3agwa/Competitive-Programming-1 | Hackerrank_python/9.erros and exceptions/66.Incorrect Regex.py | 222 | Python |
cor = {'traço': '\033[35m', 'ex': '\033[4;31m', 'título': '\033[1;34m', 'str': '\033[1;33m', 'reset': '\033[m'}
print('{}-=-{}'.format(cor['traço'], cor['reset'])*18, '{} Exercício 026 {}'.format(cor['ex'], cor['reset']),
'{}-=-{}'.format(cor['traço'], cor['reset'])*18)
print('{}Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra "A", em que posição '
'ela aparece a \nprimeira vez e em que posição ela aparece a última vez.{}'.format(cor['título'], cor['reset']))
print('{}-=-{}'.format(cor['traço'], cor['reset'])*42)
frase = str(input('Digite uma frase: ')).strip().upper()
print('A letra "A" aparece {}{}{} vezes na frase.'.format(cor['str'], frase.count('A'), cor['reset']))
print('A primeira vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.find('A') + 1, cor['reset']))
print('A última vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.rfind('A') + 1, cor['reset']))
| 76.615385 | 118 | 0.593373 | [
"MIT"
] | WesleyOlliver/CursoPython | ex026.py | 1,014 | Python |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .agent import agent
from .ci import ci
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .docs import docs
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
ALL_COMMANDS = (agent, ci, clean, config, create, dep, docs, env, meta, release, run, test, validate)
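# Added annotation (assumption, not from this file): ALL_COMMANDS is presumably
# consumed by the package's CLI entry point, which would attach each subcommand
# to the root click group, roughly:
#
#     for command in ALL_COMMANDS:
#         ddev.add_command(command)
#
# where `ddev` is the top-level group defined elsewhere in datadog_checks_dev.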
| 28.052632 | 101 | 0.763602 | [
"BSD-3-Clause"
] | 0gajun/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py | 533 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/framework/formats/annotation/rasterization.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/framework/formats/annotation/rasterization.proto',
package='mediapipe',
syntax='proto2',
serialized_options=_b('\n-com.google.mediapipe.formats.annotation.protoB\022RasterizationProto'),
serialized_pb=_b('\n:mediapipe/framework/formats/annotation/rasterization.proto\x12\tmediapipe\"|\n\rRasterization\x12\x33\n\x08interval\x18\x01 \x03(\x0b\x32!.mediapipe.Rasterization.Interval\x1a\x36\n\x08Interval\x12\t\n\x01y\x18\x01 \x02(\x05\x12\x0e\n\x06left_x\x18\x02 \x02(\x05\x12\x0f\n\x07right_x\x18\x03 \x02(\x05\x42\x43\n-com.google.mediapipe.formats.annotation.protoB\x12RasterizationProto')
)
_RASTERIZATION_INTERVAL = _descriptor.Descriptor(
name='Interval',
full_name='mediapipe.Rasterization.Interval',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='y', full_name='mediapipe.Rasterization.Interval.y', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left_x', full_name='mediapipe.Rasterization.Interval.left_x', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right_x', full_name='mediapipe.Rasterization.Interval.right_x', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=197,
)
_RASTERIZATION = _descriptor.Descriptor(
name='Rasterization',
full_name='mediapipe.Rasterization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interval', full_name='mediapipe.Rasterization.interval', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_RASTERIZATION_INTERVAL, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=197,
)
_RASTERIZATION_INTERVAL.containing_type = _RASTERIZATION
_RASTERIZATION.fields_by_name['interval'].message_type = _RASTERIZATION_INTERVAL
DESCRIPTOR.message_types_by_name['Rasterization'] = _RASTERIZATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Rasterization = _reflection.GeneratedProtocolMessageType('Rasterization', (_message.Message,), dict(
Interval = _reflection.GeneratedProtocolMessageType('Interval', (_message.Message,), dict(
DESCRIPTOR = _RASTERIZATION_INTERVAL,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization.Interval)
))
,
DESCRIPTOR = _RASTERIZATION,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization)
))
_sym_db.RegisterMessage(Rasterization)
_sym_db.RegisterMessage(Rasterization.Interval)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
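# Added annotation (illustrative sketch, not part of the generated file): the
# generated classes follow the standard protobuf Python API, so a mask row can
# be built roughly like this (values are placeholders):
#
#     from mediapipe.framework.formats.annotation import rasterization_pb2
#     msg = rasterization_pb2.Rasterization()
#     interval = msg.interval.add()
#     interval.y, interval.left_x, interval.right_x = 10, 2, 7
#     data = msg.SerializeToString()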
| 35.547619 | 405 | 0.763563 | [
"MIT"
] | mesquita97/HandTalks | GestureVolume/venv/lib/python3.8/site-packages/mediapipe/framework/formats/annotation/rasterization_pb2.py | 4,479 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <[email protected]>
#
import threading
import time
import json
class Watertank(object):
"""
Watertank Model
Output example:
{"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]}
Changes that have been applied lately to this model (Dec 2015)
- There is no longer a separate temperatures mode. Now there is a single model with temperatures.
- There are no longer temperature working ranges, temperature warnings, or temperature overloads. The
model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment
client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect.
- As a result of the previous change, temperature is no longer reported as in the [0,1] range according to the range.
Now it is reported in raw form.
"""
def __init__(self, tank_capacity, inputs, outputs, water_level):
self.initialize(tank_capacity, inputs, outputs, water_level)
def initialize(self, tank_capacity, inputs, outputs, water_level):
"""
Initializes the simulation with the specified data.
@param tank_capacity Capacity of the water tank, in liters.
        @param inputs Array containing the flow volume of the inputs (such as water pumps), in liters per second.
        The flow can be modified dynamically, but no inputs can be added.
        @param outputs Array containing the flow volume of the outputs (such as a water hose or evaporation), in liters per second.
        The flow can be modified dynamically, but no outputs can be added.
@param water_level The starting water level. Value from 0 to 1.
"""
self.tank_capacity = tank_capacity
self.inputs = inputs
self.outputs = outputs
self.current_volume = water_level * tank_capacity
self.firstPumpTemperature = 20
self.secondPumpTemperature = 20
self.firstPumpWorkRange = [20, 200]
self.secondPumpWorkRange = [20, 200]
self.pumpTemperatureVariationPerSeconds = 6 # Enough for 30 seconds?
self.simlock = threading.RLock()
self._thread = None
self._autoupdating = False
self._autoupdating_interval = 1000
def update(self, delta):
"""
Updates the simulation. Can be done automatically if the autoupdater is used.
@param delta Delta in seconds.
@see autoupdater_start
"""
total_output = 0
for out in self.outputs:
total_output += out * delta
# Calculates how much the pumps are putting in.
total_input = 0
# Handle inputs
pump1, pump2 = self.inputs
# If the first pump is turned on we increase the temperature and the total water input
if pump1 > 0:
# We multiply by 1.1 so that its temperature raises faster.
self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1
total_input += pump1 * delta
else:
self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.firstPumpTemperature = max(20, self.firstPumpTemperature)
total_input -= pump1 * delta
# If the second pump is turned on we increase the temperature and the total water input
if pump2 > 0:
self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds
total_input += pump2 * delta
else:
self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.secondPumpTemperature = max(20, self.secondPumpTemperature)
total_input -= pump2 * delta
increment = total_input - total_output
with self.simlock:
self.current_volume += increment
# Ensure the volume stays realistic
if self.current_volume >= self.tank_capacity:
self.current_volume = self.tank_capacity
elif self.current_volume < 0:
self.current_volume = 0.0
def t_updater(self):
"""
This internal method is used by the autoupdating thread to update
the simulation every few seconds (specified as the autoupdater interval).
"""
while self._autoupdating:
time.sleep(self._autoupdating_interval)
self.update(self._autoupdating_interval)
def autoupdater_start(self, interval):
"""
Starts the autoupdating thread. That is, a thread that will call update
every so often. If started, it should eventually be stopped. Otherwise,
it will run forever in the background.
@param interval Interval between updates, in seconds.
@see autoupdater_stop
"""
self._autoupdating = True
self._autoupdating_interval = interval
self._thread = threading.Thread(None, self.t_updater)
self._thread.start()
def autoupdater_stop(self):
"""
Stops the autoupdating thread. This method is non-blocking. It will signal
the thread to stop, but may take a while before it *really* does stop.
There is a blocking version of this method.
@see autoupdater_join
"""
self._autoupdating = False
def autoupdater_join(self):
"""
Stops the autoupdating thread, and joins that thread until it really does stop.
May block forever if for some reason the thread won't stop, but that
should not happen.
"""
self._autoupdating = False
self._thread.join(0)
def set_input(self, input_number, input_flow):
"""
Sets the value for an input in the simulation.
@param input_number Number identifying the input. The input should exist.
@param input_flow New flow of the input, in liters per second.
"""
with self.simlock:
self.inputs[input_number] = input_flow
def set_output(self, output_number, output_flow):
"""
Sets the value for an output in the simulation.
@param output_number Number identifying the output. The output should exist.
@param output_flow New flow of the output, in liters per second.
"""
with self.simlock:
self.outputs[output_number] = output_flow
def set_inputs(self, inputs):
"""
Redefines the whole array of inputs.
@param inputs Array containing the flow of every input.
"""
with self.simlock:
self.inputs = inputs
def set_outputs(self, outputs):
"""
Redefines the whole array of outputs.
@param outputs Array containing the flow of every output.
"""
with self.simlock:
self.outputs = outputs
def get_temperatures(self):
"""
Get temperatures.
:return:
"""
return [self.firstPumpTemperature, self.secondPumpTemperature]
def get_water_volume(self):
"""
Gets the current water volume in liters. It will vary dynamically according to the
simulation's state.
"""
with self.simlock:
return self.current_volume
def get_water_level(self):
"""
Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically
according to the simulation's state.
"""
with self.simlock:
return 1.0 * self.current_volume / self.tank_capacity
def get_json_state(self, input_capacities, output_capacities):
"""
Gets a json-encoded description of the simulation's state.
As of now, it takes output and input capacities as arguments because the JSON state
is described through relative values. (For instance, first output at 0.3 capacity).
@param input_capacities An array containing the maximum capacities of the input.
@param output_capacities An array containing the maximum capacities of the output.
"""
if len(self.inputs) != len(input_capacities):
return "{}"
inputs = []
for inp, cap in zip(self.inputs, input_capacities):
inputs.append(1.0 * inp / cap)
outputs = []
for inp, cap in zip(self.outputs, output_capacities):
outputs.append(1.0 * inp / cap)
state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs}
# Report the RAW temperature
temperatures = [0, 0]
temperatures[0] = self.firstPumpTemperature
temperatures[1] = self.secondPumpTemperature
state["temperatures"] = temperatures
return json.dumps(state)
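    # Added annotation (illustrative sketch, mirroring the unit tests below);
    # the capacities and starting level are placeholders.
    #
    #     w = Watertank(1000, [100, 100], [100], 0.5)
    #     w.update(1)                                   # advance by one second
    #     print(w.get_json_state([100, 100], [100]))
    #     # -> {"water": 0.6, "inputs": [1.0, 1.0], "outputs": [1.0], "temperatures": [...]}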
if __name__ == '__main__':
from mock import patch
import unittest
def fake_sleep(t):
# TODO
a = [1 for i in range(100000)] # very fast kludge to add minor delay
b = len(a)
pass
class TestWatertankSimulation(unittest.TestCase):
def test_nothing(self):
pass
def _get_state(self, w):
js = w.get_json_state([20, 20], [100])
d = json.loads(js)
return d
@patch("time.sleep", fake_sleep)
def test_waterlevel_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
initial_level = self._get_state(w)["water"]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
other_level = self._get_state(w)["water"]
# Check that the water level did increase
self.assertGreater(other_level, initial_level)
w.set_outputs([400])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
dec_level = self._get_state(w)["water"]
# Check that the water level did decrease
self.assertGreater(other_level, dec_level)
@patch("time.sleep", fake_sleep)
def test_temperature_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
t0 = self._get_state(w)["temperatures"][0]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t1 = self._get_state(w)["temperatures"][0]
# Check that the water level did increase
self.assertGreater(t1, t0)
w.set_inputs([0, 0])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t2 = self._get_state(w)["temperatures"][0]
# Check that the water level did decrease
self.assertGreater(t1, t2)
# @patch("time.sleep", fake_sleep)
# def test_first(self):
# w = Watertank(1000, [100, 100], [100], 0.5)
# w.autoupdater_start(1)
#
# i = 0
# while (i < 15):
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# time.sleep(0.5)
# i += 1
#
# print "...."
# i = 0
# w.set_outputs([100])
# w.set_inputs([10, 10])
# while (i < 30):
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# time.sleep(0.5)
# i += 1
#
# w.autoupdater_join()
#
# @patch("time.sleep", fake_sleep)
# def test_second(self):
# w = Watertank(1000, [100, 100], [100], 0.5)
#
# i = 0
# while i < 15:
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# w.update(1)
# i += 1
#
# print "...."
# i = 0
# w.set_outputs([100])
# w.set_inputs([10, 10])
# while i < 15:
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# w.update(1)
# i += 1
    unittest.main()
| 34.988981 | 123 | 0.596016 | [
"BSD-2-Clause"
] | LR-FGMM/weblabdeusto | server/src/experiments/ud_xilinx/watertank_simulation.py | 12,701 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/npc/base/shared_dantari_base_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","dantari_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| 26.764706 | 74 | 0.731868 | [
"MIT"
] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/creature/npc/base/shared_dantari_base_male.py | 455 | Python |
import datetime
import itertools
import logging
import os
import platform
import time
from collections import defaultdict
from operator import itemgetter
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import ujson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import get_emoji_file_name
from zerver.lib.exceptions import (
ErrorCode,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MentionData, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import (
MessageDict,
access_message,
render_markdown,
truncate_body,
truncate_topic,
update_first_visible_message_id,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_for_send_message,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from zerver.models import (
MAX_MESSAGE_LENGTH,
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
query_for_ids,
realm_filters_for_realm,
stream_name_in_use,
validate_attachment_request,
)
from zerver.tornado.event_queue import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {
'id': user_id,
'flags': ['read']
}
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
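# Added annotation: when settings.EVENT_LOG_DIR is configured, each call above
# appends one JSON-encoded event (with a "timestamp" field filled in if absent)
# to a per-host, per-day file named like "events.<hostname>.<YYYY-MM-DD>".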
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of
# a stream, such as its name/description.
if stream.is_public():
# For a public stream, this is everyone in the realm
# except unsubscribed guest users
return public_stream_user_ids(stream)
else:
# for a private stream, it's subscribers plus realm admins.
return private_stream_user_ids(
stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
UserProfile.ROLE_REALM_OWNER: 0,
UserProfile.ROLE_MEMBER: 0,
UserProfile.ROLE_GUEST: 0}
for value_dict in list(UserProfile.objects.filter(
realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))):
human_counts[value_dict['role']] = value_dict['role__count']
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
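# Added annotation: the dictionary returned above has, illustratively, the shape
#
#     {
#         RealmAuditLog.ROLE_COUNT_HUMANS: {
#             UserProfile.ROLE_REALM_OWNER: 1,
#             UserProfile.ROLE_REALM_ADMINISTRATOR: 2,
#             UserProfile.ROLE_MEMBER: 40,
#             UserProfile.ROLE_GUEST: 3,
#         },
#         RealmAuditLog.ROLE_COUNT_BOTS: 5,
#     }
#
# where the counts are placeholders and the inner keys are the integer
# UserProfile.ROLE_* constants.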
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def notify_new_user(user_profile: UserProfile) -> None:
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email)
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
with override_language(user_profile.realm.default_language):
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"@_**{user_profile.full_name}|{user_profile.id}**",
user_count=user_count
)
internal_send_stream_message(
user_profile.realm,
sender,
signup_notifications_stream,
_("signups"),
message
)
# We also send a notification to the Zulip administrative realm
admin_realm = sender.realm
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
with override_language(admin_realm.default_language):
# We intentionally use the same strings as above to avoid translation burden.
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>",
user_count=user_count
)
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
user_profile.realm.display_subdomain,
message
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in
user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
date_sent__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list(
'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Mapping[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
realm_creation: bool=False) -> None:
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
prereg_user.status = confirmation_settings.STATUS_ACTIVE
prereg_user.save(update_fields=['status'])
streams = prereg_user.streams.all()
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>")
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(
email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\
.update(status=confirmation_settings.STATUS_REVOKED)
if prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\
.update(status=confirmation_settings.STATUS_REVOKED)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if user_profile.realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.delivery_email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(user_profile.realm, user_profile, user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar and
# user_avatar_url_field_optional = False :(
client_gravatar=False,
user_avatar_url_field_optional=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={})
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
               services=get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot['owner_id'] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
bot_type: Optional[int]=None, role: Optional[int]=None,
bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream]=None,
default_events_register_stream: Optional[Stream]=None,
default_all_public_streams: Optional[bool]=None,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
source_profile: Optional[UserProfile]=None,
realm_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> UserProfile:
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name,
role=role, bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation)
return user_profile
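# Illustrative usage of do_create_user above (a hedged sketch, not taken from
# the production code paths; `realm` and `prereg_user` are assumed to have been
# fetched by the caller, e.g. in the registration flow):
#
#     user_profile = do_create_user(
#         email='[email protected]',
#         password=None,
#         realm=realm,
#         full_name='New User',
#         prereg_user=prereg_user,
#     )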
def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any,
acting_user: Optional[UserProfile] = None) -> None:
"""Takes in a realm object, the name of an attribute to update, the
    value to update it to, and the user who initiated the update.
"""
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
f'Cannot update {name}: {value} is not an instance of {property_type}')
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
}))
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between the other values don't require updating
            # that field, and we can save work by returning here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile, realm)
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
UserProfile.objects.bulk_update(user_profiles, ['email'])
for user_profile in user_profiles:
flush_user_profile(sender=UserProfile, instance=user_profile)
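# Illustrative call to do_set_realm_property above (a hedged sketch; the
# property name must be a key of Realm.property_types and the value must match
# its declared type):
#
#     do_set_realm_property(realm, 'invite_required', True, acting_user=admin)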
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool],
acting_user: Optional[UserProfile]=None) -> None:
old_value = realm.authentication_methods_dict()
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(),
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value}
}))
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
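# Illustrative call to do_set_realm_authentication_methods above (a hedged
# sketch; the available backend names depend on which authentication backends
# this server has enabled):
#
#     do_set_realm_authentication_methods(realm, {'Email': True, 'GitHub': False})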
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.allow_community_topic_editing = allow_community_topic_editing
realm.save(update_fields=['allow_message_editing',
'allow_community_topic_editing',
'message_content_edit_limit_seconds',
],
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
allow_community_topic_editing=allow_community_topic_editing),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
message_content_delete_limit_seconds: int) -> None:
realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
realm.save(update_fields=['message_content_delete_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream],
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
event = dict(type="realm", op="deactivated",
realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
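# do_scrub_realm below removes personally identifiable data from a realm: it
# deletes each user's messages and avatar, replaces their name and email
# addresses with scrubbed placeholders, removes custom profile fields and
# attachments, and records a REALM_SCRUBBED audit log entry.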
def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED)
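# do_deactivate_user below marks a user as inactive, ends their sessions,
# clears any scheduled emails for them, and notifies clients; unless
# _cascade=False, it also deactivates any active bots owned by the user.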
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
clear_scheduled_emails([user_profile.id])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(type="realm_user", op="remove",
person=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None:
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or we hit the rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
    # If we don't have a unique name at this point, the save will fail later in
    # this code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user,
modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time)
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id,
new_email=user_profile.email)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id,
delivery_email=new_email)
event = dict(type='realm_user', op='update', person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url,
})
language = user_profile.default_language
send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language, context=context,
realm=user_profile.realm)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
email_to_fullname: Callable[[str], str]) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
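# send_welcome_bot_response below replies to the user's first reply to Welcome
# Bot; once Welcome Bot has already sent two messages in this 1:1 conversation,
# it stays quiet.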
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient_id = message['message'].sender.recipient_id
if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
content = (
_("Congratulations on your first reply!") +
" "
":tada:"
"\n"
"\n" +
_("Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
)
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender, content)
def render_incoming_message(message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False) -> str:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
            realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
push_notify_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
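# get_recipient_info below computes, in a single pass over the recipient data,
# the various sets of user ids that the message-send code path needs: who is an
# active recipient, who should get push/email/wildcard-mention notifications,
# who is eligible for UserMessage rows, who is long-term idle, and which
# default/service bots are involved.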
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int]=set(),
possible_wildcard_mention: bool=True) -> RecipientInfoResult:
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert(len(message_to_user_ids) in [1, 2])
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert(stream_topic is not None)
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = stream_topic.get_active_subscriptions().annotate(
user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
user_profile_wildcard_mentions_notify=F(
'user_profile__wildcard_mentions_notify'),
).values(
'user_profile_id',
'push_notifications',
'email_notifications',
'wildcard_mentions_notify',
'user_profile_email_notifications',
'user_profile_push_notifications',
'user_profile_wildcard_mentions_notify',
'is_muted',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the model where the UserProfile stream notification
            # settings are defaults, which can be overridden by the stream-level
            # settings (when those values are not null).
if row['is_muted']:
return False
if row['user_profile_id'] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row['user_profile_' + setting]
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send('push_notifications', row)
}
stream_email_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send('email_notifications', row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine which users would receive a wildcard mention
# notification for this message should the message indeed
# contain a wildcard mention.
#
# We don't have separate values for push/email
# notifications here; at this stage, we're just
# determining whether this wildcard mention should be
# treated as a mention (and follow the user's mention
# notification preferences) or a normal message.
wildcard_mention_user_ids = {
row['user_profile_id']
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
# Important note: Because we haven't rendered markdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
    # escaped). `get_ids_for` will filter these extra user rows out of
    # the data structures that are not related to bots.
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
        # query_for_ids is a fast, highly optimized helper for large queries, and
        # we need this codepath to be fast (it's part of sending messages).
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id',
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications'],
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle'],
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via markdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = {
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
)
return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int], active_user_ids: Set[int],
recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s',
user_profile_id, bot_type,
)
return
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = 'mention'
# PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
return
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
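# Each entry in `messages` below is expected to be a dict of the shape
# assembled by check_schedule_message further down in this file: it carries the
# prepared Message object under 'message' plus 'stream', 'realm', 'deliver_at'
# and 'delivery_type' keys.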
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
scheduled_messages: List[ScheduledMessage] = []
for message in messages:
scheduled_message = ScheduledMessage()
scheduled_message.sender = message['message'].sender
scheduled_message.recipient = message['message'].recipient
topic_name = message['message'].topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = message['message'].content
scheduled_message.sending_client = message['message'].sending_client
scheduled_message.stream = message['stream']
scheduled_message.realm = message['realm']
scheduled_message.scheduled_timestamp = message['deliver_at']
if message['delivery_type'] == 'send_later':
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif message['delivery_type'] == 'remind':
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: bool=False,
mark_as_read: Sequence[int]=[]) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids: List[int] = []
new_messages: List[MutableMapping[str, Any]] = []
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed: Set[str] = set()
    # For consistency, changes to the default values used in these .get() calls
    # should also be applied to the default args in do_send_message.
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['stream_email_user_ids'] = info['stream_email_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = markdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if message['message'].mentions_wildcard:
message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
else:
message['wildcard_mention_user_ids'] = []
'''
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
'''
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
# Claim attachments in message
for message in messages:
if do_claim_attachments(message['message'],
message['message'].potential_attachment_path_ids):
message['message'].has_attachment = True
message['message'].save(update_fields=['has_attachment'])
ums: List[UserMessageLite] = []
for message in messages:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids=message['stream_push_user_ids'],
                stream_email_user_ids=message['stream_email_user_ids'],
mentioned_user_ids=mentioned_user_ids,
mark_as_read=mark_as_read,
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
for message in messages:
do_widget_post_save_actions(message)
for message in messages:
realm_id: Optional[int] = None
if message['message'].is_stream_message():
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related().get(id=stream_id)
assert message['stream'] is not None # assert needed because stubs for django are missing
realm_id = message['stream'].realm_id
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(message['message'], realm_id)
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
'''
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
'''
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
stream_email_notify=(user_id in message['stream_email_user_ids']),
wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['stream'].first_message_id is None:
message['stream'].first_message_id = message['message'].id
message['stream'].save(update_fields=["first_message_id"])
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(message['realm'], event, users)
if links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
},
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
'''
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
'''
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read: Sequence[int] = []) -> List[UserMessageLite]:
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=0,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the markdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if (um.user_profile_id == message.sender.id and
message.sent_by_human()) or \
um.user_profile_id in mark_as_read:
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
um.flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
um.user_profile_id not in stream_push_user_ids and
um.user_profile_id not in stream_email_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
'''
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
'''
if not ums:
return
vals = [
(um.user_profile_id, um.message_id, um.flags)
for um in ums
]
query = SQL('''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
''')
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
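# Illustrative usage of bulk_insert_ums above (a hedged sketch; `recipient_ids`
# and `message` are hypothetical values supplied by the caller):
#
#     ums = [UserMessageLite(user_profile_id=user_id, message_id=message.id, flags=0)
#            for user_id in recipient_ids]
#     bulk_insert_ums(ums)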
def do_add_submessage(realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: str) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event: Dict[str, Any] = {
'type': 'reaction',
'op': op,
'user_id': user_profile.id,
# TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing the user via user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type,
}
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
#
# However, to ensure that reactions do live-update for any user
# who has actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications.
ums = UserMessage.objects.filter(message=message.id)
send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: str, emoji_code: str, reaction_type: str) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
try:
reaction.save()
except django.db.utils.IntegrityError: # nocoverage
# This can happen when a race results in the check in views
# code not catching an attempt to double-add a reaction, or
# perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists."))
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: str, reaction_type: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm,
sender: UserProfile,
recipient_user_profiles: List[UserProfile],
operator: str) -> None:
sender_dict = {'user_id': sender.id, 'email': sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
type='typing',
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [
user.id
for user in recipient_user_profiles
if user.is_active
]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile,
user_ids: List[int],
operator: str) -> None:
realm = sender.realm
if len(user_ids) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
''' The next chunk of code will go away when we upgrade old mobile
users away from versions of mobile that send emails. For the
small number of very outdated mobile clients, we do double work
here in terms of fetching users, but this structure reduces lots
of other unnecessary duplicated code and will make it convenient
to mostly delete code when we desupport old versions of the
app.'''
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
            # We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(
user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def ensure_stream(realm: Realm,
stream_name: str,
invite_only: bool=False,
stream_description: str="",
acting_user: Optional[UserProfile]=None) -> Stream:
return create_stream_if_needed(realm, stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user)[0]
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {}
for user_profile in recipient_profiles:
recipient_profiles_map[user_profile.id] = user_profile
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
del recipient_profiles_map[sender.id]
assert len(recipient_profiles_map) != 0
if len(recipient_profiles_map) == 1:
user_profile = list(recipient_profiles_map.values())[0]
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
user_ids: Set[int] = {user_id for user_id in recipient_profiles_map}
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
sender: UserProfile,
allow_deactivated: bool=False) -> Sequence[UserProfile]:
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy and
not allow_deactivated) or user_profile.realm.deactivated:
raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email))
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(user_profiles, sender,
allow_deactivated=allow_deactivated)
return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
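# Behavior sketch for extract_stream_indicator below (illustrative, not a
# doctest):
#     extract_stream_indicator('123')         -> 123        (a stream id)
#     extract_stream_indicator('"Denmark"')   -> 'Denmark'  (JSON-encoded name)
#     extract_stream_indicator('Denmark')     -> 'Denmark'  (raw stream name)
#     extract_stream_indicator('["Denmark"]') -> 'Denmark'  (legacy one-element list)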
def extract_stream_indicator(s: str) -> Union[str, int]:
# Users can pass stream name as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
try:
data = ujson.loads(s)
except (ValueError, TypeError):
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
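# Behavior sketch for extract_private_recipients below (illustrative, not a
# doctest; ordering of the returned list is not guaranteed, since duplicates
# are dropped via a set):
#     extract_private_recipients('[email protected], [email protected]') -> ['[email protected]', '[email protected]']
#     extract_private_recipients('[1, 2, 2]')                    -> [1, 2]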
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = ujson.loads(s)
except (ValueError, TypeError):
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
topic: str, body: str, realm: Optional[Realm]=None) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: str) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
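# Illustrative usage of the two helpers above (a hedged sketch; `sender`,
# `client` and `hamlet` are assumed to be objects fetched elsewhere):
#
#     message_id = check_send_stream_message(sender, client, 'Denmark', 'ship', 'hello')
#     pm_id = check_send_private_message(sender, client, hamlet, 'hi there')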
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str, realm: Optional[Realm]=None,
forged: bool=False, forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id,
widget_content)
return do_send_messages([message])[0]
def check_schedule_message(sender: UserProfile, client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str], message_content: str,
delivery_type: str, deliver_at: datetime.datetime,
realm: Optional[Realm]=None,
forwarder_user_profile: Optional[UserProfile]=None,
) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm=realm,
forwarder_user_profile=forwarder_user_profile)
message['deliver_at'] = deliver_at
message['delivery_type'] = delivery_type
recipient = message['message'].recipient
if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and
recipient.type_id != sender.id)):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([message])[0]
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
))
for i in group_name:
if ord(i) == 0:
raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
realm: Realm,
content: str) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str]=None,
stream_id: Optional[int]=None) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _("Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID.").format(**arg_dict)
else:
assert(stream_name is not None)
content = _("Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it.").format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _("Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers.").format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
sender: UserProfile) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
sender: UserProfile) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
user_profiles: Sequence[UserProfile]) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a message dict ready for sending with do_send_messages on success;
# raises JsonableError on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> Dict[str, Any]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
recipient = stream.recipient
# This will raise JsonableError if there are problems.
if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT:
access_stream_for_send_message(
sender=sender,
stream=stream,
forwarder_user_profile=forwarder_user_profile)
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
check_private_message_policy(realm, sender, user_profiles)
# API Super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles,
forwarded_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
if widget_content is not None:
try:
widget_content = ujson.loads(widget_content)
except Exception:
raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
try:
check_widget_content(widget_content)
except ValidationError as error:
raise JsonableError(_('Widgets: {error_msg}').format(
error_msg=error.message,
))
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm,
'widget_content': widget_content}
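# Editor's note on the shape of check_message()'s return value, summarizing
# the code above: on success it is
#     {'message': Message, 'stream': Optional[Stream], 'local_id': Optional[str],
#      'sender_queue_id': Optional[str], 'realm': Realm,
#      'widget_content': parsed widget JSON or None}
# and, only for deduplicated zephyr_mirror traffic, the short form
# {'message': <id of the already-sent message>} is returned instead.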
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str) -> Optional[Dict[str, Any]]:
"""
    Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate overly long content rather than rejecting it.
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
return None
def internal_prep_stream_message(
realm: Realm, sender: UserProfile,
stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[int]:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
realm: Realm,
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool=False) -> Optional[int]:
message = internal_prep_stream_message(
realm, sender, stream,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message], email_gateway=email_gateway)
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm, sender, stream_name,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
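# Editor's sketch (hypothetical usage, never called): how server-side code
# speaking as a system bot might use the internal_send_* helpers above.
# The topic and content strings are placeholders.
def _example_internal_send_usage(realm: Realm, stream: Stream,
                                 user: UserProfile) -> None:  # nocoverage
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    # Post to a stream the caller already has a Stream object for.
    internal_send_stream_message(realm, sender, stream, "status", "Nightly import finished.")
    # Or notify a single user via private message.
    internal_send_private_message(realm, sender, user, "Your export is ready.")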
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
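# Editor's illustration of pick_color()'s fallback: once every palette entry
# is used by an active subscription, the next pick is
# STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)],
# so colors simply repeat in palette order (e.g. with a hypothetical 24-color
# palette, the 25th active subscription gets the first palette color again).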
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id))
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Guest users can access subscribed public stream's subscribers
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
        # We could put an AssertionError here, since we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_recipient.populate_with(stream_id=stream_dict["id"],
recipient_id=stream_dict["recipient_id"])
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: sub_dict[stream_dict["id"]],
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
'''
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
'''
query = SQL('''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
''')
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
'''
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
'''
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
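# Editor's note on the return shape of bulk_get_subscriber_user_ids(): a dict
# mapping every input stream id to a list of active subscriber user ids, with
# made-up ids something like {42: [7, 9, 15], 43: [], 44: [7]}. Streams the
# requesting user may not inspect keep their empty list rather than raising,
# because the JsonableError is swallowed in the loop above.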
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True,
)
return subscriptions
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
recent_traffic: Dict[int, int],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
sub_dicts = []
for (subscription, stream) in sub_pairs:
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
if field_name == "active":
# Skip the "active" field, it's implied by context
continue
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict['in_home_view'] = not subscription.is_muted
sub_dict['email_address'] = encode_email_address(stream, show_sender=True)
sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic)
sub_dict['subscribers'] = stream_user_ids(stream)
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add",
subscriptions=sub_dicts)
send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
'''
altered_user_ids is the user_ids that we are adding/removing
subscribed_user_ids is the already-subscribed user_ids
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
'''
if stream.invite_only:
# PRIVATE STREAMS
# Realm admins can access all private stream subscribers. Send them an
# event even if they aren't subscribed to stream.
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
user_ids_to_notify = []
user_ids_to_notify.extend(realm_admin_ids)
user_ids_to_notify.extend(subscribed_user_ids)
return set(user_ids_to_notify) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
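# Editor's worked example (made-up ids): for a private stream whose realm
# admins are {1, 2}, with existing subscribers {2, 3, 4}, adding user 4 makes
# get_peer_user_ids_for_stream_change() return {1, 2, 3}: admins plus current
# subscribers, minus the altered user, who learns about the change from the
# regular "subscription"/"add" event instead.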
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list)
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max('id'))['id__max']
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str]={},
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams}
recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()]
stream_map: Dict[int, Stream] = {}
for stream in streams:
stream_map[recipients_map[stream.id]] = stream
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
realm = users[0].realm
already_subscribed: List[Tuple[UserProfile, Stream]] = []
subs_to_activate: List[Tuple[Subscription, Stream]] = []
new_subs: List[Tuple[UserProfile, int, Stream]] = []
for user_profile in users:
needs_new_sub: Set[int] = set(recipient_ids)
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add: List[Tuple[Subscription, Stream]] = []
for (user_profile, recipient_id, stream) in new_subs:
if stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time))
    # Now that we have all log objects generated, we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event: Dict[str, object] = dict(
type="stream",
op="occupy",
streams=[stream.to_dict() for stream in new_occupied_streams],
)
send_event(realm, event, active_user_ids(realm.id))
# Notify all existing users on streams that users have joined
# First, get all users subscribed to the streams that we care about
# We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list)
new_streams: Set[Tuple[int, int]] = set()
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
if not stream.is_public():
            # Users newly added to invite-only streams
            # need a `create` notification, because they need the
            # stream to exist in their client before they get the
            # "subscribe" notification, and so that they can manage
            # the new stream.
# Realm admins already have all created private streams.
realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
user.id not in realm_admin_ids]
send_stream_creation_event(stream, new_users_ids)
stream_ids = {stream.id for stream in streams}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
# The second batch is events for the users themselves that they
# were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
recent_traffic)
    # The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
stream_id=stream.id,
user_id=new_user_id)
send_event(realm, event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path('audio/notification_sounds')
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if '.' in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == '.ogg':
available_notification_sounds.append(root)
return available_notification_sounds
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
        ).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
        all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time))
        # Now that we have all log objects generated, we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'client_id': acting_client.id,
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def send_peer_remove_event(stream: Stream) -> None:
if stream.is_in_zephyr_realm and not stream.invite_only:
return
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
stream_id=stream.id,
user_id=removed_user.id)
send_event(our_realm, event, peer_user_ids)
for stream in streams:
send_peer_remove_event(stream=stream)
    new_vacant_streams = list(set(occupied_streams_before) - set(occupied_streams_after))
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(our_realm, event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
stream: Stream, property_name: str, value: Any,
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
# For this property, is_muted is used in the database, but
# in_home_view in the API, since we haven't migrated the events
# API to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
log_subscription_property_change(user_profile.email, stream.name,
database_property_name, database_value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=event_property_name,
value=event_value,
stream_id=stream.id,
name=stream.name)
send_event(user_profile.realm, event, [user_profile.id])
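# Editor's illustration of the is_muted/in_home_view translation above
# (hypothetical call, not executed): muting a stream via the legacy name,
#     do_change_subscription_property(user_profile, sub, stream, "in_home_view", False)
# stores is_muted=True in the database but still emits the event with
# property="in_home_view" and value=False, so older clients keep working.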
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: str,
acting_user: Optional[UserProfile]) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time, extra_data=old_name)
payload = dict(user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time)
update_users = bot_owner_user_ids(user_profile)
    # For admins, an update event is sent instead of delete/add
    # events, since an admin's bot_data already contains all the
    # bots and none of them should be removed (or added again).
# Delete the bot from previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(user_profile.realm,
dict(type='realm_bot',
op="delete",
bot=dict(
user_id=user_profile.id,
)),
{previous_owner.id})
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id})
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
update_users)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event: Dict[str, Any] = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
api_key=new_api_key,
)),
bot_owner_user_ids(user_profile))
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
# Even clients using client_gravatar don't need the email,
# since we're sending the URL anyway.
user_id=user_profile.id,
)
send_event(user_profile.realm,
dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str,
skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={'avatar_source': avatar_source},
event_time=event_time, acting_user=acting_user)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(realm,
dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm, event_time=timezone_now(),
acting_user=acting_user)
event = dict(type='realm',
op='update_dict',
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night))
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=['plan_type'])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm, event_time=timezone_now(),
extra_data={'old_value': old_value, 'new_value': plan_type})
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
send_event(realm, event, active_user_ids(realm.id))
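# Editor's sketch (hypothetical call): upgrading a realm in a billing flow is
#     do_change_plan_type(realm, Realm.STANDARD)
# which removes the message-visibility limit, raises the invite cap to
# INVITES_STANDARD_REALM_DAILY_MAX, sets upload_quota_gb to
# UPLOAD_QUOTA_STANDARD, and then broadcasts the plan_type event.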
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
event = dict(type="realm_user", op="update",
person=dict(user_id=user_profile.id, role=user_profile.role))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None:
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
history_public_to_subscribers: Optional[bool]=None) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
stream.is_web_public = is_web_public
stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=['stream_post_policy'])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
new_name: str,
user_profile: UserProfile,
log: bool=True) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT)
with override_language(stream.realm.default_language):
internal_send_stream_message(
stream.realm,
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format(
user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=['description', 'rendered_description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None:
stream.message_retention_days = message_retention_days
stream.save(update_fields=['message_retention_days'])
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
emails_restricted_to_domains: Optional[bool]=None) -> Realm:
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(
realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None)
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.", acting_user=None)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"emails_restricted_to_domains": emails_restricted_to_domains})
sender = get_system_bot(settings.NOTIFICATION_BOT)
admin_realm = sender.realm
# Send a notification to the admin realm
with override_language(admin_realm.default_language):
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist: # nocoverage
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
value: Union[bool, int, str], log: bool=True) -> None:
"""Takes in a UserProfile object, the name of a global notification
    preference to update, and the value to update to.
"""
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
f'Cannot update {name}: {value} is not an instance of {notification_setting_type}')
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(user_profile.realm, event, [user_profile.id])
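# Editor's sketch (hypothetical call): flipping a boolean notification
# preference, e.g.
#     do_change_notification_settings(user_profile, "enable_digest_emails", False)
# passes the isinstance() check because the registered type for that setting
# is bool and, as a side effect, clears any queued digest emails for the user.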
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event['language_name'] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group {}').format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
description: str, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name))
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name))
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
if stream in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: str) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: str) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related().filter(realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
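# Worked example of the interval-merging logic in
# do_update_user_activity_interval above (hypothetical times; assumes
# MIN_INTERVAL_LENGTH is 15 minutes). If the last stored interval is
# 12:00-12:15 and a new log_time of 12:10 arrives, its effective_end is
# 12:25, which overlaps the old interval, so that row is extended to
# 12:00-12:25. A log_time of 12:40 does not overlap, so a fresh
# 12:40-12:55 interval row is created instead.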
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
client_id: int,
query: str,
count: int,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile_id = user_profile_id,
client_id = client_id,
query = query,
defaults={'last_visit': log_time, 'count': count})
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    # The web app reports its client as 'website'.
    # The desktop app reports its client as ZulipDesktop,
    # because it sets a custom user agent. We want both
    # to count as web users.
# Alias ZulipDesktop to website
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
user_profile = user_profile,
client = client,
defaults = defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
if not created and stale_status or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
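# Worked example of the update rules in do_update_user_presence above
# (hypothetical timestamps). Suppose the stored row says ACTIVE with
# timestamp 12:00:00. An IDLE report at 12:00:30 falls inside the 70-second
# staleness window and the stored row is not IDLE, so nothing is written at
# all -- this is the suppression of ACTIVE -> IDLE flapping described in the
# comments. An ACTIVE report at 12:02:00 makes stale_status True, so the
# timestamp is refreshed and, because became_online is True, a presence
# event is broadcast to the realm (unless presence is disabled).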
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
status: int, new_user_input: bool) -> None:
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int) -> None:
if away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
)
event = dict(
type='user_status',
user_id=user_profile.id,
)
if away is not None:
event['away'] = away
if status_text is not None:
event['status_text'] = status_text
send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event('bankruptcy')
# First, we clear mobile push notifications. This is safer in the
# event that the below logic times out and we're killed.
all_push_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list("message_id", flat=True)[0:10000]
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
client: Client,
stream: Stream,
topic_name: Optional[str]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
recipient = stream.recipient
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
def do_update_mobile_push_notification(message: Message,
prior_mention_user_ids: Set[int],
stream_push_user_ids: Set[int]) -> None:
# Called during the message edit code path to remove mobile push
# notifications for users who are no longer mentioned following
# the edit. See #15428 for details.
#
# A perfect implementation would also support updating the message
# in a sent notification if a message was edited to mention a
    # group rather than a user (or vice versa), though it is likely
# not worth the effort to do such a change.
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int],
message_ids: List[int]) -> None:
if len(message_ids) == 0:
return
# This function supports clearing notifications for several users
# only for the message-edit use case where we'll have a single message_id.
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list('user_profile_id', 'message_id'))
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish("missedmessage_mobile_notifications", {
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
})
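# Illustrative sketch of the grouping in
# do_clear_mobile_push_notifications_for_ids above (hypothetical ids). If
# the query returns notifications_to_update == [(1, 100), (1, 101), (2, 100)],
# then messages_by_user becomes {1: [100, 101], 2: [100]} and two "remove"
# events are published to the missedmessage_mobile_notifications queue, one
# per user_profile_id.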
def do_update_message_flags(user_profile: UserProfile,
client: Client,
operation: str,
flag: str,
messages: List[int]) -> int:
valid_flags = [item for item in UserMessage.flags
if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
    # is a boolean flag on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
        if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
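# Illustrative flow for the historical-row branch in do_update_message_flags
# above (hypothetical ids). A user stars message 42 in a public stream they
# are not subscribed to: msgs.count() == 0, so access_message() verifies they
# may read it, a UserMessage row is created with flags historical | read, and
# the subsequent 'add' operation then sets the starred flag on that new row.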
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
def notify_topic_moved_streams(user_profile: UserProfile,
old_stream: Stream, old_topic: str,
new_stream: Stream, new_topic: Optional[str],
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool) -> None:
# Since moving content between streams is highly disruptive,
# it's worth adding a couple tombstone messages showing what
# happened.
sender = get_system_bot(settings.NOTIFICATION_BOT)
if new_topic is None:
new_topic = old_topic
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if send_notification_to_new_thread:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
new_stream.realm, sender, new_stream, new_topic,
_("This topic was moved here from {old_location} by {user}").format(
old_location=old_topic_link, user=user_mention,
),
)
if send_notification_to_old_thread:
with override_language(old_stream.realm.default_language):
# Send a notification to the old stream that the topic was moved.
internal_send_stream_message(
old_stream.realm, sender, old_stream, old_topic,
_("This topic was moved by {user} to {new_location}").format(
user=user_mention, new_location=new_topic_link,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
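# Illustrative sketch of the mask check in get_user_info_for_message_updates
# above (the bit values here are hypothetical). If UserMessage.flags.mentioned
# were bit 0x08 and wildcard_mentioned bit 0x10, then mask == 0x18, and a row
# with flags == 0x09 (read | mentioned) satisfies int(row['flags']) & mask, so
# its user lands in mention_user_ids, while a row with flags == 0x01 (read
# only) does not.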
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
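# Worked example of update_flag in update_user_message_flags above. If
# um.flags is 0 and the user has an alert word, the has_alert_word bit is
# OR'ed in and um is added to changed_ums; on a later edit where the alert
# word is gone, the bit is AND'ed back out. Rows whose flags never change are
# not saved, which keeps the per-edit write load proportional to the number
# of users actually affected.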
def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[str],
rendered_content: Optional[str]) -> None:
event: Dict[str, Any] = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id}
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
sender_id: int
recipient_id: int
topic: str
stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message,
new_stream: Optional[Stream], topic_name: Optional[str],
propagate_mode: str, send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.
"""
timestamp = timezone_now()
message.last_edit_time = timestamp
event: Dict[str, Any] = {
'type': 'update_message',
'user_id': user_profile.id,
'edit_timestamp': datetime_to_timestamp(timestamp),
'message_id': message.id,
}
edit_history_event: Dict[str, Any] = {
'user_id': user_profile.id,
'timestamp': event['edit_timestamp'],
}
changed_messages = [message]
stream_being_edited = None
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
event['stream_name'] = stream_being_edited.name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
assert rendered_content is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in message.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
message.mentions_user_ids.update(members)
update_user_message_flags(message, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
# message.has_image and message.has_link will have been
# already updated by markdown rendering in the caller.
message.has_attachment = check_attachment_reference_change(message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if message.mentions_wildcard:
event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
else:
event['wildcard_mention_user_ids'] = []
do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids'])
if topic_name is not None or new_stream is not None:
orig_topic_name = message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = message.recipient.type_id
if new_stream is not None:
assert content is None
assert message.is_stream_message()
assert stream_being_edited is not None
edit_history_event['prev_stream'] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
# When messages are moved from one stream to another, some
# users may lose access to those messages, including guest
# users and users not subscribed to the new stream (if it is a
# private stream). For those users, their experience is as
# though the messages were deleted, and we should send a
# delete_message event to them instead.
subscribers = get_active_subscriptions_for_stream_id(
stream_id).select_related("user_profile")
subs_to_new_stream = list(get_active_subscriptions_for_stream_id(
new_stream.id).select_related("user_profile"))
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
# Get users who aren't subscribed to the new_stream.
subs_losing_usermessages = [
sub for sub in subscribers
if sub.user_profile_id not in new_stream_sub_ids
]
        # Users who can no longer access the message without some action
# from administrators.
#
# TODO: Extend this list to also contain users losing access
# due to the messages moving to a private stream they are not
# subscribed to.
subs_losing_access = [
sub for sub in subs_losing_usermessages
if sub.user_profile.is_guest
]
ums = ums.exclude(user_profile_id__in=[
sub.user_profile_id for sub in subs_losing_usermessages])
if topic_name is not None:
topic_name = truncate_topic(topic_name)
message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
message_ids = [msg.id for msg in changed_messages]
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in
subs_losing_usermessages],
message_id__in=message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
'message_type': 'stream',
'stream_id': stream_being_edited.id,
'topic': orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
    # The following block arranges that users who are subscribed to a
# stream and can see history from before they subscribed get
# live-update when old messages are edited (e.g. if the user does
# a topic edit themself).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
# means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
# message-edit notifications for every public stream to every user
# in the organization (too expansive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the id of users already
            # in the users_to_be_notified list. This is the case where a
            # user both has a UserMessage row and is a current
            # subscriber.
subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids)
# All users that are subscribed to the stream must be
# notified when a message is edited
subscriber_ids = [user.user_profile_id for user in subscribers]
if new_stream is not None:
                # TODO: Guest users don't see the moved topic
                # unless the breadcrumb message for the new stream is
                # enabled. Excluding these users from receiving this
                # event helps us avoid an error traceback for our
# clients. We should figure out a way to inform the
# guest users of this new topic if sending a 'message'
# event for these messages is not an option.
#
# Don't send this event to guest subs who are not
# subscribers of the old stream but are subscribed to
# the new stream; clients will be confused.
old_stream_unsubbed_guests = [
sub for sub in subs_to_new_stream
if sub.user_profile.is_guest
and sub.user_profile_id not in subscriber_ids
]
subscribers = subscribers.exclude(user_profile_id__in=[
sub.user_profile_id for sub in old_stream_unsubbed_guests])
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_be_notified += list(map(subscriber_info, subscriber_ids))
send_event(user_profile.realm, event, users_to_be_notified)
if (len(changed_messages) > 0 and new_stream is not None and
stream_being_edited is not None):
# Notify users that the topic was moved.
notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
new_stream, topic_name, send_notification_to_old_thread,
send_notification_to_new_thread)
return len(changed_messages)
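# A minimal usage sketch for a content-only edit via do_update_message above
# (illustrative; new_content, rendered, prior_mentions, mentions and
# mention_data are hypothetical values normally prepared by the calling view
# code, which also handles rendering):
#
#     updated = do_update_message(
#         user_profile, message,
#         new_stream=None, topic_name=None, propagate_mode="change_one",
#         send_notification_to_old_thread=False,
#         send_notification_to_new_thread=False,
#         content=new_content, rendered_content=rendered,
#         prior_mention_user_ids=prior_mentions, mention_user_ids=mentions,
#         mention_data=mention_data,
#     )
#
# For a topic or stream move, content and rendered_content are passed as None
# and topic_name and/or new_stream are set instead, as the docstring explains.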
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # The messages in a delete_message event either all belong to the same topic
    # or consist of a single private message, as no other behavior is possible with
    # the current callers of this method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
# TODO: We should plan to remove `sender_id` here.
event['recipient_id'] = sample_message.recipient_id
event['sender_id'] = sample_message.sender_id
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event['stream_id'] = stream_id
event['topic'] = sample_message.topic_name()
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_notify = list(map(subscriber_info, subscriber_ids))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event['message_type'] = message_type
send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id'))
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS['messages_in_stream:is_bot:day']
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property,
end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values('stream_id').annotate(value=Sum('value'))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
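# Worked examples of round_to_2_significant_digits above (values chosen for
# illustration):
#
#     >>> round_to_2_significant_digits(1234)
#     1200
#     >>> round_to_2_significant_digits(87)
#     87
#
# round(n, 2 - len(str(n))) rounds n to its two leading digits and zeroes
# the rest.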
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
recent_traffic: Dict[int, int]) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
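# Worked example of the traffic heuristic above (hypothetical numbers). For a
# stream created 60 days ago with 280 messages counted over the last 28 days,
# the estimate is 280 // 4 == 70 messages/week. For a stream created 14 days
# ago with 100 messages, it is 100 * 7 // 14 == 50. Streams younger than
# STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS return None, meaning "not enough
# data yet".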
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict['is_muted'] = False
stream_dict['color'] = get_next_color()
stream_dict['desktop_notifications'] = True
stream_dict['audible_notifications'] = True
stream_dict['push_notifications'] = True
stream_dict['email_notifications'] = True
stream_dict['pin_to_top'] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id,
stream.date_created,
{})
stream_dict['stream_weekly_traffic'] = stream_weekly_traffic
stream_dict['email_address'] = ''
subscribed.append(stream_dict)
return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact when loading subscriptions for users with large
# numbers of them, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
*Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids: Set[int] = set()
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values(
*Stream.API_FIELDS,
# date_created is used as an input for the stream_weekly_traffic computed field.
"date_created",
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient,
)
else:
# If we're not including subscribers, always return None,
# which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
# Copy Subscription.API_FIELDS except for "active", which is
        # used to determine where to put the field.
for field_name in Subscription.API_FIELDS:
stream_dict[field_name] = sub[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
stream_dict['in_home_view'] = not stream_dict['is_muted']
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
# Add a few computed fields not directly from the data models.
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
stream_dict['email_address'] = encode_email_address_helper(
stream["name"], stream["email_token"], show_sender=True)
# Construct and add subscribers data
subscribers: Optional[List[int]] = subscriber_map[stream["id"]]
# Important: don't show the subscribers if the stream is invite only
# and this user isn't on it anymore (or a realm administrator).
if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
subscribers = None
# Guest users lose access to subscribers when they are unsubscribed.
if not sub["active"] and user_profile.is_guest:
subscribers = None
if subscribers is not None:
stream_dict['subscribers'] = subscribers
# is_active is represented in this structure by which list we include it in.
is_active = stream_dict.pop("active")
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
else:
never_subscribed_stream_ids = set()
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
# Backwards-compatibility addition of removed field.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
if is_public or user_profile.is_realm_admin:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, _ = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
if include_subscribers:
user_ids = set()
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([
email_dict[user_id] for user_id in sub['subscribers']
])
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
'''
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions, alert words, or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
'''
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags: Iterable[str] = user_flags.get(user_id, [])
mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags
private_message = is_pm and user_id != sender_id
alerted = 'has_alert_word' in flags
if mentioned or private_message or alerted:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
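# Illustrative sketch of get_active_presence_idle_user_ids above (hypothetical
# ids and flags; assumes realm.presence_disabled is False). For a stream
# message from sender 5 with active_user_ids == {5, 10, 11} and
# user_flags == {10: ['mentioned'], 11: []}, only user 10 meets the
# notification criteria; the result is then whatever subset of {10}
# filter_presence_idle_user_ids() reports as not currently present.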
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
    # (and push notifications with
# user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
from_name = f"{referrer.full_name} (via Zulip)"
send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
from_address=FromAddress.tokenized_no_reply_address(),
language=referrer.realm.default_language, context=context,
realm=referrer.realm)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
sent_invitations: bool) -> None:
self._msg: str = msg
self.errors: List[Tuple[str, str, bool]] = errors
self.sent_invitations: bool = sent_invitations
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
'''An upper bound on the number of invites sent in the last `days` days'''
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
'''Discourage using invitation emails as a vector for carrying spam.'''
msg = _("You do not have enough remaining invites. "
"Please contact {email} to have your limit raised. "
"No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
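# Worked example of the per-realm check in check_invite_limit above
# (hypothetical numbers; assumes OPEN_REALM_CREATION is enabled). If
# realm.max_invites is 100 and estimate_recent_invites() reports 90 invites
# in the last day, a request to invite 15 more (90 + 15 > 100) raises
# InvitationError before anything is sent. The remaining code then applies a
# similar aggregate cap across all recently created realms, per
# settings.INVITES_NEW_REALM_LIMIT_DAYS.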
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
check_invite_limit(user_profile.realm, len(invitee_emails))
realm = user_profile.realm
if not realm.invite_required:
        # Guard against users joining an open realm just to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if (user_profile.date_joined > timezone_now() - min_age
and not user_profile.is_realm_admin):
raise InvitationError(
_("Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."),
[], sent_invitations=False)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == '':
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
'''
    good_emails are emails that look OK so far,
    but we still need to make sure they're not
    going to conflict with existing users.
'''
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
# when exactly invitations were sent
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as=invite_as,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False))
if not user_profile.is_realm_admin:
# We do not return multiuse invites to non-admin users.
return invites
lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
type=Confirmation.MULTIUSE_INVITE,
date_sent__gte=lowest_datetime)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
invites.append(dict(invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
id=invite.id,
link_url=confirmation_url(confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE),
invited_as=invite.invited_as,
is_multiuse=True))
return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
streams: Sequence[Stream] = []) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
# TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type,
object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    # These two assertions are structurally guaranteed to hold for the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: str,
author: UserProfile,
image_file: File) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
# The only user-controlled portion of 'emoji_file_name' is an extension,
    # which cannot contain '..', '/', or '\', making it difficult to exploit.
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=['file_name'])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
date_muted: Optional[datetime.datetime]=None) -> None:
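    """Mute a topic for the user and push their updated muted-topics list to their clients."""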
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
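    """Create a RealmFilter mapping pattern to url_format_string and return its id."""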
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
exists_expression = Exists(
Subscription.objects.filter(active=True, user_profile__is_active=True,
user_profile__realm=realm,
recipient_id=OuterRef('recipient_id')),
)
occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \
.annotate(occupied=exists_expression).filter(occupied=True)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile, include_public: bool=True,
include_subscribed: bool=True, include_all_active: bool=False,
include_default: bool=False, include_owner_subscribed: bool=False,
) -> List[Dict[str, Any]]:
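    """Return the streams visible to user_profile, filtered by the include_* flags and sorted by name."""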
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
attachment_dict: Dict[str, Any]) -> None:
event = {
'type': 'attachment',
'op': op,
'attachment': attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
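    """Attach uploads referenced in the message to Attachment records; returns True if any were claimed."""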
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that actually
            #   exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id, path_id, message.id,
)
continue
claimed = True
attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
    # For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(message.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
op=operation,
fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
field_subtype: str) -> CustomProfileField:
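    """Create an EXTERNAL_ACCOUNT custom profile field using one of the predefined external account subtypes."""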
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
field = CustomProfileField(realm=realm, name=field_data['name'],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data['hint'],
field_data=ujson.dumps(dict(subtype=field_subtype)))
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
hint: str='',
field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.hint = hint
    if field.field_type in (CustomProfileField.CHOICE, CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: str, hint: str='',
field_data: Optional[ProfileFieldData]=None) -> None:
field.name = name
field.hint = hint
    if field.field_type in (CustomProfileField.CHOICE, CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
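    """Reorder the realm's custom profile fields to match the given list of field ids."""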
    order_mapping = {field_id: order_index for order_index, field_id in enumerate(order)}
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for field in fields:
field.order = order_mapping[field.id]
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'update')
def notify_user_update_custom_profile_data(user_profile: UserProfile,
field: Dict[str, Union[int, str, List[int], None]]) -> None:
data = dict(id=field['id'])
if field['type'] == CustomProfileField.USER:
data["value"] = ujson.dumps(field['value'])
else:
data['value'] = field['value']
if field['rendered_value']:
data['rendered_value'] = field['rendered_value']
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]],
) -> None:
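    """Save and broadcast changes to the user's custom profile field values, skipping unchanged fields."""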
with transaction.atomic():
for field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile,
field_id=field['id'])
if not created and field_value.value == str(field['value']):
                # If the field value isn't actually being changed to a different one,
                # we have nothing to do here for this field.
# Note: field_value.value is a TextField() so we need to cast field['value']
# to a string for the comparison in this if.
continue
field_value.value = field['value']
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(str(field['value']))
field_value.save(update_fields=['value', 'rendered_value'])
else:
field_value.save(update_fields=['value'])
notify_user_update_custom_profile_data(user_profile, {
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
field_value.delete()
notify_user_update_custom_profile_data(user_profile, {'id': field_id,
'value': None,
'rendered_value': None,
'type': field.field_type})
except CustomProfileField.DoesNotExist:
raise JsonableError(_('Field id {id} not found.').format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
description: str) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=['name'])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
service_interface: int,
service_payload_url: str) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(base_url=service.base_url,
interface=service.interface,
token=service.token)],
),
),
bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [{'config_data': get_bot_config(user_profile),
'service_name': services[0].name,
}]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
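    """Bulk version of get_service_dicts_for_bot: map each bot's user id to its list of service dicts."""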
bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif bot_type == UserProfile.EMBEDDED_BOT:
            if bot_profile_id in embedded_bot_configs:
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [{'config_data': bot_config,
'service_name': services[0].name,
}]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
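    """Return bot dicts (including service data) owned by the user, or all realm bots for admins when requested."""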
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'bot_type': botdict['bot_type'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner_id': botdict['bot_owner__id'],
'avatar_url': avatar_url_from_dict(botdict),
'services': services_by_ids[botdict['id']],
}
for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
context = {'confirmation_url': url,
'realm_uri': realm.uri,
'realm_name': realm.name}
language = realm.default_language
send_email_to_admins(
'zerver/emails/realm_reactivation', realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language, context=context)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
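    """Store (or clear, when None) the user's Zoom token and tell their clients whether a token is set."""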
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm, dict(type="has_zoom_token", value=token is not None), [user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type='realm_export',
exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `ujson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = ujson.loads(export_extra_data)
export_path = export_data.get('export_path')
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update({'deleted_timestamp': timezone_now().timestamp()})
export.extra_data = ujson.dumps(export_data)
export.save(update_fields=['extra_data'])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream,
topic_name: str) -> List[Message]:
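    """Return the user's messages in the given stream topic, ordered by message id."""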
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
| 43.178697 | 133 | 0.659801 | [
"Apache-2.0"
] | gutalavijay1111/zulip-vijay | zerver/lib/actions.py | 257,820 | Python |
from hashedml.hashedml import *
| 16 | 31 | 0.8125 | [
"MIT"
] | mtingers/hashedml | hashedml/__init__.py | 32 | Python |