metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "Jezus-es-a-haverok/CaffShop",
"score": 2
}
|
#### File: caffshop/webshop/views.py
```python
from django.urls import reverse
from django.utils import timezone
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.contrib.auth.decorators import login_required
from .filters import CAFFFilter
from .models import CAFF
from .models import Comment
from django.http import HttpResponseRedirect
from django.http import FileResponse
from django.shortcuts import render, get_object_or_404
from .forms import UploadCAFFForm
from .forms import CommentForm
import datetime
from PIL import Image
from django.core.files import File as DjangoFile
import numpy as np
import sys
import io
from django.contrib import messages
sys.path.append("../lib")
import libcaffparser
class CAFFListView(ListView):
model = CAFF
paginate_by = 100 # if pagination is desired
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['filter'] = CAFFFilter(self.request.GET, queryset=self.get_queryset())
return context
@login_required
def upload_caff(request):
if request.method == 'POST':
form = UploadCAFFForm(request.POST, request.FILES)
if form.is_valid():
content = form.cleaned_data['content'].read()
caff = libcaffparser.parse(content, len(content))
if caff.getCode() == libcaffparser.OK:
# Convert to image
array = np.array(caff.getThumbnail()[0:caff.getWidth() * caff.getHeight() * 3], dtype=np.uint8)
B = np.reshape(array, (caff.getHeight(), caff.getWidth(), 3))
thumbnailImage = Image.fromarray(B, 'RGB')
# Save into buffer
buf = io.BytesIO()
thumbnailImage.save(buf, format='PNG')
byte_im = buf.getvalue()
# Save to file with django
file_obj1 = DjangoFile(open("thumbnailImage.png", mode='wb+'), name=str(form.cleaned_data['name']))
file_obj1.write(byte_im)
# Create CAFF object
record = CAFF()
record.content = request.FILES['content']
record.name = form.cleaned_data['name']
record.thumbnail = file_obj1
record.user = request.user
record.creator = caff.getCreator()
record.creation_date = datetime.datetime(
year=caff.getYear(),
month=caff.getMonth(),
day=caff.getDay(),
hour=caff.getHour(),
minute=caff.getMin(),
)
record.upload_date = datetime.datetime.now()
record.tags = caff.getTags()
record.captions = caff.getCaptions()
record.save()
return HttpResponseRedirect(reverse('caff_detailView', args=(record.id,)))
else:
messages.error(request, 'Invalid CAFF file')
else:
form = UploadCAFFForm()
return render(request, 'webshop/upload.html', {'form': form})
@login_required
def caff_detailview(request, id):
template_name = 'webshop/caff_detailView.html'
caff = get_object_or_404(CAFF, id=id)
comments = caff.comments.filter(active=True)
new_comment = None
# Comment posted
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
# Create Comment object but don't save to database yet
new_comment = comment_form.save(commit=False)
# Assign the current post to the comment
new_comment.image = caff
new_comment.user = request.user
# Save the comment to the database
new_comment.save()
else:
comment_form = CommentForm()
return render(request, template_name, {'caff': caff,
'comments': comments,
'new_comment': new_comment,
'comment_form': comment_form})
@login_required
def caff_download(request, id):
template_name = 'webshop/caff_detailView.html'
caff = get_object_or_404(CAFF, id=id)
response = FileResponse(open("media/" + str(caff.content), 'rb'))
return response
```
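The thumbnail handling in `upload_caff` above compresses three steps into a few lines: the flat RGB byte buffer returned by the parser is reshaped into an `(H, W, 3)` uint8 array and then encoded as PNG in memory. A minimal standalone sketch of just that conversion, with made-up 2x2 pixel data, might look like this:

```python
# Minimal sketch of the byte-buffer -> PNG conversion used above.
# The 2x2 pixel values are invented purely for illustration.
import io

import numpy as np
from PIL import Image

width, height = 2, 2
flat_rgb = [255, 0, 0, 0, 255, 0, 0, 0, 255, 255, 255, 255]  # 4 RGB pixels

array = np.array(flat_rgb[: width * height * 3], dtype=np.uint8)
pixels = np.reshape(array, (height, width, 3))
thumbnail = Image.fromarray(pixels, "RGB")

buf = io.BytesIO()
thumbnail.save(buf, format="PNG")
png_bytes = buf.getvalue()
print(f"{len(png_bytes)} bytes of PNG data")
```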
|
{
"source": "jezyk2796/projectgroup",
"score": 2
}
|
#### File: jezyk2796/projectgroup/app.py
```python
from datetime import datetime
from bson.json_util import dumps
from flask import Flask, render_template, request, redirect, url_for
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
from flask_socketio import SocketIO, join_room, leave_room
from pymongo.errors import DuplicateKeyError
from db import get_user, save_user, save_room, add_room_members, get_rooms_for_user, get_room, is_room_member, \
get_room_members, is_room_admin, update_room, remove_room_members, save_message, get_messages
app = Flask(__name__)
app.secret_key = "<KEY>"
socketio = SocketIO(app)
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
@app.route('/')
def home():
rooms = []
if current_user.is_authenticated:
rooms = get_rooms_for_user(current_user.username)
return render_template("index.html", rooms=rooms)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
message = ''
if request.method == 'POST':
username = request.form.get('username')
password_input = request.form.get('password')
user = get_user(username)
if user and user.check_password(password_input):
login_user(user)
return redirect(url_for('home'))
else:
message = 'Failed to login!'
return render_template('login.html', message=message)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
message = ''
if request.method == 'POST':
username = request.form.get('username')
email = request.form.get('email')
password = request.form.get('password')
try:
save_user(username, email, password)
return redirect(url_for('login'))
except DuplicateKeyError:
message = "User already exists!"
return render_template('signup.html', message=message)
@app.route("/logout/")
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/create-room/', methods=['GET', 'POST'])
@login_required
def create_room():
message = ''
if request.method == 'POST':
room_name = request.form.get('room_name')
usernames = [username.strip()
for username in request.form.get('members').split(',')]
if len(room_name) and len(usernames):
room_id = save_room(room_name, current_user.username)
if current_user.username in usernames:
usernames.remove(current_user.username)
add_room_members(room_id, room_name, usernames,
current_user.username)
return redirect(url_for('view_room', room_id=room_id))
else:
message = "Failed to create room"
return render_template('create_room.html', message=message)
@app.route('/rooms/<room_id>/edit', methods=['GET', 'POST'])
@login_required
def edit_room(room_id):
room = get_room(room_id)
if room and is_room_admin(room_id, current_user.username):
existing_room_members = [member['_id']['username']
for member in get_room_members(room_id)]
room_members_str = ",".join(existing_room_members)
message = ''
if request.method == 'POST':
room_name = request.form.get('room_name')
room['name'] = room_name
update_room(room_id, room_name)
new_members = [username.strip()
for username in request.form.get('members').split(',')]
members_to_add = list(
set(new_members) - set(existing_room_members))
members_to_remove = list(
set(existing_room_members) - set(new_members))
if len(members_to_add):
add_room_members(room_id, room_name,
members_to_add, current_user.username)
if len(members_to_remove):
remove_room_members(room_id, members_to_remove)
message = 'Room edited successfully'
room_members_str = ",".join(new_members)
return render_template('edit_room.html', room=room, room_members_str=room_members_str, message=message)
else:
return "Room not found", 404
@app.route('/rooms/<room_id>/')
@login_required
def view_room(room_id):
room = get_room(room_id)
if room and is_room_member(room_id, current_user.username):
room_members = get_room_members(room_id)
messages = get_messages(room_id)
return render_template('view_room.html', username=current_user.username, room=room, room_members=room_members,
messages=messages)
else:
return "Room not found", 404
@app.route('/rooms/<room_id>/messages/')
@login_required
def get_older_messages(room_id):
room = get_room(room_id)
if room and is_room_member(room_id, current_user.username):
page = int(request.args.get('page', 0))
messages = get_messages(room_id, page)
return dumps(messages)
else:
return "Room not found", 404
@socketio.on('send_message')
def handle_send_message_event(data):
app.logger.info("{} has sent message to the room {}: {}".format(data['username'],
data['room'],
data['message']))
data['created_at'] = datetime.now().strftime("%d %b, %H:%M")
save_message(data['room'], data['message'], data['username'])
socketio.emit('receive_message', data, room=data['room'])
@socketio.on('join_room')
def handle_join_room_event(data):
app.logger.info("{} has joined the room {}".format(
data['username'], data['room']))
join_room(data['room'])
socketio.emit('join_room_announcement', data, room=data['room'])
@socketio.on('leave_room')
def handle_leave_room_event(data):
app.logger.info("{} has left the room {}".format(
data['username'], data['room']))
leave_room(data['room'])
socketio.emit('leave_room_announcement', data, room=data['room'])
@login_manager.user_loader
def load_user(username):
return get_user(username)
if __name__ == '__main__':
socketio.run(app, debug=True)
```
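For reference, the Socket.IO events handled above (`join_room`, `send_message`, `receive_message`) could be exercised from a small Python client. The sketch below is only an assumption about how such a client might look: it presumes the `python-socketio` client package and the server above running locally on its default port, and the username, room id and message are placeholders.

```python
# Hypothetical client sketch (assumes `pip install "python-socketio[client]"`
# and the Flask-SocketIO server above running on http://localhost:5000).
import socketio

sio = socketio.Client()

@sio.on("receive_message")
def on_receive_message(data):
    # mirrors the 'receive_message' event emitted by handle_send_message_event()
    print(f"{data['username']}: {data['message']}")

sio.connect("http://localhost:5000")
sio.emit("join_room", {"username": "alice", "room": "example-room-id"})
sio.emit("send_message", {"username": "alice", "room": "example-room-id", "message": "hello"})
sio.wait()
```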
|
{
"source": "Jezza34000/weback-hass",
"score": 2
}
|
#### File: custom_components/weback/__init__.py
```python
from datetime import timedelta
import logging
import random
import string
import voluptuous as vol
from weback_unofficial.client import WebackApi
from weback_unofficial.vacuum import CleanRobot
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "weback"
SCAN_INTERVAL = timedelta(seconds=60)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
WEBACK_DEVICES = "weback_devices"
SUPPORTED_DEVICES = ["_CLEAN_ROBOT", "_CLEAN_ROBOT_LSLAM_BV"]
def setup(hass, config):
"""Set up the Weback component."""
_LOGGER.debug("Creating new Weback component")
hass.data[WEBACK_DEVICES] = []
weback_api = WebackApi(
config[DOMAIN].get(CONF_USERNAME), config[DOMAIN].get(CONF_PASSWORD),
)
devices = weback_api.device_list()
_LOGGER.debug("Weback devices: %s", devices)
for device in devices:
_LOGGER.info(
"Discovered Weback device %s with nickname %s",
device["Thing_Name"],
device["Thing_Nick_Name"],
)
# Fetching device description to check if this device is supported by platform
description = weback_api.get_device_description(device["Thing_Name"])
if description.get("thingTypeName") not in SUPPORTED_DEVICES:
_LOGGER.info("Device not supported by this integration")
continue
vacuum = CleanRobot(device["Thing_Name"], weback_api, None, description)
hass.data[WEBACK_DEVICES].append(vacuum)
if hass.data[WEBACK_DEVICES]:
_LOGGER.debug("Starting vacuum components")
discovery.load_platform(hass, "vacuum", DOMAIN, {}, config)
return True
```
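The `CONFIG_SCHEMA` above expects a `weback:` section with `username` and `password` (the usual `configuration.yaml` block). Since voluptuous schemas are callable, the expected shape can be checked directly; in this sketch plain `str` stands in for Home Assistant's `cv.string`, and the credentials are placeholders.

```python
# Standalone sketch of the configuration shape the component above accepts.
import voluptuous as vol

CONFIG_SCHEMA = vol.Schema(
    {"weback": vol.Schema({vol.Required("username"): str, vol.Required("password"): str})},
    extra=vol.ALLOW_EXTRA,
)

validated = CONFIG_SCHEMA({"weback": {"username": "user@example.com", "password": "hunter2"}})
print(validated["weback"])
```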
|
{
"source": "jezzab/pyvizio",
"score": 2
}
|
#### File: pyvizio/api/item.py
```python
from typing import Any, Dict, Optional, Union
from pyvizio.api._protocol import (
ACTION_MODIFY,
ENDPOINT,
ITEM_CNAME,
PATH_MODEL,
ResponseKey,
)
from pyvizio.api.base import CommandBase, InfoCommandBase
from pyvizio.helpers import dict_get_case_insensitive, get_value_from_path
class GetDeviceInfoCommand(InfoCommandBase):
"""Command to get device info."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device info."""
super(GetDeviceInfoCommand, self).__init__(ENDPOINT[device_type]["DEVICE_INFO"])
self.paths = PATH_MODEL[device_type]
def process_response(self, json_obj: Dict[str, Any]) -> Dict[str, Any]:
"""Return response to command to get device info."""
return dict_get_case_insensitive(json_obj, ResponseKey.ITEMS, [{}])[0]
class GetModelNameCommand(GetDeviceInfoCommand):
"""Command to get device model name."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device model name."""
super(GetModelNameCommand, self).__init__(device_type)
def process_response(self, json_obj: Dict[str, Any]) -> Optional[str]:
"""Return response to command to get device model name."""
return get_value_from_path(
dict_get_case_insensitive(
super(GetModelNameCommand, self).process_response(json_obj),
ResponseKey.VALUE,
{},
),
self.paths,
)
class Item(object):
"""Individual item setting."""
def __init__(self, json_obj: Dict[str, Any]) -> None:
"""Initialize individual item setting."""
self.id = None
id = dict_get_case_insensitive(json_obj, ResponseKey.HASHVAL)
if id is not None:
self.id = int(id)
self.c_name = dict_get_case_insensitive(json_obj, ResponseKey.CNAME)
self.type = dict_get_case_insensitive(json_obj, ResponseKey.TYPE)
self.name = dict_get_case_insensitive(json_obj, ResponseKey.NAME)
self.value = dict_get_case_insensitive(json_obj, ResponseKey.VALUE)
self.min = None
min = dict_get_case_insensitive(json_obj, ResponseKey.MINIMUM)
if min is not None:
self.min = int(min)
self.max = None
max = dict_get_case_insensitive(json_obj, ResponseKey.MAXIMUM)
if max is not None:
self.max = int(max)
self.center = None
center = dict_get_case_insensitive(json_obj, ResponseKey.CENTER)
if center is not None:
self.center = int(center)
self.choices = dict_get_case_insensitive(json_obj, ResponseKey.ELEMENTS, [])
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or (
self.c_name == other.c_name
and self.type == other.type
and self.name == other.name
and self.value == other.value
)
class DefaultReturnItem(object):
"""Mock individual item setting response when item is not found."""
def __init__(self, value: Any) -> None:
"""Initialize mock individual item setting response when item is not found."""
self.value = value
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
class ItemInfoCommandBase(InfoCommandBase):
"""Command to get individual item setting."""
def __init__(
self, device_type: str, item_name: str, default_return: Union[int, str] = None
) -> None:
"""Initialize command to get individual item setting."""
super(ItemInfoCommandBase, self).__init__(ENDPOINT[device_type][item_name])
self.item_name = item_name.upper()
self.default_return = default_return
def process_response(self, json_obj: Dict[str, Any]) -> Any:
"""Return response to command to get individual item setting."""
items = [
Item(item)
for item in dict_get_case_insensitive(json_obj, ResponseKey.ITEMS, [])
]
for itm in items:
if itm.c_name.lower() in (
ITEM_CNAME.get(self.item_name, ""),
self.item_name,
) and (
itm.value is not None
or itm.center is not None
or itm.choices is not None
):
return itm
if self.default_return is not None:
return DefaultReturnItem(self.default_return)
return None
class ItemCommandBase(CommandBase):
"""Command to set value of individual item setting."""
def __init__(
self, device_type: str, item_name: str, id: int, value: Union[int, str]
) -> None:
"""Initialize command to set value of individual item setting."""
super(ItemCommandBase, self).__init__(ENDPOINT[device_type][item_name])
self.item_name = item_name
self.VALUE = value
# noinspection SpellCheckingInspection
self.HASHVAL = int(id)
self.REQUEST = ACTION_MODIFY.upper()
class GetCurrentPowerStateCommand(ItemInfoCommandBase):
"""Command to get current power state of device."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get current power state of device."""
super(GetCurrentPowerStateCommand, self).__init__(device_type, "POWER_MODE", 0)
class GetCurrentChargingStatusCommand(ItemInfoCommandBase):
"""Command to get current charging status of device."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get current charging status of device."""
super(GetCurrentChargingStatusCommand, self).__init__(device_type, "CHARGING_STATUS", 0)
class GetBatteryLevelCommand(ItemInfoCommandBase):
"""Command to get current battery level (will be 0 if charging) of device."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get current battery level (will be 0 if charging) of device."""
super(GetBatteryLevelCommand, self).__init__(device_type, "BATTERY_LEVEL", 0)
class GetESNCommand(ItemInfoCommandBase):
"""Command to get device ESN (electronic serial number?)."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device ESN (electronic serial number?)."""
super(GetESNCommand, self).__init__(device_type, "ESN")
class GetSerialNumberCommand(ItemInfoCommandBase):
"""Command to get device serial number."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device serial number."""
super(GetSerialNumberCommand, self).__init__(device_type, "SERIAL_NUMBER")
class GetVersionCommand(ItemInfoCommandBase):
"""Command to get SmartCast software version."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get SmartCast software version."""
super(GetVersionCommand, self).__init__(device_type, "VERSION")
class AltItemInfoCommandBase(ItemInfoCommandBase):
"""Command to get individual item setting."""
def __init__(
self,
device_type: str,
endpoint_name: str,
item_name: str,
default_return: Union[int, str] = None,
) -> None:
"""Initialize command to get individual item setting."""
super(ItemInfoCommandBase, self).__init__(ENDPOINT[device_type][endpoint_name])
self.item_name = item_name.upper()
self.default_return = default_return
class GetAltESNCommand(AltItemInfoCommandBase):
"""Command to get device ESN (electronic serial number?)."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device ESN (electronic serial number?)."""
super(GetAltESNCommand, self).__init__(device_type, "_ALT_ESN", "ESN")
class GetAltSerialNumberCommand(AltItemInfoCommandBase):
"""Command to get device serial number."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get device serial number."""
super(GetAltSerialNumberCommand, self).__init__(
device_type, "_ALT_SERIAL_NUMBER", "SERIAL_NUMBER"
)
class GetAltVersionCommand(AltItemInfoCommandBase):
"""Command to get SmartCast software version."""
def __init__(self, device_type: str) -> None:
"""Initialize command to get SmartCast software version."""
super(GetAltVersionCommand, self).__init__(
device_type, "_ALT_VERSION", "VERSION"
)
```
#### File: pyvizio/discovery/ssdp.py
```python
import http.client
import io
import socket
from pyvizio.const import DEFAULT_TIMEOUT
class SSDPDevice(object):
"""Representation of Vizio device discovered via SSDP."""
def __init__(self, ip, name, model, udn) -> None:
self.ip = ip
self.name = name
self.model = model
self.udn = udn
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
class SSDPResponse(object):
"""SSDP discovery response."""
class _FakeSocket(io.BytesIO):
"""Fake socket to retrieve SSDP response."""
def makefile(self, *args, **kw):
return self
def __init__(self, response):
"""Initialize SSDP response."""
r = http.client.HTTPResponse(self._FakeSocket(response))
r.begin()
self.location = r.getheader("location")
self.usn = r.getheader("usn")
self.st = r.getheader("st")
self.cache = r.getheader("cache-control").split("=")[1]
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
def discover(service, timeout=DEFAULT_TIMEOUT, retries=1, mx=3):
"""Return all discovered SSDP services of a given service name over given timeout period."""
group = ("192.168.127.12", 1900)
message = "\r\n".join(
[
"M-SEARCH * HTTP/1.1",
"HOST: {0}:{1}",
'MAN: "ssdp:discover"',
"ST: {st}",
"MX: {mx}",
"",
"",
]
)
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
message_bytes = message.format(*group, st=service, mx=mx).encode("utf-8")
sock.sendto(message_bytes, group)
while True:
try:
response = SSDPResponse(sock.recv(1024))
responses[response.location] = response
except socket.timeout:
break
return list(responses.values())
```
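A hypothetical call to the `discover()` helper above; `ssdp:all` is the generic SSDP search target and is used here only for illustration (the library itself may search for a more specific service type).

```python
# Hypothetical usage sketch, assuming the module above is importable under the path shown.
from pyvizio.discovery.ssdp import discover

for resp in discover("ssdp:all", timeout=3, retries=1, mx=3):
    print(resp.location, resp.st, resp.usn)
```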
|
{
"source": "Jezza/deploy",
"score": 2
}
|
#### File: gitlab/data/fetch_from_gitlab.py
```python
import json
import requests
def fetch_from_gitlab(token, endpoint, **kwargs):
url = 'https://gitlab.kitware.com/api/v4' + endpoint
response = requests.get(url, headers={'PRIVATE-TOKEN': token}, params=kwargs)
return response.json()
def write_result(token, name, endpoint, dumpall=False):
print('Writing out %s...' % name)
result = fetch_from_gitlab(token, endpoint)
if not dumpall:
if type(result) == list:
result = result[0]
        # Remove any sensitive keys (tokens, identities) from the result.
result.pop('private_token', None)
result.pop('runners_token', None)
if type(result.get('identities')) == list:
result['identities'] = []
with open('%s.json' % name, 'w+') as fout:
json.dump(result, fout, indent = 2, separators=(',', ': '), sort_keys=True)
fout.write('\n')
REPO = 'utils%2Frust-gitlab'
USER = 11 # kwrobot
COMMIT = 'de4ac3cf96cb8a0893be22b03f5171d934f9d392'
ISSUE_ID = 6 # https://gitlab.kitware.com/utils/rust-gitlab/issues/6
MR_ID = 35 # https://gitlab.kitware.com/utils/rust-gitlab/merge_requests/35
MR_DISCUSSION_ID = 158 # https://gitlab.kitware.com/utils/rust-gitlab/merge_requests/35
NOTE_ID = 177359
PIPELINE_ID = 145400
GROUP_ID = 498 # https://gitlab.kitware.com/utils
if __name__ == '__main__':
import sys
token = sys.argv[1]
write_result(token, 'user_public', '/user')
write_result(token, 'user', '/users/%d' % USER)
write_result(token, 'project', '/projects/%s' % REPO)
write_result(token, 'project_hook', '/projects/%s/hooks' % REPO)
write_result(token, 'member', '/groups/utils/members')
write_result(token, 'repo_branch', '/projects/%s/repository/branches/master' % REPO)
write_result(token, 'repo_commit_detail', '/projects/%s/repository/commits/%s?stats=true' % (REPO, COMMIT))
write_result(token, 'commit_note', '/projects/%s/repository/commits/%s/comments' % (REPO, COMMIT))
write_result(token, 'commit_status', '/projects/%s/repository/commits/%s/statuses' % (REPO, COMMIT))
write_result(token, 'issue', '/projects/%s/issues/%d' % (REPO, ISSUE_ID))
write_result(token, 'merge_request', '/projects/%s/merge_requests/%d' % (REPO, MR_ID))
write_result(token, 'issue_reference', '/projects/%s/merge_requests/%d/closes_issues' % (REPO, MR_ID))
write_result(token, 'note', '/projects/%s/merge_requests/%d/notes' % (REPO, MR_ID))
write_result(token, 'discussion', '/projects/%s/merge_requests/%d/discussions' % (REPO, MR_ID), dumpall=True)
write_result(token, 'award_emoji', '/projects/%s/merge_requests/%d/notes/%d/award_emoji' % (REPO, MR_ID, NOTE_ID))
write_result(token, 'resource_label_event', '/projects/%s/issues/%d/resource_label_events' % (REPO, ISSUE_ID))
write_result(token, 'pipeline_basic', '/projects/%s/pipelines' % REPO)
write_result(token, 'pipeline', '/projects/%s/pipelines/%d' % (REPO, PIPELINE_ID))
write_result(token, 'group', '/groups/%s' % GROUP_ID)
# FIXME: these are hidden behind a `403 forbidden`, so we use a hardcoded example instead.
# write_result(token, 'pipeline_variable', '/projects/%s/pipelines/%d/variables' % (REPO, PIPELINE_ID))
```
|
{
"source": "JezzaHehn/pyifs",
"score": 3
}
|
#### File: JezzaHehn/pyifs/baseforms.py
```python
from colour import Color
from math import sqrt
class Transform(object):
def __init__(self, rng):
self.r, self.g, self.b = Color(hsl=(rng.random(), 1, 0.5)).rgb
self.rng = rng
def transform_colour(self, r, g, b):
r = (self.r + r) / 2.0
g = (self.g + g) / 2.0
b = (self.b + b) / 2.0
return r, g, b
def get_name(self):
return self.__class__.__name__
class ComplexTransform(Transform):
def transform(self, px, py):
z = complex(px, py)
z2 = self.f(z)
return z2.real, z2.imag
class MoebiusBase(ComplexTransform):
"""
This applies a random Moebius transform and then its inverse.
"""
def __init__(self, rng, xform):
super(MoebiusBase, self).__init__(rng)
self.coef_a = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_b = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_c = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.coef_d = complex(rng.gauss(0, 0.2), rng.gauss(0, 0.2))
self.xform = xform
self.transform_colour = self.xform.transform_colour
def get_name(self):
return "Moeb" + self.xform.get_name()
def f(self, z):
# apply pre-Moebius (az+b)/(cz+d)
z = (self.coef_a * z + self.coef_b) / (self.coef_c * z + self.coef_d)
# apply inner transform
z = complex(*self.xform.transform(z.real, z.imag))
# return post-Moebius (dz-b)/(-cz+a), which is inverse of pre-Moebius
return (self.coef_d * z - self.coef_b) / (-self.coef_c * z + self.coef_a)
class SphericalBase(Transform):
"""
Since the spherical transform is its own inverse, it can simply be applied twice.
"""
def __init__(self, rng, xform):
super(SphericalBase, self).__init__(rng)
self.xform = xform
def get_name(self):
return "Spheri" + self.xform.get_name()
def transform(self, px, py):
# first spherical
r2 = sqrt(px**2 + py**2)**2
px, py = px/r2, py/r2
# inner transform
px, py = self.xform.transform(px, py)
# second spherical
r2 = sqrt(px**2 + py**2)**2
return px/r2, py/r2
```
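The comment in `MoebiusBase.f` above relies on the identity that w = (az + b)/(cz + d) is inverted by z = (dw - b)/(-cw + a) whenever ad - bc is nonzero. A quick numerical check of that claim, with arbitrary example coefficients:

```python
# Numerical check that (dw - b) / (-cw + a) inverts (az + b) / (cz + d),
# as the MoebiusBase comment above states. Coefficients are arbitrary examples.
a, b = complex(1.2, 0.3), complex(-0.5, 0.1)
c, d = complex(0.2, -0.4), complex(0.9, 0.2)

def moebius(z):
    return (a * z + b) / (c * z + d)

def inverse(w):
    return (d * w - b) / (-c * w + a)

z = complex(0.7, -0.2)
assert abs(inverse(moebius(z)) - z) < 1e-12
print("inverse verified for", z)
```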
#### File: JezzaHehn/pyifs/transforms.py
```python
import random, baseforms
import numpy as np
from math import cos, sin, pi, atan, atan2, sqrt
# r = sqrt(px**2 + py**2)
# theta = atan(px/py)
# phi = atan(py/px)
class Linear(baseforms.Transform):
def __init__(self, rng):
super(Linear, self).__init__(rng)
self.coef_a = rng.uniform(-1, 1)
self.coef_b = rng.uniform(-1, 1)
self.coef_c = rng.uniform(-1, 1)
self.coef_d = rng.uniform(-1, 1)
def transform(self, px, py):
return (self.coef_a * px + self.coef_b * py, self.coef_c * px + self.coef_d * py)
class Moebius(baseforms.ComplexTransform):
def __init__(self, rng):
super(Moebius, self).__init__(rng)
self.coef_a = complex(rng.uniform(-1, 1), rng.uniform(-1, 1))
self.coef_b = complex(rng.uniform(-1, 1), rng.uniform(-1, 1))
self.coef_c = complex(rng.uniform(-1, 1), rng.uniform(-1, 1))
self.coef_d = complex(rng.uniform(-1, 1), rng.uniform(-1, 1))
def f(self, z):
return (self.coef_a * z + self.coef_b) / (self.coef_c * z + self.coef_d)
class InverseJulia(baseforms.ComplexTransform):
def __init__(self, rng):
super(InverseJulia, self).__init__(rng)
r = sqrt(self.rng.random()) * 0.4 + 0.8
theta = 2 * pi * self.rng.random()
self.c = complex(r * cos(theta), r * sin(theta))
def f(self, z):
z2 = self.c - z
theta = atan2(z2.imag, z2.real) * 0.5
sqrt_r = self.rng.choice([1, -1]) * ((z2.imag * z2.imag + z2.real * z2.real) ** 0.25)
return complex(sqrt_r * cos(theta), sqrt_r * sin(theta))
class Bubble(baseforms.Transform):
def __init__(self, rng):
super(Bubble, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
r2 = 4 / (r**2 + 4)
return r2*px, r2*py
class Sinusoidal(baseforms.Transform):
def __init__(self, rng):
super(Sinusoidal, self).__init__(rng)
def transform(self, px, py):
return sin(px), sin(py)
class Spherical(baseforms.Transform):
def __init__(self, rng):
super(Spherical, self).__init__(rng)
def transform(self, px, py):
r2 = sqrt(px**2 + py**2)**2
return px/r2, py/r2
class Horseshoe(baseforms.Transform):
def __init__(self, rng):
super(Horseshoe, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
return (px-py)*(px+py)/r, 2*px*py/r
class Polar(baseforms.Transform):
def __init__(self, rng):
super(Polar, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return theta/pi, r-1
class Handkerchief(baseforms.Transform):
def __init__(self, rng):
super(Handkerchief, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return r * sin(theta+r), r * cos(theta-r)
class Heart(baseforms.Transform):
def __init__(self, rng):
super(Heart, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return r * sin(theta*r), -r * cos(theta*r)
class Disc(baseforms.Transform):
def __init__(self, rng):
super(Disc, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
thpi = atan(px/py)/pi
return thpi * sin(pi*r), thpi * cos(pi*r)
class Spiral(baseforms.Transform):
def __init__(self, rng):
super(Spiral, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return (cos(theta)+sin(r))/r, (sin(theta)-cos(r))/r
class Hyperbolic(baseforms.Transform):
def __init__(self, rng):
super(Hyperbolic, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return sin(theta)/r, r * cos(theta)
class Diamond(baseforms.Transform):
def __init__(self, rng):
super(Diamond, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
return sin(theta)*cos(r), cos(theta)*sin(r)
class Ex(baseforms.Transform):
def __init__(self, rng):
super(Ex, self).__init__(rng)
def transform(self, px, py):
r = sqrt(px**2 + py**2)
theta = atan(px/py)
p03 = sin(theta + r)**3
p13 = cos(theta - r)**3
return r * (p03 + p13), r * (p03 - p13)
class Swirl(baseforms.Transform):
def __init__(self, rng):
super(Swirl, self).__init__(rng)
def transform(self, px, py):
r2 = sqrt(px**2 + py**2)**2
return px*sin(r2) - py*cos(r2), px*cos(r2) + py*sin(r2)
```
|
{
"source": "jezzlucena/django-opp-trans",
"score": 2
}
|
#### File: django-opp-trans/op_trans/asgi.py
```python
import os
from django.core.asgi import get_asgi_application
from op_trans.websocket import websocket_application
from op_trans.redis_cli import RedisCli
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings')
django_application = get_asgi_application()
async def application(scope, receive, send):
RedisCli.get()
if scope['type'] == 'http':
await django_application(scope, receive, send)
elif scope['type'] == 'websocket':
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
```
#### File: django-opp-trans/op_trans/redis_cli.py
```python
import redis
class RedisCli:
instance = None
@classmethod
def get(cls):
if not cls.instance:
cls.instance = redis.Redis(host='localhost', port=6379, db=0)
return cls.instance
```
|
{
"source": "jezznar/PoE-Pvt-League-Ranker-Overlay",
"score": 2
}
|
#### File: PoE-Pvt-League-Ranker-Overlay/pplo/charData.py
```python
import sys
import json
import operator
from jsonpath_ng.ext import parse
import logging
LOGGER = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s][%(funcName)s()] %(message)s"
logging.basicConfig(filename='main.log', format=FORMAT)
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
LOGGER.setLevel(logging.DEBUG)
class CharData:
def __init__(self, charName):
try:
with open('pplo/data/classes.json') as f:
self.classData = json.load(f)
except Exception as e:
LOGGER.error(e)
sys.exit()
self.charName = charName
self.charData = None
def getCharDataFromRankData(self, plRankData):
jpExpr = parse(f"$.entries[?(@.character.name == '{self.charName}')]")
foundValues = [match.value for match in jpExpr.find(plRankData)]
if foundValues:
self.charData = foundValues[0]
def getGlobalCharRank(self):
self.globalRank = self.charData['rank']
return self.charData['rank']
def getClassCharRank(self, plRankData):
className = self.charData['character']['class']
baseClass = [ thisClass for thisClass in self.classData if className in self.classData[thisClass] ][0]
relatedClasses = self.classData[baseClass]
classRanks = []
for rc in relatedClasses:
jpExpr = parse(f"$.entries[?(@.character.class == {rc})]")
foundValues = [match.value for match in jpExpr.find(plRankData)]
classRanks += foundValues
classRanks.sort(key=operator.itemgetter('rank'))
self.classRank = next((index for (index, d) in enumerate(classRanks) if d['character']['name'] == self.charName), None) + 1
return self.classRank
def getAscendancyCharRank(self, plRankData, ascendancy=None):
if(ascendancy == None):
ascendancy = self.charData['character']['class']
jpExpr = parse(f"$.entries[?(@.character.class == {ascendancy})]")
classRanks = [match.value for match in jpExpr.find(plRankData)]
ascendancyRank = next((index for (index, d) in enumerate(classRanks) if d['character']['name'] == self.charName), None) + 1
if(ascendancy == None):
self.ascendancyRank = ascendancyRank
return ascendancyRank
```
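The rank lookups above lean on `jsonpath_ng.ext` filter expressions such as `$.entries[?(@.character.name == '...')]`. A toy example of the same pattern on a hand-made rank payload (the data below is fabricated for illustration):

```python
# Toy illustration of the jsonpath filter syntax used by CharData above.
from jsonpath_ng.ext import parse

rank_data = {
    "entries": [
        {"rank": 1, "character": {"name": "Alice", "class": "Witch"}},
        {"rank": 2, "character": {"name": "Bob", "class": "Marauder"}},
    ]
}

expr = parse("$.entries[?(@.character.name == 'Alice')]")
matches = [match.value for match in expr.find(rank_data)]
print(matches)  # expected: the single entry for 'Alice'
```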
#### File: PoE-Pvt-League-Ranker-Overlay/pplo/plData.py
```python
import json
import requests
import sys
from time import sleep
from urllib.parse import urlparse, parse_qs
import logging
LOGGER = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s][%(funcName)20s()] %(message)s"
logging.basicConfig(filename='main.log', format=FORMAT)
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
LOGGER.setLevel(logging.DEBUG)
class PLData:
def __init__(self, url=None, leagueName=None):
self.leagueName = None
if url:
parsedURL = urlparse(url)
parsedQueries = parse_qs(parsedURL.query)
self.leagueName = parsedQueries['id'][0]
if leagueName:
self.leagueName = leagueName
if(not self.leagueName):
raise Exception('InitError', 'league url or league name should be provided')
self.poeLadderUrl = 'https://www.pathofexile.com/api/ladders'
self.rankData = None
def getRankData(self):
url_params = {
'offset': 0,
'id': self.leagueName
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0'
}
finalRankData = {}
count = 1
totalPages = 0
currentPage = 0
while True:
try:
LOGGER.info(f'Fetching Data.. {count}')
thisRankData = requests.get(
url=self.poeLadderUrl,
params = url_params,
timeout = 2,
headers = headers
)
thisRankData = json.loads(thisRankData.text)
if(totalPages == 0):
if(int(thisRankData['total'] / 200) > 10):
totalPages = 10
if('entries' not in finalRankData.keys()):
finalRankData = thisRankData
else:
if('entries' in thisRankData.keys()):
finalRankData['entries'] += thisRankData['entries']
currentPage += 1
# STOP PROCESSING DATA IF PAGES GO BEYOND 10 PAGES OF 200 ITEMS EACH
# TOTAL OF 2000 DATA POINTS, HENCE RANK IS GREATER THAN 2000
if(currentPage + 1 > 10 or
len(thisRankData['entries']) == 0 or
len(thisRankData['entries']) == thisRankData['total']
):
LOGGER.info('Done Fetching Data.')
break
if(len(finalRankData['entries']) < thisRankData['total']):
url_params['offset'] = currentPage * 200
sleep(2)
count += 1
except Exception as e:
LOGGER.info(f'ERROR: {e}')
sleep(2)
continue
self.rankData = finalRankData
```
|
{
"source": "jf0021/SWC-Python-2.7",
"score": 4
}
|
#### File: SWC-Python-2.7/code/arith.py
```python
import sys
def main():
if len(sys.argv) != 4:
print 'Need exactly 3 arguments'
else:
operator = sys.argv[1]
if not operator in ['add', 'subtract', 'multiply', 'divide']:
print 'Operator is not one of add, subtract, multiply, or divide: bailing out'
else:
try:
operand1, operand2 = float(sys.argv[2]), float(sys.argv[3])
except ValueError:
print 'cannot convert input to a number: bailing out'
return
do_arithmetic(operand1, operator, operand2)
def do_arithmetic(operand1, operator, operand2):
if operator == 'add':
value = operand1 + operand2
elif operator == 'subtract':
value = operand1 - operand2
elif operator == 'multiply':
value = operand1 * operand2
elif operator == 'divide':
value = operand1 / operand2
print value
main()
```
#### File: SWC-Python-2.7/code/my_ls.py
```python
import sys
import glob
def main():
'''prints names of all files with sys.argv as suffix'''
if len(sys.argv) < 2:
print 'Argument list cannot be empty'
else:
suffix = sys.argv[1] # NB: behaviour is not as you'd expect if sys.argv[1] is *
glob_input = '*.' + suffix # construct the input
glob_output = glob.glob(glob_input) # call the glob function
for item in glob_output: # print the output
print item
return
main()
```
|
{
"source": "j-f1/forked-deno",
"score": 2
}
|
#### File: forked-deno/tools/target_test.py
```python
import os
import sys
from test_util import DenoTestCase, run_tests
from util import executable_suffix, tests_path, run, run_output
class TestTarget(DenoTestCase):
@staticmethod
def check_exists(filename):
if not os.path.exists(filename):
print "Required target doesn't exist:", filename
print "Run ./tools/build.py"
sys.exit(1)
def test_executable_exists(self):
self.check_exists(self.deno_exe)
def _test(self, executable):
"Test executable runs and exits with code 0."
bin_file = os.path.join(self.build_dir, executable + executable_suffix)
self.check_exists(bin_file)
run([bin_file], quiet=True)
def test_libdeno(self):
self._test("libdeno_test")
def test_cli(self):
self._test("cli_test")
def test_core(self):
self._test("deno_core_test")
def test_core_http_benchmark(self):
self._test("deno_core_http_bench_test")
def test_ts_library_builder(self):
result = run_output([
"node", "./node_modules/.bin/ts-node", "--project",
"tools/ts_library_builder/tsconfig.json",
"tools/ts_library_builder/test.ts"
],
quiet=True)
self.assertEqual(result.code, 0)
assert "ts_library_builder ok" in result.out
def test_no_color(self):
t = os.path.join(tests_path, "no_color.js")
result = run_output([self.deno_exe, "run", t],
merge_env={"NO_COLOR": "1"},
quiet=True)
assert result.out.strip() == "noColor true"
t = os.path.join(tests_path, "no_color.js")
result = run_output([self.deno_exe, "run", t], quiet=True)
assert result.out.strip() == "noColor false"
def test_exec_path(self):
cmd = [self.deno_exe, "run", "tests/exec_path.ts"]
result = run_output(cmd, quiet=True)
assert self.deno_exe in result.out.strip()
self.assertEqual(result.code, 0)
if __name__ == "__main__":
run_tests()
```
|
{
"source": "jf20541/ARIMA",
"score": 3
}
|
#### File: ARIMA/src/plot.py
```python
from statsmodels.graphics.tsaplots import plot_acf
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import config
import statsmodels.api as sm
df = pd.read_csv(config.TRAINING)
df = df["AdjClose"].dropna()
train, test = train_test_split(df, test_size=0.025, shuffle=False)
log_ret = pd.read_csv(config.LOG_TRAINING)
def train_test_plot():
"""Plotting Training/Testing Time Series"""
plt.title("Crude Oil Time-Series")
plt.plot(train, label="Training Set")
plt.plot(test, label="Test Set")
plt.xlabel("Number of Days")
plt.ylabel("Price in USD")
plt.legend(loc="upper left")
plt.savefig("../plots/Time-Series.jpg")
def ACF():
"""Plotting the Autocorrelation with Non-Stationary Time-Series"""
sm.graphics.tsa.plot_acf(df.values.squeeze(), lags=40)
plt.title("Crude Oil AdjClose Price Autocorrelation")
plt.savefig("../plots/ACF_Nonstationary.jpg")
def ACF_log():
"""Plotting the Autocorrelation with Log-Returns Time-Series"""
sm.graphics.tsa.plot_acf(log_ret["AdjClose"].values.squeeze(), lags=40)
plt.title("Crude Oil Log-Returns Autocorrelation")
plt.savefig("../plots/ACF_Stationary.jpg")
if __name__ == "__main__":
train_test_plot()
ACF()
ACF_log()
plt.show()
```
|
{
"source": "jf20541/BERT-NLP",
"score": 3
}
|
#### File: BERT-NLP/src/app.py
```python
from fastapi import FastAPI
import torch
import torch.nn as nn
import config
from transformers import BertModel, logging
from dataset import IMDBDataset
logging.set_verbosity_warning()
logging.set_verbosity_error()
app = FastAPI()
class BERT(nn.Module):
def __init__(self):
super(BERT, self).__init__()
self.bert = BertModel.from_pretrained(config.BERT_PATH)
self.bert_drop = nn.Dropout(config.DROP_OUT)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
_, o2 = self.bert(ids, mask, token_type_ids, return_dict=False)
out = self.bert_drop(o2)
out = self.out(out)
return torch.sigmoid(out)
model = BERT()
model.load_state_dict(torch.load(config.MODEL_PATH, map_location=torch.device('cpu')))
@app.get("/predict")
def fetch_prediction(text: str):
dataset = IMDBDataset([text], [-1])
prediction = float(list(model.predict(dataset, batch_size=2))[0][0][0])
return {"sentence": text, "positive": prediction, "negative": 1 - prediction}
# uvicorn api:app --host 0.0.0.0 --port 12000 --reload
```
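Assuming the service is started with the uvicorn command noted in the comment above, the `/predict` route could be exercised with a simple HTTP client; the URL and example text below are illustrative only.

```python
# Hypothetical client call against the /predict route above
# (assumes the app is being served on localhost:12000).
import requests

resp = requests.get(
    "http://localhost:12000/predict",
    params={"text": "This movie was surprisingly good"},
)
print(resp.json())  # {"sentence": ..., "positive": ..., "negative": ...}
```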
#### File: BERT-NLP/src/model.py
```python
import torch.nn as nn
from transformers import BertModel
import config
class BERT(nn.Module):
def __init__(self):
super(BERT, self).__init__()
# loading the model's weights from BERT path
self.bert = BertModel.from_pretrained(config.BERT_PATH)
# dropout of 30% (avoids overfitting)
self.bert_drop = nn.Dropout(config.DROP_OUT)
        # BERT-base models output 768 hidden features
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
        # o1: sequence of hidden states, one 768-dim vector per token (up to MAX_LEN) for each example in the batch
        # o2: pooled [CLS] representation from BERT's pooler output
o1, o2 = self.bert(ids, mask, token_type_ids, return_dict=False)
# pass through dropout
out = self.bert_drop(o2)
# pass through linear layer
return self.out(out)
```
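The forward pass above expects the three tensors a BERT tokenizer produces (token ids, attention mask, token type ids). A minimal inference sketch, assuming the module above is importable as `model` and that `config.BERT_PATH` points at a bert-base checkpoint such as `bert-base-uncased` (so the matching tokenizer can be used):

```python
# Minimal inference sketch for the BERT wrapper above; the checkpoint name,
# module import and example text are assumptions for illustration.
import torch
from transformers import BertTokenizer

from model import BERT  # the class defined in src/model.py above

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
encoded = tokenizer.encode_plus(
    "A short example review",
    max_length=64,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

net = BERT()
net.eval()
with torch.no_grad():
    logits = net(
        ids=encoded["input_ids"],
        mask=encoded["attention_mask"],
        token_type_ids=encoded["token_type_ids"],
    )
print(torch.sigmoid(logits))  # probability of the positive class
```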
|
{
"source": "jf20541/GRU-Sentiment-NLP",
"score": 3
}
|
#### File: jf20541/GRU-Sentiment-NLP/app.py
```python
from flask import Flask, render_template, request, jsonify
import torch
import torch.nn as nn
# function to make sentiment prediction
# find a way to save the sentence
# show the models result
app = Flask(__name__)
MODEL = None
DEVICE = 'cuda'
def sentence_prediction(sentence):
review = str(sentence)
# convert each features, sentiment to tensors
reviews = torch.tensor(review[idx, :], dtype=torch.long).unsqueeze(0)
reviews = reviews.to(DEVICE, dtype=torch.long)
targets = targets.to(DEVICE, dtype=torch.float)
outputs = MODEL(reviews)
return outputs
@app.route("/predict")
def predict():
sentence = request.args.get('sentence')
positive_prediction = sentence_prediction(sentence)
negative_prediction = 1 - positive_prediction
response = {}
response['response'] = {
'positive': positive_prediction,
'negative': negative_prediction,
'sentence': sentence
}
return jsonify(response)
class GRU(nn.Module):
def __init__(
self,
embedding_matrix,
vocab_size,
embedding_dim,
hidden_dim,
output_dim,
n_layers,
dropout,
):
super(GRU, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.embedding.weight = nn.Parameter(
torch.tensor(embedding_matrix, dtype=torch.float32)
)
self.embedding.weight.requires_grad = False
self.lstm = nn.GRU(
embedding_dim,
hidden_dim,
n_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.out = nn.Linear(hidden_dim * 4, output_dim)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.embedding(x)
h0, _ = self.lstm(x)
avg_pool = torch.mean(h0, 1)
max_pool, _ = torch.max(h0, 1)
out = torch.cat((avg_pool, max_pool), 1)
out = self.out(out)
return self.sigmoid(out)
if __name__ == "__main__":
MODEL =
app.run(port=12000, debug=True)
```
#### File: GRU-Sentiment-NLP/src/api.py
```python
from flask import Flask, render_template, request, jsonify
import torch
import torch.nn as nn
from bigru_model import BIGRU
from bilstm_model import BILSTM
from embeddings import GloVeEmbedding
import pandas as pd
import config
from keras.preprocessing.text import Tokenizer
app = Flask(__name__)
MODEL = None
def sentence_prediction(sentence, model):
review = str(sentence)
reviews = torch.tensor(review[idx, :], dtype=torch.long).unsqueeze(0)
reviews = reviews.to(config.DEVICE, dtype=torch.long)
outputs = model(reviews).cpu().detach().numpy().tolist()
return outputs
@app.route("/predict")
def predict():
sentence = request.args.get("sentence")
positive_prediction = sentence_prediction(sentence, model=MODEL)
negative_prediction = 1 - positive_prediction
response = {}
response["response"] = {
"positive": str(positive_prediction),
"negative": str(negative_prediction),
"sentence": str(sentence),
}
return jsonify(response)
# @app.route("/", methods=["GET", "POST"])
# def predict():
# return render_template("index.html")
sentence = "I love this movie"
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentence.values.tolist())
glove = pd.read_csv(config.GLOVE_PARAMS, sep=" ", quoting=3, header=None, index_col=0)
glove_embedding = {key: val.values for key, val in glove.T.items()}
emb = GloVeEmbedding(sentence, glove)
embedding_matrix = emb.embedding_matrix(glove_embedding)
if __name__ == "__main__":
MODEL = BIGRU(
embedding_matrix,
embedding_matrix.shape[0],
embedding_matrix.shape[1],
128,
1,
2,
0.2,
)
MODEL.load_state_dict(torch.load(config.MODEL_PATH))
MODEL.to(config.DEVICE)
MODEL.eval()
app.run()
```
#### File: GRU-Sentiment-NLP/src/embeddings.py
```python
import numpy as np
from keras.preprocessing.text import Tokenizer
class GloVeEmbedding:
def __init__(self, dataframe, glove_params):
self.dataframe = dataframe
self.glove = glove_params
def create_embedding_matrix(self, word_index, embedding_dict=None, d_model=100):
"""Creates the embedding matrix save in numpy array
Args:
word_index (dict): dictionary with tokens
embedding_dict (dict, optional): dict with word embedding
d_model (int): dimension of word pretrained embedding (Defaults to 100) Glove embedding is 100
Returns:
[array]: array with embedding vectors for all known words
"""
embedding_matrix = np.zeros((len(word_index) + 1, d_model))
for word, index in word_index.items():
if word in embedding_dict:
embedding_matrix[index] = embedding_dict[word]
return embedding_matrix
def embedding_matrix(self, glove_embedding):
# tokenize review words
tokenizer = Tokenizer()
tokenizer.fit_on_texts(self.dataframe.values.tolist())
return self.create_embedding_matrix(
tokenizer.word_index, embedding_dict=glove_embedding
)
class FastTextEmbedding:
def __init__(self, dataframe, fasttext_params):
self.dataframe = dataframe
self.fast = fasttext_params
def create_embedding_matrix(self, word_index, embedding_dict=None, d_model=300):
"""Creates the embedding matrix save in numpy array
Args:
word_index (dict): dictionary with tokens
embedding_dict (dict, optional): dict with word embedding
d_model (int): dimension of word pretrained embedding (Defaults to 300)
Returns:
[array]: array with embedding vectors for all known words
"""
embedding_matrix = np.zeros((len(word_index) + 1, d_model))
for word, index in word_index.items():
if word in embedding_dict:
embedding_matrix[index] = embedding_dict[word]
return embedding_matrix
def embedding_matrix(self, fasttext_embedding):
# tokenize review words
tokenizer = Tokenizer()
tokenizer.fit_on_texts(self.dataframe.values.tolist())
return self.create_embedding_matrix(
tokenizer.word_index, embedding_dict=fasttext_embedding
)
```
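The matrix construction above is easiest to see on a toy vocabulary: row `i` of the matrix holds the pretrained vector for the token with index `i`, row 0 stays zero because Keras reserves index 0 for padding, and out-of-vocabulary words keep a zero row. The two-dimensional "pretrained" vectors below are invented for the example.

```python
# Toy illustration of the create_embedding_matrix() logic above.
import numpy as np

word_index = {"good": 1, "movie": 2, "bad": 3}      # as produced by Tokenizer.word_index
embedding_dict = {
    "good": np.array([0.1, 0.2]),
    "movie": np.array([0.3, 0.4]),
}                                                   # "bad" is out-of-vocabulary

d_model = 2
embedding_matrix = np.zeros((len(word_index) + 1, d_model))
for word, index in word_index.items():
    if word in embedding_dict:
        embedding_matrix[index] = embedding_dict[word]

print(embedding_matrix)
# [[0.  0. ]   <- padding row (index 0)
#  [0.1 0.2]   <- "good"
#  [0.3 0.4]   <- "movie"
#  [0.  0. ]]  <- "bad" has no pretrained vector, stays zero
```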
#### File: GRU-Sentiment-NLP/src/train.py
```python
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score
from sklearn.model_selection import train_test_split
import torch
import tensorflow as tf
from keras.preprocessing.text import Tokenizer
from dataset import IMDBDataset
from bilstm_model import BILSTM
from bigru_model import BIGRU
from engine import Engine
from embeddings import GloVeEmbedding, FastTextEmbedding
import config
import codecs
def train():
df = pd.read_csv(config.TRAINING_FILE_CLEAN)
# class allows to vectorize a text corpus
tokenizer = Tokenizer()
# updates internal vocabulary based on a list of sequences
tokenizer.fit_on_texts(df.review.values.tolist())
# read the vector representations for words
glove = pd.read_csv(
config.GLOVE_PARAMS, sep=" ", quoting=3, header=None, index_col=0
)
# load and access a word vectors
glove_embedding = {key: val.values for key, val in glove.T.items()}
# load fasttext embeddings
fasttext_embedding = {}
fasttext = codecs.open("../input/fasttext/wiki.simple.vec", encoding="utf-8")
for line in fasttext:
values = line.rstrip().rsplit(" ")
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
fasttext_embedding[word] = coefs
fasttext.close()
# define features and target values
targets = df[["sentiment"]]
features = df["review"]
# hold-out based validation 80% training and 20% testing set
x_train, x_test, y_train, y_test = train_test_split(
features, targets, test_size=0.2, stratify=targets
)
# only most frequent words will be taken into account
x_train = tokenizer.texts_to_sequences(x_train.values)
x_test = tokenizer.texts_to_sequences(x_test.values)
    # transforms a list of sequences into a 2D Numpy array of shape (num_samples, maxlen)
x_train = tf.keras.preprocessing.sequence.pad_sequences(
x_train, maxlen=config.MAX_LENGTH
)
x_test = tf.keras.preprocessing.sequence.pad_sequences(
x_test, maxlen=config.MAX_LENGTH
)
# define target values in arrays
y_train = y_train.sentiment.values
y_test = y_test.sentiment.values
# initialize custom dataset
train_dataset = IMDBDataset(x_train, y_train)
test_dataset = IMDBDataset(x_test, y_test)
# initialize dataloader from custom dataset and defined batch size for training/testing set
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=config.TRAIN_BATCH_SIZE
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=config.TEST_BATCH_SIZE
)
# initialize GloVeEmbedding Class
emb = GloVeEmbedding(df.review, glove)
embedding_matrix = emb.embedding_matrix(glove_embedding)
# initialize FastTextEmbedding
fast = FastTextEmbedding(df.review, fasttext)
embedding_matrix = fast.embedding_matrix(fasttext_embedding)
    # initialize the Bi-LSTM model (a Bi-GRU variant is also imported) with defined parameters:
    # embedding matrix, vocab size, embedding dim, hidden size, output size, number of layers, and dropout respectively
model = BILSTM(
embedding_matrix,
embedding_matrix.shape[0],
embedding_matrix.shape[1],
128,
1,
2,
0.2,
)
model.to(config.DEVICE)
# initialize Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)
# adjust the learning rate based on the number of epochs
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", patience=5, factor=0.3, verbose=True
)
# initialize Engine class with model, optimizer, and device
eng = Engine(model, optimizer, config.DEVICE)
for epochs in range(config.EPOCHS):
# initiating training and evaluation function
train_targets, train_outputs = eng.train_fn(train_loader, scheduler)
eval_targets, eval_outputs = eng.eval_fn(test_loader)
# binary classifier
eval_outputs = np.array(eval_outputs) >= 0.5
train_outputs = np.array(train_outputs) >= 0.5
# calculating accuracy score and precision score
train_metric = accuracy_score(train_targets, train_outputs)
eval_metric = accuracy_score(eval_targets, eval_outputs)
prec_score = precision_score(eval_targets, eval_outputs)
print(
f"Epoch:{epochs+1}/{config.EPOCHS}, Train Accuracy: {train_metric:.2f}%, Eval Accuracy: {eval_metric:.2f}%, Eval Precision: {prec_score:.4f}"
)
print(confusion_matrix(eval_targets, eval_outputs))
    # save the trained model's parameters
torch.save(model.state_dict(), config.MODEL_PATH)
if __name__ == "__main__":
train()
```
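The Keras preprocessing in `train()` above (fit a `Tokenizer`, convert reviews to integer sequences, pad to `MAX_LENGTH`) is easier to follow in isolation. A tiny example with two made-up reviews and a padding length of 5:

```python
# Standalone illustration of the Tokenizer + pad_sequences steps used in train() above;
# the two reviews and the padding length of 5 are made up for the example.
import tensorflow as tf
from keras.preprocessing.text import Tokenizer

reviews = ["a great great movie", "a terrible movie"]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(reviews)

sequences = tokenizer.texts_to_sequences(reviews)
padded = tf.keras.preprocessing.sequence.pad_sequences(sequences, maxlen=5)
print(tokenizer.word_index)  # word -> integer index, most frequent words first
print(padded)                # sequences zero-padded on the left to length 5
```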
|
{
"source": "jf20541/InforProject",
"score": 3
}
|
#### File: InforProject/src/dataset.py
```python
import torch
class InforDataset:
def __init__(self, features, target):
self.target = target
self.features = features
def __len__(self):
# length of the dataset
return self.target.shape[0]
def __getitem__(self, idx):
# convert each features, target to tensors
return {
"target": torch.tensor(self.target[idx], dtype=torch.float),
"features": torch.tensor(self.features[idx, :], dtype=torch.float),
}
```
|
{
"source": "jf20541/MultiRegressionModel",
"score": 3
}
|
#### File: MultiRegressionModel/src/LR_main.py
```python
import pandas as pd
import math
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import LR_config
def model(feature, target):
"""Initiate Linear Regression Model
Args:
feature [float]: pandas series, feature variable
target [foat]: pandas series, target variable
Returns:
[float]: slope-coefficient and intercept
"""
lr = LinearRegression()
feature = feature.values.reshape(-1, 1)
target = target.values.reshape(-1, 1)
# fit the model
lr.fit(feature, target)
# define slope and intercept
slope = lr.coef_[0][0]
intercept = lr.intercept_[0]
return slope, intercept
if __name__ == "__main__":
df = pd.read_csv(LR_config.TRAINING_FILE)
slope, intercept = model(df["MSFT"], df["SPY"])
print(f"Slope: {slope:.2f} and Intercept: {intercept:.2f}")
```
#### File: MultiRegressionModel/src/LR_model.py
```python
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import LR_config
class LinearRegression:
def __init__(self, lr=0.0001, n_iters=100):
self.lr = lr
self.n_iters = n_iters
self.weights = None
self.bias = None
def fit(self, train, target):
# init parameters
n_samples, n_features = train.shape
self.weights = np.zeros(n_features)
self.bias = 0
# iterate process
for _ in range(self.n_iters):
# y = mx + b
pred = np.dot(train, self.weights) + self.bias
# taking the derivative
# np.dot = sum product over the last axis of a and b array
dw = (1 / n_samples) * np.dot(train.T, (pred - target))
db = (1 / n_samples) * np.sum(pred - target)
self.weights -= self.lr * dw
self.bias -= self.lr * db
def predict(self, train):
return np.dot(train, self.weights) + self.bias
if __name__ == "__main__":
df = pd.read_csv(LR_config.TRAINING_FILE)
# define target and feature
feature, target = df["MSFT"], df["SPY"]
    # split the data 80% training, 20% testing (no shuffling) since it's a time-series
x_train, x_test, y_train, y_test = train_test_split(
feature, target, test_size=0.2, random_state=42, shuffle=False
)
# initiate LinearRegression, fit and predict the model
model = LinearRegression()
model.fit(x_train, y_train)
pred = model.predict(x_test)
# evaluate performance
r2, mse = r2_score(y_test, pred), mean_squared_error(y_test, pred)
print(f"R-Squared: {r2}, Mean Squared Error: {mse}")
```
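The `dw`/`db` expressions in `fit()` above are exactly the gradients of the (half) mean-squared-error cost, which is why the update rule converges toward the least-squares line. With X the training matrix, y the targets, y-hat = Xw + b the predictions and eta equal to `self.lr`:

```latex
J(w, b) = \frac{1}{2n}\sum_{i=1}^{n}\bigl(\hat{y}_i - y_i\bigr)^2,
\qquad \hat{y} = Xw + b,
\\[6pt]
\frac{\partial J}{\partial w} = \frac{1}{n}\,X^{\top}(\hat{y} - y),
\qquad
\frac{\partial J}{\partial b} = \frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i),
\\[6pt]
w \leftarrow w - \eta\,\frac{\partial J}{\partial w},
\qquad
b \leftarrow b - \eta\,\frac{\partial J}{\partial b}.
```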
|
{
"source": "jf20541/PCARiskModel",
"score": 3
}
|
#### File: PCARiskModel/src/data.py
```python
import pandas as pd
import yfinance as yf
import config
def fetch_data():
tickers = ["XLB", "XLE", "XLF", "XLI", "XLK", "XLP", "XLU", "XLV", "XLY"]
df = yf.download(tickers, start="2020-06-09", end="2021-06-09")
df = df["Adj Close"]
df = df.pct_change()[1:]
if df.isnull().values.any() == False:
df.to_csv(config.TRAINING_FILE, index_label=False)
print("No Null values found")
else:
print("Null values Found, clean data")
if __name__ == "__main__":
fetch_data()
```
|
{
"source": "jf20541/PortfolioVarianceBW",
"score": 3
}
|
#### File: PortfolioVarianceBW/src/main.py
```python
import pandas as pd
import config
from sklearn.linear_model import LinearRegression
import numpy as np
class VarianceModel:
def __init__(self, timeseries):
self.timeseries = timeseries
def factors(self):
"""Returns: calculated median and mean factor as float"""
factor_1 = self.timeseries.mean(axis=1)
factor_2 = self.timeseries.median(axis=1)
return factor_1, factor_2
def idiosyncratic_variance(self):
"""Idiosyncratic Variance: type of investment risk that is endemic to an individual asset
Returns: [float]: numpy array of individual variance (risk)
"""
return np.var(self.timeseries)
def factor_exposure(self, factor_return, asset_return):
"""
Args:
            factor_return [float]: factor return series used to estimate the asset's exposure to each factor
            asset_return [float]: daily returns of a single asset from the time-series
Returns:
[float]: coefficient from Linear Regression
"""
lr = LinearRegression()
X = np.array(factor_return).T
y = np.array(asset_return.values)
lr.fit(X, y)
return lr.coef_
def exposure(self):
"""Exposure from individual assets from systemic exposure
Returns:
[float-array]: collected all assets exposures
"""
all_exposure = []
for i in range(len(self.timeseries.columns)):
all_exposure.append(
self.factor_exposure(
self.factors(), self.timeseries[self.timeseries.columns[i]]
)
)
factor_exposure_a = np.array(all_exposure)
return factor_exposure_a
def factor_covariance(self):
"""Returns: [float]: array-like of factor covariance"""
return np.cov(self.factors()[0], self.factors()[1], ddof=1)
def asset_weights(self, asset_weights):
"""Rebalancing weights to sum 1
Args: asset_weights [float]: array-like for asset's respective weights
Returns: [float]: rebalanced weights so sum equal to 100
"""
weights = np.array(asset_weights)
rebalance_weights = weights / np.sum(weights)
return rebalance_weights
if __name__ == "__main__":
df = pd.read_csv(config.TRAINING_FILE)
model = VarianceModel(df)
B = model.exposure()
F = model.factor_covariance()
S = np.diag(model.idiosyncratic_variance())
X = model.asset_weights(config.WEIGHTS)
var_portfolio = X.T.dot(B.dot(F).dot(B.T) + S).dot(X)
print(f"Bridgewater Associates Portfolio Variance is {var_portfolio:.8f}")
```
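The final line above is the standard factor-model decomposition of portfolio variance: with B the matrix of factor exposures, F the factor covariance matrix, S the diagonal matrix of idiosyncratic variances and x the rebalanced asset weights, the asset covariance and portfolio variance are

```latex
\Sigma = B F B^{\top} + S,
\qquad
\operatorname{Var}(r_p) = x^{\top}\,\Sigma\,x = x^{\top}\bigl(B F B^{\top} + S\bigr)x .
```

which is exactly what `X.T.dot(B.dot(F).dot(B.T) + S).dot(X)` computes.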
|
{
"source": "jf20541/Pruning-DeepNeuralNetwork",
"score": 3
}
|
#### File: Pruning-DeepNeuralNetwork/src/engine.py
```python
import torch
class Engine:
def __init__(self, model, optimizer):
self.model = model
self.optimizer = optimizer
def loss_fn(self, outputs, targets):
""" Computes the cross entropy loss between input and target.
Training a classification problem with 10 classes.
"""
return torch.nn.CrossEntropyLoss()(outputs, targets)
def train_fn(self, train_loader):
""" Loop over our training set and feed tensors inputs to NN model and optimize
Args:
train_loader: iterable over a training set
Returns: accuracy score
"""
# set training mode
self.model.train()
for _, (features, targets) in enumerate(train_loader):
# initialize
correct, total = 0, 0
features = features.reshape(features.shape[0], -1)
# set gradients to zero
self.optimizer.zero_grad()
# forward
outputs = self.model(features)
# calculate CrossEntropy loss function
loss = self.loss_fn(outputs, targets)
# backward propagation
loss.backward()
# run optimizer
self.optimizer.step()
# calculating accuracy
_, pred = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (targets == pred).sum().item()
accuracy = correct / total
return accuracy
def eval_fn(self, test_loader):
""" Loop over our testing set and feed the tensor inputs to NN model and optimize
Args:
test_loader: iterable over a testing set
Returns: accuracy score
"""
self.model.eval()
# initialize
correct, total = 0, 0
# disabled gradient calculation
with torch.no_grad():
for _, (features, targets) in enumerate(test_loader):
features = features.reshape(features.shape[0], -1)
outputs = self.model(features)
# calculating accuracy
_, pred = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (targets == pred).sum().item()
accuracy = correct / total
return accuracy
```
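A hedged sketch of how `Engine` might be driven end to end; the linear model, optimizer, and random MNIST-shaped data below are placeholders, not the repository's actual model or data loaders.

```python
# Illustrative driver for the Engine class above; the 784->10 linear model and
# the random tensors are placeholders for the repository's real model/loaders.
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.randn(512, 1, 28, 28)          # fake image batch
targets = torch.randint(0, 10, (512,))          # fake class labels
loader = DataLoader(TensorDataset(features, targets), batch_size=64)

model = torch.nn.Linear(28 * 28, 10)            # matches the reshape(..., -1) in Engine
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
engine = Engine(model, optimizer)

for epoch in range(2):
    train_acc = engine.train_fn(loader)
    eval_acc = engine.eval_fn(loader)
    print(f"epoch {epoch}: train_acc={train_acc:.3f} eval_acc={eval_acc:.3f}")
```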
|
{
"source": "jf20541/Regression-OptimalParameters-Flask-Azure",
"score": 3
}
|
#### File: Regression-OptimalParameters-Flask-Azure/src/train_scaling.py
```python
import pandas as pd
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
import optuna
import config
df = pd.read_csv(config.TRAINING_SCALE)
targets = df.price.values.reshape(-1, 1)
features = df[
    [
        "bedrooms",
        "bathrooms",
        "sqft_living",
        "floors",
        "condition",
        "yr_built",
        "yr_renovated",
    ]
].values
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
def create_model(trial):
"""Trial object and returns regression model to
generate a model and fit it on standard scaler training data
Args: trial [object]: process of evaluating an objective function
Raises: optuna.TrialPruned: terminates trial that does not meet a predefined condition based on value
Returns: [object]: optimal regression model
"""
model_type = trial.suggest_categorical(
"model_type",
[
"SVR",
"KNeighborsRegressor",
],
)
if model_type == "SVR":
kernel = trial.suggest_categorical(
"kernel", ["linear", "poly", "rbf", "sigmoid"]
)
regularization = trial.suggest_uniform("svm-regularization", 0.01, 10)
degree = trial.suggest_discrete_uniform("degree", 1, 5, 1)
model = SVR(kernel=kernel, C=regularization, degree=degree)
if model_type == "KNeighborsRegressor":
n_neighbors = trial.suggest_int("n_neighbors", 3, 10)
        weights = trial.suggest_categorical("weights", ["uniform", "distance"])
p = trial.suggest_int("p", 2, 5)
model = KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, p=p)
if trial.should_prune():
raise optuna.TrialPruned()
return model
def model_performance(model, x_test, y_test):
    """Evaluate a fitted model's performance on the hold-out set (RMSE)
    Args:
        model [object]: fitted regression model
        x_test, y_test: hold-out features and targets
    Returns: [float]: root mean squared error on the hold-out set
    """
pred = model.predict(x_test)
return sqrt(mean_squared_error(y_test, pred))
def objective(trial):
"""Passes to an objective function, gets parameter suggestions,
manage the trial's state, and sets defined attributes of the trial
Args:
trial [object]: manage the trial states
Returns: [object]: sets optimal model and hyperparameters
"""
model = create_model(trial)
model.fit(x_train, y_train.ravel())
return model_performance(model, x_test, y_test)
if __name__ == "__main__":
# minimize the return value of objective function
study = optuna.create_study(direction="minimize")
    # run the optimization for 1000 trials
study.optimize(objective, n_trials=1000)
# get optimal model and its hyper-parameters
best_model = create_model(study.best_trial)
best_model.fit(x_train, y_train.ravel())
trial = study.best_trial
print(f"Performance: {model_performance(best_model, x_test, y_test)}")
print(f"Best hyperparameters: {trial.params}")
```
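For readers new to the Optuna objective/trial pattern used above, a self-contained toy study shows the same minimize-then-inspect workflow; nothing here depends on the repository's data or config.

```python
# Minimal Optuna workflow on a toy objective, mirroring the pattern above.
import optuna

def toy_objective(trial):
    x = trial.suggest_float("x", -10.0, 10.0)   # one tunable parameter
    return (x - 2.0) ** 2                        # minimum at x = 2

study = optuna.create_study(direction="minimize")
study.optimize(toy_objective, n_trials=50)
print("Best value:", study.best_value)
print("Best params:", study.best_params)
```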
|
{
"source": "jf20541/SpotifyETL",
"score": 3
}
|
#### File: YelpAPI/src/data.py
```python
import pandas as pd
def clean_data():
df = pd.read_csv('../inputs/train_api.csv')
drop_col = ['image_url', 'is_closed', 'url', 'phone', 'display_phone','distance',
'coordinates.latitude', 'coordinates.longitude', 'location.address2',
'location.address3', 'location.zip_code', 'location.display_address',
'transactions', 'location.country', 'id', 'Unnamed: 0', 'location.address1',
'location.state']
df = df.drop(drop_col, axis=1).reset_index()
return df
if __name__ == '__main__':
clean_data()
```
#### File: SpotifyETL/src/extract.py
```python
import pandas as pd
import requests
import datetime
import config
def extract_data():
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {config.TOKEN}",
}
# Convert time to Unix timestamp
unix_time = (datetime.date.today() - datetime.timedelta(1)).strftime("%s")
# request and convert to json format
data = requests.get(
f"https://api.spotify.com/v1/me/player/recently-played?after={unix_time}",
headers=headers,
)
data = data.json()
# initiate values as empty list
songs = []
artists = []
time = []
popularity = []
explicit = []
release_date = []
# append values to empty list
for song in data["items"]:
songs.append(song["track"]["name"])
artists.append(song["track"]["album"]["artists"][0]["name"])
time.append(song["played_at"])
popularity.append(song["track"]["popularity"])
explicit.append(song["track"]["explicit"])
release_date.append(song["track"]["album"]["release_date"])
# define dict to change variables
spot_dict = {
"songs": songs,
"artists": artists,
"time": time,
"popularity": popularity,
"explicit": explicit,
"release_date": release_date,
}
    df = pd.DataFrame(spot_dict)
    return df
```
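Note that `strftime("%s")` used above is platform-dependent (it is not part of the C standard and is unavailable on Windows). A portable alternative for yesterday's Unix timestamp is sketched below; whether Spotify's `after` parameter expects seconds or milliseconds should be confirmed against the API documentation.

```python
# Portable way to get yesterday's Unix timestamp without strftime("%s").
import datetime

yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
unix_seconds = int(yesterday.timestamp())
unix_millis = unix_seconds * 1000   # the recently-played endpoint documents `after` in milliseconds
print(unix_seconds, unix_millis)
```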
|
{
"source": "jf20541/XGBoostClfCatBoostClf",
"score": 3
}
|
#### File: XGBoostClfCatBoostClf/src/data.py
```python
import pandas as pd
import config
from sklearn.preprocessing import LabelEncoder
def clean_colname(df):
# lower case columns, no spaces & dashes
df.columns = [
x.lower().replace(" ", "_").replace("-", "_").replace(".", "_")
for x in df.columns
]
return df.columns
class MultiColumnLabelEncoder:
def __init__(self, columns=None):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
output = X.copy()
if self.columns is not None:
for col in self.columns:
output[col] = LabelEncoder().fit_transform(output[col])
else:
            for colname, col in output.items():
output[colname] = LabelEncoder().fit_transform(col)
return output
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
if __name__ == "__main__":
# import all files
test = pd.read_csv(config.TESTING_FILE)
sample = pd.read_csv(config.SAMPLING_FILE)
train = pd.read_csv(config.TRAINING_FILE)
# set index and concat train and test
test = test.set_index("id").join(sample.set_index("id"))
train = train.set_index("id")
df = pd.concat([train, test])
col = clean_colname(df)
df.columns = col
    # initiate label encoder class and fit_transform columns
df = MultiColumnLabelEncoder(
columns=["gender", "vehicle_age", "vehicle_damage"]
).fit_transform(df)
    if not df.isnull().sum().any():
print("Data is Clean, No Null Values Found")
df.to_csv(config.CLEAN_FILE, index=False)
else:
print("Found Null Values")
```
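A quick toy illustration of the `MultiColumnLabelEncoder` defined above; the frame and column names are invented and only mimic the columns encoded in `__main__`.

```python
# Toy usage of MultiColumnLabelEncoder from the module above.
import pandas as pd

toy = pd.DataFrame({
    "gender": ["Male", "Female", "Female"],
    "vehicle_age": ["< 1 Year", "1-2 Year", "> 2 Years"],
    "region_code": [28, 3, 28],
})
encoded = MultiColumnLabelEncoder(columns=["gender", "vehicle_age"]).fit_transform(toy)
print(encoded)
# region_code is left untouched because it was not listed in `columns`.
```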
#### File: XGBoostClfCatBoostClf/src/main.py
```python
import pandas as pd
import numpy as np
from functools import partial
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, roc_auc_score
from skopt import gp_minimize, space
from xgboost import XGBClassifier
import config
def optimize(params, param_names, x, y):
"""Takes all arguments from search space and traning features/target
Initializes the models by setting the chosen param and runs StratifiedKFold
Args:
params [dict]: convert params to dict
param_names [list]: make a list of param names
x [float]: feature values
y [int]: target values are binary
Returns:
        [float]: negative mean ROC AUC across the 5 stratified folds (negated for minimization)
"""
# set the parameters as dictionaries
params = dict(zip(param_names, params))
# initiate XGBClassifier and K-fold (5)
model = XGBClassifier(objective="binary:logistic", **params)
kf = StratifiedKFold(n_splits=5)
# create empty list for metric and loop over folds
acc = []
for idx in kf.split(X=x, y=y):
train_idx, test_idx = idx[0], idx[1]
xtrain, xtest = x[train_idx], x[test_idx]
ytrain, ytest = y[train_idx], y[test_idx]
model.fit(xtrain, ytrain)
pred = model.predict(xtest)
        # compute the fold's ROC AUC and append it to the list
fold_accuracy = roc_auc_score(ytest, pred)
acc.append(fold_accuracy)
    # return the negative mean so that minimizing it maximizes AUC
return -np.mean(acc)
# import csv file and set as array
df = pd.read_csv(config.CLEAN_FILE)
targets = df["response"].values
features = df.drop("response", axis=1).values
# define the range of input values to test the BayesOptimization to create prop-distribution
param_space = [
space.Integer(4, 24, name="max_depth"),
space.Integer(1, 9, name="gamma"),
space.Integer(20, 150, name="reg_alpha"),
space.Real(0.01, 1, prior="uniform", name="reg_lambda"),
space.Integer(1, 10, name="min_child_weight"),
space.Real(0.05, 0.30, prior="uniform", name="eta"),
space.Real(0.5, 1, prior="uniform", name="colsample_bytree"),
space.Real(0.6, 0.95, prior="uniform", name="base_score"),
]
param_names = [
"max_depth",
"gamma",
"reg_alpha",
"reg_lambda",
"min_child_weight",
"eta",
"colsample_bytree",
"base_score",
]
# define the loss function to minimize (acc will be negative)
optimization_function = partial(
optimize, param_names=param_names, x=features, y=targets
)
# initiate gp_minimize for Bayesian optimization using Gaussian Processes.
result = gp_minimize(
optimization_function,
dimensions=param_space,
n_calls=10,
n_random_starts=10,
verbose=10,
)
print(dict(zip(param_names, result.x)))
```
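A hedged follow-up sketch (not part of the original script) showing how the parameters found by `gp_minimize` might be used to refit a final classifier on a hold-out split; `result`, `param_names`, `features`, and `targets` come from the code above, and the split itself is illustrative.

```python
# Illustrative follow-up: refit XGBoost with the parameters found by
# gp_minimize and report AUC on a hold-out split.
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier

best_params = dict(zip(param_names, result.x))
x_tr, x_te, y_tr, y_te = train_test_split(features, targets, test_size=0.2, stratify=targets)
final_model = XGBClassifier(objective="binary:logistic", **best_params)
final_model.fit(x_tr, y_tr)
holdout_pred = final_model.predict(x_te)
print("Holdout AUC:", roc_auc_score(y_te, holdout_pred))
```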
|
{
"source": "jf248/scrape-the-plate",
"score": 3
}
|
#### File: scrape-the-plate/fixtures/main.py
```python
import sys
from fixtures.parseargs import parse_args
from fixtures.csv import CsvToFixtureFactory
def main():
args = parse_args(sys.argv[1:])
CsvToFixtureFactory(args.path).create_fixture()
print(
"""
Finished! Run "$ ./manage.py loaddata {path}initial_data.json" to load
fixtures
""".format(path=args.path)
)
if __name__ == "__main__":
main()
```
#### File: scrape-the-plate/recipes/models.py
```python
from __future__ import unicode_literals
from recipes.utils import general
from django.db import models
from django.utils.text import slugify
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from recipes import ImageField
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
class AbstractModel(models.Model):
def normalize(self):
pass
def save(self, *args, **kwargs):
self.normalize()
super().save(*args, **kwargs)
@classmethod
def filter_user(cls, user):
if user:
return cls.objects.filter(user=user.id)
return cls.objects.filter(user=-1)
@classmethod
def filter_user_and_None(cls, user):
if user:
return cls.objects.filter(
models.Q(user=user.id) | models.Q(user=None)
)
return cls.objects.filter(user=True)
@classmethod
def filter_user_and_public(cls, user):
if user:
return cls.objects.filter(
models.Q(user=user.id) | models.Q(public=True)
)
return cls.objects.filter(public=True)
@classmethod
def get_all(cls):
return list(cls.objects.all())
class Meta:
abstract = True
class User(AbstractUser, AbstractModel):
def normalize(self):
self.email = self.username
class Meta:
ordering = ['id']
class Tag(AbstractModel):
name = models.CharField(max_length=255, unique=True)
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
class Meta:
ordering = ['name']
class Source(AbstractModel):
name = models.CharField(max_length=255, unique=True)
domain_name = models.CharField(max_length=255, blank=True)
@classmethod
def get_id_from_domain_name(cls, domain_name):
try:
source = cls.objects.get(domain_name=domain_name)
return source.id
except ObjectDoesNotExist:
return None
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class GroceryGroup(AbstractModel):
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class GroceryItem(AbstractModel):
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
name = models.CharField(max_length=255, unique=True)
group = models.ForeignKey(GroceryGroup, on_delete=models.PROTECT)
def save(self, *args, **kwargs):
# self.name = general.singular_lower_stripped(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class GroceryPhrase(AbstractModel):
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
grocery_item = models.ForeignKey(GroceryItem, on_delete=models.CASCADE)
text = models.CharField(max_length=255, unique=True)
def save(self, *args, **kwargs):
self.text = general.singular_lower_stripped(
self.text, simple_only=True)
super(GroceryPhrase, self).save(*args, **kwargs)
def __str__(self):
return self.text
class Ingredient(AbstractModel):
grocery_item = models.ForeignKey(
GroceryItem, on_delete=models.PROTECT, blank=True, null=True)
text = models.CharField(max_length=255)
# group = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.text
class Book(AbstractModel):
title = models.CharField(max_length=255)
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
class Meta:
ordering = ['title']
class Recipe(AbstractModel):
public = models.BooleanField(default=False)
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, blank=True, null=True
)
title = models.CharField(max_length=255)
slug = models.SlugField(editable=False, max_length=255)
ingredients = models.ManyToManyField(Ingredient, blank=True)
preparation = models.TextField(blank=True)
notes = models.TextField(blank=True)
tags = models.ManyToManyField(Tag, blank=True)
source = models.ForeignKey(
Source, on_delete=models.PROTECT, null=True, blank=True
)
url = models.URLField(blank=True)
book = models.ForeignKey(
Book, on_delete=models.PROTECT, null=True, blank=True
)
page = models.IntegerField(null=True, blank=True)
prep_time = models.IntegerField(null=True, blank=True)
cook_time = models.IntegerField(null=True, blank=True)
serves = models.IntegerField(blank=True, null=True)
image = ImageField(
upload_to=general.path_and_rename("images/recipes"),
default="images/recipes/default.png",
blank=True
)
def clean(self):
self.normalize()
if self._slug_exists():
raise ValidationError(
{'title': 'A recipe with that title already exists.'})
def __str__(self):
return self.slug
class Meta:
ordering = ['slug']
def normalize(self):
d = self.__dict__
if 'title' in d:
d['slug'] = slugify(self.title)
def _slug_exists(self):
qs = Recipe.objects.filter(slug=self.slug, user=self.user)
if self.pk:
qs = qs.exclude(pk=self.pk)
return qs.exists()
```
#### File: scrape-the-plate/recipes/permissions.py
```python
from rest_framework import permissions
class IsAuthenticatedOrReadOnly(permissions.IsAuthenticatedOrReadOnly):
pass
class AllowAny(permissions.AllowAny):
pass
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
        # Write permissions are only allowed to the owner of the object.
return obj.user == request.user
class IsOwnerOrIsPublic(permissions.BasePermission):
"""
Custom permission to only allow all access to object if owner or
public=True on the object.
"""
def has_object_permission(self, request, view, obj):
        # Allow access only if the requester owns the object or the object is public.
return (obj.user == request.user) or (obj.public)
```
#### File: scrape-the-plate/recipes/serializers.py
```python
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models.fields.related import ManyToManyField, ForeignKey
from rest_framework import serializers, validators
from rest_framework.fields import CharField
from django.contrib.auth import get_user_model
from . import models
UserModel = get_user_model()
class UniqueFieldsMixin(serializers.ModelSerializer):
"""
    Remove all UniqueValidator validators from fields. The validator prevents
    us from using a writable nested serializer. Uniqueness is tested anyway
    in the ModelValidateMixin.
@see https://github.com/beda-software/drf-writable-nested/blob/master \
/drf_writable_nested/mixins.py
"""
def get_fields(self):
self._unique_fields = []
fields = super(UniqueFieldsMixin, self).get_fields()
for field_name, field in fields.items():
is_unique = any([isinstance(validator, validators.UniqueValidator)
for validator in field.validators])
if is_unique:
self._unique_fields.append(field_name)
field.validators = [
validator for validator in field.validators
if not isinstance(validator, validators.UniqueValidator)]
return fields
class UpdateListMixin(serializers.Serializer):
# Serializer doesn't include id by default.
# Make id visible in .validated_data:
id = serializers.IntegerField(required=False)
class CustomListSerializer(serializers.ListSerializer):
"""
Adds an update method to the ListSerializer.
- Any existing instances not in the update instances will be deleted
(checks id field of the update instance)
- Any completely new instances (no id field) will be created
"""
def update(self, instances, validated_data):
ids_for_updates = [item['id'] for item in validated_data
if item.get('id') is not None]
# Delete instances not in the list
instances.exclude(id__in=ids_for_updates).delete()
# Save the new instances
ret = []
for item in validated_data:
id_ = item.get('id')
if id_ is None:
ret.append(self.child.create(item))
else:
instance = instances.get(id=id_)
ret.append(self.child.update(instance, item))
return ret
def __init__(self, *args, **kwargs):
setattr(self.Meta, 'list_serializer_class', self.CustomListSerializer)
super().__init__(*args, **kwargs)
class NestedMixin(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Find all the nested serializers, check they are ToMany relational
# types and add to self.Meta.nested_fields
self.Meta.nested_fields = [name for name, field in self.fields.items()
if isinstance(field,
serializers.BaseSerializer)]
for field_name in self.Meta.nested_fields:
field = self.Meta.model._meta.get_field(field_name)
assert (
isinstance(field, ManyToManyField) or
isinstance(field, ForeignKey)
), (
'Nested field %s, is not a relational field' % field_name
)
# assert relation.to_many, (
# 'Nested field %s, is not a to_many relational field'
# % field_name
# )
def validate(self, data, *args, **kwargs):
"""
Remove nested data before running .validate()
"""
no_nested = {**data}
for field in self.Meta.nested_fields:
no_nested.pop(field, None)
super().validate(no_nested, *args, **kwargs)
return data
@transaction.atomic
def create(self, validated_data):
nested = self.pop_nested(validated_data)
instance = super().create(validated_data)
if nested:
self.update_nested(nested, instance)
return instance
@transaction.atomic
def update(self, instance, validated_data):
nested = self.pop_nested(validated_data)
instance = super().update(instance, validated_data)
if nested:
self.update_nested(nested, instance)
return instance
def pop_nested(self, validated_data):
nested = {}
for field_name in self.Meta.nested_fields:
value = validated_data.pop(field_name, None)
nested[field_name] = value
return nested
def update_nested(self, nested, instance):
"""
Call the nested serializer's update method and set the returned items
"""
for nested_field_name, attrs in nested.items():
serializer = self.get_fields()[nested_field_name]
nested_field = self.Meta.model._meta.get_field(nested_field_name)
if isinstance(nested_field, ManyToManyField):
related_manager = getattr(instance, nested_field_name)
related_objects = related_manager.all()
ret = serializer.update(related_objects, (attrs or []))
related_manager.set(ret)
elif isinstance(nested_field, ForeignKey):
related_manager = nested_field.related_model.objects
# 3 cases:
# No attrs - delete any existing related instance
# attrs with id - update the existing related instance
# attrs without id - create new related instance
if not attrs:
setattr(instance, nested_field_name, None)
else:
nested_id = attrs.get('id', None)
if nested_id:
nested_instance = related_manager.get(
pk=nested_id
)
serializer.update(nested_instance, attrs)
setattr(instance, nested_field_name, nested_instance)
else:
nested_instance = serializer.create(attrs)
setattr(instance, nested_field_name, nested_instance)
else:
raise TypeError(
'{0} is marked as a nested field but is neither'
'a ManyToManyField or a ForeignKey field'.format(
nested_field_name)
)
# Save the instance
instance.save()
class ModelValidateMixin(object):
"""
This mixin implements a custom validate method to run the model's own
validation checks by calling model.clean().
This allows us to keep validation better encapsulated at the model level
and avoid duplication of validation.
"""
def validate(self, attrs):
# Either:
# Load instance from self.instance or load from attrs.id and make
# changes to the fields from attrs
# Or:
# Create a new instance using attrs
instance = self.instance
ModelClass = self.Meta.model
meta = ModelClass._meta
pk_name = meta.pk.name
        if instance is None and pk_name in attrs:
instance = self.Meta.model.objects.filter(
pk=attrs[pk_name]).first()
if instance is None:
instance = ModelClass()
# We catch any django ValidationErrors and raise drf ValidationError's
# instead.
try:
instance.clean()
except ValidationError as e:
raise serializers.ValidationError(e.args[0])
attrs = super().validate(attrs)
return attrs
class CustomSerializer(
UniqueFieldsMixin, UpdateListMixin, NestedMixin, ModelValidateMixin,
serializers.ModelSerializer
):
def get_field_names(self, declared_fields, info):
"""
        Overridden to allow .Meta.fields='__all__' to be used and then add
extra fields on top with .Meta.extra_fields
"""
expanded_fields = super().get_field_names(declared_fields, info)
if getattr(self.Meta, 'extra_fields', None):
return expanded_fields + self.Meta.extra_fields
else:
return expanded_fields
class GroceryGroupSerializer(CustomSerializer):
class Meta:
model = models.GroceryGroup
fields = '__all__'
validators = []
class GroceryItemSerializer(CustomSerializer):
group = GroceryGroupSerializer()
class Meta:
model = models.GroceryItem
fields = '__all__'
extra_fields = ['group']
validators = []
class TagSerializer(CustomSerializer):
class Meta:
model = models.Tag
fields = '__all__'
class SourceSerializer(CustomSerializer):
class Meta:
model = models.Source
fields = '__all__'
class IngredientSerializer(CustomSerializer):
grocery_item = GroceryItemSerializer(required=False, allow_null=True)
class Meta:
model = models.Ingredient
fields = '__all__'
validators = []
class ListField(CharField):
"""
A field for converting lists of strings stored in a model as one string.
"""
delimiter = '\n\n'
def to_internal_value(self, data):
if isinstance(data, list):
data = self.delimiter.join(data)
return super().to_internal_value(data)
def to_representation(self, value):
value = super().to_representation(value)
if value == '':
return []
else:
return value.split(self.delimiter)
class RecipeSerializer(CustomSerializer):
# Overwrite the default ingredients field to use a nested serializer
ingredients = IngredientSerializer(many=True, required=False)
preparation = ListField(allow_blank=True, required=False, style={
'base_template': 'textarea.html'})
class Meta:
model = models.Recipe
exclude = ('image',)
validators = []
class UserSerializer(CustomSerializer):
class Meta:
model = UserModel
fields = ['id', 'first_name', 'last_name']
class BookSerializer(CustomSerializer):
class Meta:
model = models.Book
fields = '__all__'
class AuthSerializer(CustomSerializer):
# Write-only password
password = serializers.CharField(write_only=True)
def create(self, validated_data):
user = UserModel.objects.create_user(**validated_data)
return user
class Meta:
model = UserModel
fields = '__all__'
```
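To make the nested-write behaviour of `NestedMixin`/`CustomListSerializer` concrete, here is a hypothetical update payload for `RecipeSerializer`; the field values, primary keys, and the `recipe` instance are invented for illustration only.

```python
# Hypothetical payload illustrating the nested writes handled by NestedMixin:
# ingredients with an "id" are updated, ones without are created, and any
# existing ingredients missing from the list are deleted by CustomListSerializer.
recipe = models.Recipe.objects.get(pk=1)   # assumed existing recipe (pk=1 is invented)
payload = {
    "title": "Tomato soup",
    "preparation": ["Chop the tomatoes.", "Simmer for 20 minutes."],
    "ingredients": [
        {"id": 12, "text": "4 ripe tomatoes"},   # update existing ingredient 12
        {"text": "1 pinch of salt"},             # create a new ingredient
    ],
}
serializer = RecipeSerializer(instance=recipe, data=payload)
serializer.is_valid(raise_exception=True)
serializer.save()
```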
#### File: recipes/tests/utils.py
```python
from PIL import Image
from django.contrib.sessions.backends.base import SessionBase
from django.core.files import File
from django.test import RequestFactory
from django.urls import resolve
from io import BytesIO
from unittest.mock import MagicMock
class RouteTesterMixin(object):
"""
Mixin for testing urls resolve correctly
"""
def route_test(self, url, view, name=None, kwargs={}):
response = resolve(url)
# view may be a function or class-based view
if hasattr(response.func, 'view_class'):
self.assertEqual(response.func.view_class, view)
else:
self.assertEqual(response.func, view)
if name:
self.assertEqual(response.view_name, name)
if kwargs:
self.assertEqual(response.kwargs, kwargs)
class TestHelper(object):
@staticmethod
def get_image_file(
name='test.png', ext='png',
            size=(50, 50), color=(255, 0, 0)):
file_obj = BytesIO()
image = Image.new("RGBA", size=size, color=color)
image.save(file_obj, ext)
file_obj.seek(0)
return File(file_obj, name=name)
class FormTesterMixin(object):
"""
Mixin for testing form's generate expected errors.
"""
def assertFormError(self, form_cls, expected_error_name,
expected_error_msg, data):
test_form = form_cls(data=data)
self.assertFalse(test_form.is_valid())
        self.assertEqual(
test_form.errors[expected_error_name],
expected_error_msg,
)
class SessionFactory(object):
"""
    Factory class for creating mock session objects. Use instead of a plain
    mock so that session attributes (e.g. session.modified) can be mocked.
"""
def _getitem(self, name):
return self.dict_[name]
def _setitem(self, name, val):
self.dict_[name] = val
def create_session(self, spec=SessionBase):
session = MagicMock(spec=spec)
self.dict_ = {}
session.__getitem__.side_effect = self._getitem
session.__setitem__.side_effect = self._setitem
return session
class ViewTesterMixin(object):
"""
Mixin to be used with a django.tests.TestCase object.
"""
@staticmethod
def setup_generic_view(view, request, *args, **kwargs):
"""
Helper function to allow unit testing of class-based views.
Mimic as_view() returned callable, but returns view instance.
args and kwargs are the same as you would pass to reverse()
http://tech.novapost.fr/django-unit-test-your-views-en.html
"""
view.request = request
view.args = args
view.kwargs = kwargs
return view
def assertCorrectResponse(self, view_func, kwargs={},
expected_text_list=[], status_code=200,
html=False):
"""
        Wrapper around assertContains. Sets up a request, gets a response for
        the view_func and tests that all items in expected_text_list are present
        in the response.
"""
request = RequestFactory().get('')
request.session = SessionFactory().create_session()
response = view_func(request, **kwargs)
        # Force the response to be rendered. Will produce an error if e.g. the template
# doesn't exist
response.render()
if expected_text_list:
for expected_text in expected_text_list:
self.assertContains(response, expected_text,
status_code=status_code, html=html)
else:
# Just test status code
self.assertEqual(response.status_code, status_code)
class ModelTesterMixin:
"""
Mixin for model testing.
"""
def assertMeta(self, model_cls, required_fields, unique_fields, ordering):
"""
Tests on model meta to ensure not changed accidentally.
"""
# (Note: Set up lists to be checked rather than loops that
# fail-on-first.)
# Required fields
required_fields_with_blank_true = [
f for f in required_fields
if model_cls._meta.get_field(f).blank
]
self.assertEqual(required_fields_with_blank_true, [],
                         "Required fields don't match")
# Unique fields
unique_fields_with_unique_false = [
f for f in unique_fields
if not model_cls._meta.get_field(f).unique
]
self.assertEqual(unique_fields_with_unique_false, [],
                         "Unique fields don't match")
# Ordering
self.assertEqual(model_cls._meta.ordering, ordering,
                         "Ordering doesn't match")
```
|
{
"source": "JF6/kusto-logging",
"score": 2
}
|
#### File: kusto-logging/tests/test_ql.py
```python
import logging
import time
import threading
from queue import Queue
from logging.handlers import QueueHandler, QueueListener
import pytest
from azure.kusto.data import KustoConnectionStringBuilder
from azure.kusto.data.exceptions import KustoServiceError
from kusto.logging import KustoHandler
from test_setup import BaseTestKustoLogging
def do_logging(numberOfMessages):
nb_of_tests = numberOfMessages
for i in range(nb_of_tests):
logging.warning("Test {} warning {} from thread {}".format(__file__, i, threading.get_ident()))
class TestKustoQueueListenerMemoryHandlerLogging(BaseTestKustoLogging):
@classmethod
def setup_class(cls):
super().setup_class()
if not cls.is_live_testing_ready:
pytest.skip("No backend end available", allow_module_level=True)
queue_cap = 5000
cls.kh = KustoHandler(
kcsb=cls.kcsb, database=cls.test_db, table=cls.test_table, useStreaming=True, capacity=queue_cap, flushLevel=logging.CRITICAL, retries=[]
)
cls.kh.setLevel(logging.DEBUG)
cls.q = Queue()
cls.qh = QueueHandler(cls.q)
cls.ql = QueueListener(cls.q, cls.kh)
cls.ql.start()
logger = logging.getLogger()
logger.addHandler(cls.qh)
logger.setLevel(logging.DEBUG)
@classmethod
def teardown_class(cls):
cls.ql.stop()
cls.qh.flush()
retries = 50
while retries:
time.sleep(1)
if cls.q.empty():
break
        logging.getLogger().removeHandler(cls.qh)
super().teardown_class()
def test_info_logging(self, caplog):
caplog.set_level(logging.CRITICAL, logger="adal-python")
caplog.set_level(logging.CRITICAL, logger="urllib3.connectionpool")
nb_of_tests = 30000
for i in range(0, nb_of_tests):
logging.info("Test %s info %d", __file__, i)
logging.critical("Flush")
self.assert_rows_added(nb_of_tests, logging.INFO, timeout=10000)
def test_debug_logging(self, caplog):
caplog.set_level(logging.CRITICAL, logger="adal-python")
caplog.set_level(logging.CRITICAL, logger="urllib3.connectionpool")
caplog.set_level(logging.CRITICAL, logger="msal.application")
nb_of_tests = 40000
for i in range(0, nb_of_tests):
logging.debug("Test debug %d", i)
logging.critical("Flush")
self.assert_rows_added(nb_of_tests, logging.DEBUG, timeout=500)
def test_error_logging(self, caplog):
caplog.set_level(logging.CRITICAL, logger="adal-python")
caplog.set_level(logging.CRITICAL, logger="urllib3.connectionpool")
caplog.set_level(logging.CRITICAL, logger="msal.application")
nb_of_tests = 20000
for i in range(0, nb_of_tests):
logging.error("Test error %d", i)
logging.critical("Flush")
self.assert_rows_added(nb_of_tests, logging.ERROR, timeout=500)
def test_critical_logging(self, caplog):
caplog.set_level(logging.CRITICAL, logger="adal-python")
caplog.set_level(logging.CRITICAL, logger="urllib3.connectionpool")
caplog.set_level(logging.CRITICAL, logger="msal.application")
nb_of_tests = 20
for i in range(0, nb_of_tests):
logging.critical("Test critical %d", i)
self.assert_rows_added(nb_of_tests, logging.CRITICAL)
def test_mt_warning_logging(self, caplog):
"""multithreading test"""
caplog.set_level(logging.CRITICAL, logger="adal-python")
caplog.set_level(logging.CRITICAL, logger="urllib3.connectionpool")
caplog.set_level(logging.CRITICAL, logger="msal.application")
logging_threads = []
expected_results = 0
for i in range(16):
nb_of_logging = i * 100
x = threading.Thread(target=do_logging, args=(nb_of_logging,))
x.start()
expected_results += nb_of_logging
logging_threads.append(x)
for t in logging_threads:
t.join()
logging.critical("Flush")
self.assert_rows_added(expected_results, logging.WARNING)
```
#### File: kusto-logging/tests/test_setup.py
```python
import os
import sys
import time
import random
import pandas
import logging
from azure.kusto.data import KustoClient, KustoConnectionStringBuilder
from azure.kusto.data.exceptions import KustoServiceError
from azure.kusto.ingest import KustoStreamingIngestClient, IngestionProperties, DataFormat
from kusto.logging import (
KustoHandler,
)
class BaseTestKustoLogging:
"""Base class for logging tests."""
@classmethod
def setup_class(cls):
"""create the Kusto table and initialize kcsb info"""
global has_one_test_failed
has_one_test_failed = False
cls.is_live_testing_ready = True
engine_cs = os.environ.get("ENGINE_CONNECTION_STRING")
app_id = os.environ.get("APP_ID")
app_key = os.environ.get("APP_KEY")
auth_id = os.environ.get("AUTH_ID")
if engine_cs and app_id and app_key and auth_id:
cls.kcsb = KustoConnectionStringBuilder.with_aad_application_key_authentication(engine_cs, app_id, app_key, auth_id)
cls.test_db = os.environ.get("TEST_DATABASE")
cls.client = KustoClient(cls.kcsb)
python_version = "_".join([str(v) for v in sys.version_info[:3]])
cls.test_table = "python_test_{0}_{1}_{2}".format(python_version, str(int(time.time())), random.randint(1, 100000))
with open("kusto-logging/tests/createTable.kql") as table_template:
tbl_create = table_template.read()
cls.client.execute(cls.test_db, tbl_create.format(cls.test_table))
timeout = 200
csv_ingest_props = IngestionProperties(
cls.test_db,
cls.test_table,
data_format=DataFormat.CSV,
flush_immediately=True,
)
# Wait for the table to be able to ingest.
streaming_ingest_client = KustoStreamingIngestClient(cls.kcsb)
df = pandas.DataFrame.from_dict({"msg": ["Flush"]})
while timeout > 0:
time.sleep(1)
timeout -= 1
try:
streaming_ingest_client.ingest_from_dataframe(df, csv_ingest_props)
response = cls.client.execute(cls.test_db, "{} | where name == 'Flush' | count".format(cls.test_table))
except KustoServiceError:
continue
if response is not None:
row = response.primary_results[0][0]
actual = int(row["Count"])
# this is done to allow for data to arrive properly
if actual >= 1:
cls.is_live_testing_ready = True
break
else:
print("At least one env variable not found, live_testing_ready is false")
cls.is_live_testing_ready = False
@classmethod
def teardown_class(cls):
"""cleanup table after testing (if successful)"""
# logging.getLogger().removeHandler(cls.kh)
global has_one_test_failed
if not has_one_test_failed:
cls.client.execute(cls.test_db, ".drop table {} ifexists".format(cls.test_table))
logging.basicConfig(force=True) # in order for the tests to chain
    # assertions
    @classmethod
    def assert_rows_added(cls, expected: int, level: int, timeout=60):
actual = 0
while timeout > 0:
time.sleep(1)
timeout -= 1
try:
response = cls.client.execute(
cls.test_db, "{} | where levelno=={} | where msg has 'Test' | where msg != 'Flush' | count".format(cls.test_table, level)
)
except KustoServiceError:
continue
if response is not None:
row = response.primary_results[0][0]
actual = int(row["Count"])
# this is done to allow for data to arrive properly
if actual >= expected:
break
assert actual == expected, "Row count expected = {0}, while actual row count = {1}".format(expected, actual)
```
|
{
"source": "jf87/BLEva",
"score": 2
}
|
#### File: CPSBench/plot/plot_between_model.py
```python
from __future__ import division
import os
import util
import matplotlib.pyplot as plt
import seaborn as sns
SAVEPATH = './output/'
def add_phone_id(df, id_map):
return df
def plot_rssi():
# get dataframe for RSSI
path_rssi = "../data/rssi"
df_rssi = util.process_folder(path_rssi, filter_benchmark='rssi')
# df_rssi["Phone Model"] = df_rssi["Phone Model"].astype("category")
df_rssi = df_rssi[(df_rssi['RSSI (dBm)'] < 0)]
    id_map = {}  # {phone model: [next_id, {old id: new id}]}
# add phone id
df_rssi = df_rssi[(df_rssi["Phone Model"].isin([
"Asus Nexus 7", "LG Nexus 5", "Motorola MotoE2", "Motorola Nexus 6"])) &
(df_rssi['Device ID'] != 'unknown')]
df_rssi['Phone ID'] = ""
df_rssi.index = range(len(df_rssi))
for index, row in df_rssi.iterrows():
model = row['Phone Model']
if model not in id_map:
id_map[model] = [1, {}]
device = row['Device ID']
if device not in id_map[model][1]:
id_map[model][1][device] = "Phone " + str(id_map[model][0])
id_map[model][0] += 1
df_rssi.set_value(index, 'Phone ID', id_map[model][1][device])
# get dataframe for latency
path_latency = "../data/soc/adv-prr-latency/1280ms"
df_latency = util.process_folder(path_latency, filter_scan_mode="balanced",
filter_replicas=None,
filter_benchmark='first')
# df_latency["Phone Model"] = df_latency["Phone Model"].astype("category")
df_latency = df_latency.rename(columns={'OS Timestamp (ms)': 'OS Timestamp (s)'})
df_latency["OS Timestamp (s)"] = df_latency["OS Timestamp (s)"] / 1000.0
# add phone id
df_latency = df_latency[(df_latency["Phone Model"].isin([
"Asus Nexus 7", "LG Nexus 5", "Motorola MotoE2", "Motorola Nexus 6"])) &
(df_latency['Device ID'] != 'unknown')]
df_latency['Phone ID'] = ""
df_latency.index = range(len(df_latency))
for index, row in df_latency.iterrows():
model = row['Phone Model']
if model not in id_map:
id_map[model] = [1, {}]
device = row['Device ID']
if device not in id_map[model][1]:
id_map[model][1][device] = "Phone " + str(id_map[model][0])
id_map[model][0] += 1
df_latency.set_value(index, 'Phone ID', id_map[model][1][device])
    print(df_rssi["Phone Model"].unique())
    print(df_latency["Phone Model"].unique())
    print(id_map)
# figure 12
# sns.set_context("paper")
# sns.set(style="whitegrid", font_scale=1.5)
fig, axarr = plt.subplots(1, 2, sharey='row', figsize=(10, 4))
# fig.subplots_adjust(wspace=0, hspace=0)
    print(df_rssi[(df_rssi['Distance (m)'] == 1.0)])
hue_order = sorted(df_rssi['Phone ID'].unique())
df_rssi["Phone Model"] = df_rssi["Phone Model"].astype("category")
sns.boxplot(x="RSSI (dBm)", y="Phone Model", data=df_rssi[(
df_rssi['Distance (m)'] == 1.0)], hue="Phone ID", hue_order=hue_order,
ax=axarr[0]) # , size=10, aspect=2.0, legend=False)
# Now add the legend with some customizations.
axarr[0].legend_.remove() # (frameon=True, framealpha=1, edgecolor='0')
axarr[0].set_title("Distance = 1 m", fontweight="bold")
axarr[0].set_ylabel('')
axarr[0].set_xlabel('RSSI (dBm)')
axarr[0].set_xlim(-110, -50)
df_latency["Phone Model"] = df_latency["Phone Model"].astype("category")
sns.boxplot(x="OS Timestamp (s)", y="Phone Model", data=df_latency,
hue="Phone ID", hue_order=hue_order, ax=axarr[1]) # , size=10, aspect=2.0, legend=False)
# Now add the legend with some customizations.
axarr[1].legend_.remove() # (frameon=True, framealpha=1, edgecolor='0')
axarr[1].set_title("Scan Mode = balanced", fontweight="bold")
axarr[1].set_ylabel('')
axarr[1].set_xlabel('OS Timestamp (s)')
# axarr[1].set_xlim(-110, )
fig.tight_layout()
if not os.path.exists(SAVEPATH):
os.makedirs(SAVEPATH)
fig.savefig(SAVEPATH + 'between_model.pdf')
if __name__ == '__main__':
util.set_style(font_scale=1.5)
util.set_colors()
plot_rssi()
```
#### File: src/bled112/multiprocessing_logging.py
```python
from __future__ import absolute_import, division, unicode_literals
import logging
import multiprocessing
import sys
import threading
import traceback
__version__ = '0.2.4'
def install_mp_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcessingHandler(
'mp-handler-{0}'.format(i), sub_handler=orig_handler)
logger.removeHandler(orig_handler)
logger.addHandler(handler)
class MultiProcessingHandler(logging.Handler):
def __init__(self, name, sub_handler=None):
super(MultiProcessingHandler, self).__init__()
if sub_handler is None:
sub_handler = logging.StreamHandler()
self.sub_handler = sub_handler
self.queue = multiprocessing.Queue(-1)
self.setLevel(self.sub_handler.level)
self.setFormatter(self.sub_handler.formatter)
# The thread handles receiving records asynchronously.
t = threading.Thread(target=self.receive, name=name)
t.daemon = True
t.start()
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self.sub_handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self.sub_handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe.
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
self.sub_handler.close()
logging.Handler.close(self)
```
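A minimal usage sketch for `install_mp_handler`, assuming a fork-based multiprocessing start method so child processes inherit the wrapped handlers; the worker function and messages are invented.

```python
# Minimal usage sketch: wrap the root logger's handlers before spawning
# worker processes so their records funnel through one shared queue.
import logging
import multiprocessing

def worker(n):
    logging.getLogger().warning("hello from worker %d", n)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    install_mp_handler()                      # wrap the existing handlers
    procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```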
|
{
"source": "jf87/e-mission-server",
"score": 3
}
|
#### File: ext_service/transit_matching/match_stops.py
```python
import logging
import json
import requests
import attrdict as ad
import itertools
import copy
import time
try:
config_file = open('conf/net/ext_service/overpass_server.json')
except:
print("overpass not configured, falling back to default overleaf.de")
config_file = open('conf/net/ext_service/overpass_server.json.sample')
try:
query_file = open('conf/net/ext_service/overpass_transit_stops_query_template')
except:
print("transit stops query not configured, falling back to default")
query_file = open('conf/net/ext_service/overpass_transit_stops_query_template.sample')
config_data = json.load(config_file)
url = config_data["url"]
query_string = "".join(query_file.readlines())
def get_public_transit_stops(min_lat, min_lon, max_lat, max_lon):
bbox_string = "%s,%s,%s,%s" % (min_lat, min_lon, max_lat, max_lon)
logging.debug("bbox_string = %s" % bbox_string)
overpass_public_transit_query_template = query_string
overpass_query = overpass_public_transit_query_template.format(bbox=bbox_string)
response = requests.post(url + "api/interpreter", data=overpass_query)
try:
all_results = response.json()["elements"]
except json.decoder.JSONDecodeError as e:
logging.info("Unable to decode response with status_code %s, text %s" %
(response.status_code, response.text))
time.sleep(5)
logging.info("Retrying after 5 second sleep")
response = requests.post(url + "api/interpreter", data=overpass_query)
try:
all_results = response.json()["elements"]
except json.decoder.JSONDecodeError as e:
logging.info("Unable to decode response with status_code %s, text %s" %
(response.status_code, response.text))
if response.status_code == 429:
logging.info("Checking when a slot is available")
response = requests.get(url + "api/status")
status_string = response.text.split("\n")
try:
available_slots = int(status_string[3].split(" ")[0])
if available_slots > 0:
logging.info("No need to wait")
response = requests.post(url + "api/interpreter", data=overpass_query)
all_results = response.json()["elements"]
# Some api/status returns 0 slots available and then when they will be available
elif available_slots == 0:
min_waiting_time = min(int(status_string[4].split(" ")[5]), int(status_string[5].split(" ")[5]))
time.sleep(min_waiting_time)
logging.info("Retrying after " + str(min_waiting_time) + " second sleep")
response = requests.post(url + "api/interpreter", data=overpass_query)
all_results = response.json()["elements"]
except ValueError as e:
# And some api/status directly returns when the slots will be available
try:
min_waiting_time = min(int(status_string[3].split(" ")[5]), int(status_string[4].split(" ")[5]))
time.sleep(min_waiting_time)
logging.info("Retrying after " + str(min_waiting_time) + " second sleep")
response = requests.post(url + "api/interpreter", data=overpass_query)
all_results = response.json()["elements"]
except ValueError as e:
logging.info("Unable to find availables slots")
all_results = []
else:
all_results = []
relations = [ad.AttrDict(r) for r in all_results if r["type"] == "relation" and r["tags"]["type"] == "route"]
logging.debug("Found %d relations with ids %s" % (len(relations), [r["id"] for r in relations]))
stops = [ad.AttrDict(r) for r in all_results if r["type"] != "relation"]
logging.debug("Found %d stops" % len(stops))
rel_map = {}
for relation in relations:
rel_nodes_ids = [rm.ref for rm in relation.members if (rm.type == "node")]
        rel_stop_node_ids = [rm.ref for rm in relation.members if (rm.type == "node")
                             and (rm.role == "stop")]
# logging.debug("for relation number of nodes = %d, number of stop nodes = %d" % (len(rel_nodes_ids),
# len(rel_stop_node_ids)))
if len(rel_nodes_ids) == 0:
logging.debug("route relation with no nodes = %s" % relation["tags"])
rel_map[relation.id] = rel_nodes_ids
# logging.debug("rel_map = %s" % rel_map)
for stop in stops:
stop["routes"] = []
for relation in relations:
rel_nodes_ids = rel_map[relation["id"]]
if stop.id in rel_nodes_ids:
stop["routes"].append({"id": relation["id"], "tags": relation["tags"]})
return stops
# https://gis.stackexchange.com/a/19761
def get_stops_near(loc, distance_in_meters):
COORDS = "coordinates"
# 10001.965729km = 90 degrees
# 1km = 90/10001.965729 degrees
# 1000m = 90/10001.965729 degrees
# 1m = 90/(10001.965729 * 1000)
meter2deg = 90/(10001.965729 * 1000)
bbox_delta = meter2deg * distance_in_meters
lon = loc[COORDS][0]
lat = loc[COORDS][1]
stops = get_public_transit_stops(lat - bbox_delta, lon - bbox_delta, lat + bbox_delta, lon + bbox_delta)
logging.debug("Found %d stops" % len(stops))
for i, stop in enumerate(stops):
logging.debug("STOP %d: %s" % (i, stop))
return stops
def get_predicted_transit_mode(start_stops, end_stops):
"""
This is a set of checks, in decreasing order of confidence.
The advantage of doing it this way is that we can add more checks as we
encounter more use cases.
"""
# https://stackoverflow.com/a/11574353/4040267
p_start_routes = list(itertools.chain.from_iterable([extract_routes(s) for s in start_stops]))
p_end_routes = list(itertools.chain.from_iterable([extract_routes(s) for s in end_stops]))
rel_id_matches = get_rel_id_match(p_start_routes, p_end_routes)
logging.debug("len(start_routes) = %d, len(end_routes) = %d, len(rel_id_matches) = %d" %
(len(p_start_routes), len(p_end_routes), len(rel_id_matches)))
if len(rel_id_matches) > 0:
return [rim.tags.route for rim in rel_id_matches]
# Did not find matching routes. Let's see if stops are both "railway",
# if so, we can mark as LIGHT_RAIL, TRAIN, TRAM or SUBWAY
p_start_train = [extract_railway_modes(s.tags) for s in start_stops]
p_start_train = set(itertools.chain.from_iterable(set(i) for i in p_start_train))
p_end_train = [extract_railway_modes(s.tags) for s in end_stops]
p_end_train = set(itertools.chain.from_iterable(set(i) for i in p_end_train))
logging.debug("len(start_train) = %d, len(end_train) = %d" %
(len(p_start_train), len(p_end_train)))
if len(p_start_train) > 0 and len(p_end_train) > 0:
p_intersection_train = p_start_train & p_end_train
p_intersection_train = list(p_intersection_train)
logging.debug("Start and end have both " + str(p_intersection_train) + ", returning " + str(p_intersection_train))
return p_intersection_train
# Did not find matching routes. Let's see if any stops have a "highway" =
# "bus_stop" tag
is_bus_stop = lambda s: "highway" in s.tags and \
s.tags.highway == "bus_stop"
# Older bus stops sometimes have `route_ref`, which is a `;` separated list
# of routes. This is now deprecated, but until everything is moved over, we
# have to use it :(
# https://help.openstreetmap.org/questions/33325/bus-stops-by-line-bus
start_bus_stops = [s for s in start_stops if is_bus_stop(s)]
end_bus_stops = [s for s in end_stops if is_bus_stop(s)]
logging.debug("%d start stops -> %d start bus stops, %d end stops -> %d end bus stops" % (len(start_stops), len(start_bus_stops), len(end_stops), len(end_bus_stops)))
start_bus_route_ref = create_routes_from_ref(start_bus_stops)
end_bus_route_ref = create_routes_from_ref(end_bus_stops)
route_ref_matches = get_rel_id_match(start_bus_route_ref, end_bus_route_ref)
logging.debug("len(start_bus_route_ref) = %d, len(end_bus_route_ref) = %d, len(rel_id_matches) = %d" %
(len(start_bus_route_ref), len(end_bus_route_ref), len(route_ref_matches)))
if len(route_ref_matches) > 0:
return [rim.tags.route for rim in route_ref_matches]
p_start_bus = [is_bus_stop(s) for s in start_stops]
p_end_bus = [is_bus_stop(s) for s in end_stops]
# If there are no route refs either, let's be a bit careful, since without
# routes, we could end up with a lot of false positives.
# In general, in areas with a high density of routes, we expect to have
# good OSM coverage with route information, so the first checks will match
# And if we fall through to here, we probably don't have a dense public
# transit network. So to avoid misclassifying car trips as bus, let's check
# that the density is low.
logging.debug("len(start_bus) = %s, len(end_bus) = %s" % (
(len(p_start_bus), len(p_end_bus))))
if is_true(p_start_bus) and is_true(p_end_bus):
# Query for bus stops in the surrounding area and compare the density
# overall versus the density here
# If the density of bus stops is much higher here, then the chances are
# that it is a bus trip since otherwise, by random chance, it should
# have started anywhere in the space
logging.debug("Both start and end are bus stops, validating...")
if validate_simple_bus_stops(start_stops, end_stops):
logging.debug("Validation succeeded, returning BUS")
return ['BUS']
# No public transit matches, return None
return None
def get_rel_id_match(p_start_routes, p_end_routes):
train_mode_list = ['funicular', 'miniature', 'rail', 'railway',
'light_rail', 'subway', 'monorail', 'tram', 'aerialway', 'tracks']
logging.debug("About to find matches in lists: %s \n %s" %
([p.id for p in p_start_routes],
[p.id for p in p_end_routes]))
matching_routes = []
for sr in p_start_routes:
for er in p_end_routes:
if sr.id == er.id:
matching_routes.append(sr)
elif sr.tags.route in train_mode_list and er.tags.route in train_mode_list and sr.tags.route == er.tags.route:
if "network" in sr.tags and "network" in er.tags:
if sr.tags.network == er.tags.network:
logging.debug("network matches between %d and %d", sr.id,er.id)
matching_routes.append(sr)
logging.debug("matching routes = %s" % [(r.id,
r.tags.ref if "ref" in r.tags else r.tags.name) for r in matching_routes])
return matching_routes
def extract_routes(stop):
p_modes = []
if "routes" in stop:
for route in stop.routes:
p_modes.append(route)
logging.debug("After iterating through routes, potential modes = %s" %
[(p.id, p.tags.route) for p in p_modes])
return p_modes
def create_routes_from_ref(bus_stop_list):
created_routes = []
route_ref_bus_stop_list = [s for s in bus_stop_list if "route_ref" in s.tags]
for s in route_ref_bus_stop_list:
logging.debug("Splitting route ref %s" % s.tags.route_ref)
# route_ref is a ; separated list of routes. We want to split them up
route_list = s.tags.route_ref.split(';')
for route in route_list:
# the id of the stop represents the stop, not the route
# so we create an id from the route
re = {"id": route,
"tags": {"ref": route, "name": route, "route": "bus", "type": "route"}}
# 'tags': {'bus': 'yes', 'gtfs_id': '0300315', 'gtfs_location_type': '0', 'gtfs_stop_code': '57566', 'highway': 'bus_stop', 'name': 'Addison St:Oxford St', 'network': 'AC Transit', 'public_transport': 'platform', 'ref': '57566', 'route_ref': '65'}
# #65 bus stop doesn't have an operator tag, only network
if "operator" in s.tags:
re["operator"] = s.tags.operator
elif "network" in s.tags:
re["operator"] = s.tags.network
# logging.debug("Converted stop %s + route_ref %s -> route %s" %
# (s, route, re))
created_routes.append(ad.AttrDict(re))
logging.debug("%d bus stops -> %d bus stops with refs -> %d routes" %
(len(bus_stop_list), len(route_ref_bus_stop_list), len(created_routes)))
return created_routes
def is_true(bool_array):
import functools as ft
ret_val = ft.reduce(lambda x, y: x or y, bool_array, False)
logging.debug("is_true(%s) = %s" % (bool_array, ret_val))
return ret_val
def validate_simple_bus_stops(p_start_stops, p_end_stops):
is_no_route_bus_stop = lambda s: "highway" in s.tags and \
s.tags.highway == "bus_stop" and \
"route_ref" not in s.tags and \
"routes" not in s
start_bus_stops = [s for s in p_start_stops if is_no_route_bus_stop(s)]
end_bus_stops = [s for s in p_end_stops if is_no_route_bus_stop(s)]
# at least one of the sides should be sparse
if len(start_bus_stops) == 1 or len(end_bus_stops) == 1:
logging.debug("One side is sparse, valid bus stop")
return True
logging.debug("Both side are dense, invalid bus stop")
return False
def extract_railway_modes(stop):
p_modes = []
if "railway" in stop:
if "subway" in stop:
p_modes.append("SUBWAY")
if "train" in stop:
p_modes.append("TRAIN")
if "tram" in stop:
p_modes.append("TRAM")
if "light_rail" in stop:
p_modes.append("LIGHT_RAIL")
logging.debug("After extracting data from tags, potential modes = %s" %
[p for p in p_modes])
return p_modes
```
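A hedged example of calling these helpers; the coordinates are arbitrary and `loc` mirrors the GeoJSON-style dict the code expects (a `coordinates` list ordered longitude, latitude). Both calls hit the configured Overpass endpoint.

```python
# Illustrative call (arbitrary coordinates): find transit stops within 150 m
# of two points and ask for a predicted transit mode between them.
import logging

logging.basicConfig(level=logging.DEBUG)

start_loc = {"coordinates": [-122.2712, 37.8044]}   # lon, lat
end_loc = {"coordinates": [-122.4194, 37.7749]}

start_stops = get_stops_near(start_loc, 150)
end_stops = get_stops_near(end_loc, 150)
print(get_predicted_transit_mode(start_stops, end_stops))
```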
|
{
"source": "jf990/arcgis-runtime-samples-dotnet",
"score": 2
}
|
#### File: tools/metadata_tools/sample_metadata.py
```python
import json
import os
from distutils.dir_util import copy_tree
from shutil import copyfile, rmtree
import re
import requests
from datetime import datetime
from csproj_utils import *
from file_utils import *
class sample_metadata:
'''
This class represents a sample.
Use populate_from_* to populate from content.
Use try_replace_from_common_readme to read external readme content and replace the sample's content if the common content is 'better'.
Use flush_to_* to write out the sample to disk.
Use emit_standalone_solution to write out the sample as a standalone Visual Studio solution.
'''
arcgis_runtime_latest = "100.5.0" # store latest Runtime version, for use with packages
def reset_props(self):
self.formal_name = ""
self.friendly_name = ""
self.category = ""
self.nuget_packages = {}
self.keywords = []
self.relevant_api = []
self.since = ""
self.images = []
self.source_files = []
self.redirect_from = []
self.offline_data = []
self.description = ""
self.how_to_use = []
self.how_it_works = ""
self.use_case = ""
self.data_statement = ""
self.Additional_info = ""
self.ignore = False
def __init__(self):
self.reset_props()
def populate_from_json(self, path_to_json):
# formal name is the name of the folder containing the json
pathparts = sample_metadata.splitall(path_to_json)
self.formal_name = pathparts[-2]
# open json file
with open(path_to_json, 'r') as json_file:
data = json.load(json_file)
keys = data.keys()
for key in ["category", "keywords", "images", "redirect_from", "description", "ignore"]:
if key in keys:
setattr(self, key, data[key])
if "title" in keys:
self.friendly_name = data["title"]
if "packages" in keys:
self.nuget_packages = data["packages"]
if "relevant_apis" in keys:
self.relevant_api = data["relevant_apis"]
if "snippets" in keys:
self.source_files = data["snippets"]
# manually correct nuget package if needed
packages = self.nuget_packages.keys()
self.nuget_packages["Esri.ArcGISRuntime"] = self.arcgis_runtime_latest
if self.category == "Hydrography":
self.nuget_packages["Esri.ArcGISRuntime.Hydrography"] = self.arcgis_runtime_latest
if self.category in ["Local Server", "LocalServer"]:
self.nuget_packages["Esri.ArcGISRuntime.LocalServices"] = self.arcgis_runtime_latest
return
def populate_from_readme(self, platform, path_to_readme):
# formal name is the name of the folder containing the json
pathparts = sample_metadata.splitall(path_to_readme)
self.formal_name = pathparts[-2]
# populate redirect_from; it is based on a pattern
real_platform = platform
if real_platform in ["XFI", "XFA", "XFU"]:
real_platform = "Forms"
redirect_string = f"/net/latest/{real_platform.lower()}/sample-code/{self.formal_name.lower()}.htm"
self.redirect_from.append(redirect_string)
# category is the name of the folder containing the sample folder
self.category = pathparts[-3]
# if category is 'Hydrography', add the hydrography package
if self.category == "Hydrography":
self.nuget_packages["Esri.ArcGISRuntime.Hydrography"] = self.arcgis_runtime_latest
elif self.category == "LocalServer" or self.category == "Local Server":
self.nuget_packages["Esri.ArcGISRuntime.LocalServices"] = self.arcgis_runtime_latest
# add the ArcGIS Runtime package always
self.nuget_packages["Esri.ArcGISRuntime"] = self.arcgis_runtime_latest
# read the readme content into a string
readme_contents = ""
try:
readme_file = open(path_to_readme, "r")
readme_contents = readme_file.read()
readme_file.close()
except Exception as err:
# not a sample, skip
print(f"Error populating sample from readme - {path_to_readme} - {err}")
return
# break into sections
readme_parts = readme_contents.split("\n\n") # a blank line is two newlines
# extract human-readable name
title_line = readme_parts[0].strip()
if not title_line.startswith("#"):
title_line = title_line.split("#")[1]
self.friendly_name = title_line.strip("#").strip()
if len(readme_parts) < 3:
# can't handle this, return early
return
if len(readme_parts) < 5: # old style readme
# Take just the first description paragraph
self.description = readme_parts[1]
self.images.append(sample_metadata.extract_image_from_image_string(readme_parts[2]))
return
else:
self.description = readme_parts[1]
self.images.append(sample_metadata.extract_image_from_image_string(readme_parts[2]))
# Read through and add the rest of the sections
examined_readme_part_index = 2
current_heading = ""
para_part_accumulator = []
while examined_readme_part_index < len(readme_parts):
current_part = readme_parts[examined_readme_part_index]
examined_readme_part_index += 1
if not current_part.startswith("#"):
para_part_accumulator.append(current_part)
continue
else:
# process existing heading, skipping if nothing to add
if len(para_part_accumulator) != 0:
self.populate_heading(current_heading, para_part_accumulator)
# get started with new heading
current_heading = current_part
para_part_accumulator = []
# do the last segment
if current_heading != "" and len(para_part_accumulator) > 0:
self.populate_heading(current_heading, para_part_accumulator)
return
def try_replace_with_common_readme(self, platform, path_to_common_dir, path_to_net_readme):
'''
Will read the common readme and replace the sample's readme if found wanting
path_to_common_dir is the path to the samples design folder
Precondition: populate_from_readme already called
'''
# skip if the existing readme is good enough; it is assumed that any sample with tags already has a good readme
if len(self.keywords) > 0:
return
# determine if matching readme exists; if not, return early
match_name = None
dirs = os.listdir(path_to_common_dir)
for dir in dirs:
if dir.lower() == self.formal_name.lower():
match_name = dir
        if match_name is None:
return
# create a new sample_metadata, call populate from readme on the design readme
readme_path = os.path.join(path_to_common_dir, match_name, "readme.md")
if not os.path.exists(readme_path):
return
compare_sample = sample_metadata()
compare_sample.populate_from_readme(platform, readme_path)
# fix the image content
compare_sample.images = [f"{compare_sample.formal_name}.jpg"]
# fix the category
compare_sample.category = self.category
# call flush_to_readme on the newly created sample object
compare_sample.flush_to_readme(path_to_net_readme)
# re-read to pick up any new info
self.reset_props()
self.populate_from_readme(platform, path_to_net_readme)
def flush_to_readme(self, path_to_readme):
template_text = f"# {self.friendly_name}\n\n"
# add the description
if self.description != "":
template_text += f"{self.description}\n\n"
# add the image
if len(self.images) > 0:
template_text += f"\n\n"
# add "Use case" - use_case
if self.use_case != "":
template_text += "## Use case\n\n"
template_text += f"{self.use_case}\n\n"
# add 'How to use the sample' - how_to_use
if self.how_to_use != "" and len(self.how_to_use) > 0:
template_text += "## How to use the sample\n\n"
template_text += f"{self.how_to_use}\n\n"
# add 'How it works' - how_it_works
if len(self.how_it_works) > 0:
template_text += "## How it works\n\n"
stepIndex = 1
for step in self.how_it_works:
if not step.startswith("***"): # numbered steps
template_text += f"{stepIndex}. {step}\n"
stepIndex += 1
else: # sub-bullets
template_text += f" * {step.strip('***')}\n"
template_text += "\n"
# add 'Relevant API' - relevant_api
if len(self.relevant_api) > 0:
template_text += "## Relevant API\n\n"
for api in self.relevant_api:
template_text += f"* {api}\n"
template_text += "\n"
# add 'Offline data' - offline_data
if len(self.offline_data) > 0:
template_text += "## Offline data\n\n"
template_text += "This sample downloads the following items from ArcGIS Online automatically:\n\n"
for item in self.offline_data:
# get the item's name from AGOL
request_url = f"https://www.arcgis.com/sharing/rest/content/items/{item}?f=json"
agol_result = requests.get(url=request_url)
data = agol_result.json()
name = data["name"]
# write out line
template_text += f"* [{name}](https://www.arcgis.com/home/item.html?id={item}) - {data['snippet']}\n"
template_text += "\n"
# add 'About the data' - data_statement
if self.data_statement != "":
template_text += "## About the data\n\n"
template_text += f"{self.data_statement}\n\n"
# add 'Additional information' - additional_info
if self.Additional_info != "":
template_text += "## Additional information\n\n"
template_text += f"{self.Additional_info}\n\n"
# add 'Tags' - keywords
template_text += "## Tags\n\n"
template_text += ", ".join(self.keywords)
template_text += "\n"
# write the output
with open(path_to_readme, 'w+') as file:
file.write(template_text)
return
def flush_to_json(self, path_to_json):
data = {}
data["title"] = self.friendly_name
data["category"] = self.category
data["keywords"] = self.keywords
data["relevant_apis"] = self.relevant_api
data["images"] = self.images
data["snippets"] = self.source_files
data["redirect_from"] = self.redirect_from
data["description"] = self.description
data["ignore"] = self.ignore
data["offline_data"] = self.offline_data
data["nuget_packages"] = self.nuget_packages
data["formal_name"] = self.formal_name
with open(path_to_json, 'w+') as json_file:
json.dump(data, json_file, indent=4, sort_keys=True)
return
def emit_standalone_solution(self, platform, sample_dir, output_root):
'''
Produces a standalone sample solution for the given sample
platform: one of: Android, iOS, UWP, WPF, XFA, XFI, XFU
output_root: output folder; should not be specific to the platform
sample_dir: path to the folder containing the sample's code
'''
# create output dir
output_dir = os.path.join(output_root, platform, self.formal_name)
if os.path.exists(output_dir):
rmtree(output_dir)
os.makedirs(output_dir)
# copy template files over - find files in template
script_dir = os.path.split(os.path.realpath(__file__))[0]
template_dir = os.path.join(script_dir, "templates", "solutions", platform)
copy_tree(template_dir, output_dir)
# copy sample files over
copy_tree(sample_dir, output_dir)
# copy any out-of-dir files over (e.g. Android layouts, download manager)
if len(self.source_files) > 0:
for file in self.source_files:
if ".." in file:
source_path = os.path.join(sample_dir, file)
dest_path = os.path.join(output_dir, "Resources", "layout", os.path.split(file)[1])
copyfile(source_path, dest_path)
# Add forms packages if needed
if platform in ["XFA", "XFI", "XFU"]:
self.nuget_packages["Esri.ArcGISRuntime.Xamarin.Forms"] = self.arcgis_runtime_latest
# accumulate list of source, xaml, axml, and resource files
all_source_files = self.source_files
# generate list of replacements
replacements = {}
replacements["$$project$$"] = self.formal_name
replacements[".slntemplate"] = ".sln" # replacement needed to prevent template solutions from appearing in Visual Studio git browser
replacements["$$embedded_resources$$"] = "" # TODO
replacements["$$nuget_packages$$"] = get_csproj_xml_for_nuget_packages(self.nuget_packages)
replacements["$$code_and_xaml$$"] = get_csproj_xml_for_code_files(all_source_files, platform)
replacements["$$axml_files$$"] = get_csproj_xml_for_android_layout(all_source_files)
replacements["$$current_year$$"] = str(datetime.now().year)
replacements["$$friendly_name$$"] = self.friendly_name
# rewrite files in output - replace template fields
sample_metadata.rewrite_files_in_place(output_dir, replacements)
# write out the sample file
self.emit_dot_sample_file(platform, output_dir)
return
def emit_dot_sample_file(self, platform, output_dir):
output_xml = "<ArcGISRuntimeSDKdotNetSample>\n"
# basic metadata
output_xml += f"\t<SampleName>{self.formal_name}</SampleName>\n"
output_xml += f"\t<SampleDescription>{self.description}</SampleDescription>\n"
output_xml += f"\t<ScreenShot>{self.images[0]}</ScreenShot>\n"
# code files, including XAML
output_xml += "\t<CodeFiles>\n"
for source_file in self.source_files:
output_xml += f"\t\t<CodeFile>{source_file}</CodeFile>\n"
output_xml += "\t</CodeFiles>\n"
# xaml files
output_xml += "\t<XAMLParseFiles>\n"
for source_file in self.source_files:
if source_file.endswith(".xaml"):
output_xml += f"\t\t<XAMLParseFile>{source_file}</XAMLParseFile>\n"
output_xml += "\t</XAMLParseFiles>\n"
# exe
if platform == "WPF":
output_xml += "\t<DllExeFile>bin\debug\ArcGISRuntime.exe</DllExeFile>\n"
elif platform == "UWP" or platform == "XFU":
output_xml += "\t<DllExeFile>obj\\x86\Debug\intermediatexaml\ArcGISRuntime.exe</DllExeFile>\n"
elif platform == "Android" or platform == "XFA":
output_xml += "\t<DllExeFile>bin\debug\ArcGISRuntime.dll</DllExeFile>\n"
elif platform == "iOS" or platform == "XFI":
output_xml += "\t<DllExeFile>bin\iPhone\debug\ArcGISRuntime.exe</DllExeFile>\n"
output_xml += "</ArcGISRuntimeSDKdotNetSample>\n"
filename = os.path.join(output_dir, f"{self.formal_name}.sample")
safe_write_contents(filename, output_xml)
def populate_snippets_from_folder(self, platform, path_to_readme):
'''
Take a path to a readme file
Populate the snippets from: any .xaml, .cs files in the directory;
any .axml files referenced from .cs files on android
'''
# populate files in the directory
sample_dir = os.path.split(path_to_readme)[0]
for file in os.listdir(sample_dir):
if os.path.splitext(file)[1] in [".axml", ".xaml", ".cs"]:
self.source_files.append(file)
# populate AXML layouts for Android
if platform == "Android" and os.path.splitext(file)[1] == ".cs":
# search for line matching SetContentView(Resource.Layout.
referencing_file_path = os.path.join(sample_dir, file)
referencing_file_contents = safe_read_contents(referencing_file_path)
for line in referencing_file_contents.split("\n"):
if "SetContentView(Resource.Layout." in line:
# extract name of layout
layout_name = line.split("Layout.")[1].strip().strip(";").strip(")")
# add the file path to the snippets list
self.source_files.append(f"../../../Resources/layout/{layout_name}.axml")
elif ".Inflate(Resource.Layout." in line:
# extract name of layout
layout_name = line.split("Layout.")[1].strip().strip(";").strip(", null)")
# add the file path to the snippets list
self.source_files.append(f"../../../Resources/layout/{layout_name}.axml")
def rewrite_files_in_place(source_dir, replacements_dict):
'''
Takes a dictionary of strings and replacements, applies the replacements to all the files in a directory.
Used when generating sample solutions.
'''
for r, d, f in os.walk(source_dir):
for sample_dir in d:
sample_metadata.rewrite_files_in_place(os.path.join(r, sample_dir), replacements_dict)
for sample_file_name in f:
sample_file_fullpath = os.path.join(r, sample_file_name)
extension = os.path.splitext(sample_file_fullpath)[1]
if extension in [".cs", ".xaml", ".sln", ".md", ".csproj", ".shproj", ".axml"]:
# open file, read into string
original_contents = safe_read_contents(sample_file_fullpath)
# make replacements
new_content = original_contents
for tag in replacements_dict.keys():
new_content = new_content.replace(tag, replacements_dict[tag])
# write out new file
if new_content != original_contents:
os.remove(sample_file_fullpath)
safe_write_contents(sample_file_fullpath, new_content)
# rename any files (e.g. $$project$$.sln becomes AccessLoadStatus.sln)
new_name = sample_file_fullpath
for tag in replacements_dict.keys():
if tag in sample_file_fullpath:
new_name = new_name.replace(tag, replacements_dict[tag])
if new_name != sample_file_fullpath:
os.rename(sample_file_fullpath, new_name)
def splitall(path):
## Credits: taken verbatim from https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def extract_image_from_image_string(image_string) -> str:
'''
        Takes an image string in the form of 
or <img src="path_toImage.jpg" width="350"/>
and returns 'path_toImage.jpg'
'''
image_string = image_string.strip()
if image_string.startswith("!"): # Markdown-style string
# find index of last )
close_char_index = image_string.rfind(")")
# find index of last (
open_char_index = image_string.rfind("(")
# return original string if it can't be processed further
if close_char_index == -1 or open_char_index == -1:
return image_string
# read between those chars
substring = image_string[open_char_index + 1:close_char_index]
return substring
else: # HTML-style string
# find index of src="
open_match_string = "src=\""
open_char_index = image_string.rfind(open_match_string)
# return original string if can't be processed further
if open_char_index == -1:
return image_string
# adjust open_char_index to account for search string
open_char_index += len(open_match_string)
# read from after " to next "
close_char_index = image_string.find("\"", open_char_index)
# read between those chars
substring = image_string[open_char_index:close_char_index]
return substring
def populate_heading(self, heading_part, body_parts):
'''
param: heading_part - string starting with ##, e.g. 'Use case'
param: body_parts - list of constituent strings
output: determines which field the content belongs in and adds appropriately
e.g. lists will be turned into python list instead of string
'''
# normalize string for easier decisions
heading_parts = heading_part.strip("#").strip().lower().split()
# use case
if "use" in heading_parts and "case" in heading_parts:
content = "\n\n".join(body_parts)
self.use_case = content
return
# how to use
if "use" in heading_parts and "how" in heading_parts:
content = "\n\n".join(body_parts)
self.how_to_use = content
return
# how it works
if "works" in heading_parts and "how" in heading_parts:
step_strings = []
lines = body_parts[0].split("\n")
cleaned_lines = []
for line in lines:
if not line.strip().startswith("*"): # numbered steps
line_parts = line.split('.')
cleaned_lines.append(".".join(line_parts[1:]).strip())
else: # sub-bullets
cleaned_line = line.strip().strip("*").strip()
cleaned_lines.append(f"***{cleaned_line}")
self.how_it_works = cleaned_lines
return
# relevant API
if "api" in heading_parts or "apis" in heading_parts:
lines = body_parts[0].split("\n")
cleaned_lines = []
for line in lines:
# removes nonsense formatting
cleaned_line = line.strip("*").strip("-").split("-")[0].strip("`").strip().strip("`").replace("::", ".")
cleaned_lines.append(cleaned_line)
self.relevant_api = list(dict.fromkeys(cleaned_lines))
self.relevant_api.sort()
return
# offline data
if "offline" in heading_parts:
content = "\n".join(body_parts)
# extract any guids - these are AGOL items
regex = re.compile('[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}', re.I)
matches = re.findall(regex, content)
self.offline_data = list(dict.fromkeys(matches))
return
# about the data
if "data" in heading_parts and "about" in heading_parts:
content = "\n\n".join(body_parts)
self.data_statement = content
return
# additional info
if "additional" in heading_parts:
content = "\n\n".join(body_parts)
self.Additional_info = content
return
# tags
if "tags" in heading_parts:
tags = body_parts[0].split(",")
cleaned_tags = []
for tag in tags:
cleaned_tags.append(tag.strip())
cleaned_tags.sort()
self.keywords = cleaned_tags
return
```
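A minimal usage sketch for the class above, assuming the module is importable as `sample_metadata` and that the readme and output paths shown exist (both paths are hypothetical):

```python
# Hypothetical usage sketch; the import path and file locations are assumptions.
from sample_metadata import sample_metadata

sample = sample_metadata()
# Parse an existing WPF readme into the metadata object.
sample.populate_from_readme("WPF", "Samples/Map/DisplayMap/readme.md")
# Collect the .cs/.xaml snippets that live next to the readme.
sample.populate_snippets_from_folder("WPF", "Samples/Map/DisplayMap/readme.md")
# Write the collected metadata back out as JSON.
sample.flush_to_json("Samples/Map/DisplayMap/readme.metadata.json")
```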
|
{
"source": "jfaach/stock-app",
"score": 2
}
|
#### File: api/stock_email/sender.py
```python
from django.core.mail import send_mail
class Email:
def __init__(self, from_mail, to_mail, subject, message):
self.from_mail = from_mail
self.to_mail = to_mail
self.subject = subject
self.message = message
def send(self):
send_mail(
self.subject,
self.message,
self.from_mail,
[self.to_mail],
fail_silently=False,
)
```
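A short usage sketch for the wrapper above, assuming a Django project with a configured email backend (the addresses are placeholders):

```python
# Hypothetical usage; requires Django settings with a working EMAIL_BACKEND.
from stock_email.sender import Email  # import path is an assumption

email = Email(
    from_mail="alerts@example.com",
    to_mail="user@example.com",
    subject="Price alert",
    message="Your watched stock crossed its target price.",
)
email.send()  # delegates to django.core.mail.send_mail
```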
#### File: api/stock_user/views.py
```python
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .serializers import *
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from stocks.models import Stock
from .models import StockUser
from rest_framework import status
# Create your views here.
@api_view(["GET"])
def stock_user_list(request):
data = []
nextPage = 1
previousPage = 1
user = request.user.id
stock_user = StockUser.objects.all().filter(user=user)
page = request.GET.get("page", 1)
paginator = Paginator(stock_user, 10)
try:
data = paginator.page(page)
except PageNotAnInteger:
data = paginator.page(1)
except EmptyPage:
data = paginator.page(paginator.num_pages)
serializer = StockUserSerializer(data, context={"request": request}, many=True)
if data.has_next():
nextPage = data.next_page_number()
if data.has_previous():
previousPage = data.previous_page_number()
return Response(
{
"data": serializer.data,
"count": paginator.count,
"numpages": paginator.num_pages,
"nextlink": "/api/stockuser/?page=" + str(nextPage),
"prevlink": "/api/stockuser/?page=" + str(previousPage),
}
)
@api_view(["POST"])
def save_stock(request):
if request.method == "POST":
user = request.user.id
stock = request.data["stock"].upper()
stocks = Stock.objects.filter(symbol=stock)
if len(stocks) == 0:
return Response("Stock not exist", status=status.HTTP_400_BAD_REQUEST)
stock = stocks[0]
stock_id = stock.id
data = request.data
data["user"] = user
data["stock"] = stock_id
serializer = StockUserSerializerSave(data=data, context={"request": request})
if serializer.is_valid():
serializer.save()
stock_user = StockUser.objects.all().filter(user=user, stock=stock_id)
serializer = StockUserSerializer(stock_user, many=True)
return Response(serializer.data[0])
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
def delete_stock(request):
if request.method == "POST":
user = request.user.id
stock = request.data["id"]
stocks = StockUser.objects.filter(id=stock, user=user).delete()
dict_response = {"Success": True}
return Response(dict_response)
```
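The views above are plain DRF function views; below is a hedged sketch of exercising them with Django's test client. The URL paths are assumptions inferred from the `nextlink`/`prevlink` strings, and the real routes live in the project's urls.py.

```python
# Hypothetical exercise of the endpoints above; URL paths and the user object
# are assumptions, and the views expect an authenticated request.user.
from django.contrib.auth import get_user_model
from django.test import Client

client = Client()
user = get_user_model().objects.first()  # assumes at least one user exists
client.force_login(user)

# First page of the logged-in user's stocks.
resp = client.get("/api/stockuser/?page=1")
print(resp.status_code, resp.json()["count"])

# Attach a stock; the view upper-cases the symbol before the lookup.
resp = client.post("/api/stockuser/save/", {"stock": "aapl"})
print(resp.status_code)
```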
|
{
"source": "jfabdo/claircli",
"score": 2
}
|
#### File: claircli/claircli/docker_image.py
```python
import logging
from docker.auth import INDEX_NAME, resolve_repository_name
from docker.utils import parse_repository_tag
from .docker_registry import DOCKER_HUP_REGISTRY, LocalRegistry, RemoteRegistry
logger = logging.getLogger(__name__)
class Image(object):
def __init__(self, name, registry=None):
self.name = name
self._layers = []
self._manifest = None
reg, repo, tag = self.parse_id(name)
self.repository = repo
self.tag = tag or 'latest'
if reg == INDEX_NAME:
reg = DOCKER_HUP_REGISTRY
self.repository = 'library/{}'.format(repo)
if isinstance(registry, LocalRegistry):
self.registry = registry
else:
self.registry = RemoteRegistry(reg)
@classmethod
def parse_id(cls, name):
reg_repo, tag = parse_repository_tag(name)
reg, repo = resolve_repository_name(reg_repo)
return reg, repo, tag
def __iter__(self):
return iter(self.layers)
def __len__(self):
return len(self.layers)
def __str__(self):
return '<Image: {}>'.format(self.name)
@property
def manifest(self):
if not self._manifest:
self._manifest = self.registry.get_manifest(self)
return self._manifest
@property
def layers(self):
if not self._layers:
manifest = self.manifest
if isinstance(self.registry, LocalRegistry):
self._layers = [e.replace('/layer.tar', '')
for e in manifest[0]['Layers']]
elif manifest['schemaVersion'] == 1:
self._layers = [e['blobSum']
for e in manifest['fsLayers']][::-1]
elif manifest['schemaVersion'] == 2:
self._layers = [e['digest'] for e in manifest['layers']]
else:
raise ValueError(
'Wrong schemaVersion [%s]' % manifest['schemaVersion'])
return self._layers
def clean(self):
if isinstance(self.registry, LocalRegistry):
self.registry.clean_image(self)
```
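A small sketch of how image names are split by the class above; `parse_id` builds on docker-py's `parse_repository_tag` and `resolve_repository_name`, so the expected tuples below assume docker-py's defaults:

```python
# Hypothetical usage; the import path is an assumption.
from claircli.docker_image import Image

# Official-library image: registry resolves to docker-py's INDEX_NAME ('docker.io').
print(Image.parse_id("ubuntu:20.04"))
# expected: ('docker.io', 'ubuntu', '20.04')

# Fully qualified image name on a private registry.
print(Image.parse_id("registry.example.com/team/app:1.2.3"))
# expected: ('registry.example.com', 'team/app', '1.2.3')
```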
|
{
"source": "jfabellera/facial-time-lapse",
"score": 3
}
|
#### File: facial-time-lapse/src/align-faces.py
```python
import glob
import pickle
import face_recognition as fr
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import cv2
import os
# calculate center of a shape (average of all points)
def centroid(arr):
length = arr.shape[0]
sum_x = np.sum(arr[:, 0])
sum_y = np.sum(arr[:, 1])
return int(round(sum_x / length)), int(round(sum_y / length))
# calculate polar angle of 2 points
def angle(x1, y1, x2, y2):
return np.degrees(np.arctan((y2 - y1) / (x2 - x1)))
# calculate distance between two points
def distance(x1, y1, x2, y2):
return np.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))
# rotates an image around some point (center_x, center_y)
def rotate_image(img, rot_angle, center_x, center_y):
center = (center_x, center_y)
rot_mat = cv2.getRotationMatrix2D(center, rot_angle, 1.0)
result = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
# translate an image by some horizontal and vertical offset
def translate_image(img, hor_shift, vert_shift):
h, w = img.shape[:2]
tran_mat = np.float32([[1, 0, hor_shift], [0, 1, vert_shift]])
result = cv2.warpAffine(img, tran_mat, (w, h))
return result
# scale an image by some magnitude from the center, and crop to 1920x1080
def scale_image(img, scale, desired_width, desired_height):
h, w = img.shape[:2]
result = cv2.resize(img, (int(scale * w), int(scale * h)), interpolation=cv2.INTER_CUBIC)
center = (int(result.shape[0] / 2), int(result.shape[1] / 2))
background = np.zeros((desired_height, desired_width, 3), np.uint8)
h, w = result.shape[:2]
if w >= desired_width:
crop_x = int(desired_width / 2)
else:
crop_x = int(w / 2)
if h >= desired_height:
crop_y = int(desired_height / 2)
else:
crop_y = int(h / 2)
result = result[(center[0] - crop_y):(center[0] + crop_y), (center[1] - crop_x):(center[1] + crop_x)]
h, w = result.shape[:2]
background[int(desired_height / 2 - h / 2):int(desired_height / 2 + h / 2), int(desired_width / 2 - w / 2):int(desired_width / 2 + w / 2)] = result[0:h, 0:w]
return background
def classify_unknown(image, trained_enc):
face_locations = fr.face_locations(image)
unknown_face_encodings = fr.face_encodings(image, face_locations)
face_names = []
for face in unknown_face_encodings:
matches = fr.compare_faces(trained_enc, face, tolerance=0.45)
name = "unknown"
face_distances = fr.face_distance(trained_enc, face)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
# name = trained_name[best_match_index]
name = 'me'
face_names.append(name)
if 'me' in face_names:
face_temp = face_locations[face_names.index('me')]
elif len(face_locations) > 0:
face_temp = face_locations[0]
else:
return None
face = dlib.rectangle(face_temp[3], face_temp[0], face_temp[1], face_temp[2])
return face
def main():
# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--shape-predictor", help="path to facial landmark predictor", default="predictor.dat")
parser.add_argument("-f", "--faces", help="path to trained faces", default="trained_faces.dat")
parser.add_argument("-s", "--source", action="store", required=True, dest="source",
help="source directory of images to align", default="")
parser.add_argument("-d", "--destination", action="store", dest="destination",
help="destination directory for indexed align", default="")
parser.add_argument("-t", "--type", action="store", dest="type",
help="file extension for images to align", choices=['jpg', 'png'], default="jpg")
parser.add_argument("-W", "--width", action="store", dest="width",
help="width of output image", default=1920, type=int)
parser.add_argument("-H", "--height", action="store", dest="height",
help="height of output image", default=1080, type=int)
parser.add_argument("-S", "--scale", action="store", dest="scale",
help="pixel distance between eyes", default=200, type=int)
parser.add_argument("-G", "--gui", action="store_true", help=argparse.SUPPRESS)
args = vars(parser.parse_args())
# initialize dlib's face detector (HOG-based) and then create the facial landmark predictor
# detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# import trained faces into readable dictionary
trained = pickle.load(open(args["faces"], "rb"))
trained_enc = list(trained.values())
# input validation for source directory
if os.path.isdir(args["source"]):
os.chdir(args["source"])
else:
print("Source directory could not be found.")
exit()
# if source and destination directories are the same, make sure user wants to overwrite original files
if not args["gui"] and args["destination"] == args["source"]:
choice = input("Destination directory same as source directory. Modify files in directory [ORIGINALS WILL BE "
"LOST]? (y/n) ")
if choice.lower() != 'y':
exit()
# if there is no specified destination directory, make sure user wants to overwrite original files
if not args["gui"] and not args["destination"]:
choice = input("Destination directory not specified. Modify files in directory [ORIGINALS WILL BE LOST]? (y/n) ")
if choice.lower() == 'y':
args["destination"] = args["source"]
# input validation for destination directory
if not os.path.isdir(args["destination"]):
print("Destination directory could not be found.")
exit()
# retrieve the files of the correct type from the directory and store into an array
files = glob.glob("*." + args["type"])
total = len(files)
cnt = 0
# iterate through all of the different files in the directory
for file in files:
# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["source"] + "\\" + file)
image = imutils.resize(image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
# faces = detector(gray, 1)
face = classify_unknown(image, trained_enc)
if face is not None:
shape = predictor(gray, face)
shape = face_utils.shape_to_np(shape) # 68 points held in a np array
clone = image.copy()
landmarks = face_utils.FACIAL_LANDMARKS_IDXS
height, width = image.shape[:2]
# find centroids which will be used for aligning
right_eye_centroid = centroid(shape[landmarks["right_eye"][0]:landmarks["right_eye"][1]])
left_eye_centroid = centroid(shape[landmarks["left_eye"][0]:landmarks["left_eye"][1]])
nose_centroid = centroid(shape[landmarks["nose"][0]:landmarks["nose"][1]])
# calculate angle (negated because of flipped coordinate grid) and distance between the two eyes
eye_angle = -1 * angle(right_eye_centroid[0], right_eye_centroid[1], left_eye_centroid[0], left_eye_centroid[1])
eye_distance = distance(right_eye_centroid[0], right_eye_centroid[1], left_eye_centroid[0],
left_eye_centroid[1])
# re-center image based on the nose centroid
clone = translate_image(clone, width / 2 - nose_centroid[0], height / 2 - nose_centroid[1])
            # rotate the image to counteract the calculated angle error after re-centering
clone = rotate_image(clone, -1 * eye_angle, width / 2, height / 2)
# scale the image so the eye distance is of the desired value
clone = scale_image(clone, args["scale"] / eye_distance, args["width"], args["height"])
# output the file
cv2.imwrite(args["destination"]+"\\"+file, clone)
if args["gui"]:
cnt += 1
print((cnt/total) * 100)
else:
print(args["destination"]+"\\"+file+" written")
if __name__ == '__main__':
main()
```
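The geometry helpers (`centroid`, `angle`, `distance`) are pure functions, so they can be sanity-checked in isolation; the sketch below assumes they are in scope (e.g. pasted into the same session) and uses made-up landmark points:

```python
# Quick check of the geometry helpers above; the points are illustrative only.
import numpy as np

eye_points = np.array([[100, 120], [110, 118], [120, 121], [130, 119]])
print(centroid(eye_points))         # average of the landmark points -> (115, 120)

print(angle(100, 200, 200, 210))    # tilt between two eye centroids, ~5.7 degrees
print(distance(100, 200, 200, 210)) # pixel distance between them, ~100.5
```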
|
{
"source": "jfaccioli/web-scraping-challenge",
"score": 3
}
|
#### File: web-scraping-challenge/Mission_to_Mars/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
# Create an instance of our Flask app.
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
data_dict = mongo.db.data_dict.find_one()
return render_template("index.html", data=data_dict)
@app.route("/scrape")
def scrape():
data_dict = mongo.db.data_dict
mars_data = scrape_mars.scrape()
data_dict.update({}, mars_data, upsert=True)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
```
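A hedged client-side walk-through of the two routes above, assuming the app is running locally on the default Flask port and MongoDB is reachable at localhost:27017:

```python
# Hypothetical client-side exercise of the routes above.
import requests

# Trigger a fresh scrape; the view upserts the result into Mongo and redirects to "/".
requests.get("http://127.0.0.1:5000/scrape")

# Fetch the index page rendered from the stored document.
page = requests.get("http://127.0.0.1:5000/")
print(page.status_code)
```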
|
{
"source": "jfaccioni/clovars",
"score": 3
}
|
#### File: clovars/bio/treatment.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from clovars.scientific import Gaussian, get_curve
if TYPE_CHECKING:
from clovars.scientific import Curve
class Treatment:
"""Class representing a Treatment that influences Cells."""
def __init__(
self,
name: str | None = None,
division_curve: Curve | None = None,
death_curve: Curve | None = None,
signal_disturbance: dict | None = None,
fitness_memory_disturbance: int | None = None,
) -> None:
"""Initializes a Treatment instance."""
if name is None:
name = "Treatment"
if division_curve is None:
division_curve = Gaussian()
self.division_curve = division_curve
if death_curve is None:
death_curve = Gaussian()
self.name = name
self.death_curve = death_curve
self.signal_disturbance = signal_disturbance
self.fitness_memory_disturbance = fitness_memory_disturbance
def division_chance(
self,
x: float,
) -> float:
"""Returns the division curve PDF evaluated at x."""
return self.division_curve(x=x)
def death_chance(
self,
x: float,
) -> float:
"""Returns the death curve PDF evaluated at x."""
return self.death_curve(x=x)
def plot(
self,
plot_division: bool = True,
plot_death: bool = True,
*args,
**kwargs,
) -> None:
"""Plots the Treatment's curves."""
if plot_division is True:
self.division_curve.plot_pdf(label='Division', *args, **kwargs)
if plot_death is True:
self.death_curve.plot_pdf(label='Death', *args, **kwargs)
def get_treatment(
name: str = '',
division_curve: dict[str, Any] | None = None,
death_curve: dict[str, Any] | None = None,
signal_disturbance: dict[str, Any] | None = None,
fitness_memory_disturbance: int | None = None,
) -> Treatment:
"""Returns a Treatment instance based on the input parameters."""
division_curve = division_curve if division_curve is not None else {}
death_curve = death_curve if death_curve is not None else {}
return Treatment(
name=name,
division_curve=get_curve(**division_curve),
death_curve=get_curve(**death_curve),
signal_disturbance=signal_disturbance,
fitness_memory_disturbance=fitness_memory_disturbance,
)
```
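A sketch of building a `Treatment` through the factory above. The curve dictionaries mirror the keys produced by `CurveParams.to_simulation()` elsewhere in the package; treating them as valid `get_curve` keyword arguments is an assumption:

```python
# Hypothetical usage; the import path and curve keyword names are assumptions.
from clovars.bio.treatment import get_treatment

tmz = get_treatment(
    name="TMZ",
    division_curve={"name": "Gaussian", "mean": 24.0, "std": 4.0},
    death_curve={"name": "Gaussian", "mean": 48.0, "std": 8.0},
)
print(tmz.division_chance(x=24.0))  # division curve PDF evaluated at x = 24
print(tmz.death_chance(x=48.0))     # death curve PDF evaluated at x = 48
```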
#### File: clovars/gui/params_manager.py
```python
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Any
PARAMS_FILE_NAME = 'params.json'
CELL_CSV_FILE_NAME = 'cells.csv'
COLONY_CSV_FILE_NAME = 'colony.csv'
RUN_OUTPUT_FOLDER = "."
VIEW_INPUT_FOLDER = RUN_OUTPUT_FOLDER
VIEW_OUTPUT_FOLDER = os.path.join(RUN_OUTPUT_FOLDER, 'view')
ANALYSIS_INPUT_FOLDER = RUN_OUTPUT_FOLDER
ANALYSIS_OUTPUT_FOLDER = os.path.join(RUN_OUTPUT_FOLDER, 'analysis')
@dataclass
class ParamsManager:
"""DataClass responsible for managing all parameters that the user interacts with through the GUI."""
_run_params: RunParams | None = None
_view_params: ViewParams | None = None
_analysis_params: AnalysisParams | None = None
def __post_init__(self) -> None:
if self._run_params is None:
self._run_params = RunParams()
if self._view_params is None:
self._view_params = ViewParams()
if self._analysis_params is None:
self._analysis_params = AnalysisParams()
def get_run_params(self) -> dict[str, Any]:
"""Returns the parameters currently stored in the RunSettings instance."""
return self._run_params.to_simulation()
def get_view_params(self) -> dict[str, Any]:
"""Returns the parameters currently stored in the ViewSettings instance."""
return self._view_params.to_simulation()
def get_analysis_params(self) -> dict[str, Any]:
"""Returns the parameters currently stored in the AnalysisSettings instance."""
return self._analysis_params.to_simulation()
@dataclass
class RunParams:
"""DataClass holding the parameters needed to run a simulation."""
colony_params_list: list[ColonyParams] | None = None
well_radius: float = 13351.624 # in µm
output_folder: str = RUN_OUTPUT_FOLDER
params_file_name: str = PARAMS_FILE_NAME
cell_csv_file_name: str = CELL_CSV_FILE_NAME
colony_csv_file_name: str = COLONY_CSV_FILE_NAME
warn_on_overwrite: bool = True
stop_at_frame: int | None = 100
stop_at_single_colony_size: int | None = None
stop_at_all_colonies_size: int | None = None
delta: int = 3600
verbose: bool = True
def __post_init__(self):
"""Sets default attributes if their value is None."""
if self.colony_params_list is None:
self.colony_params_list = []
def to_simulation(self) -> dict[str, Any]:
"""Returns dictionary of run simulation parameters, as expected by the run simulation function."""
if not (colony_data := [colony_params.to_simulation() for colony_params in self.colony_params_list]):
colony_data = [{}] # Creates at least one Colony
return {
'colony_data': colony_data,
'well_settings': {
'well_radius': self.well_radius,
},
'simulation_writer_settings': {
'output_folder': self.output_folder,
'parameters_file_name': self.params_file_name,
'cell_csv_file_name': self.cell_csv_file_name,
'colony_csv_file_name': self.colony_csv_file_name,
'warn_on_overwrite': self.warn_on_overwrite,
},
'simulation_runner_settings': {
'delta': self.delta,
'stop_conditions': {
'stop_at_frame': self.stop_at_frame,
'stop_at_single_colony_size': self.stop_at_single_colony_size,
'stop_at_all_colonies_size': self.stop_at_all_colonies_size,
},
},
'verbose': self.verbose,
}
@dataclass
class ColonyParams:
"""DataClass holding the parameters needed to instantiate a Colony."""
copies: int = 1 # copies of this Colony to create
    initial_size: int = 1 # number of cells in the Colony
radius: float = 20.0 # in µm
max_speed: float = 0.020351 # in µm/seconds
fitness_memory: float = 0.0 # between 0 and 1
signal_params: CellSignalParams | None = None
treatment_params_list: list[TreatmentParams] | None = None
def __post_init__(self):
"""Sets the default attributes if their value is None."""
if self.treatment_params_list is None:
self.treatment_params_list = []
def to_simulation(self) -> dict:
"""Returns a dictionary of Colony parameters, as expected by the run simulation function."""
treatment_data = {
treatment_params.frame_added: treatment_params.to_simulation()
for treatment_params in self.treatment_params_list
}
if (signal_params := self.signal_params) is None:
signal_params = CellSignalParams()
signal = signal_params.to_simulation()
return {
'copies': self.copies,
'initial_size': self.initial_size,
'treatment_data': treatment_data,
'cells': {
'radius': self.radius,
'max_speed': self.max_speed,
'fitness_memory': self.fitness_memory,
'signal': signal,
},
}
@dataclass
class TreatmentParams:
"""DataClass holding the parameters needed to instantiate a Treatment."""
name: str
frame_added: int
division_curve_params: CurveParams | None = None
death_curve_params: CurveParams | None = None
signal_disturbance_params: CellSignalParams | None = None
def to_simulation(self) -> dict:
"""Returns a dictionary of Treatment parameters, as expected by the run simulation function."""
        if (division_curve_params := self.division_curve_params) is None:
            division_curve_params = CurveParams()
        division_curve = division_curve_params.to_simulation()
        if (death_curve_params := self.death_curve_params) is None:
            death_curve_params = CurveParams()
        death_curve = death_curve_params.to_simulation()
        if (signal_disturbance_params := self.signal_disturbance_params) is None:
            signal_disturbance_params = CellSignalParams()
        signal_disturbance = signal_disturbance_params.to_simulation()
return {
'name': self.name,
'division_curve': division_curve,
'death_curve': death_curve,
'signal_disturbance': signal_disturbance,
}
@dataclass
class CurveParams:
"""DataClass holding the parameters needed to instantiate a Curve."""
name: str = 'Gaussian'
mean: float = 0.0
std: float = 1.0
k: float = 1.0
a: float = 1.0
s: float = 1.0
def to_simulation(self) -> dict:
"""Returns a dictionary of Curve parameters, as expected by the run simulation function."""
return {
'name': self.name,
'mean': self.mean,
'std': self.std,
'k': self.k,
'a': self.a,
's': self.s,
}
@dataclass
class CellSignalParams:
"""DataClass holding the parameters needed to instantiate a CellSignal."""
name: str = 'Gaussian'
initial_value: float = 0.0
period: float = 0.05
noise: float = 0.05
stochastic_weight: float = 0.05
mean: float = 0.05
std: float = 0.05
k: float = 0.05
def to_simulation(self) -> dict:
"""Returns a dictionary of CellSignal parameters, as expected by the run simulation function."""
return {
'name': self.name,
'initial_value': self.initial_value,
'period': self.period,
'noise': self.noise,
'stochastic_weight': self.stochastic_weight,
'mean': self.mean,
'std': self.std,
'k': self.k,
}
@dataclass
class ViewParams:
"""DataClass holding the parameters needed to view a simulation."""
output_folder: str = VIEW_OUTPUT_FOLDER
simulation_input_folder: str = VIEW_INPUT_FOLDER
parameters_file_name: str = PARAMS_FILE_NAME
cell_csv_file_name: str = CELL_CSV_FILE_NAME
colony_csv_file_name: str = COLONY_CSV_FILE_NAME
colormap_name: str = 'plasma'
dpi: int = 120
show_ete3: bool = False
render_ete3: bool = False
ete3_tree_layout: str = 'signal'
ete3_file_name: str = 'tree'
ete3_file_extension: str = 'png'
show_3D: bool = True
render_3D: bool = False
matplotlib3d_file_name: str = '3D'
matplotlib3d_file_extension: str = 'png'
show_gaussians: bool = False
render_gaussians: bool = False
division_gaussian: bool = False
death_gaussian: bool = False
gaussians_file_name: str = 'gaussians'
gaussians_file_extension: str = 'png'
verbose: bool = True
def to_simulation(self) -> dict[str, Any]:
"""Returns dictionary of view simulation parameters, as expected by the view simulation function."""
return {
'output_folder': self.output_folder,
'simulation_loader_settings': {
'simulation_input_folder': self.simulation_input_folder,
'parameters_file_name': self.parameters_file_name,
'cell_csv_file_name': self.cell_csv_file_name,
'colony_csv_file_name': self.colony_csv_file_name,
},
'view_settings': {
'colormap_name': self.colormap_name,
'dpi': self.dpi,
'show_ete3': self.show_ete3,
'render_ete3': self.render_ete3,
'ete3_tree_layout': self.ete3_tree_layout,
'ete3_file_name': self.ete3_file_name,
'ete3_file_extension': self.ete3_file_extension,
'show_3D': self.show_3D,
'render_3D': self.render_3D,
'matplotlib3d_file_name': self.matplotlib3d_file_name,
'matplotlib3d_file_extension': self.matplotlib3d_file_extension,
'show_gaussians': self.show_gaussians,
'render_gaussians': self.render_gaussians,
'division_gaussian': self.division_gaussian,
'death_gaussian': self.death_gaussian,
'gaussians_file_name': self.gaussians_file_name,
'gaussians_file_extension': self.gaussians_file_extension,
},
'verbose': self.verbose,
}
@dataclass
class AnalysisParams:
"""DataClass holding the parameters needed to view a simulation."""
output_folder: str = ANALYSIS_OUTPUT_FOLDER
simulation_input_folder: str = ANALYSIS_INPUT_FOLDER
parameters_file_name: str = PARAMS_FILE_NAME
cell_csv_file_name: str = CELL_CSV_FILE_NAME
colony_csv_file_name: str = COLONY_CSV_FILE_NAME
compare_treatments: bool = False
treatments_bootstrap_n: int = 1000
plot_dynafit: bool = False
dynafit_start_day: float = 6.0
dynafit_end_day: float = 9.0
cs_group_size_filter: int = 5
cs_merge: bool = False
cs_bins: int = 10
dynafit_bootstrap_n: int = 100
use_log_colony_size: bool = False
show_cell_fate_distributions: bool = False
render_cell_fate_distributions: bool = False
join_treatments: bool = False
distributions_file_name: str = 'dist'
distributions_file_extension: str = 'png'
show_cell_fitness_distributions: bool = False
verbose: bool = True
def to_simulation(self) -> dict[str, Any]:
"""Returns dictionary of analyse simulation parameters, as expected by the analyse_simulation function."""
return {
'output_folder': self.output_folder,
'simulation_loader_settings': {
'simulation_input_folder': self.simulation_input_folder,
'parameters_file_name': self.parameters_file_name,
'cell_csv_file_name': self.cell_csv_file_name,
'colony_csv_file_name': self.colony_csv_file_name,
},
'analysis_settings': {
'compare_treatments': self.compare_treatments,
'treatments_bootstrap_n': self.treatments_bootstrap_n,
'plot_dynafit': self.plot_dynafit,
'dynafit_start_day': self.dynafit_start_day,
'dynafit_end_day': self.dynafit_end_day,
'cs_group_size_filter': self.cs_group_size_filter,
'cs_merge': self.cs_merge,
'cs_bins': self.cs_bins,
'dynafit_bootstrap_n': self.dynafit_bootstrap_n,
'use_log_colony_size': self.use_log_colony_size,
'show_cell_fate_distributions': self.show_cell_fate_distributions,
'render_cell_fate_distributions': self.render_cell_fate_distributions,
'join_treatments': self.join_treatments,
'distributions_file_name': self.distributions_file_name,
'distributions_file_extension': self.distributions_file_extension,
'show_cell_fitness_distributions': self.show_cell_fitness_distributions,
},
'verbose': self.verbose,
}
```
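A minimal sketch of how the dataclasses above compose into the nested settings dictionary consumed by the run function; the values are illustrative and the import path is an assumption:

```python
# Hypothetical composition of the parameter dataclasses above.
from clovars.gui.params_manager import ColonyParams, ParamsManager, RunParams

run_params = RunParams(
    colony_params_list=[ColonyParams(copies=3, initial_size=2)],
    stop_at_frame=48,
)
manager = ParamsManager(_run_params=run_params)
settings = manager.get_run_params()

print(settings["simulation_runner_settings"]["stop_conditions"]["stop_at_frame"])  # 48
print(len(settings["colony_data"]))  # one colony spec (itself requesting 3 copies)
```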
#### File: clovars/scientific/brownian_motion.py
```python
from __future__ import annotations
from scipy.stats import norm
from clovars.scientific import reflect_around_interval
def brownian_motion(
current_value: float,
scale: float,
) -> float:
"""Simulates a brownian motion of the current value, scaled by a given factor."""
fluctuation = norm.rvs(loc=0, scale=(1 - scale) ** 2)
return current_value + fluctuation
def bounded_brownian_motion(
current_value: float,
scale: float,
lower_bound: float = 0.0,
upper_bound: float = 1.0,
) -> float:
"""Bounds the result of a brownian motion by reflecting it back into the interval bounds."""
new_value = brownian_motion(current_value=current_value, scale=scale)
bounded_new_value = reflect_around_interval(x=new_value, lower_bound=lower_bound, upper_bound=upper_bound)
return bounded_new_value
```
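An illustrative random walk built from the helpers above; it assumes the installed clovars package provides `reflect_around_interval`, which keeps each step inside the bounds:

```python
# Illustrative trajectory; the import path is an assumption.
from clovars.scientific.brownian_motion import bounded_brownian_motion

value = 0.5
trajectory = [value]
for _ in range(10):
    # scale close to 1.0 -> small fluctuations; close to 0.0 -> large ones
    value = bounded_brownian_motion(current_value=value, scale=0.9)
    trajectory.append(value)
print(trajectory)  # every step is reflected back into [0.0, 1.0]
```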
#### File: clovars/scientific/cell_signal.py
```python
from __future__ import annotations
import copy
import random
from functools import partial
from typing import Optional
import numpy as np
from scipy.stats import norm, exponnorm
from clovars.scientific import reflect_around_interval
class CellSignal:
"""Represents an abstract Feature that can fluctuate over time."""
def __init__(
self,
initial_value: float = 0.0,
) -> None:
"""Initializes a CellSignal instance."""
self.initial_value = initial_value
if not (-1.0 <= self.initial_value <= 1.0):
raise ValueError(f"{self.__class__.__name__} initial value must be in the interval [-1, 1]")
self.value = self.initial_value
def split(self) -> CellSignal:
"""Copies the values from the CellSignal and returns a new CellSignal instance."""
return copy.copy(self)
def oscillate(
self,
*args,
**kwargs,
) -> None:
"""Oscillates the current Feature value, adding it to the list of values."""
self.value = reflect_around_interval(x=self.get_new_value(*args, **kwargs), lower_bound=-1.0, upper_bound=1.0)
def get_new_value(
self,
*args,
**kwargs,
) -> float:
"""Abstract method meant to be implemented by subclasses of CellSignal."""
raise NotImplementedError # using @abstractmethod here makes testing harder
class SinusoidalCellSignal(CellSignal):
"""Represents a sinusoidal feature."""
def __init__(
self,
initial_value: float = 0.0,
period: int = 3600,
) -> None:
"""Initializes a SinusoidalCellSignal instance."""
super().__init__(initial_value=initial_value)
self.period = period
if self.period <= 0:
raise ValueError(f"{self.__class__.__name__} period cannot be <= zero")
def get_new_value(
self,
current_seconds: int,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.sine(current_seconds=current_seconds)
def sine(
self,
current_seconds: int,
) -> float:
"""Returns the sine wave evaluated at a specific point in time."""
amplitude = 1.0
vertical_shift = 0.0
period_in_radians = (2 * np.pi / self.period)
# The line below took me way longer to get right than I want to admit, but it actually works now
horizontal_shift = np.arcsin((self.initial_value - vertical_shift) / amplitude) / period_in_radians
return amplitude * np.sin(period_in_radians * (current_seconds + horizontal_shift)) + vertical_shift
class StochasticCellSignal(CellSignal):
"""Represents a stochastic feature."""
def __init__(
self,
initial_value: float = 0.0,
noise: float = 0.2,
) -> None:
"""Initializes a StochasticCellSignal instance."""
super().__init__(initial_value=initial_value)
self.noise = noise
if not (0 <= self.noise <= 1):
raise ValueError(f"{self.__class__.__name__} noise must be in the interval [0, 1]")
def get_new_value(
self,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.stochastic()
def stochastic(self) -> float:
"""Returns a random noise signal."""
noise = self.noise * random.uniform(-1, 1)
if (new_value := noise + self.value) > 1.0:
return 1.0
elif new_value < -1.0:
return -1.0
else:
return new_value
class StochasticSinusoidalCellSignal(SinusoidalCellSignal, StochasticCellSignal):
"""Represents a feature with sinusoidal and stochastic components."""
def __init__(
self,
initial_value: float = 0.0,
period: int = 3600,
noise: float = 0.2,
stochastic_weight: float = 0.5,
) -> None:
"""Initializes a StochasticSinusoidalCellSignal instance."""
SinusoidalCellSignal.__init__(self, initial_value=initial_value, period=period)
StochasticCellSignal.__init__(self, initial_value=initial_value, noise=noise)
self.stochastic_weight = stochastic_weight
if not 0 <= self.stochastic_weight <= 1:
raise ValueError("StochasticSinusoidalCellSignal stochastic weight must be in the interval [0, 1]")
self.sine_weight = 1.0 - self.stochastic_weight
def get_new_value(
self,
current_seconds: int,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
sine_component = self.sine(current_seconds=current_seconds) * self.sine_weight
stochastic_component = self.stochastic() * self.stochastic_weight
return sine_component + stochastic_component
class GaussianCellSignal(CellSignal):
"""Represents a feature which oscillates around a mean."""
def __init__(
self,
initial_value: float = 0.0,
mean: float = 0.0,
std: float = 0.05,
) -> None:
"""Initializes a GaussianCellSignal instance."""
super().__init__(initial_value=initial_value)
self.mean = mean
self.std = std
if self.std <= 0:
raise ValueError(f"{self.__class__.__name__} std must be > 0.")
def get_new_value(
self,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.normal()
def normal(self) -> float:
"""Returns a Gaussian value floating around the GaussianCellSignal's current value."""
return self.value + norm.rvs(loc=self.mean, scale=self.std)
class DivisionGaussianCellSignal(GaussianCellSignal):
"""Represents a feature which oscillates around a mean, with different amplitudes when a cell division occurs."""
def __init__(
self,
initial_value: float = 0.0,
mean: float = 0.0,
std: float = 0.05,
std_division_scaling: float = 1.0,
) -> None:
"""Initializes a GaussianCellSignal instance."""
super().__init__(initial_value=initial_value, mean=mean, std=std)
self.std_division_scaling = std_division_scaling
def get_new_value(
self,
has_divided: bool = False,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.normal() if not has_divided else self.division_normal()
def division_normal(self) -> float:
"""Returns a Gaussian value floating around the GaussianCellSignal's current value."""
return self.value + norm.rvs(loc=self.mean, scale=self.std * self.std_division_scaling)
class EMGaussianCellSignal(CellSignal):
"""Represents a feature which oscillates around a mean, tailed towards the positive end."""
def __init__(
self,
initial_value: float = 0.0,
mean: float = 0.0,
std: float = 0.05,
k: float = 1.0,
) -> None:
"""Initializes a GaussianCellSignal instance."""
super().__init__(initial_value=initial_value)
self.mean = mean
self.std = std
self.k = k
def get_new_value(
self,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.em_normal()
def em_normal(self) -> float:
"""Returns an exponentially-modified Gaussian value floating around the EMGaussianCellSignal's current value."""
if (new_value := self.value + exponnorm.rvs(loc=self.mean, scale=self.std, K=self.k)) > 1.0:
return 1.0
elif new_value < -1.0:
return -1.0
else:
return new_value
class ConstantCellSignal(CellSignal):
"""Represents a constant feature."""
def get_new_value(
self,
*args,
**kwargs,
) -> float:
"""Implements the abstract method responsible for getting a new Signal value."""
return self.constant()
def constant(self) -> float:
"""Returns the constant signal."""
return self.value
def get_cell_signal(
name: str = '',
initial_value: Optional[float] = None,
period: Optional[float] = None,
noise: Optional[float] = None,
stochastic_weight: Optional[float] = None,
mean: Optional[float] = None,
std: Optional[float] = None,
std_division_scaling: Optional[float] = None,
k: Optional[float] = None,
) -> CellSignal:
"""Returns a CellSignal instance, according to the input parameters."""
name = name or "Gaussian"
initial_value = initial_value if initial_value is not None else 0.0
period = period if period is not None else 3600
noise = noise if noise is not None else 0.2
stochastic_weight = stochastic_weight if stochastic_weight is not None else 0.5
mean = mean if mean is not None else 0.0
std = std if std is not None else 1.0
std_division_scaling = std_division_scaling if std_division_scaling is not None else 1.0
k = k if k is not None else 1.0
signals = {
'Stochastic': partial(StochasticCellSignal, initial_value=initial_value, noise=noise),
'Sinusoidal': partial(SinusoidalCellSignal, initial_value=initial_value, period=period),
'StochasticSinusoidal': partial(
StochasticSinusoidalCellSignal,
initial_value=initial_value,
period=period,
noise=noise,
stochastic_weight=stochastic_weight
),
'Gaussian': partial(GaussianCellSignal, initial_value=initial_value, mean=mean, std=std),
'DivisionGaussian': partial(
DivisionGaussianCellSignal,
initial_value=initial_value,
mean=mean,
std=std,
std_division_scaling=std_division_scaling,
),
'EMGaussian': partial(EMGaussianCellSignal, initial_value=initial_value, mean=mean, std=std, k=k),
'Constant': partial(ConstantCellSignal, initial_value=initial_value),
}
if name == 'Random':
name = random.choice(list(signals.keys()))
if (signal := signals.get(name)) is None:
raise ValueError(f"Invalid signal type: {name}")
return signal()
```
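A short sketch of the factory above: it builds a mixed sinusoidal/stochastic signal and steps it hour by hour; the import path is an assumption:

```python
# Hypothetical usage of get_cell_signal.
from clovars.scientific.cell_signal import get_cell_signal

signal = get_cell_signal(
    name="StochasticSinusoidal",
    initial_value=0.0,
    period=7200,        # seconds per full sine cycle
    noise=0.1,
    stochastic_weight=0.3,
)
for hour in range(5):
    signal.oscillate(current_seconds=hour * 3600)  # value is reflected into [-1, 1]
    print(f"t={hour}h value={signal.value:+.3f}")
```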
#### File: simulation/view/treatment_drawer.py
```python
from __future__ import annotations
from typing import Generator, TYPE_CHECKING
from matplotlib import pyplot as plt
if TYPE_CHECKING:
from pathlib import Path
from clovars.bio import Treatment
class TreatmentDrawer:
"""Class containing functions to draw and display Treatments."""
def __init__(
self,
treatment_data: dict[tuple[str, int], Treatment],
) -> None:
"""Initializes a TreeDrawer instance."""
self.treatment_data = treatment_data
def display(
self,
show_division: bool,
show_death: bool,
) -> None:
"""Displays the Division and Death Gaussians for each Treatment in the Simulation."""
for figure, _ in self.yield_curves(show_death=show_death, show_division=show_division):
plt.show()
def render(
self,
show_division: bool,
show_death: bool,
folder_path: Path,
file_name: str,
file_extension: str,
) -> None:
"""Renders the Division and Death Gaussians for each Treatment in the Simulation."""
for figure, label in self.yield_curves(show_death=show_death, show_division=show_division):
fname = folder_path / f'{file_name}_{label}.{file_extension}'
figure.savefig(fname)
plt.close(figure)
def yield_curves(
self,
show_death: bool,
show_division: bool,
) -> Generator[tuple[plt.Figure, str], None, None]:
"""Sequentially yields gaussian Figures from the Simulation view."""
for (colony_name, treatment_frame), treatment in self.treatment_data.items():
fig, ax = plt.subplots()
suffix = ''
if show_death is True:
suffix += 'death'
treatment.death_curve.plot_pdf(ax=ax, x_steps=100_000, color='#E96F00', label='Death')
if show_division is True:
suffix += 'div'
treatment.division_curve.plot_pdf(ax=ax, x_steps=100_000, color='#0098B1', label='Division')
label = f'{treatment.name}_{suffix}'
fig.suptitle(
f'Treatment {treatment.name} added on frame {treatment_frame}'
f'\nfor colonies starting with {colony_name}'
)
plt.legend()
yield fig, label
```
#### File: simulation/view/tree_drawer_3D.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.cm import get_cmap
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import art3d
if TYPE_CHECKING:
from pathlib import Path
from clovars.abstract import CellNode
class TreeDrawer3D:
"""Class containing functions to draw and display Cell trees in 3D."""
valid_layouts = [
'family',
'time',
'age',
'generation',
'division',
'death',
'signal',
]
def __init__(
self,
colormap_name: str = 'viridis',
layout: str = 'family',
signal_values: pd.Series | None = None,
age_values: pd.Series | None = None,
time_values: pd.Series | None = None,
generation_values: pd.Series | None = None,
) -> None:
"""Initializes a TreeDrawer instance."""
self.colormap = get_cmap(colormap_name)
self.validate_layout(layout=layout)
self.layout = layout
self.time_normalizer = self.get_normalizer(values=time_values)
self.age_normalizer = self.get_normalizer(values=age_values)
self.generation_normalizer = self.get_normalizer(values=generation_values)
self.division_normalizer = self.get_normalizer(values=None)
self.death_normalizer = self.get_normalizer(values=None)
self.signal_normalizer = self.get_normalizer(values=signal_values)
def validate_layout(
self,
layout: str,
) -> None:
"""Raises a ValueError if the given layout isn't a valid option."""
if layout not in self.valid_layouts:
raise ValueError(f'Invalid layout: {layout}')
@staticmethod
def get_normalizer(values: pd.Series | None = None) -> Normalize:
"""Returns a Normalize instance that normalizes the values in the pandas Series between 0 and 1."""
if values is None:
return Normalize(vmin=0, vmax=1)
if values.empty:
return Normalize(vmin=0, vmax=1)
return Normalize(vmin=values.min(), vmax=values.max())
def display_trees(
self,
root_nodes: list[CellNode],
display_well: bool,
z_axis_ratio: float,
well_radius: float,
) -> None:
"""Displays the trees as a matplotlib 3D plot."""
self.plot_trees(
root_nodes=root_nodes,
display_well=display_well,
z_axis_ratio=z_axis_ratio,
well_radius=well_radius,
)
plt.show()
def render_trees(
self,
root_nodes: list[CellNode],
display_well: bool,
z_axis_ratio: float,
well_radius: float,
folder_path: Path,
file_name: str,
file_extension: str,
) -> None:
"""Renders the trees as a matplotlib 3D plot."""
figure = self.plot_trees(
root_nodes=root_nodes,
display_well=display_well,
z_axis_ratio=z_axis_ratio,
well_radius=well_radius,
)
fname = str(folder_path / f'{file_name}.{file_extension}')
figure.savefig(fname)
plt.close(figure)
def plot_trees(
self,
root_nodes: list[CellNode],
display_well: bool,
z_axis_ratio: float,
well_radius: float,
) -> plt.Figure:
"""Plots the trees in the Well as a 3D plot."""
figure = plt.figure(figsize=(12, 12))
ax = figure.add_subplot(projection='3d')
ax.set_box_aspect((1.0, 1.0, z_axis_ratio))
for root_node in root_nodes:
self.draw_tree(ax=ax, root_node=root_node)
ax.set_xlabel('X coordinate (µm)')
ax.set_ylabel('Y coordinate (µm)')
ax.set_zlabel('Time (h)')
if self.layout not in ('family', 'time'): # add colorbar for other layouts only
self.add_colorbar(figure=figure, ax=ax)
if display_well is True:
self.draw_well(ax=ax, well_radius=well_radius)
self.set_well_limits(ax=ax, well_radius=well_radius)
self.add_legend(ax=ax)
figure.tight_layout()
return figure
def draw_tree(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Draws the tree on a matplotlib 3D plot."""
for branch in root_node.yield_branches():
self.draw_branch(ax=ax, branch=branch)
self.draw_important_cells(ax=ax, root_node=root_node)
def draw_branch(
self,
ax: plt.Axes,
branch: list[CellNode],
) -> None:
"""Draws the branch on a matplotlib 3D plot."""
if self.layout == 'family':
xs, ys, zs = self.get_xyz_from_cell_nodes(cell_nodes=branch)
ax.plot(xs, ys, zs, color='0.7', alpha=0.7, linewidth=1, zorder=1)
else:
for i, _ in enumerate(branch):
branch_segment = branch[i:i+2]
self.draw_branch_segment(ax=ax, branch_segment=branch_segment)
@staticmethod
def get_xyz_from_cell_nodes(cell_nodes: list[CellNode]) -> tuple[list[float], list[float], list[float]]:
"""Returns the XYZ coordinates of each CellNode in the input list."""
xs = [node.x for node in cell_nodes]
ys = [node.y for node in cell_nodes]
zs = [node.simulation_hours for node in cell_nodes]
return xs, ys, zs
def draw_branch_segment(
self,
ax: plt.Axes,
branch_segment: list[CellNode],
) -> None:
"""Draws the branch segment on a matplotlib 3D plot."""
xs, ys, zs = self.get_xyz_from_cell_nodes(cell_nodes=branch_segment)
color = self.get_segment_color(branch_segment=branch_segment)
ax.plot(xs, ys, zs, color=color, linewidth=2, zorder=1)
def get_segment_color(
self,
branch_segment: list[CellNode],
    ) -> tuple[float, float, float, float]:
"""Returns the color for the branch segment."""
values = {
'time': self.get_time_color,
'age': self.get_age_color,
'generation': self.get_generation_color,
'division': self.get_division_color,
'death': self.get_death_color,
'signal': self.get_signal_color,
}[self.layout](branch_segment=branch_segment)
return self.colormap(np.mean(values))
def get_time_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the time layout."""
return self.time_normalizer([node.simulation_hours for node in branch_segment])
def get_age_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the age layout."""
return self.age_normalizer([node.seconds_since_birth / 3600 for node in branch_segment]) # in hours
def get_generation_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the generation layout."""
return self.generation_normalizer([node.generation for node in branch_segment])
def get_division_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the division layout."""
return self.division_normalizer([node.division_threshold for node in branch_segment])
def get_death_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the death layout."""
return self.death_normalizer([node.death_threshold for node in branch_segment])
def get_signal_color(
self,
branch_segment: list[CellNode],
    ) -> np.ndarray:
"""Returns the branch color in the plot, when plotting the tree with the signal layout."""
return self.signal_normalizer([node.signal_value for node in branch_segment])
def draw_important_cells(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Draws important cells (root, parents, dead cells and leaf cells) in the tree."""
self.draw_root(ax=ax, root_node=root_node)
self.draw_parents(ax=ax, root_node=root_node)
self.draw_dead_cells(ax=ax, root_node=root_node)
self.draw_leaf_cells(ax=ax, root_node=root_node)
def draw_cell_nodes(
self,
ax: plt.Axes,
cell_nodes: list[CellNode],
node_marker: str = 'o',
node_color: str = 'gray',
node_size: float = 100.0,
node_zorder: int = 2,
) -> None:
"""Draws the given CellNodes on a matplotlib 3D plot."""
xs, ys, zs = self.get_xyz_from_cell_nodes(cell_nodes=cell_nodes)
ax.scatter(xs, ys, zs, marker=node_marker, color=node_color, s=node_size, zorder=node_zorder)
def draw_root(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Given a CellNode, draws the tree's root on a matplotlib 3D plot"""
self.draw_cell_nodes(ax=ax, cell_nodes=[root_node], node_marker='o', node_color='#3e5199')
def draw_parents(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Given a CellNode, draws the tree's parents on a matplotlib 3D plot"""
parent_nodes = root_node.search_nodes(fate_at_next_frame='division')
self.draw_cell_nodes(ax=ax, cell_nodes=parent_nodes, node_marker='o', node_color='#50993e')
def draw_dead_cells(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Given a CellNode, draws the tree's dead cells on a matplotlib 3D plot."""
dead_nodes = root_node.search_nodes(fate_at_next_frame='death')
self.draw_cell_nodes(ax=ax, cell_nodes=dead_nodes, node_marker='X', node_color='#993e50')
def draw_leaf_cells(
self,
ax: plt.Axes,
root_node: CellNode,
) -> None:
"""Draws the leaf Cells on a matplotlib 3D plot."""
dead_nodes = root_node.search_nodes(fate_at_next_frame='death')
leaf_nodes = [node for node in root_node.get_leaves() if node not in dead_nodes]
self.draw_cell_nodes(ax=ax, cell_nodes=leaf_nodes)
def add_colorbar(
self,
figure: plt.Figure,
ax: plt.Axes,
) -> None:
"""Adds a colorbar to the Figure."""
norm = {
'age': self.age_normalizer,
'generation': self.generation_normalizer,
'division': self.division_normalizer,
'death': self.death_normalizer,
'signal': self.signal_normalizer,
}[self.layout]
label = {
'age': 'Cell Age (h)',
'generation': 'Generation',
'division': 'Division Threshold',
'death': 'Death Threshold',
'signal': 'Signal Value',
}[self.layout]
mappable = plt.cm.ScalarMappable(norm=norm, cmap=self.colormap)
figure.colorbar(mappable=mappable, ax=ax, label=label, shrink=0.5)
@staticmethod
def draw_well(
ax: plt.Axes,
well_radius: float,
) -> None:
"""Draws the Well onto the bottom of the 3D plot."""
well_patch = plt.Circle((well_radius, well_radius), well_radius, color='#232323', alpha=0.3)
ax.add_patch(well_patch)
art3d.pathpatch_2d_to_3d(well_patch, z=0, zdir="z")
@staticmethod
def set_well_limits(
ax: plt.Axes,
well_radius: float,
) -> None:
"""Sets the 3D plot limits based on the drawn Well."""
ax.set_xlim(0, well_radius * 2)
ax.set_ylim(0, well_radius * 2)
ax.set_zlim(bottom=0)
@staticmethod
def add_legend(ax: plt.Axes) -> None:
"""Adds a legend to the Figure."""
handles = [
plt.Line2D([0], [0], marker='o', color='w', markerfacecolor='#3e5199', markersize=15, label='root cell'),
plt.Line2D([0], [0], marker='o', color='w', markerfacecolor='#50993e', markersize=15, label='parent cell'),
plt.Line2D([0], [0], marker='X', color='w', markerfacecolor='#993e50', markersize=15, label='dead cell'),
]
ax.legend(handles=handles)
```
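The `TreeDrawer` above separates layout validation, per-layout normalizers, and the 3D plotting itself, so driving it only requires a colormap, a layout name, and the value ranges used for normalization. The snippet below is a minimal, hypothetical usage sketch: the `colormap_name` keyword (suggested by `get_cmap(colormap_name)` in the constructor), the commented import path, and the `root_nodes`/output locations are assumptions, not part of the file shown above.

```python
from pathlib import Path

import pandas as pd

# Hypothetical usage sketch for TreeDrawer; the import path and any constructor
# arguments not visible above are assumptions.
# from clovars.simulation import TreeDrawer


def render_generation_layout(tree_drawer_cls, root_nodes, output_dir: Path) -> None:
    """Renders one image of the trees colored by generation (illustrative only)."""
    drawer = tree_drawer_cls(
        colormap_name='viridis',                    # assumed keyword, see get_cmap(colormap_name)
        layout='generation',
        generation_values=pd.Series([0, 1, 2, 3]),  # placeholder range for the normalizer
    )
    drawer.render_trees(
        root_nodes=root_nodes,                      # CellNode roots from the simulation output
        display_well=True,
        z_axis_ratio=1.0,
        well_radius=1000.0,                         # placeholder well radius (µm)
        folder_path=output_dir,
        file_name='trees_by_generation',
        file_extension='png',
    )
```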
#### File: simulation/view/view_simulation.py
```python
from __future__ import annotations
from typing import Any
from clovars.IO import SimulationLoader
from clovars.simulation import SimulationViewer
def view_simulation_function(
output_folder: str = 'view',
simulation_loader_settings: dict[str, Any] | None = None,
view_settings: dict[str, Any] | None = None,
verbose: bool = False,
) -> None:
"""Views the result of a Simulation."""
if simulation_loader_settings is None:
simulation_loader_settings = {}
if view_settings is None:
view_settings = {}
if verbose is True:
print('Loading Simulation parameters and trees...')
simulation_loader = SimulationLoader(settings=simulation_loader_settings)
if verbose is True:
        print(f'Loading simulation results from the following input folder:\n"{simulation_loader.input_folder}"\n')
print('Visualizing Simulation trees...')
simulation_viewer = SimulationViewer(
cell_data=simulation_loader.cell_data,
well_radius=simulation_loader.well_radius,
treatment_data=simulation_loader.treatments,
output_folder=output_folder,
verbose=verbose,
)
simulation_viewer.generate_output(settings=view_settings)
if verbose is True:
if not simulation_viewer.is_empty: # TODO: should check if render flags are on, not if dir is empty
print(f'Output files written to the following output folder: {output_folder}')
else:
print('No output files were written (modify the settings file if you want to save output files)')
simulation_viewer.delete_if_empty()
```
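Because `view_simulation_function` falls back to empty settings dictionaries, a minimal call only needs an output folder. The sketch below shows such a call; the import path is an assumption based on the module location, and the accepted settings keys are defined by `SimulationLoader` and `SimulationViewer`, so the dictionaries are left empty here.

```python
# Minimal invocation sketch; the import path is assumed from the module layout.
from clovars.simulation import view_simulation_function

view_simulation_function(
    output_folder='view',
    simulation_loader_settings={},  # loader settings (keys defined by SimulationLoader)
    view_settings={},               # view settings (keys defined by SimulationViewer)
    verbose=True,
)
```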
#### File: Figures and Videos/Figure 2B/produce_fig_2B.py
```python
import random
import sys
from pathlib import Path
import numpy as np
from clovars.main import main as clovars_main
RANDOMNESS_SEED = 31
def main():
"""Main function of this script."""
random.seed(RANDOMNESS_SEED)
np.random.seed(RANDOMNESS_SEED)
for scenario in ['i', 'ii', 'iii']:
sys.argv = ['', 'run', 'Fig_2B_run.toml', f'Fig_2B_colonies_{scenario}.toml']
clovars_main()
sys.argv = ['', 'view', 'Fig_2B_view.toml']
clovars_main()
remove_tree(path=Path('output'))
def remove_tree(path: Path):
"""Recursively deletes files and folders starting from path."""
# Source: https://stackoverflow.com/a/57892171/11161432
for child in path.iterdir():
if child.is_file():
child.unlink()
else:
remove_tree(child)
path.rmdir()
if __name__ == '__main__':
main()
```
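The custom `remove_tree` helper walks the directory tree by hand; when the standard library is acceptable, `shutil.rmtree` performs the same recursive cleanup in one call. A small, equivalent sketch (assuming the `output` folder contains only the run's artifacts):

```python
import shutil
from pathlib import Path

# Equivalent cleanup using the standard library; ignore_errors=True keeps the
# script from failing if the folder was never created.
shutil.rmtree(Path('output'), ignore_errors=True)
```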
#### File: Figures and Videos/Figure 2D/produce_fig_2D.py
```python
from pathlib import Path
import random
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from clovars.main import main as clovars_main
sns.set()
RANDOMNESS_SEED = 42
def main():
"""Main function of this script."""
random.seed(RANDOMNESS_SEED)
np.random.seed(RANDOMNESS_SEED)
dfs = []
for treatment_name in ['control', 'tmz']:
sys.argv = ['', 'run', f'Fig_2D_run_{treatment_name}.toml', f'Fig_2D_colonies_{treatment_name}.toml']
clovars_main()
path = Path('output')
data = pd.read_csv(path / f'colony_output_{treatment_name}.csv', index_col=None)
data['run_name'] = treatment_name
dfs.append(data)
remove_tree(path=path)
df = pd.concat(dfs, ignore_index=True)
fig, ax = plt.subplots()
palette = ['#50993e', '#993e50']
sns.lineplot(
data=df,
ax=ax,
x='simulation_days',
y='size',
hue='run_name',
palette=palette,
linestyle='dashed',
linewidth=5,
zorder=2,
)
sns.lineplot(
data=df,
ax=ax,
x='simulation_days',
y='size',
hue='run_name',
palette=palette,
linestyle='solid',
linewidth=2,
zorder=1,
alpha=0.7,
units='name',
estimator=None,
legend=False,
)
plt.show()
def remove_tree(path: Path):
"""Recursively deletes files and folders starting from path."""
# Source: https://stackoverflow.com/a/57892171/11161432
for child in path.iterdir():
if child.is_file():
child.unlink()
else:
remove_tree(child)
path.rmdir()
if __name__ == '__main__':
main()
```
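The figure above is built from two stacked `seaborn.lineplot` calls: the first draws the per-treatment mean (thick, dashed), the second overlays every individual colony by passing `units=` with `estimator=None`. The self-contained sketch below reproduces that layering on synthetic data so the pattern can be inspected without running the simulation; all column names, group names, and numbers are placeholders.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Synthetic data: five "colonies" per group, each with a growing size over 10 days.
rng = np.random.default_rng(0)
records = []
for group in ('control', 'tmz'):
    for unit in range(5):
        growth = np.cumsum(rng.random(10))
        for day, size in enumerate(growth):
            records.append({'run_name': group, 'name': f'{group}-{unit}',
                            'simulation_days': day, 'size': size})
df = pd.DataFrame(records)

fig, ax = plt.subplots()
# Layer 1: aggregated mean per group (drawn on top).
sns.lineplot(data=df, ax=ax, x='simulation_days', y='size', hue='run_name',
             linestyle='dashed', linewidth=5, zorder=2)
# Layer 2: one trace per colony, no aggregation.
sns.lineplot(data=df, ax=ax, x='simulation_days', y='size', hue='run_name',
             units='name', estimator=None, linestyle='solid', linewidth=2,
             alpha=0.7, zorder=1, legend=False)
plt.show()
```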
#### File: Figures and Videos/Video S2/render_video_S2.py
```python
from __future__ import annotations
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.animation import FuncAnimation
sns.set(font_scale=2)
SCRIPTS_FOLDER_PATH = Path('..')
DATA_PATH = SCRIPTS_FOLDER_PATH / 'data' / 'memory_demo'
INPUT_PATHS = [
DATA_PATH / 'low_memory_control' / 'cell_output.csv',
DATA_PATH / 'high_memory_control' / 'cell_output.csv',
]
OUTPUT_PATH = SCRIPTS_FOLDER_PATH / 'Video S2' / 'Supplementary Video S2.mp4'
def main(
input_paths: list[Path],
output_path: Path,
) -> None:
"""Main function of this script."""
dfs = []
for path in input_paths:
df = pd.read_csv(path, index_col=0)
df['memory'], df['treatment'] = get_info_from_path_name(path=path)
dfs.append(df)
data = pd.concat(dfs, ignore_index=True)
data['$f_m$'] = data['memory'].astype(str)
fig, (top_ax, bottom_ax) = plt.subplots(figsize=(16, 16), nrows=2)
grouped_data = (
data
.groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']
.mean()
.reset_index()
.rename(columns={'signal_value': 'colony_signal_mean'})
)
grouped_data['colony_signal_variance'] = (
data
.groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']
.var()
.reset_index(drop=True)
)
grouped_data['colony_size'] = (
data
.groupby(['$f_m$', 'colony_name', 'simulation_hours'])['signal_value']
.count()
.reset_index(drop=True)
)
grouped_data['colony_size_jitter'] = grouped_data['colony_size'] + grouped_data['$f_m$'].apply(
lambda value: np.random.normal(loc={'0.0': -0.2, '0.9': +0.2}.get(value), scale=0.05)
)
max_hours = grouped_data["simulation_hours"].max()
def update(hour: float) -> None:
"""Updates the plot."""
print('\b' * 100, end='')
        print(f'Video is: {round(100 * (hour / max_hours), 1)}% done...', end='')
hour_data = grouped_data.loc[grouped_data['simulation_hours'] == hour]
for ax, label in (
(top_ax, 'mean'),
(bottom_ax, 'variance')
):
ax.clear()
sns.scatterplot(
ax=ax,
data=hour_data,
x='colony_size_jitter',
y=f'colony_signal_{label}',
hue='$f_m$',
palette=['#029e73', '#de8f05'],
)
ax.set_title(f'Distribution of signal {label} in colonies (N=100 per $f_m$)')
ax.set_xlabel('Colony size')
ax.set_xticks([tick for tick in ax.get_xticks() if tick.is_integer()])
if ax.get_xlim()[-1] < 5:
ax.set_xlim(right=5)
ax.set_ylabel(f'Signal {label} in colonies')
top_ax.set_ylim(-1, 1)
top_ax.set_yticks([-1, 0, 1])
bottom_ax.set_ylim(0, 0.7)
if bottom_ax.get_yticks()[-1] == 0:
bottom_ax.set_yticks([])
fig.suptitle(f'Simulation time: {round(hour, 1)} hours')
fig.tight_layout()
ani = FuncAnimation(fig, update, frames=grouped_data['simulation_hours'].unique())
ani.save(str(output_path))
def get_info_from_path_name(path: Path) -> tuple[float, str]:
"""Returns the memory and treatment based on the Path's words."""
memory = 0.0 if 'low' in path.as_posix().lower() else 0.9
treatment = 'TMZ' if 'tmz' in path.as_posix().lower() else 'Control'
return memory, treatment
if __name__ == '__main__':
main(input_paths=INPUT_PATHS, output_path=OUTPUT_PATH)
```
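Writing the `.mp4` through `FuncAnimation.save` relies on an external encoder (typically ffmpeg) being available on the PATH. The stand-alone sketch below mirrors the update/save pattern of the script with a trivial plot; the file name, frame count, and fps are placeholders.

```python
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation

# Minimal FuncAnimation sketch (requires ffmpeg to write the .mp4 file).
fig, ax = plt.subplots()
xs = np.linspace(0, 2 * np.pi, 100)


def update(frame: int) -> None:
    """Clears the Axes and redraws a sine wave shifted by the current frame."""
    ax.clear()
    ax.plot(xs, np.sin(xs + frame / 10))
    ax.set_title(f'Frame {frame}')


ani = FuncAnimation(fig, update, frames=range(50))
ani.save(str(Path('example_animation.mp4')), writer='ffmpeg', fps=10)
```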
#### File: scripts/RunView/run_and_view.py
```python
import sys
from clovars.main import main as clovars_main
def main() -> None:
"""Main function of this script."""
sys.argv = [sys.argv[0], 'run', 'run.toml', 'colonies.toml']
clovars_main()
sys.argv = [sys.argv[0], 'view', 'view.toml']
clovars_main()
if __name__ == '__main__':
main()
```
#### File: clovars/tests/__init__.py
```python
import unittest
from contextlib import contextmanager
from typing import Sequence
# Flag that ignores decorated_tests
SKIP_TESTS = True
class NotEmptyTestCase(unittest.TestCase):
"""TestCase extension with a method for asserting whether a sequence is empty or not."""
@contextmanager
def assertSequenceNotEmpty(self, sequence: Sequence):
"""ContextManager for asserting that a Sequence has at least one value in it."""
if len(sequence) < 1:
self.fail(f"Empty sequence of type {type(sequence)}")
yield
```
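`assertSequenceNotEmpty` guards loops of per-element assertions that would otherwise pass vacuously on an empty sequence. A small illustrative test case is sketched below; the `from tests import ...` line assumes the test package is importable by the runner, exactly as the project's own test modules do.

```python
import unittest

from tests import NotEmptyTestCase  # assumes the tests package is on the runner's path


class ExampleTestCase(NotEmptyTestCase):
    """Illustrative test showing the context manager guarding a loop of assertions."""

    def test_values_are_positive(self) -> None:
        values = [1, 2, 3]
        with self.assertSequenceNotEmpty(values):  # fails fast if 'values' is empty
            for value in values:
                self.assertGreater(value, 0)


if __name__ == '__main__':
    unittest.main()
```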
#### File: tests/test_bio/test_cell.py
```python
import unittest
from unittest import mock
from unittest.mock import MagicMock
from clovars.abstract import Circle
from clovars.bio import Cell, Treatment
from clovars.scientific import ConstantCellSignal, CellSignal, GaussianCellSignal, Gaussian
from clovars.utils import SimulationError
from tests import NotEmptyTestCase
class TestCell(NotEmptyTestCase):
"""Class representing unit-tests for clovars.bio.cell.Cell class."""
default_delta = 100
control_treatment = Treatment(
name="Control",
division_curve=Gaussian(loc=24.0, scale=5),
death_curve=Gaussian(loc=32, scale=5),
)
    @classmethod
    def setUpClass(cls) -> None:
        """Placeholder for suite-level setup (no class-level setup is currently performed)."""
        pass
def setUp(self) -> None:
"""Sets up the test case subject (a Cell instance)."""
self.cell = Cell()
# def test_cell_has_default_treatment_class_attribute(self) -> None:
# """Tests whether a Cell has a "default_treatment" class attribute (a Treatment instance)."""
# self.assertTrue(hasattr(self.cell, 'default_treatment'))
# self.assertTrue(hasattr(Cell, 'default_treatment'))
# self.assertIsInstance(self.cell.default_treatment, Treatment)
def test_cell_has_name_attribute(self) -> None:
"""Tests whether a Cell has a "name" attribute (a string)."""
self.assertTrue(hasattr(self.cell, 'name'))
self.assertIsInstance(self.cell.name, str)
def test_cell_has_max_speed_attribute(self) -> None:
"""Tests whether a Cell has a "max_speed" attribute (a float value)."""
self.assertTrue(hasattr(self.cell, 'max_speed'))
self.assertIsInstance(self.cell.max_speed, float)
def test_cell_has_fate_attribute(self) -> None:
"""Tests whether a Cell has a "fate" attribute (a string)."""
self.assertTrue(hasattr(self.cell, 'fate'))
self.assertIsInstance(self.cell.fate, str)
def test_fate_attribute_starts_as_migration(self) -> None:
"""Tests whether a Cell starts with its "fate" attribute set to "migration"."""
self.assertEqual(Cell().fate, "migration")
def test_cell_has_seconds_since_birth_attribute(self) -> None:
"""Tests whether a Cell has a "seconds_since_birth" attribute (an integer)."""
self.assertTrue(hasattr(self.cell, 'seconds_since_birth'))
self.assertIsInstance(self.cell.seconds_since_birth, int)
def test_seconds_since_birth_attribute_starts_at_zero(self) -> None:
"""Tests whether a Cell starts with its "seconds_since_birth" attribute set to 0."""
self.assertEqual(Cell().seconds_since_birth, 0)
def test_cell_has_alive_attribute(self) -> None:
"""Tests whether a Cell has an "alive" attribute (a boolean value)."""
self.assertTrue(hasattr(self.cell, 'alive'))
self.assertIsInstance(self.cell.alive, bool)
def test_alive_attribute_starts_true(self) -> None:
"""Tests whether a Cell starts with its "alive" attribute set to True."""
self.assertEqual(Cell().alive, True)
def test_cell_has_senescent_attribute(self) -> None:
"""Tests whether a Cell has a "senescent" attribute (a boolean value)."""
self.assertTrue(hasattr(self.cell, 'senescent'))
self.assertIsInstance(self.cell.senescent, bool)
def test_senescent_attribute_starts_false(self) -> None:
"""Tests whether a Cell starts with its "senescent" attribute set to False."""
self.assertEqual(Cell().senescent, False)
def test_cell_has_fitness_memory_attribute(self) -> None:
"""Tests whether a Cell has a "fitness_memory" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'fitness_memory'))
self.assertIsInstance(self.cell.fitness_memory, float)
def test_fitness_memory_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "fitness_memory"
attribute is initialized outside the [0, 1] interval.
"""
for fitness_memory in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(fitness_memory=fitness_memory)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with fitness_memory = {fitness_memory}"
)
for fitness_memory in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(fitness_memory=fitness_memory)
def test_cell_has_division_threshold_attribute(self) -> None:
"""Tests whether a Cell has a "division_threshold" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'division_threshold'))
self.assertIsInstance(self.cell.division_threshold, float)
def test_division_threshold_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "division_threshold"
attribute is initialized outside the [0, 1] interval.
"""
for division_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(division_threshold=division_threshold)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with division_threshold = {division_threshold}"
)
for division_threshold in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(division_threshold=division_threshold)
def test_cell_division_threshold_attribute_is_between_zero_and_one(self) -> None:
"""
Tests whether the "division_threshold" attribute (random float value) lies between 0 and 1
when it is initialized as a None value.
"""
for _ in range(10):
cell = Cell(division_threshold=None)
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.division_threshold, 0)
self.assertLessEqual(cell.division_threshold, 1)
def test_cell_has_death_threshold_attribute(self) -> None:
"""Tests whether a Cell has a "death_threshold" attribute (a float)."""
self.assertTrue(hasattr(self.cell, 'death_threshold'))
self.assertIsInstance(self.cell.death_threshold, float)
def test_death_threshold_outside_zero_one_range_raises_error(self) -> None:
"""
Tests whether a Cell raises a SimulationError only when its "death_threshold"
attribute is initialized outside the [0, 1] interval.
"""
for death_threshold in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
try:
Cell(death_threshold=death_threshold)
except SimulationError:
self.fail(
"SimulationError was unexpectedly raised when initializing Cell"
f" with death_threshold = {death_threshold}"
)
for death_threshold in [-0.1, 1.1]:
with self.assertRaises(SimulationError):
Cell(death_threshold=death_threshold)
def test_cell_death_threshold_attribute_is_between_zero_and_one(self) -> None:
"""
Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1
when it is initialized as a None value.
"""
for _ in range(10):
cell = Cell(death_threshold=None)
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.death_threshold, 0)
self.assertLessEqual(cell.death_threshold, 1)
def test_cell_has_death_threshold_attribute_is_between_zero_and_one(self) -> None:
"""Tests whether the "death_threshold" attribute (random float value) lies between 0 and 1."""
for _ in range(10):
cell = Cell()
with self.subTest(cell=cell):
self.assertGreaterEqual(cell.death_threshold, 0)
self.assertLessEqual(cell.death_threshold, 1)
def test_cell_has_circle_attribute(self) -> None:
"""Tests whether a Cell has a "circle" attribute (a Circle instance)."""
self.assertTrue(hasattr(self.cell, 'circle'))
self.assertIsInstance(self.cell.circle, Circle)
def test_cell_has_signal_attribute(self) -> None:
"""Tests whether a Cell has a "signal" attribute (a CellSignal instance)."""
self.assertTrue(hasattr(self.cell, 'signal'))
self.assertIsInstance(self.cell.signal, CellSignal)
def test_cell_uses_a_constant_signal_if_signal_argument_is_none(self) -> None:
"""Tests whether a Cell uses a ConstantCellSignal instance when initialized with signal=None."""
cell = Cell(signal=None)
self.assertIsInstance(cell.signal, ConstantCellSignal)
def test_cell_has_treatment_attribute(self) -> None:
"""Tests whether a Cell has a "treatment" attribute (a Treatment instance)."""
self.assertTrue(hasattr(self.cell, 'treatment'))
self.assertIsInstance(self.cell.treatment, Treatment)
# def test_cell_uses_the_default_treatment_if_treatment_argument_is_none(self) -> None:
# """Tests whether a Cell uses the "default_treatment" class attribute when initialized with treatment=None."""
# cell = Cell(signal=None)
# self.assertIs(cell.treatment, self.cell.default_treatment)
def test_calculate_division_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
"""
Tests whether the "calculate_division_chance" method returns a chance between
[0, 1] proportional to the Cell's age.
"""
self.cell.treatment = self.control_treatment # division stats: 24 (+-5) hours
self.cell.seconds_since_birth = 0 # Very low chance of dividing right after birth
self.assertLess(self.cell.calculate_division_chance(delta=self.default_delta), 0.1)
self.cell.seconds_since_birth = 60 * 60 * 1000 # Very high chance of dividing after 1000 h
self.assertGreater(self.cell.calculate_division_chance(delta=self.default_delta), 0.9)
def test_calculate_death_chance_method_returns_chance_depending_on_the_cell_seconds_since_birth(self) -> None:
"""
Tests whether the "calculate_death_chance" method returns a chance between
[0, 1] proportional to the Cell's age.
"""
        self.cell.treatment = self.control_treatment  # death stats: 32 (+-5) hours
self.cell.seconds_since_birth = 0 # Very low chance of dying right after birth
self.assertLess(self.cell.calculate_death_chance(delta=self.default_delta), 0.1)
self.cell.seconds_since_birth = 60 * 60 * 1000 # Very high chance of dying after 1000 h
self.assertGreater(self.cell.calculate_death_chance(delta=self.default_delta), 0.9)
def test_cell_has_circle_attributes_as_properties(self) -> None:
"""Tests whether a Cell exposes relevant Circle attributes as properties."""
test_cell = Cell(x=10.0, y=20.0, radius=5.0)
for attr_name in ['x', 'y', 'radius', 'center', 'area']:
with self.subTest(attr_name=attr_name):
try:
value = getattr(test_cell, attr_name)
self.assertEqual(value, getattr(test_cell.circle, attr_name))
except AttributeError:
self.fail(f"Test failed: could not get attribute {attr_name} in Cell instance {test_cell}")
def test_cell_is_able_to_set_circle_attributes(self) -> None:
"""Tests whether a Cell is able to directly set its "x", "y" and "radius" Circle attributes."""
test_cell = Cell(x=10.0, y=20.0, radius=5.0)
for attr_name in ['x', 'y', 'radius']:
with self.subTest(attr_name=attr_name):
try:
setattr(test_cell, attr_name, 1.0)
except AttributeError:
self.fail(f"Test failed: could not set attribute {attr_name} in Cell instance {test_cell}")
def test_cell_distance_to_method_calculates_cell_distance_using_circles(self) -> None:
"""Tests whether the "distance_to" method uses Circles to calculate distance between Cells."""
other_cell = Cell()
with mock.patch("clovars.abstract.Circle.distance_to") as mock_circle_distance_to:
self.cell.distance_to(other_cell=other_cell)
mock_circle_distance_to.assert_called_once_with(other_cell.circle)
def test_cell_distance_to_method_raises_type_error_if_argument_is_not_a_cell(self) -> None:
"""
Tests whether the "distance_to" method raises a TypeError only when the
other_cell argument is not an actual Cell instance.
"""
valid_argument = Cell()
try:
self.cell.distance_to(other_cell=valid_argument)
except TypeError:
self.fail("Cell raised TypeError unexpectedly!")
invalid_argument = "WHATEVER ELSE"
with self.assertRaises(TypeError):
self.cell.distance_to(other_cell=invalid_argument) # noqa
def test_cell_has_hours_since_birth_property(self) -> None:
"""Tests whether a Cell has an "hours_since_birth" property (a float)."""
self.assertTrue(hasattr(self.cell, 'hours_since_birth'))
self.assertIsInstance(self.cell.hours_since_birth, float)
def test_hours_since_birth_calculations_are_correct(self) -> None:
"""Tests whether the "hours_since_birth" property correctly calculates the Cell's hours since birth."""
for seconds, hours in [(0, 0.0), (60, 1/60), (3600, 1.0), (7200, 2.0), (9000, 2.5)]:
with self.subTest(seconds=seconds, hours=hours):
self.cell.seconds_since_birth = seconds
self.assertEqual(self.cell.hours_since_birth, hours)
def test_cell_has_branch_name_property(self) -> None:
"""Tests whether a Cell has a "branch_name" property (a string)."""
self.assertTrue(hasattr(self.cell, 'branch_name'))
self.assertIsInstance(self.cell.branch_name, str)
def test_branch_name_returns_root_name_up_to_first_division(self) -> None:
"""Tests whether the "branch_name" property returns the Cell's root name, including the branch number."""
for cell_name, branch_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e-5'), ('4d-3.2.2.1.2', '4d-3')]:
with self.subTest(cell_name=cell_name, branch_name=branch_name):
self.cell.name = cell_name
self.assertEqual(self.cell.branch_name, branch_name)
def test_cell_has_colony_name_property(self) -> None:
"""Tests whether a Cell has a "colony_name" property (a string)."""
self.assertTrue(hasattr(self.cell, 'colony_name'))
self.assertIsInstance(self.cell.colony_name, str)
def test_colony_name_returns_root_name_up_to_branch_name(self) -> None:
"""Tests whether the "colony_name" property returns the Cell's root name, excluding the branch number."""
for cell_name, colony_name in [('1', '1'), ('3b.1', '3b'), ('15e-5.1.2', '15e'), ('4d-3.2.2.1.2', '4d')]:
with self.subTest(cell_name=cell_name, colony_name=colony_name):
self.cell.name = cell_name
self.assertEqual(self.cell.colony_name, colony_name)
def test_cell_has_generation_property(self) -> None:
"""Tests whether a Cell has a "generation" property (an integer)."""
self.assertTrue(hasattr(self.cell, 'generation'))
self.assertIsInstance(self.cell.generation, int)
def test_generation_returns_cell_name_prefix(self) -> None:
"""
Tests whether the "generation" property returns the number of times that the Cell has divided
based on its name.
"""
for cell_name, generation in [('1', 0), ('3b.1', 1), ('15e-5.1.2', 2), ('4d-3.2.2.1.2', 4)]:
with self.subTest(cell_name=cell_name, generation=generation):
self.cell.name = cell_name
self.assertEqual(self.cell.generation, generation)
def test_cell_has_signal_value_property(self) -> None:
"""Tests whether a Cell has a "signal_value" property (a float)."""
self.assertTrue(hasattr(self.cell, 'signal_value'))
self.assertIsInstance(self.cell.signal_value, float)
def test_signal_value_returns_current_signal_value(self) -> None:
"""Tests whether the "signal_value" property returns the CellSignal's current value."""
signal = GaussianCellSignal()
test_cell = Cell(signal=signal)
for _ in range(10):
signal.oscillate(current_seconds=0)
current_signal_value = signal.value
with self.subTest(current_signal_value=current_signal_value):
self.assertEqual(test_cell.signal_value, current_signal_value)
def test_set_cell_fate_method_sets_fate_to_death_if_cell_should_die(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "death"
if the "should_die" method returns True.
"""
with mock.patch('clovars.bio.Cell.should_die', return_value=True):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "death")
def test_should_die_returns_boolean_based_on_death_chance_and_threshold(self) -> None:
"""Tests whether the "should_die" method returns True/False depending on the Cell's death chance."""
self.cell.death_threshold = 1.1 # death chance is in [0, 1], cell never dies here
self.assertFalse(self.cell.should_die(delta=self.default_delta))
self.cell.death_threshold = -0.1 # death chance is in [0, 1], cell always dies here
self.assertTrue(self.cell.should_die(delta=self.default_delta))
def test_set_cell_fate_method_sets_fate_to_division_if_cell_should_divide(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "division"
if the "should_die" method returns False and "should_divide" returns True.
"""
with (
mock.patch('clovars.bio.Cell.should_die', return_value=False),
mock.patch('clovars.bio.Cell.should_divide', return_value=True),
):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "division")
def test_should_divide_returns_boolean_based_on_division_chance_and_threshold(self) -> None:
"""Tests whether the "should_divide" method returns True/False depending on the Cell's division chance."""
        self.cell.division_threshold = 1.1  # division chance is in [0, 1], cell never divides here
        self.assertFalse(self.cell.should_divide(delta=self.default_delta))
        self.cell.division_threshold = -0.1  # division chance is in [0, 1], cell always divides here
        self.assertTrue(self.cell.should_divide(delta=self.default_delta))
def test_set_cell_fate_method_sets_fate_to_migration_if_cell_should_not_die_nor_divide(self) -> None:
"""
Tests whether the "set_cell_fate" method sets the Cell fate to "migration"
if both "should_die" and "should_divide" methods returns False.
"""
with (
mock.patch('clovars.bio.Cell.should_die', return_value=False),
mock.patch('clovars.bio.Cell.should_divide', return_value=False),
):
self.cell.set_cell_fate(delta=self.default_delta)
self.assertEqual(self.cell.fate, "migration")
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_die_if_cell_fate_is_to_die(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "die" method if the Cell fate is set to "death"."""
self.cell.fate = 'death'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_called_once()
mock_divide.assert_not_called()
mock_migrate.assert_not_called()
def test_pass_time_method_returns_none_if_cell_fate_is_to_die(self) -> None:
"""Tests whether the "pass_time" method returns None if the Cell fate is set to "death"."""
self.cell.fate = 'death'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsNone(return_value)
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_divide_if_cell_fate_is_to_divide(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "divide" method if the Cell fate is set to "division"."""
self.cell.fate = 'division'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_not_called()
mock_divide.assert_called_once()
mock_migrate.assert_not_called()
def test_pass_time_method_returns_a_tuple_of_child_cells_if_cell_fate_is_to_divide(self) -> None:
"""Tests whether the "pass_time" method returns a tuple of child Cells if the Cell fate is set to "division"."""
self.cell.fate = 'division'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsInstance(return_value, tuple)
for thing in return_value:
self.assertIsInstance(thing, Cell)
self.assertIsNot(thing, self.cell)
@mock.patch('clovars.bio.Cell.migrate')
@mock.patch('clovars.bio.Cell.divide')
@mock.patch('clovars.bio.Cell.die')
def test_pass_time_method_calls_migrate_if_cell_fate_is_to_migrate(
self,
mock_die: MagicMock,
mock_divide: MagicMock,
mock_migrate: MagicMock,
) -> None:
"""Tests whether the "pass_time" method calls the "migrate" method if the Cell fate is set to "migration"."""
self.cell.fate = 'migration'
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
mock_die.assert_not_called()
mock_divide.assert_not_called()
mock_migrate.assert_called_once()
def test_pass_time_method_returns_the_same_cell_if_cell_fate_is_to_migrate(self) -> None:
"""Tests whether the "pass_time" method returns the own Cell instance if the Cell fate is set to "migration"."""
self.cell.fate = 'migration'
return_value = self.cell.pass_time(delta=self.default_delta, current_seconds=0)
self.assertIsInstance(return_value, Cell)
self.assertIs(return_value, self.cell)
def test_pass_time_method_raises_value_error_if_cell_fate_is_unexpected(self) -> None:
"""Tests whether the "pass_time" method raises a ValueError if the Cell fate value is unexpected."""
self.cell.fate = 'UNEXPECTED VALUE!'
with self.assertRaises(ValueError):
self.cell.pass_time(delta=self.default_delta, current_seconds=0)
def test_die_method_sets_the_state_of_the_alive_flag_to_false(self) -> None:
"""Tests whether the "die" method sets the state of the "alive" flag to False."""
self.assertTrue(self.cell.alive)
self.cell.die()
self.assertFalse(self.cell.alive)
def test_divide_method_returns_a_tuple_of_two_cells_with_matching_names(self) -> None:
"""Tests whether the "divide" returns a tuple of two child Cells with matching names (ending in .1 and .2)."""
children = self.cell.divide(delta=self.default_delta)
self.assertIsInstance(children[0], Cell)
self.assertEqual(children[0].name, self.cell.name + '.1')
self.assertIsInstance(children[1], Cell)
self.assertEqual(children[1].name, self.cell.name + '.2')
def test_get_child_cell_returns_a_new_cell_instance(self) -> None:
"""Tests whether the "get_child_cell" method returns a new Cell instance."""
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
self.assertIsInstance(child_cell, Cell)
self.assertIsNot(child_cell, self.cell)
def test_get_child_cell_adds_the_branch_name_to_the_parent_cell_name(self) -> None:
"""Tests whether the Cell returned from "get_child_cell" has the same base name as its parent + branch name."""
for branch_name in ['1', '2', 'BRANCH_NAME', '...', '']:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name=branch_name)
with self.subTest(branch_name=branch_name):
self.assertEqual(child_cell.name, f"{self.cell.name}.{branch_name}")
def test_get_child_cell_method_moves_cell(self) -> None:
"""Tests whether the "migrate" method moves the Cell from its previous position."""
previous_cell_center = self.cell.center
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertNotEqual(same_cell.center, previous_cell_center) # unlikely to be equal, but it may happen...
def test_get_child_cell_copies_attributes_from_parent_cell(self) -> None:
"""Tests whether the Cell returned from "get_child_cell" has some identical attributes as its parent."""
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
for attr_name in ['max_speed', 'radius', 'fitness_memory', 'treatment']:
with self.subTest(attr_name=attr_name):
self.assertEqual(getattr(child_cell, attr_name), getattr(self.cell, attr_name))
    def test_get_child_cell_calls_get_child_fitness_to_assign_the_child_thresholds(self) -> None:
        """
        Tests whether the Cell returned from "get_child_cell" has division and death threshold values
        returned from the parent's "get_child_fitness" method.
        """
mock_fitness = (0.1, 0.2)
with mock.patch.object(self.cell, 'get_child_fitness', return_value=mock_fitness) as mock_get_cell_fitness:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
mock_get_cell_fitness.assert_called()
self.assertIn(child_cell.division_threshold, mock_fitness)
self.assertIn(child_cell.death_threshold, mock_fitness)
def test_get_child_cell_uses_signal_split_to_assign_a_new_signal_to_child_cell(self) -> None:
"""
Tests whether the Cell returned from "get_child_cell" has a signal
returned from the parent's signal's "split" method.
"""
with mock.patch('clovars.scientific.CellSignal.split') as mock_split:
child_cell = self.cell.get_child_cell(delta=self.default_delta, branch_name='')
mock_split.assert_called_once()
self.assertIs(child_cell.signal, mock_split.return_value)
def test_get_new_xy_coordinates_method_returns_a_tuple_of_floats(self) -> None:
"""Tests whether the "get_new_xy_coordinates" method returns a tuple of floats."""
xy = self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='migration')
self.assertIsInstance(xy, tuple)
for thing in xy:
self.assertIsInstance(thing, float)
def test_get_new_xy_coordinates_method_raises_value_error_if_event_name_is_not_migration_or_division(self) -> None:
"""
Tests whether the "get_new_xy_coordinates" raises a ValueError if the
event name argument isn't "migration" or "division".
"""
for event_name in ['migration', 'division']:
with self.subTest(event_name=event_name):
try:
                    self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name=event_name)
except ValueError:
self.fail(f'Call to "get_new_xy_coordinates" failed unexpectedly with event_name="{event_name}"')
with self.assertRaises(ValueError):
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name="INVALID EVENT NAME")
def test_get_new_xy_coordinates_method_uses_smaller_search_radius_on_division(self) -> None:
"""Tests whether the "get_new_xy_coordinates" uses a smaller search radius when the event name is "division"."""
with mock.patch('clovars.bio.cell.Circle') as mock_circle_init_migration:
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='migration')
migration_radius = mock_circle_init_migration.call_args[1]['radius']
with mock.patch('clovars.bio.cell.Circle') as mock_circle_init_division:
self.cell.get_new_xy_coordinates(delta=self.default_delta, event_name='division')
division_radius = mock_circle_init_division.call_args[1]['radius']
self.assertGreater(migration_radius, division_radius)
def test_get_child_fitness_method_returns_tuple_of_floats(self) -> None:
"""
Tests whether the "get_child_fitness" method returns a tuple of floats
representing the child Cell's division and death thresholds.
"""
return_value = self.cell.get_child_fitness()
self.assertIsInstance(return_value, tuple)
with self.assertSequenceNotEmpty(return_value):
for thing in return_value:
self.assertIsInstance(thing, float)
def test_get_child_fitness_method_returns_values_from_bounded_brownian_fluctuation_function(self) -> None:
"""
Tests whether the "get_child_fitness" method returns values from the
"bounded_brownian_fluctuation_function" function using the appropriate parameters from the Cell.
"""
with mock.patch('clovars.bio.cell.bounded_brownian_motion') as mock_brownian_motion:
self.cell.get_child_fitness()
mock_brownian_motion.assert_any_call(current_value=self.cell.division_threshold, scale=self.cell.fitness_memory)
mock_brownian_motion.assert_any_call(current_value=self.cell.death_threshold, scale=self.cell.fitness_memory)
def test_migrate_method_returns_the_same_cell(self) -> None:
"""Tests whether the "migrate" method returns the same Cell."""
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertIs(same_cell, self.cell)
def test_migrate_method_adds_delta_seconds_to_the_cell_seconds_since_birth(self) -> None:
"""Tests whether the "migrate" method adds delta seconds to the Cell's "seconds_since_birth" attribute."""
previous_seconds_since_birth = self.cell.seconds_since_birth
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertEqual(same_cell.seconds_since_birth, previous_seconds_since_birth + self.default_delta)
def test_migrate_method_moves_cell(self) -> None:
"""Tests whether the "migrate" method moves the Cell from its previous position."""
previous_cell_center = self.cell.center
same_cell = self.cell.migrate(delta=self.default_delta)
self.assertNotEqual(same_cell.center, previous_cell_center) # unlikely to be equal, but it may happen...
def test_fluctuate_signal_method_calls_signal_oscillate_method(self) -> None:
"""Tests whether the "fluctuate_signal" method calls the signal's "oscillate" method."""
self.cell.signal = (signal_mock := MagicMock())
self.cell.fluctuate_signal(current_seconds=0)
signal_mock.oscillate.assert_called_once_with(current_seconds=0)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/test_bio/test_colony.py
```python
import unittest
import numpy as np
from clovars.bio import Cell, Colony, Treatment
from clovars.scientific import ConstantCellSignal
from tests import SKIP_TESTS
class TestColony(unittest.TestCase):
"""Class representing unit-tests for clovars.bio.colony.Colony class."""
default_delta = 100
def setUp(self) -> None:
"""Sets up the test case subject (a Colony instance)."""
self.colony = Colony()
def test_colony_has_default_treatment_regimen_attribute(self) -> None:
"""Tests whether a Colony has a "default_treatment_regimen" attribute (a dictionary)."""
self.assertTrue(hasattr(self.colony, 'default_treatment_regimen'))
self.assertIsInstance(self.colony.default_treatment_regimen, dict)
for key, value in self.colony.default_treatment_regimen.items():
self.assertIsInstance(key, int)
self.assertIsInstance(value, Treatment)
def test_colony_has_cells_attribute(self) -> None:
"""Tests whether a Colony has a "cells" attribute (a list)."""
self.assertTrue(hasattr(self.colony, 'cells'))
self.assertIsInstance(self.colony.cells, list)
def test_cells_attribute_is_empty_list_when_cells_argument_is_none(self) -> None:
"""Tests whether the "cells" attribute is an empty list when a Colony is initialized with cells=None."""
self.assertEqual(Colony(cells=None).cells, [])
def test_colony_has_seconds_since_birth_attribute(self) -> None:
"""Tests whether a Colony has a "seconds_since_birth" attribute (an int)."""
self.assertTrue(hasattr(self.colony, 'seconds_since_birth'))
self.assertIsInstance(self.colony.seconds_since_birth, int)
def test_colony_has_treatment_regimen_attribute(self) -> None:
"""Tests whether a Colony has a "treatment_regimen" attribute (a dictionary)."""
self.assertTrue(hasattr(self.colony, 'treatment_regimen'))
self.assertIsInstance(self.colony.treatment_regimen, dict)
def test_treatment_regimen_attribute_is_the_default_when_treatment_regimen_argument_is_none(self) -> None:
"""
Tests whether the "treatment_regimen" attribute is set to the default Treatment regimen
when a Colony is initialized with treatment_regimen=None.
"""
self.assertEqual(Colony(treatment_regimen=None).treatment_regimen, Colony.default_treatment_regimen)
def test_colony_equality_compares_the_cell_lists(self) -> None:
"""Tests whether comparing two Colonies for equality compares each Colony's Cells list."""
other_colony = Colony()
self.assertEqual(self.colony, other_colony)
other_colony.cells = [Cell()]
self.assertNotEqual(self.colony, other_colony)
def test_colony_length_returns_number_of_cells_in_the_colony(self) -> None:
"""Tests whether the Colony length corresponds to the number of Cells in it."""
self.assertEqual(len(self.colony), 0)
self.colony.cells = [Cell(), Cell(), Cell()]
self.assertEqual(len(self.colony), 3)
def test_colony_iteration_yields_cells_from_colony(self) -> None:
"""Tests whether iterating over the Colony yields Cells from the Cells list."""
cells = [Cell(), Cell(), Cell()]
self.colony.cells = cells
for cell, cell_from_colony in zip(cells, self.colony):
self.assertIs(cell, cell_from_colony)
def test_colony_getitem_returns_the_cell_at_the_index(self) -> None:
"""Tests whether calling the Colony's "__getitem__" special method returns the Cell at the i-th index."""
with self.assertRaises(TypeError):
self.colony['abc'] # noqa
with self.assertRaises(IndexError):
self.colony[0] # noqa
self.colony.cells = [Cell(), Cell(), Cell()]
self.assertIs(self.colony[0], self.colony.cells[0])
self.assertIs(self.colony[1], self.colony.cells[1])
self.assertIs(self.colony[2], self.colony.cells[2])
with self.assertRaises(IndexError):
self.colony[3] # noqa
def test_colony_boolean_returns_whether_there_are_cells_in_the_colony(self) -> None:
"""Tests whether the Colony is considered to be True if there are Cells in it, and False otherwise."""
self.assertFalse(self.colony)
self.colony.cells = [Cell()]
self.assertTrue(self.colony)
def test_name_property_returns_name_from_the_first_cell(self) -> None:
"""Tests whether the Colony's "name" property returns the first Cell's "colony_name"."""
self.colony.cells = [Cell(name='1.2.1')]
self.assertEqual(self.colony.name, '1')
self.colony.cells = [Cell(name='19.1.1')]
self.assertEqual(self.colony.name, '19')
self.colony.cells.append(Cell(name='1.2.1'))
self.assertEqual(self.colony.name, '19')
def test_name_property_returns_none_if_colony_is_empty(self) -> None:
"""Tests whether the "name" property returns None if the Colony has no Cells in it."""
self.assertIsNone(Colony().name)
def test_center_property_returns_the_colony_average_xy_coordinate(self) -> None:
"""Tests whether the "center" property returns the Colony's "average" XY coordinate, considering its Cells."""
self.colony.cells.append(Cell(x=0, y=10))
self.assertEqual(self.colony.center, (0, 10))
self.colony.cells.append(Cell(x=10, y=32))
self.assertEqual(self.colony.center, (5, 21))
self.colony.cells.append(Cell(x=50, y=21))
self.assertEqual(self.colony.center, (20, 21))
def test_center_property_returns_none_if_colony_is_empty(self) -> None:
"""Tests whether the "center" property returns None if the Colony has no Cells in it."""
self.assertIsNone(Colony().center)
def test_is_dead_returns_true_if_colony_is_empty(self) -> None:
"""Tests whether the "is_dead" method returns False if the Colony has no Cells in it."""
self.assertTrue(Colony(cells=[]).is_dead())
def test_is_dead_returns_true_when_all_cells_in_it_are_dead(self) -> None:
"""Tests whether the "is_dead" method returns True only if all Cells in the Colony are dead."""
for _ in range(5):
cell = Cell()
self.colony.cells.append(cell)
self.assertFalse(self.colony.is_dead())
cell.die()
self.assertTrue(self.colony.is_dead())
def test_signal_mean_method_returns_the_mean_across_all_cell_signals(self) -> None:
"""Tests whether the Colony "signal_mean" property returns the Signal mean across all Cells in the Colony."""
signal_values = [1.0, 0.5, 0.0, -0.5, -1.0]
self.colony.cells = [
Cell(signal=ConstantCellSignal(initial_value=initial_value))
for initial_value in signal_values
]
self.assertEqual(self.colony.signal_mean(), np.mean(signal_values))
    def test_signal_std_method_returns_the_std_across_all_cell_signals(self) -> None:
"""
Tests whether the Colony "signal_std" property returns the Signal standard deviation
across all Cells in the Colony.
"""
signal_values = [1.0, 0.5, 0.0, -0.5, -1.0]
self.colony.cells = [
Cell(signal=ConstantCellSignal(initial_value=initial_value))
for initial_value in signal_values
]
self.assertEqual(self.colony.signal_std(), np.std(signal_values))
@unittest.skipIf(SKIP_TESTS is True, "SKIP TESTS is set to True")
def test_pass_time_method_(self) -> None:
"""Docstring."""
self.fail('Write the test!')
if __name__ == '__main__':
unittest.main()
```
#### File: tests/test_IO/test_colony_loader.py
```python
import unittest
from unittest import mock
from clovars.bio import Cell, Colony, Treatment
from clovars.IO import ColonyLoader
from tests import NotEmptyTestCase
class TestColonyLoader(NotEmptyTestCase):
"""Class representing unit-tests for clovars.IO.colony_loader.ColonyLoader class."""
def setUp(self) -> None:
"""Sets up the test case subject (a ColonyLoader instance)."""
self.colony_loader = ColonyLoader()
def test_colony_loader_has_default_class_attribute(self) -> None:
"""Tests whether a ColonyLoader has the expected default class attributes."""
for class_attr_name in ['default_cell_radius', 'default_cell_max_speed', 'default_fitness_memory']:
self.assertTrue(hasattr(ColonyLoader, class_attr_name))
self.assertIsInstance(getattr(ColonyLoader, class_attr_name), float)
def test_colony_loader_has_colonies_attribute(self) -> None:
"""Tests whether a ColonyLoader has a "colonies" attribute (a list)."""
self.assertTrue(hasattr(self.colony_loader, 'colonies'))
self.assertIsInstance(self.colony_loader.colonies, list)
def test_colony_loader_instantiation_calls_parse_colony_data_if_colony_data_is_truthy(self) -> None:
"""
Tests whether a ColonyLoader calls the "parse_colony_data" upon initialization
if the "colony_data" argument is truthy.
"""
for falsy_value in [[], None]:
with mock.patch('clovars.IO.ColonyLoader.parse_colony_data') as mock_parse_colony_data:
ColonyLoader(colony_data=falsy_value)
mock_parse_colony_data.assert_not_called()
for truthy_value in [[{'colonies_here': 0}], [{'colonies_here': 1, 'more colonies!': 2}]]:
with mock.patch('clovars.IO.ColonyLoader.parse_colony_data') as mock_parse_colony_data:
ColonyLoader(colony_data=truthy_value)
mock_parse_colony_data.assert_called_once_with(colony_data=truthy_value)
def test_parse_colony_data_appends_to_the_colony_list(self) -> None:
"""Tests whether the "parse_colony_data" method appends Colonies to the Colonies list."""
self.assertEqual(len(self.colony_loader.colonies), 0)
with mock.patch('clovars.IO.ColonyLoader.create_colony'):
self.colony_loader.parse_colony_data(colony_data=[{'copies': 1}])
self.assertEqual(len(self.colony_loader.colonies), 1)
def test_parse_colony_data_appends_multiple_copies(self) -> None:
"""Tests whether the "parse_colony_data" method appends one Colony for each copy in the "colony_data"."""
with mock.patch('clovars.IO.ColonyLoader.create_colony'):
for i in range(5):
with self.subTest(i=i):
colony_loader = ColonyLoader()
colony_loader.parse_colony_data(colony_data=[{'copies': i}])
self.assertEqual(len(colony_loader.colonies), i)
def test_parse_colony_data_appends_all_colonies_in_list(self) -> None:
"""Tests whether the "parse_colony_data" method appends one Colony for each dictionary in the "colony_data"."""
with mock.patch('clovars.IO.ColonyLoader.create_colony'):
for i in range(1, 5):
colony_loader = ColonyLoader()
colony_loader.parse_colony_data(colony_data=[{'copies': i}, {'copies': i*2}, {'copies': i*3}])
self.assertEqual(len(colony_loader.colonies), i + (i*2) + (i*3))
def test_get_colony_treatment_regimen_returns_a_dictionary(self) -> None:
"""
Tests whether the "get_colony_treatment_regimen" method returns a dictionary
with integers as keys and Treatment instances as values.
"""
treatment_data = {
0: {}
}
return_value = self.colony_loader.get_colony_treatment_regimen(treatment_data=treatment_data)
self.assertIsInstance(return_value, dict)
with self.assertSequenceNotEmpty(return_value):
for key, value in return_value.items():
self.assertIsInstance(key, int)
self.assertIsInstance(value, Treatment)
def test_get_colony_treatment_regimen_instantiates_one_treatment_per_pair(self) -> None:
"""
Tests whether the "get_colony_treatment_regimen" method creates one treatment
for each key-value pair in the treatment_data dictionary.
"""
treatment_data = {}
for i in range(5):
treatment_data[i] = {}
return_value = self.colony_loader.get_colony_treatment_regimen(treatment_data=treatment_data)
self.assertEqual(len(return_value), i+1)
def test_get_colony_treatment_regimen_returns_empty_dict_when_treatment_data_is_empty(self) -> None:
"""
Tests whether the "get_colony_treatment_regimen" method returns an empty dictionary
when the provided treatment data is empty.
"""
return_value = self.colony_loader.get_colony_treatment_regimen(treatment_data={})
self.assertEqual(return_value, {})
def test_create_colony_returns_a_colony(self) -> None:
"""Tests whether the "create_colony" method returns a Colony instance."""
with mock.patch('clovars.IO.ColonyLoader.create_cell'):
return_value = self.colony_loader.create_colony(
colony_index=0,
repeat_label='',
cell_data={},
initial_size=1,
treatment_regimen={},
)
self.assertIsInstance(return_value, Colony)
def test_create_colony_initial_size_determines_colony_size(self) -> None:
"""Tests whether the "create_colony" method returns a Colony with "initial_size" number of Cells."""
with mock.patch('clovars.IO.ColonyLoader.create_cell'):
for i in range(5):
created_colony = self.colony_loader.create_colony(
colony_index=0,
repeat_label='',
cell_data={},
initial_size=i,
treatment_regimen={},
)
self.assertEqual(len(created_colony), i)
def test_create_cell_returns_a_cell(self) -> None:
"""Tests whether the "create_cell" method returns a Cell instance."""
return_value = self.colony_loader.create_cell(cell_data={}, colony_index=0, repeat_label='', cell_index=0)
self.assertIsInstance(return_value, Cell)
def test_create_cell_uses_default_values(self) -> None:
"""Tests whether the "create_cell" properly uses default values if they're not in the "cell_data" dictionary."""
returned_cell = self.colony_loader.create_cell(cell_data={}, colony_index=0, repeat_label='', cell_index=0)
for attr_name in ['max_speed', 'radius']:
with self.subTest(attr_name=attr_name):
self.assertEqual(getattr(returned_cell, attr_name), getattr(ColonyLoader, f'default_cell_{attr_name}'))
def test_create_cell_uses_values_from_cell_data(self) -> None:
"""Tests whether the "create_cell" properly uses the values from the "cell_data" dictionary, when provided."""
for cell_data in [
{'max_speed': 0.5},
{'radius': 1.5},
{'max_speed': 2.5, 'radius': 3.5},
{'max_speed': 2.5, 'fitness_memory': 0.2},
{'max_speed': 2.5, 'radius': 4.7, 'fitness_memory': 0.49},
]:
returned_cell = self.colony_loader.create_cell(
cell_data=cell_data,
colony_index=0,
repeat_label='',
cell_index=0,
)
for attr_name, attr_value in cell_data.items():
with self.subTest(cell_data=cell_data, attr_name=attr_name, attr_value=attr_value):
self.assertEqual(getattr(returned_cell, attr_name), attr_value)
def test_create_cell_uses_signal_dict_for_calling_get_cell_signal(self) -> None:
"""Tests whether the "create_cell" properly uses the "signal" subdict for calling "get_cell_signal"."""
for cell_data in [
{'max_speed': 0.5, 'signal': {'thing': 1, 'another stuff': False}},
{'radius': 1.5, 'signal': {'thing': 0, 'another!!!': None}},
{'max_speed': 2.5, 'radius': 3.5, 'fitness_memory': 0.35, 'signal': {'thing': 1.05, '???': True}},
]:
with mock.patch('clovars.IO.colony_loader.get_cell_signal') as mock_get_cell_signal:
self.colony_loader.create_cell(cell_data=cell_data, colony_index=0, repeat_label='', cell_index=0)
mock_get_cell_signal.assert_called_once_with(**cell_data['signal'])
def test_create_cell_combines_arguments_into_cell_name(self) -> None:
"""Tests whether the "create_cell" properly uses its arguments to define the Cell name."""
cell = self.colony_loader.create_cell(cell_data={}, colony_index=1, repeat_label='Foo', cell_index=2)
self.assertEqual(cell.name, '1Foo-2')
if __name__ == '__main__':
unittest.main()
```
#### File: test_simulation/test_fit/test_fit_experimental_data.py
```python
import unittest
from tests import SKIP_TESTS
class TestFitExperimentalData(unittest.TestCase):
"""Class representing unit-tests of the fit_experimental_data module."""
@unittest.skipIf(SKIP_TESTS is True, "SKIP TESTS is set to True")
def test_(self) -> None:
self.fail("Write the test!")
if __name__ == '__main__':
unittest.main()
```
#### File: test_simulation/test_run/test_simulation_runner.py
```python
import unittest
from itertools import product
from typing import List
from unittest import mock
from unittest.mock import MagicMock
from clovars.simulation import SimulationRunner
from clovars.utils import QuietPrinterMixin, SimulationError
class TestSimulationRunner(unittest.TestCase):
"""Class representing unit-tests for clovars.simulation_runner.simulation_runner.SimulationRunner class."""
default_current_frame = 1
def setUp(self) -> None:
"""Sets up the test case subject (a SimulationRunner instance)."""
self.run_kwargs = {
'simulation_writer': MagicMock(),
'well': MagicMock(),
'settings': {
'delta': 3600,
'stop_conditions': {
'stop_at_frame': 5,
'stop_at_single_colony_size': None,
'stop_at_all_colonies_size': None,
},
},
}
self.simulation_runner = SimulationRunner()
def test_simulation_runner_inherits_from_quiet_printer_mixin(self) -> None:
"""Tests whether a SimulationRunner inherits from QuietPrinterMixin."""
self.assertIsInstance(self.simulation_runner, QuietPrinterMixin)
def test_simulation_runner_has_max_iteration_class_attributes(self) -> None:
"""Tests whether a SimulationRunner has the "max_iteration" class attribute (an integer)."""
self.assertTrue(hasattr(SimulationRunner, 'max_iteration'))
self.assertIsInstance(SimulationRunner.max_iteration, int)
def test_run_method_calls_validate_settings_method(self) -> None:
"""Tests whether the "run" method calls the "validate_settings" method."""
with mock.patch.object(self.simulation_runner, 'validate_settings') as mock_validate_settings:
mock_validate_settings.return_value = (
self.run_kwargs['settings']['delta'],
self.run_kwargs['settings']['stop_conditions'],
) # needs to return a tuple
self.simulation_runner.run(**self.run_kwargs)
mock_validate_settings.assert_called()
def test_run_method_calls_get_simulation_hours_method(self) -> None:
"""Tests whether the "run" method calls the "get_simulation_hours" method."""
        with mock.patch.object(self.simulation_runner, 'get_simulation_hours') as mock_get_simulation_hours:
            self.simulation_runner.run(**self.run_kwargs)
            mock_get_simulation_hours.assert_called()
def test_run_method_calls_get_simulation_seconds_method(self) -> None:
"""Tests whether the "run" method calls the "get_simulation_seconds" method."""
with mock.patch.object(self.simulation_runner, 'get_simulation_seconds') as mock_get_simulation_seconds:
self.simulation_runner.run(**self.run_kwargs)
mock_get_simulation_seconds.assert_called()
def test_run_method_calls_modify_colony_treatment_regimens_method(self) -> None:
"""Tests whether the "run" method calls the Well's "modify_colony_treatment_regimens"."""
self.simulation_runner.run(**self.run_kwargs)
self.run_kwargs['well'].modify_colony_treatment_regimens.assert_called()
def test_run_method_calls_set_cell_fate_method(self) -> None:
"""Tests whether the "run" method calls the Well's "set_cell_fate"."""
self.simulation_runner.run(**self.run_kwargs)
self.run_kwargs['well'].set_cell_fate.assert_called()
def test_run_method_calls_write_simulation_status_method(self) -> None:
"""Tests whether the "run" method calls the "write_simulation_status" method."""
with mock.patch.object(self.simulation_runner, 'write_simulation_status') as mock_write_simulation_status:
self.simulation_runner.run(**self.run_kwargs)
mock_write_simulation_status.assert_called()
def test_run_method_calls_reached_stop_condition_method(self) -> None:
"""Tests whether the "run" method calls the "reached_stop_condition" method."""
with mock.patch.object(self.simulation_runner, 'reached_stop_condition') as mock_reached_stop_condition:
self.simulation_runner.run(**self.run_kwargs)
mock_reached_stop_condition.assert_called()
def test_run_method_calls_pass_time_method_when_stop_condition_is_not_met(self) -> None:
"""Tests whether the "run" method continues its loop while a stop condition is not met."""
with mock.patch.object(self.simulation_runner, 'reached_stop_condition', return_value=False):
self.simulation_runner.run(**self.run_kwargs)
self.run_kwargs['well'].pass_time.assert_called()
    def test_run_method_does_not_call_pass_time_method_when_stop_condition_is_met(self) -> None:
"""Tests whether the "run" method breaks its loop when a stop condition is met."""
with mock.patch.object(self.simulation_runner, 'reached_stop_condition', return_value=True):
self.simulation_runner.run(**self.run_kwargs)
self.run_kwargs['well'].pass_time.assert_not_called()
def test_validate_settings_method_returns_delta_and_stop_conditions(self) -> None:
"""Tests whether the "validate_settings" method returns the delta and stop conditions."""
return_value = self.simulation_runner.validate_settings(settings=self.run_kwargs['settings'])
self.assertEqual(return_value[0], self.run_kwargs['settings']['delta'])
self.assertEqual(return_value[1], self.run_kwargs['settings']['stop_conditions'])
def test_validate_settings_raises_exception_if_delta_is_not_in_settings(self) -> None:
"""Tests whether the "validate_settings" method raises a SimulationError if the "delta" key is missing."""
self.run_kwargs['settings'].pop('delta')
with self.assertRaises(SimulationError):
            self.simulation_runner.validate_settings(settings=self.run_kwargs['settings'])
def test_validate_settings_raises_exception_if_delta_is_not_integer(self) -> None:
"""
Tests whether the "validate_settings" method raises a SimulationError
if the "delta" key is not associated to an integer.
"""
self.run_kwargs['settings']['delta'] = "Not an integer!" # noqa
with self.assertRaises(SimulationError):
            self.simulation_runner.validate_settings(settings=self.run_kwargs['settings'])
def test_validate_settings_raises_exception_if_stop_conditions_is_not_in_settings(self) -> None:
"""
Tests whether the "validate_settings" method raises a SimulationError
if the "stop_conditions" key is missing.
"""
self.run_kwargs['settings'].pop('stop_conditions')
with self.assertRaises(SimulationError):
            self.simulation_runner.validate_settings(settings=self.run_kwargs['settings'])
def test_validate_settings_raises_exception_if_stop_conditions_is_not_dictionary(self) -> None:
"""
Tests whether the "validate_settings" method raises a SimulationError
if the "stop_conditions" key is not associated to a dictionary.
"""
self.run_kwargs['settings']['stop_conditions'] = "Not a dictionary!" # noqa
with self.assertRaises(SimulationError):
            self.simulation_runner.validate_settings(settings=self.run_kwargs['settings'])
def test_get_simulation_hours_method_returns_the_correct_simulation_hours(self) -> None:
"""Tests whether the "get_simulation_hours" method properly returns the Simulation time in hours."""
hour_test_cases = [ # tuples in the order: (delta, current_frame, expected_time)
(0, 0, 0),
(1800, 1, 0.5),
(3600, 2, 2.0),
(7200, 3, 6.0),
(180, 10, 0.5),
]
for delta, current_frame, expected_hours in hour_test_cases:
with self.subTest(delta=delta, current_frame=current_frame, expected_hours=expected_hours):
actual_hours = self.simulation_runner.get_simulation_hours(delta=delta, current_frame=current_frame)
self.assertEqual(expected_hours, actual_hours)
    def test_get_simulation_seconds_method_returns_the_correct_simulation_seconds(self) -> None:
"""Tests whether the "get_simulation_seconds" method properly returns the Simulation time in seconds."""
seconds_test_cases = [ # tuples in the order: (delta, current_frame, expected_time)
(0, 0, 0),
(1800, 1, 1800),
(3600, 2, 7200),
(7200, 3, 21600),
(180, 10, 1800),
]
for delta, current_frame, expected_seconds in seconds_test_cases:
with self.subTest(delta=delta, current_frame=current_frame, expected_seconds=expected_seconds):
actual_seconds = self.simulation_runner.get_simulation_seconds(delta=delta, current_frame=current_frame)
self.assertEqual(expected_seconds, actual_seconds)
def test_write_simulation_status_method_calls_write_cells(self) -> None:
"""Tests whether the "write_simulation_status" method calls the SimulationWriter's "write_cells" method."""
self.simulation_runner.write_simulation_status(
simulation_writer=self.run_kwargs['simulation_writer'],
well=self.run_kwargs['well'],
simulation_seconds=0,
current_frame=self.default_current_frame,
)
self.run_kwargs['simulation_writer'].write_cells.assert_called_once_with(
well=self.run_kwargs['well'],
simulation_seconds=0,
current_frame=self.default_current_frame,
)
def test_write_simulation_status_method_calls_write_colonies(self) -> None:
"""Tests whether the "write_simulation_status" method calls the SimulationWriter's "write_colonies" method."""
self.simulation_runner.write_simulation_status(
simulation_writer=self.run_kwargs['simulation_writer'],
well=self.run_kwargs['well'],
simulation_seconds=0,
current_frame=self.default_current_frame,
)
self.run_kwargs['simulation_writer'].write_colonies.assert_called_once_with(
well=self.run_kwargs['well'],
simulation_seconds=0,
current_frame=self.default_current_frame,
)
@mock.patch('clovars.simulation.SimulationRunner.reached_all_colonies_size_limit')
@mock.patch('clovars.simulation.SimulationRunner.reached_single_colony_size_limit')
@mock.patch('clovars.simulation.SimulationRunner.reached_frame_limit')
def test_reached_stop_condition_method_returns_true_if_at_least_one_stop_condition_is_met(
self,
*mocks: List[MagicMock],
) -> None:
"""Tests if the "reached_stop_condition" method returns True if at least one stop condition is met."""
bool_values_grid = list(product([True, False], repeat=3))
for bool_values in bool_values_grid:
with self.subTest(bool_values=bool_values, mocks=mocks):
for return_value, method_mock in zip(bool_values, mocks):
method_mock.return_value = return_value
answer = self.simulation_runner.reached_stop_condition(
well=self.run_kwargs['well'],
current_frame=0,
stop_conditions=self.run_kwargs['settings']['stop_conditions'],
)
if all(value is False for value in bool_values): # no stop condition was met
self.assertFalse(answer)
else:
self.assertTrue(answer)
    def test_reached_frame_limit_returns_boolean_value(self) -> None:
"""Tests whether the "reached_frame_limit" method returns True or False according to the input parameters."""
limit = 1
current_frame_test_cases = [
(0, False), # Current frame below limit
(1, True), # Current frame at limit
(2, True), # Current frame above limit
]
for test_case, expected_value in current_frame_test_cases:
with self.subTest(test_case=test_case, expected_value=expected_value):
actual_value = self.simulation_runner.reached_frame_limit(current_frame=test_case, frame_limit=limit)
self.assertEqual(expected_value, actual_value)
def test_reached_single_colony_size_limit_returns_boolean_value(self) -> None:
"""
Tests whether the "reached_single_colony_size_limit" method returns True or False
according to the input parameters.
"""
limit = 1
single_colony_size_test_cases = [
(0, False), # Largest colony size below limit
(1, True), # Largest colony size at limit
(2, True), # Largest colony size above limit
]
for test_case, expected_value in single_colony_size_test_cases:
with self.subTest(test_case=test_case, expected_value=expected_value):
actual_value = self.simulation_runner.reached_single_colony_size_limit(
largest_colony_size=test_case,
single_colony_size_limit=limit,
)
self.assertEqual(expected_value, actual_value)
    def test_reached_all_colonies_size_limit_returns_boolean_value(self) -> None:
"""
Tests whether the "reached_all_colonies_size_limit" method returns True or False
according to the input parameters.
"""
limit = 1
all_colonies_size_test_cases = [
([0, 0, 0], False), # All colony sizes below limit
([1, 1, 0], False), # At least one colony size at limit
([1, 1, 1], True), # All colony sizes at limit
([1, 2, 1], True), # All colony sizes at or above limit
([2, 2, 2], True), # All colony sizes above limit
]
for test_case, expected_value in all_colonies_size_test_cases:
with self.subTest(test_case=test_case, expected_value=expected_value):
actual_value = self.simulation_runner.reached_all_colonies_size_limit(
all_colony_sizes=test_case,
all_colonies_size_limit=limit,
)
self.assertEqual(expected_value, actual_value)
if __name__ == '__main__':
unittest.main()
```
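The three `reached_*` tests above pin down the stop-condition semantics fairly precisely. As a reading aid, here is a hypothetical standalone sketch of those predicates; it only mirrors what the tests assert and is not the actual SimulationRunner implementation.
```python
# Hypothetical sketch of the stop-condition predicates exercised by the tests above.
# The real SimulationRunner methods may differ; this only mirrors the asserted behavior.
from typing import List


def reached_frame_limit(current_frame: int, frame_limit: int) -> bool:
    """True once the current frame reaches or passes the frame limit."""
    return current_frame >= frame_limit


def reached_single_colony_size_limit(largest_colony_size: int, single_colony_size_limit: int) -> bool:
    """True once the largest colony reaches or passes the size limit."""
    return largest_colony_size >= single_colony_size_limit


def reached_all_colonies_size_limit(all_colony_sizes: List[int], all_colonies_size_limit: int) -> bool:
    """True only when every colony has reached or passed the size limit."""
    return all(size >= all_colonies_size_limit for size in all_colony_sizes)


if __name__ == '__main__':
    # Spot checks taken directly from the test cases above
    assert reached_frame_limit(current_frame=2, frame_limit=1) is True
    assert reached_single_colony_size_limit(largest_colony_size=0, single_colony_size_limit=1) is False
    assert reached_all_colonies_size_limit(all_colony_sizes=[1, 1, 0], all_colonies_size_limit=1) is False
```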
#### File: test_utils/test_mixins/test_path_creator.py
```python
import unittest
from pathlib import Path
from unittest import mock
from clovars.utils import PathCreatorMixin
from tests import SKIP_TESTS
class TestPathCreatorMixin(unittest.TestCase):
"""Class representing unit-tests for clovars.utils.mixins.path_creator.PathCreatorMixin class."""
test_folder = "TEST_FOLDER"
def setUp(self) -> None:
"""Sets up the test case subject (a PathCreatorMixin instance)."""
self.path_creator = PathCreatorMixin(folder=self.test_folder)
def tearDown(self) -> None:
"""Tears down the test case by removing the test folder (if it was created)."""
if (path := Path(self.test_folder)).exists():
path.rmdir()
def test_path_creator_has_path_attribute(self) -> None:
"""Tests whether the PathCreator has the "path" attribute (a Path instance)."""
self.assertTrue(hasattr(self.path_creator, 'path'))
self.assertIsInstance(self.path_creator.path, Path)
def test_create_path_calls_path_mkdir_method(self) -> None:
"""Tests whether the "create_path" method calls the Path's "mkdir" method."""
with mock.patch('clovars.utils.mixins.path_creator.Path.mkdir') as mock_mkdir:
self.path_creator.create_path(folder=self.test_folder)
mock_mkdir.assert_called_once_with(exist_ok=True)
def test_create_path_returns_a_path_object(self) -> None:
"""Tests whether the "create_path" method returns a Path object."""
return_value = self.path_creator.create_path(folder=self.test_folder)
self.assertIsInstance(return_value, Path)
@unittest.skipIf(SKIP_TESTS is True, "SKIP TESTS is set to True")
def test_delete_if_empty_method(self) -> None:
"""docstring."""
self.fail('Write the test!')
```
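Since the mixin itself is not shown here, the following is a hypothetical minimal PathCreatorMixin reconstructed from what the tests above assert (a `path` attribute and a `create_path` method that calls `Path.mkdir(exist_ok=True)` and returns a Path). The real clovars class may differ and also provides the untested `delete_if_empty` method.
```python
# Hypothetical minimal reconstruction based on the tests above; the real clovars
# PathCreatorMixin may differ and also implements the untested "delete_if_empty" method.
from pathlib import Path


class PathCreatorMixin:
    """Mixin that creates an output folder on initialization and remembers it as a Path."""
    def __init__(self, folder: str) -> None:
        self.path = self.create_path(folder=folder)

    def create_path(self, folder: str) -> Path:
        """Creates the folder if it does not exist yet and returns it as a Path instance."""
        path = Path(folder)
        path.mkdir(exist_ok=True)
        return path
```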
|
{
"source": "jfaccioni/dynafit",
"score": 3
}
|
#### File: dynafit/src/plotter.py
```python
from typing import List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PolyCollection
from seaborn import distplot
from src.utils import get_missing_coordinate, get_start_end_values
class Plotter:
"""Class that contains all information necessary to plot the DynaFit results"""
hypothesis_plot_lower_ylim = -0.5
hypothesis_plot_upper_ylim = 1.5
data_color = '#fb7d07' # pumpkin orange
boot_color = '#4b006e' # royal purple
h0_color = '#be0119' # scarlet
h1_color = '#1e488f' # cobalt
cumul_color = '#fb7d07' # pumpkin orange
endp_color = '#4b006e' # royal purple
v_axis_color = 'black'
scatter_edgecolor = 'black'
scatter_facecolor = boot_color
violin_facecolor = boot_color
violin_edgecolor = 'black'
violin_median_color = 'white'
violin_whisker_color = 'black'
hist_interval_color = 'black'
def __init__(self, xs: np.ndarray, ys: np.ndarray, scatter_xs: np.ndarray, scatter_ys: np.ndarray,
mean_xs: np.ndarray, mean_ys: np.ndarray, show_violin: bool, violin_xs: Optional[np.ndarray],
violin_ys: Optional[List[np.ndarray]], violin_q1: Optional[np.ndarray],
violin_medians: Optional[np.ndarray], violin_q3: Optional[np.ndarray], cumulative_ys: np.ndarray,
endpoint_ys: np.ndarray, show_ci: bool, upper_ys: Optional[np.ndarray], lower_ys: Optional[np.ndarray],
cumulative_upper_ys: Optional[np.ndarray], cumulative_lower_ys: Optional[np.ndarray],
endpoint_upper_ys: Optional[np.ndarray], endpoint_lower_ys: Optional[np.ndarray], hist_xs: np.ndarray,
hist_intervals: np.ndarray) -> None:
"""Init method of Plotter class."""
self.xs = xs
self.ys = ys
self.scatter_xs = scatter_xs
self.scatter_ys = scatter_ys
self.mean_xs = mean_xs
self.mean_ys = mean_ys
self.show_violin = show_violin
self.violin_xs = violin_xs
self.violin_ys = violin_ys
self.violin_q1 = violin_q1
self.violin_medians = violin_medians
self.violin_q3 = violin_q3
self.cumulative_ys = cumulative_ys
self.endpoint_ys = endpoint_ys
self.show_ci = show_ci
self.upper_ys = upper_ys
self.lower_ys = lower_ys
self.cumulative_upper_ys = cumulative_upper_ys
self.cumulative_lower_ys = cumulative_lower_ys
self.endpoint_upper_ys = endpoint_upper_ys
self.endpoint_lower_ys = endpoint_lower_ys
self.hist_xs = np.log2(hist_xs)
self.hist_intervals = np.log2(hist_intervals)
def plot_cvp_ax(self, ax: plt.Axes) -> None:
"""Calls all the functions related to plotting the CVP."""
self.plot_supporting_lines(ax=ax)
if self.show_ci:
self.plot_supporting_lines_ci(ax=ax)
self.plot_mean_line_ci(ax=ax)
self.plot_mean_line(ax=ax)
if self.show_violin:
violins = self.plot_bootstrap_violins(ax=ax)
self.format_violins(violins=violins)
self.plot_bootstrap_violin_statistics(ax=ax)
self.plot_bootstrap_mean(ax=ax)
self.plot_bootstrap_scatter(ax=ax)
self.format_cvp(ax=ax)
def plot_supporting_lines(self, ax: plt.Axes) -> None:
"""Plots the three supporting lines of the CVP."""
start_x, end_x = get_start_end_values(array=self.xs)
start_y, end_y = get_start_end_values(array=self.ys)
self.plot_h0(start=start_x, end=end_x, initial_height=start_y, ax=ax)
self.plot_h1(start=start_x, end=end_x, initial_height=start_y, ax=ax)
self.plot_vertical_axis(start=start_x, ax=ax)
def plot_h0(self, start: float, end: float, initial_height: float, ax: plt.Axes) -> None:
"""Plots H0 on the CVP (horizontal red line)."""
ax.plot([start, end], [initial_height, initial_height], color=self.h0_color, lw=3)
def plot_h1(self, start: float, end: float, initial_height: float, ax: plt.Axes) -> None:
"""Plots H1 on the CVP (diagonal blue line)."""
final_height = get_missing_coordinate(x1=start, y1=initial_height, x2=end)
ax.plot([start, end], [initial_height, final_height], color=self.h1_color, lw=3)
def plot_vertical_axis(self, start: float, ax: plt.Axes) -> None:
"""Plots a bold vertical Y axis on the left limit of the CVP plot."""
ax.axvline(start, color=self.v_axis_color, lw=3, zorder=0)
def plot_mean_line(self, ax: plt.Axes) -> None:
"""Plots the mean value for each bootstrapped population as a line plot."""
ax.plot(self.xs, self.ys, color=self.data_color, lw=3, label='experimental mean')
def plot_bootstrap_scatter(self, ax: plt.Axes) -> None:
"""Plots bootstrap populations for each bin as scatter plots."""
ax.scatter(self.scatter_xs, self.scatter_ys, marker='.', alpha=0.6, edgecolor=self.scatter_edgecolor,
facecolor=self.scatter_facecolor)
def plot_bootstrap_mean(self, ax: plt.Axes) -> None:
"""Plots the mean of each bootstrap population as a line plot."""
ax.plot(self.mean_xs, self.mean_ys, color=self.boot_color, lw=3, label='bootstrap mean')
def plot_bootstrap_violins(self, ax: plt.Axes) -> List[PolyCollection]:
"""Plots the bootstrap populations for each bin as violin plots."""
violins = ax.violinplot(positions=self.violin_xs, dataset=self.violin_ys, showextrema=False)
return violins['bodies']
    def format_violins(self, violins: List[PolyCollection]) -> None:
"""Adds formatting to the violins plotted."""
for violin in violins:
violin.set_alpha(0.3)
violin.set_facecolor(self.violin_facecolor)
violin.set_edgecolor(self.violin_edgecolor)
def plot_bootstrap_violin_statistics(self, ax: plt.Axes) -> None:
"""Plots the median and quantile statistics for the violins."""
ax.scatter(self.violin_xs, self.violin_medians, color=self.violin_median_color, s=10, zorder=100, alpha=0.8)
ax.vlines(self.violin_xs, self.violin_q1, self.violin_q3, color=self.violin_whisker_color, lw=5, alpha=0.8)
def plot_supporting_lines_ci(self, ax: plt.Axes) -> None:
"""Plots the CI for the supporting lines of the CVP."""
start_x, end_x = get_start_end_values(array=self.xs)
upper_start_y, upper_end_y = get_start_end_values(array=self.upper_ys)
lower_start_y, lower_end_y = get_start_end_values(array=self.lower_ys)
self.plot_h0_ci(start=start_x, end=end_x, upper=upper_start_y, lower=lower_start_y, ax=ax)
self.plot_h1_ci(start=start_x, end=end_x, upper=upper_start_y, lower=lower_start_y, ax=ax)
def plot_h0_ci(self, start: float, end: float, upper: float, lower: float, ax: plt.Axes) -> None:
"""Plots H0 confidence interval on the CVP (horizontal red line)"""
ax.fill_between([start, end], [upper, upper], [lower, lower], color=self.h0_color, alpha=0.1)
def plot_h1_ci(self, start: float, end: float, upper: float, lower: float, ax: plt.Axes) -> None:
"""Plots H1 confidence interval on the CVP (diagonal blue line)"""
upper_end = get_missing_coordinate(x1=start, y1=upper, x2=end)
lower_end = get_missing_coordinate(x1=start, y1=lower, x2=end)
ax.fill_between([start, end], [upper, upper_end], [lower, lower_end], color=self.h1_color, alpha=0.1)
def plot_mean_line_ci(self, ax: plt.Axes) -> None:
"""Plots the confidence interval around the mean line as a line plot."""
ax.fill_between(self.xs, self.upper_ys, self.lower_ys, color=self.boot_color, alpha=0.1)
@staticmethod
def format_cvp(ax: plt.Axes) -> None:
"""Adds formatting to the CVP."""
ax.set_xlabel('log2(Colony Size)')
ax.set_ylabel('log2(Growth Rate Variance)')
ax.legend()
def plot_hypothesis_ax(self, ax: plt.Axes,
xlims: Tuple[float, float]) -> List[Tuple[plt.Line2D, Optional[PolyCollection]]]:
"""Calls all the functions related to plotting the hypothesis distance plot."""
self.plot_hypothesis_lines(ax=ax)
cumulative_hypothesis_data = self.plot_cumulative_hypothesis_distance(ax=ax)
endpoint_hypothesis_data = self.plot_endpoint_hypothesis_distance(ax=ax)
self.set_hypothesis_plot_limits(ax=ax, xlims=xlims)
cumulative_hypothesis_ci, endpoint_hypothesis_ci = None, None
if self.show_ci:
cumulative_hypothesis_ci = self.plot_cumulative_hypothesis_ci(ax=ax)
endpoint_hypothesis_ci = self.plot_endpoint_hypothesis_ci(ax=ax)
self.format_hypothesis_plot(ax=ax)
self.invert_hypothesis_plot_y_axis(ax=ax)
return [(cumulative_hypothesis_data, cumulative_hypothesis_ci),
(endpoint_hypothesis_data, endpoint_hypothesis_ci)]
def plot_hypothesis_lines(self, ax: plt.Axes) -> None:
"""Plots the hypothesis in the hypothesis plot as horizontal lines."""
ax.axhline(0, color=self.h0_color, linestyle='dotted', alpha=0.8)
ax.axhline(1, color=self.h1_color, linestyle='dotted', alpha=0.8)
def plot_cumulative_hypothesis_distance(self, ax: plt.Axes) -> plt.Line2D:
"""Plots the cumulative hypothesis values as a line plot."""
lines = ax.plot(self.xs, self.cumulative_ys, color=self.cumul_color, label='Cumulative')
return lines[0] # single Line2D instance
def plot_endpoint_hypothesis_distance(self, ax: plt.Axes) -> plt.Line2D:
"""Plots the endpoint hypothesis values as a line plot."""
lines = ax.plot(self.xs, self.endpoint_ys, color=self.endp_color, label='Endpoint')
return lines[0] # single Line2D instance
def set_hypothesis_plot_limits(self, ax: plt.Axes, xlims: Tuple[float, float]) -> None:
"""Calculates appropriate limits for the XY axes in the hypothesis plot."""
ax.set_xlim(*xlims)
current_limits = ax.get_ylim()
if current_limits[0] >= self.hypothesis_plot_lower_ylim:
ax.set_ylim(bottom=self.hypothesis_plot_lower_ylim)
if current_limits[1] <= self.hypothesis_plot_upper_ylim:
ax.set_ylim(top=self.hypothesis_plot_upper_ylim)
def plot_cumulative_hypothesis_ci(self, ax: plt.Axes) -> PolyCollection:
"""Plots the CI around the cumulative hypothesis values as a line plot."""
filled_area = ax.fill_between(self.xs, self.cumulative_upper_ys, self.cumulative_lower_ys, alpha=0.2,
color=self.cumul_color)
return filled_area
def plot_endpoint_hypothesis_ci(self, ax: plt.Axes) -> PolyCollection:
"""Plots the CI around the endpoint hypothesis values as a line plot."""
filled_area = ax.fill_between(self.xs, self.endpoint_upper_ys, self.endpoint_lower_ys, alpha=0.2,
color=self.endp_color)
return filled_area
@staticmethod
def format_hypothesis_plot(ax: plt.Axes) -> None:
"""Adds formatting to the hypothesis plot."""
ax.set_title('Hypothesis plot')
ax.set_xlabel('log2(Colony Size)')
ax.set_ylabel('Hypothesis')
ax.set_yticks([0, 1])
ax.set_yticklabels(['H0', 'H1'])
ax.legend(bbox_to_anchor=(0.8, 1.4, 0.2, 0.1))
@staticmethod
def invert_hypothesis_plot_y_axis(ax: plt.Axes) -> None:
"""Inverts Y axis of hypothesis plot, so that it matches the structure of the CVP
(easier than redoing plot calculations)."""
ax.invert_yaxis()
def plot_histogram_ax(self, ax: plt.Axes) -> None:
"""Calls all the functions related to plotting the histogram."""
self.plot_distributions(ax=ax)
self.plot_group_divisions(ax=ax)
self.format_histogram(ax=ax)
def plot_distributions(self, ax: plt.Axes) -> None:
"""Plots the histogram."""
distplot(self.hist_xs, bins=self.hist_intervals, ax=ax, color=self.data_color)
def plot_group_divisions(self, ax: plt.Axes) -> None:
"""Plots the group divisions in the histogram as vertical lines."""
ax.vlines(self.hist_intervals, *ax.get_ylim(), color=self.hist_interval_color, linestyle='dotted', alpha=0.8)
@staticmethod
def format_histogram(ax: plt.Axes) -> None:
"""Adds formatting to the histogram."""
ax.set_title('Histogram of colony groups')
ax.set_xlabel('log2(Colony Size)')
ax.set_ylabel('% of colonies')
```
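For orientation, below is a hypothetical usage sketch of the Plotter class. The numeric arrays and figure layout are invented for illustration; only the constructor keywords and the `plot_cvp_ax` / `plot_hypothesis_ax` / `plot_histogram_ax` entry points come from the code above. Violin and CI plotting are disabled so the optional arrays can stay None.
```python
# Hypothetical usage sketch; all numeric data below is invented for illustration only.
import matplotlib.pyplot as plt
import numpy as np

from src.plotter import Plotter

xs = np.array([1.0, 2.0, 3.0])
plotter = Plotter(
    xs=xs, ys=np.array([-1.0, -1.5, -2.0]),
    scatter_xs=np.repeat(xs, 3),
    scatter_ys=np.array([-0.9, -1.0, -1.1, -1.4, -1.5, -1.6, -1.9, -2.0, -2.1]),
    mean_xs=xs, mean_ys=np.array([-1.1, -1.6, -2.1]),
    show_violin=False, violin_xs=None, violin_ys=None,
    violin_q1=None, violin_medians=None, violin_q3=None,
    cumulative_ys=np.array([0.2, 0.5, 0.8]), endpoint_ys=np.array([0.1, 0.4, 0.9]),
    show_ci=False, upper_ys=None, lower_ys=None,
    cumulative_upper_ys=None, cumulative_lower_ys=None,
    endpoint_upper_ys=None, endpoint_lower_ys=None,
    hist_xs=np.array([2.0, 4.0, 8.0, 16.0]),       # log2-transformed internally
    hist_intervals=np.array([1.0, 4.0, 32.0]),     # group breakpoints, also log2-transformed
)
fig, (cvp_ax, hypothesis_ax, histogram_ax) = plt.subplots(nrows=3, figsize=(6, 12))
plotter.plot_cvp_ax(ax=cvp_ax)
plotter.plot_hypothesis_ax(ax=hypothesis_ax, xlims=(xs.min(), xs.max()))
plotter.plot_histogram_ax(ax=histogram_ax)
plt.show()
```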
#### File: dynafit/src/validator.py
```python
import re
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from openpyxl import Workbook
from openpyxl.cell.cell import Cell
from openpyxl.worksheet.worksheet import Worksheet
from src.exceptions import (BadCellStringError, DifferentSizeError, EmptyCellError, MismatchedColumnsError,
MismatchedRowsError, NoExcelFileError)
class ExcelValidator:
"""Class that bundles many different validation methods for the input data (Excel spreadsheet) used for DynaFit.
    The validation methods aim to raise a specific, user-informative exception if they fail."""
def __init__(self, workbook: Optional[Workbook], sheetname: str, cs_start_cell: str, cs_end_cell: str,
gr_start_cell: str, gr_end_cell: str) -> None:
"""Init method of ExcelValidator class."""
if workbook is None:
raise NoExcelFileError('Please select an Excel spreadsheet as the input file')
self.wb = workbook
self.sheetname = sheetname
        # Add character '1' to column-only strings (typing 'A' alone implies starting from cell 'A1')
cs_start_cell = self.convert_column_to_first_cell(cs_start_cell.strip().upper())
gr_start_cell = self.convert_column_to_first_cell(gr_start_cell.strip().upper())
# Structures cell ranges in a dictionary
self.ranges = {
'CS': [cs_start_cell, cs_end_cell],
'GR': [gr_start_cell, gr_end_cell],
}
@property
def ws(self) -> Worksheet:
"""Returns the worksheet associated to the Excel workbook and sheetname defined in the __init__ method."""
return self.wb[self.sheetname]
def convert_column_to_first_cell(self, cell_str: str) -> str:
"""Converts and returns an Excel column accessor such as "A" to a first row cell accessor ("A1").
Returns the cell_string itself if it is not a column accessor."""
if cell_str != '':
if len(self.extract_digits(cell_str)) == 0:
return cell_str + '1'
return cell_str
def validation_routine(self) -> None:
"""Method responsible for calling downstream validation methods. Validation steps includes:
- Checking if the start cell is empty
- Checking if the start and end cells are valid Excel cells
- Checking if the start and end cells belong to the same column
- Checking if the start cell row comes before the end cell row
- Checking if the values between start and end cells can be converted to numbers (float),
ignoring None values and empty strings
- Checking if the ranges of values for CS and GR have the same size,
after removal of None values and empty strings
If no Exceptions were raised during validation, this method returns a pandas DataFrame in the format:
CS: <colony size values>,
GR: <growth rate values>
As an implementation detail, the GR column may actually be the final colony size column, if the user
chose to let the program calculate the growth rate instead. This will be later overwritten by DynaFit."""
for name, (start, end) in self.ranges.items():
if end: # User wants range from start cell to end cell
self.validate_cell_range(start=start, end=end)
else: # User wants range from start cell to end of the start cell's column
self.validate_cell_string(cell_str=start)
def validate_cell_string(self, cell_str: str) -> None:
"""Validates a cell string in an Excel spreadsheet. Raises an appropriate error if the validation fails."""
if cell_str == '': # Guaranteed only to happen on start cells
            raise EmptyCellError('Start cell cannot be empty')
if not self.is_valid_excel(cell_str=cell_str):
raise BadCellStringError(f'The string "{cell_str}" is not a valid Excel cell accessor')
@staticmethod
def is_valid_excel(cell_str: str) -> bool:
"""Returns whether the string cell_str is a valid Excel cell accessor. This implies that it is an
alphanumerical string, with all numbers appearing after all letters."""
# cell_str must be exclusively composed of letters and numbers
if not cell_str.isalnum():
return False
# All letters in cell_str come before all numbers. Source:
# https://stackoverflow.com/questions/60758670/
        return bool(re.match(r"[A-Z]+[1-9]\d*$", cell_str))
def validate_cell_range(self, start: str, end: str) -> None:
"""Validates range of cells (from start cell to end cell) in an Excel spreadsheet. Raises an appropriate
error if the validation fails."""
for cell_str in (start, end):
self.validate_cell_string(cell_str=cell_str)
if not self.validate_cell_ranges_share_same_column(start=start, end=end):
raise MismatchedColumnsError(f'Cells {start} and {end} do not share same column')
if not self.validate_end_cell_comes_after_start_cell(start=start, end=end):
raise MismatchedRowsError(f'Start cell {start} comes after end cell {end}')
def validate_cell_ranges_share_same_column(self, start: str, end: str) -> bool:
"""Returns whether the start and end cells share the same column letter."""
start_letters = self.extract_letters(start)
end_letters = self.extract_letters(end)
return start_letters == end_letters
def validate_end_cell_comes_after_start_cell(self, start: str, end: str) -> bool:
"""Returns whether the row number of the end cell comes after the row number of the start cell."""
start_numbers = self.extract_digits(start)
end_numbers = self.extract_digits(end)
return int(start_numbers) < int(end_numbers)
def get_data(self) -> pd.DataFrame:
"""Returns the data from the Excel sheet loaded as a pandas DataFrame."""
self.validation_routine()
data = {}
for name, (start, end) in self.ranges.items():
if not end: # Figure out where the column ends
end = self.get_end_cell(start=start)
cell_range = f'{start}:{end}'
cell_rows = self.ws[cell_range]
values = self.get_cell_values(cell_rows)
data[name] = values
try:
df = pd.DataFrame(data)
except ValueError:
raise DifferentSizeError(('Columns have different number of numeric elements (after removing rows '
'containing text or empty cells). Please check the selected data ranges.'))
return df.dropna()
@staticmethod
def get_cell_values(rows: Tuple[Tuple[Cell]]) -> np.ndarray:
"""Returns the values of cells in a column (tuple of tuples)."""
return pd.to_numeric([row[0].value for row in rows], errors='coerce')
def get_end_cell(self, start: str) -> str:
"""Given a valid cell string, returns the cell string at the end of the column (same column + max row)
of the Excel spreadsheet associated with the ExcelValidator instance."""
letter = self.extract_letters(start)
number = self.ws.max_row
return f'{letter}{number}'
@staticmethod
def extract_letters(s: str) -> str:
"""Returns the letter portion of an alphanumerical string."""
return ''.join(char for char in s if char.isalpha())
@staticmethod
def extract_digits(s: str) -> str:
"""Returns the digit portion of an alphanumerical string."""
return ''.join(char for char in s if char.isdigit())
```
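A hypothetical usage sketch of ExcelValidator follows; the file name, sheet name and cell addresses are invented, while the constructor keywords and `get_data` come from the class above.
```python
# Hypothetical usage sketch; the file name, sheet name and cell ranges are made up.
from openpyxl import load_workbook

from src.validator import ExcelValidator

workbook = load_workbook('colony_data.xlsx', data_only=True)
validator = ExcelValidator(
    workbook=workbook,
    sheetname='Sheet1',
    cs_start_cell='A2',   # colony size column (typing just 'A' would mean 'A1')
    cs_end_cell='',       # empty end cell -> read until the end of the start cell's column
    gr_start_cell='B2',   # growth rate column
    gr_end_cell='',
)
df = validator.get_data()  # DataFrame with 'CS' and 'GR' columns, non-numeric rows dropped
print(df.head())
```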
#### File: dynafit/src/worker.py
```python
import traceback
from typing import Callable
from PySide2.QtCore import QObject, QRunnable, Signal, Slot
class Worker(QRunnable):
"""Worker thread for DynaFit analysis. Avoids unresponsive GUI."""
def __init__(self, func: Callable, *args, **kwargs) -> None:
super().__init__()
self.func = func
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals() # noqa
def add_callbacks(self) -> None:
"""Adds keyword arguments related signaling between main thread and worker thread."""
self.kwargs['progress_callback'] = self.signals.progress
self.kwargs['warning_callback'] = self.signals.warning
@Slot() # noqa
def run(self) -> None:
"""Runs the Worker thread."""
try:
return_value = self.func(*self.args, **self.kwargs)
except Exception as error:
trace = traceback.format_exc()
self.signals.error.emit((error, trace)) # noqa
else:
self.signals.success.emit(return_value) # noqa
finally:
self.signals.finished.emit() # noqa
class WorkerSignals(QObject):
"""Defines the signals available from a running worker thread. Supported signals are:
Progress: Worker has finished a percentage of its job. Emits an int representing that percentage (0-100).
SS Warning: Worker has encountered low samples in one or more groups. Emits a tuple containing a QEvent and
    a dictionary containing the low sample groups. Meant to wait for user response in the GUI through a QMutex
and QWaitCondition before moving on with its execution.
Finished: Worker has done executing (either naturally or by an Exception). Nothing is emitted.
Success: Worker finished executing without errors. Emits a tuple of a Plotter object and a pandas DataFrame.
Error: an Exception was raised. Emits a tuple containing an Exception object and the traceback as a string."""
progress = Signal(int)
warning = Signal(object)
finished = Signal()
success = Signal(object)
error = Signal(object)
```
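The Worker/WorkerSignals pair above is designed to be driven by a Qt thread pool. Below is a hypothetical usage sketch; `run_analysis` and the connected slots are placeholders, not part of DynaFit, while `Worker`, `add_callbacks` and the signal names come from the module above.
```python
# Hypothetical usage sketch; "run_analysis" and the connected slots are placeholders.
import sys

from PySide2.QtCore import QCoreApplication, QThreadPool

from src.worker import Worker


def run_analysis(*args, progress_callback=None, warning_callback=None, **kwargs) -> str:
    """Placeholder long-running function that reports progress through the injected signal."""
    if progress_callback is not None:
        progress_callback.emit(100)
    return 'done'


if __name__ == '__main__':
    app = QCoreApplication(sys.argv)
    worker = Worker(run_analysis)
    worker.add_callbacks()  # injects progress_callback / warning_callback into the kwargs
    worker.signals.progress.connect(lambda pct: print(f'{pct}% done'))
    worker.signals.success.connect(lambda result: print(f'finished with: {result}'))
    worker.signals.error.connect(lambda err_info: print(f'failed: {err_info[0]}'))
    worker.signals.finished.connect(app.quit)  # stop the event loop once the worker is done
    QThreadPool.globalInstance().start(worker)  # QRunnable instances are executed by the pool
    sys.exit(app.exec_())
```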
#### File: dynafit/test/debug_interface.py
```python
import sys
from PySide2.QtWidgets import QApplication
from src.interface import DynaFitGUI
def run_debug_interface() -> None:
"""Run DynaFit interface on debug mode."""
global gui
gui.load_data(query='./test/test_cases/interface_test_case.xlsx')
gui.CS_start_textbox.setText('A2')
gui.GR_start_textbox.setText('B2')
gui.cs_gr_button.setChecked(True)
gui.cs_gr_button_clicked()
if __name__ == '__main__':
app = QApplication()
gui = DynaFitGUI()
run_debug_interface()
gui.show()
sys.exit(app.exec_())
```
#### File: dynafit/test/test_plotter.py
```python
import unittest
from typing import Sequence
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PolyCollection
from src.plotter import Plotter
from src.utils import array_in_sequence
class TestPlotterModule(unittest.TestCase):
"""Tests the plotter.py module."""
plotter_kwargs = {
'xs': np.array([1, 2, 3]),
'ys': np.array([4, 5, 6]),
'scatter_xs': np.array([7, 8, 9]),
'scatter_ys': np.array([10, 11, 12]),
'show_violin': True,
'violin_xs': np.array([13, 14, 15]),
'violin_ys': [np.array([16, 17, 18]), np.array([19, 20, 21]), np.array([22, 23, 24])],
'violin_q1': np.array([25, 26, 27]),
'violin_medians': np.array([28, 29, 30]),
'violin_q3': np.array([31, 32, 33]),
'cumulative_ys': np.array([34, 35, 36]),
'endpoint_ys': np.array([37, 38, 39]),
'show_ci': True,
'upper_ys': np.array([40, 41, 42]),
'lower_ys': np.array([43, 44, 45]),
'cumulative_upper_ys': np.array([46, 47, 48]),
'cumulative_lower_ys': np.array([49, 50, 51]),
'endpoint_upper_ys': np.array([52, 53, 54]),
'endpoint_lower_ys': np.array([55, 56, 57]),
'hist_xs': np.array([58, 59, 60]),
'hist_intervals': np.array([61, 62, 63]),
}
def setUp(self) -> None:
"""Sets up each unit test by refreshing the Plotter instance, the MagicMock instance representing an Axes
instance, the Figure instance and the Axes instance."""
self.plotter = Plotter(**self.plotter_kwargs)
self.mock_ax = MagicMock()
self.fig, self.ax = plt.subplots()
def tearDown(self) -> None:
"""Tears down each unit test by deleting the Figure and Axes instances."""
self.ax.clear()
plt.close(self.fig)
del self.ax
del self.fig
def assertArrayIn(self, array: np.ndarray, sequence: Sequence) -> None:
"""Asserts whether a numpy array is inside a regular Python sequence."""
self.assertTrue(array_in_sequence(array, sequence))
def disable_violins(self) -> None:
"""Changes Plotter instance attributes to reflect a DynaFit analysis without violin plots enabled."""
self.plotter.show_violin = False
self.plotter.violin_ys = None
        self.plotter.violin_xs = None
def disable_ci(self) -> None:
"""Changes Plotter instance attributes to reflect a DynaFit analysis without confidence interval enabled."""
self.plotter.show_ci = False
self.plotter.upper_ys = None
self.plotter.lower_ys = None
self.plotter.cumulative_upper_ys = None
self.plotter.cumulative_lower_ys = None
self.plotter.endpoint_upper_ys = None
self.plotter.endpoint_lower_ys = None
@patch('test_plotter.Plotter.plot_supporting_lines')
@patch('test_plotter.Plotter.plot_supporting_lines_ci')
@patch('test_plotter.Plotter.plot_mean_line_ci')
@patch('test_plotter.Plotter.plot_mean_line')
@patch('test_plotter.Plotter.plot_bootstrap_violin_statistics')
@patch('test_plotter.Plotter.plot_bootstrap_scatter')
@patch('test_plotter.Plotter.format_cvp')
@patch('test_plotter.Plotter.plot_bootstrap_violins')
@patch('test_plotter.Plotter.format_violins')
def test_plot_cvp_ax_calls_all_cvp_related_plot_functions(self, mock_format_violins, mock_plot_bootstrap_violins,
*cvp_functions) -> None:
self.plotter.plot_cvp_ax(ax=self.mock_ax)
mock_format_violins.assert_called_with(violins=mock_plot_bootstrap_violins.return_value)
for cvp_function in cvp_functions:
cvp_function.assert_called_with(ax=self.mock_ax)
@patch('test_plotter.Plotter.plot_bootstrap_violins')
@patch('test_plotter.Plotter.plot_supporting_lines_ci')
@patch('test_plotter.Plotter.plot_mean_line_ci')
def test_plot_cvp_ax_plots_everything_if_boolean_flags_are_set_to_true(self, mock_plot_mean_line_ci,
mock_plot_supporting_lines_ci,
mock_plot_bootstrap_violins) -> None:
self.plotter.plot_cvp_ax(ax=self.mock_ax)
for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci, mock_plot_bootstrap_violins):
with self.subTest(mock_plot_function=mock_plot_function):
mock_plot_function.assert_called_with(ax=self.mock_ax)
@patch('test_plotter.Plotter.plot_bootstrap_violins')
@patch('test_plotter.Plotter.plot_supporting_lines_ci')
@patch('test_plotter.Plotter.plot_mean_line_ci')
def test_plot_cvp_ax_does_not_plot_violins_if_flag_is_set_to_false(self, mock_plot_mean_line_ci,
mock_plot_supporting_lines_ci,
mock_plot_bootstrap_violins) -> None:
self.disable_violins()
self.plotter.plot_cvp_ax(ax=self.mock_ax)
for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci):
with self.subTest(mock_plot_function=mock_plot_function):
mock_plot_function.assert_called_with(ax=self.mock_ax)
mock_plot_bootstrap_violins.assert_not_called()
@patch('test_plotter.Plotter.plot_bootstrap_violins')
@patch('test_plotter.Plotter.plot_supporting_lines_ci')
@patch('test_plotter.Plotter.plot_mean_line_ci')
def test_plot_cvp_ax_does_not_add_ci_if_flag_is_set_to_false(self, mock_plot_mean_line_ci,
mock_plot_supporting_lines_ci,
mock_plot_bootstrap_violins) -> None:
self.disable_ci()
self.plotter.plot_cvp_ax(ax=self.mock_ax)
mock_plot_bootstrap_violins.assert_called_with(ax=self.mock_ax)
for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci):
with self.subTest(mock_plot_function=mock_plot_function):
mock_plot_function.assert_not_called()
def test_plot_supporting_lines_plots_h0_and_h1_as_line_plots(self) -> None:
self.plotter.plot_supporting_lines(ax=self.mock_ax)
self.assertEqual(self.mock_ax.plot.call_count, 2)
def test_plot_h0_plots_a_red_horizontal_line(self) -> None:
with patch('test_plotter.Plotter.plot_h1'): # do not call ax.plot inside Plotter.plot_h1 for this test
self.plotter.plot_supporting_lines(ax=self.mock_ax)
actual_args, actual_kwargs = self.mock_ax.plot.call_args
self.assertEqual(*actual_args[-1]) # horizontal line: start and end Y coordinates are equal for h0
self.assertIn(self.plotter.h0_color, actual_kwargs.values())
def test_plot_h1_plots_a_blue_diagonal_line(self) -> None:
with patch('test_plotter.Plotter.plot_h0'): # do not call ax.plot inside Plotter.plot_h0 for this test
self.plotter.plot_supporting_lines(ax=self.mock_ax)
actual_args, actual_kwargs = self.mock_ax.plot.call_args
self.assertGreater(*actual_args[-1]) # diagonal line: end Y coordinate is below start Y coordinate
self.assertIn(self.plotter.h1_color, actual_kwargs.values())
def test_plot_supporting_lines_plots_vertical_y_axis_as_a_vertical_line(self) -> None:
self.plotter.plot_supporting_lines(ax=self.mock_ax)
self.mock_ax.axvline.assert_called_once()
    def test_plot_mean_line_plots_a_data_colored_line_of_sample_xy_values(self) -> None:
self.plotter.plot_mean_line(ax=self.mock_ax)
self.mock_ax.plot.assert_called_once()
actual_args, actual_kwargs = self.mock_ax.plot.call_args
self.assertIn(self.plotter.data_color, actual_kwargs.values())
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.ys, actual_args)
def test_plot_bootstrap_scatter_plots_scatter_xs_and_ys(self) -> None:
self.plotter.plot_bootstrap_scatter(ax=self.mock_ax)
self.mock_ax.scatter.assert_called_once()
actual_args, _ = self.mock_ax.scatter.call_args
self.assertArrayIn(self.plotter.scatter_xs, actual_args)
self.assertArrayIn(self.plotter.scatter_ys, actual_args)
def test_plot_bootstrap_scatter_uses_scatter_edgecolor_and_facecolor_attributes(self) -> None:
self.plotter.plot_bootstrap_scatter(ax=self.mock_ax)
self.mock_ax.scatter.assert_called_once()
_, actual_kwargs = self.mock_ax.scatter.call_args
self.assertIn(self.plotter.scatter_edgecolor, actual_kwargs.values())
self.assertIn(self.plotter.scatter_facecolor, actual_kwargs.values())
def test_plot_bootstrap_violins_plots_violins(self) -> None:
self.plotter.plot_bootstrap_violins(ax=self.mock_ax)
self.mock_ax.violinplot.assert_called_once()
actual_args, actual_kwargs = self.mock_ax.violinplot.call_args
self.assertArrayIn(self.plotter.violin_xs, actual_kwargs.values())
for expected_violin_array, actual_violin_array in zip(self.plotter.violin_ys, actual_kwargs.get('dataset')):
with self.subTest(expected_violin_array=expected_violin_array, actual_violin_array=actual_violin_array):
np.testing.assert_allclose(expected_violin_array, actual_violin_array)
def test_plot_bootstrap_violins_returns_violins_as_a_list_of_polycollection_objects(self) -> None:
return_value = self.plotter.plot_bootstrap_violins(ax=self.mock_ax)
for expected_violin in return_value:
self.assertIsInstance(expected_violin, PolyCollection)
def test_format_violins_sets_violin_attributes_with_proper_values(self) -> None:
mock_violin = MagicMock()
self.plotter.format_violins(violins=[mock_violin])
mock_violin.set_facecolor.assert_called_with(self.plotter.violin_facecolor)
mock_violin.set_edgecolor.assert_called_with(self.plotter.violin_edgecolor)
def test_plot_supporting_lines_ci_plots_h0_ci_and_h1_ci_as_filled_areas(self) -> None:
self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
self.assertEqual(self.mock_ax.fill_between.call_count, 2)
def test_plot_h0_ci_fills_a_red_horizontal_area(self) -> None:
with patch('test_plotter.Plotter.plot_h1_ci'): # avoids ax.fill_between call inside Plotter.plot_h1_ci
self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
actual_args, actual_kwargs = self.mock_ax.fill_between.call_args
self.assertEqual(*actual_args[-2]) # horizontal upper CI: start and end Y coordinates are equal for h0
self.assertEqual(*actual_args[-1]) # horizontal lower CI: start and end Y coordinates are equal for h0
self.assertIn(self.plotter.h0_color, actual_kwargs.values())
    def test_plot_h1_ci_fills_a_blue_diagonal_area(self) -> None:
with patch('test_plotter.Plotter.plot_h0_ci'): # avoids ax.fill_between call inside Plotter.plot_h0_ci
self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
actual_args, actual_kwargs = self.mock_ax.fill_between.call_args
self.assertGreater(*actual_args[-2]) # diagonal upper CI: end Y coordinate is below start Y coordinate
self.assertGreater(*actual_args[-1]) # diagonal lower CI: end Y coordinate is below start Y coordinate
self.assertIn(self.plotter.h1_color, actual_kwargs.values())
def test_plot_mean_line_ci_fills_an_area_of_xs_and_ys_values(self) -> None:
self.plotter.plot_mean_line_ci(ax=self.mock_ax)
self.mock_ax.fill_between.assert_called_once()
actual_args, _ = self.mock_ax.fill_between.call_args
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.upper_ys, actual_args)
self.assertArrayIn(self.plotter.lower_ys, actual_args)
def test_plot_mean_line_ci_fills_an_area_with_correct_color(self) -> None:
self.plotter.plot_mean_line_ci(ax=self.mock_ax)
_, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertIn(self.plotter.boot_color, actual_kwargs.values())
def test_format_cvp_adds_xy_labels(self) -> None:
self.assertFalse(self.ax.get_xlabel())
self.assertFalse(self.ax.get_ylabel())
self.plotter.format_cvp(ax=self.ax)
self.assertTrue(self.ax.get_xlabel())
self.assertTrue(self.ax.get_ylabel())
@patch('test_plotter.Plotter.plot_hypothesis_lines')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
@patch('test_plotter.Plotter.format_hypothesis_plot')
@patch('test_plotter.Plotter.invert_hypothesis_plot_y_axis')
@patch('test_plotter.Plotter.set_hypothesis_plot_limits')
def test_plot_hypothesis_ax_calls_all_hypothesis_related_plot_functions(self, mock_set_hypothesis_plot_limits,
*hypothesis_functions) -> None:
xlims = (0, 5)
self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=xlims)
mock_set_hypothesis_plot_limits.assert_called_with(ax=self.mock_ax, xlims=xlims)
for hypothesis_function in hypothesis_functions:
hypothesis_function.assert_called_with(ax=self.mock_ax)
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
def test_plot_hypothesis_ax_returns_values_from_appropriate_functions(self, mock_cumulative_distance,
mock_cumulative_ci, mock_endpoint_distance,
mock_endpoint_ci) -> None:
return_value = self.plotter.plot_hypothesis_ax(ax=self.ax, xlims=(0, 5))
(cumulative_line, cumulative_ci), (endpoint_line, endpoint_ci) = return_value
self.assertEqual(cumulative_line, mock_cumulative_distance.return_value)
self.assertEqual(cumulative_ci, mock_cumulative_ci.return_value)
self.assertEqual(endpoint_line, mock_endpoint_distance.return_value)
self.assertEqual(endpoint_ci, mock_endpoint_ci.return_value)
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
def test_plot_hypothesis_ax_returns_none_values_if_boolean_flags_are_set_to_false(self, mock_cumulative_distance,
mock_endpoint_distance) -> None:
self.disable_ci()
return_value = self.plotter.plot_hypothesis_ax(ax=self.ax, xlims=(0, 5))
(cumulative_line, cumulative_ci), (endpoint_line, endpoint_ci) = return_value
self.assertEqual(cumulative_line, mock_cumulative_distance.return_value)
self.assertIsNone(cumulative_ci)
self.assertEqual(endpoint_line, mock_endpoint_distance.return_value)
self.assertIsNone(endpoint_ci)
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
def test_plot_hypothesis_ax_plots_everything_if_boolean_flags_are_set_to_true(self, plot_cumulative_hypothesis_ci,
plot_endpoint_hypothesis_ci) -> None:
self.mock_ax.get_ylim.return_value = (0, 1) # Mocking standard Axes limits
self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=(0, 5))
for mock_plot_function in (plot_cumulative_hypothesis_ci, plot_endpoint_hypothesis_ci):
with self.subTest(mock_plot_function=mock_plot_function):
mock_plot_function.assert_called_with(ax=self.mock_ax)
@patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
@patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
def test_plot_hypothesis_ax_does_not_plot_ci_if_boolean_flags_are_set_to_false(self, plot_cumulative_hypothesis_ci,
plot_endpoint_hypothesis_ci) -> None:
self.disable_ci()
self.mock_ax.get_ylim.return_value = (0, 1) # Mocking standard Axes limits
self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=(0, 5))
for mock_plot_function in (plot_cumulative_hypothesis_ci, plot_endpoint_hypothesis_ci):
with self.subTest(mock_plot_function=mock_plot_function):
mock_plot_function.assert_not_called()
def test_plot_hypothesis_lines_plots_red_h0_at_y0(self) -> None:
self.plotter.plot_hypothesis_lines(ax=self.mock_ax)
(h0_args, h0_kwargs), _ = self.mock_ax.axhline.call_args_list
self.assertIn(0, h0_args)
self.assertIn(self.plotter.h0_color, h0_kwargs.values())
def test_plot_hypothesis_lines_plots_red_h1_at_y1(self) -> None:
self.plotter.plot_hypothesis_lines(ax=self.mock_ax)
_, (h1_args, h1_kwargs) = self.mock_ax.axhline.call_args_list
self.assertIn(1, h1_args)
self.assertIn(self.plotter.h1_color, h1_kwargs.values())
def test_plot_cumulative_hypothesis_distance_plots_line_of_cumulative_distance_values(self) -> None:
self.plotter.plot_cumulative_hypothesis_distance(ax=self.mock_ax)
self.mock_ax.plot.assert_called_once()
actual_args, _ = self.mock_ax.plot.call_args
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.cumulative_ys, actual_args)
def test_plot_cumulative_hypothesis_distance_plots_line_of_correct_color(self) -> None:
self.plotter.plot_cumulative_hypothesis_distance(ax=self.mock_ax)
_, actual_kwargs = self.mock_ax.plot.call_args
self.assertIn(self.plotter.cumul_color, actual_kwargs.values())
def test_plot_cumulative_hypothesis_returns_a_line2d_instance(self) -> None:
expected_line2d = self.plotter.plot_cumulative_hypothesis_distance(ax=self.ax)
self.assertIsInstance(expected_line2d, plt.Line2D)
def test_plot_endpoint_hypothesis_distance_plots_line_of_endpoint_distance_values(self) -> None:
self.plotter.plot_endpoint_hypothesis_distance(ax=self.mock_ax)
self.mock_ax.plot.assert_called_once()
actual_args, _ = self.mock_ax.plot.call_args
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.endpoint_ys, actual_args)
def test_plot_endpoint_hypothesis_distance_plots_line_of_correct_color(self) -> None:
self.plotter.plot_endpoint_hypothesis_distance(ax=self.mock_ax)
_, actual_kwargs = self.mock_ax.plot.call_args
self.assertIn(self.plotter.endp_color, actual_kwargs.values())
def test_plot_endpoint_hypothesis_returns_a_line2d_instance(self) -> None:
expected_line2d = self.plotter.plot_endpoint_hypothesis_distance(ax=self.ax)
self.assertIsInstance(expected_line2d, plt.Line2D)
def test_set_hypothesis_plot_limits_sets_x_limits_to_argument_passed_in(self) -> None:
xlims = (-50, 50)
self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=xlims)
self.assertEqual(self.ax.get_xlim(), xlims)
def test_set_hypothesis_plot_limits_does_not_adjust_with_y_limits_if_they_are_large_enough(self) -> None:
ylims = (-50, 50)
self.ax.set_ylim(ylims)
self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=(0, 5))
self.assertEqual(self.ax.get_ylim(), ylims)
def test_set_hypothesis_plot_limits_adjusts_with_y_limits_if_they_are_not_large_enough(self) -> None:
        ylims = (-0.1, 0.1)
        self.ax.set_ylim(ylims)
        self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=(0, 5))
self.assertNotEqual(self.ax.get_ylim(), ylims)
self.assertEqual(self.ax.get_ylim(), (self.plotter.hypothesis_plot_lower_ylim,
self.plotter.hypothesis_plot_upper_ylim))
def test_plot_cumulative_hypothesis_ci_fills_an_area(self) -> None:
self.plotter.plot_cumulative_hypothesis_ci(ax=self.mock_ax)
self.mock_ax.fill_between.assert_called_once()
actual_args, _ = self.mock_ax.fill_between.call_args
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.cumulative_upper_ys, actual_args)
self.assertArrayIn(self.plotter.cumulative_lower_ys, actual_args)
def test_plot_cumulative_hypothesis_ci_uses_correct_color(self) -> None:
self.plotter.plot_cumulative_hypothesis_ci(ax=self.mock_ax)
_, actual_kwargs = self.mock_ax.fill_between.call_args
self.assertIn(self.plotter.cumul_color, actual_kwargs.values())
def test_plot_cumulative_hypothesis_returns_a_polycollection_instance(self) -> None:
expected_polycollection = self.plotter.plot_cumulative_hypothesis_ci(ax=self.ax)
self.assertIsInstance(expected_polycollection, PolyCollection)
def test_plot_endpoint_hypothesis_ci_fills_an_area(self) -> None:
self.plotter.plot_endpoint_hypothesis_ci(ax=self.mock_ax)
self.mock_ax.fill_between.assert_called_once()
actual_args, _ = self.mock_ax.fill_between.call_args
self.assertArrayIn(self.plotter.xs, actual_args)
self.assertArrayIn(self.plotter.endpoint_upper_ys, actual_args)
self.assertArrayIn(self.plotter.endpoint_lower_ys, actual_args)
def test_plot_endpoint_hypothesis_ci_uses_correct_color(self) -> None:
self.plotter.plot_endpoint_hypothesis_ci(ax=self.mock_ax)
_, actual_kwargs = self.mock_ax.fill_between.call_args
self.assertIn(self.plotter.endp_color, actual_kwargs.values())
def test_plot_endpoint_hypothesis_returns_a_polycollection_instance(self) -> None:
expected_polycollection = self.plotter.plot_endpoint_hypothesis_ci(ax=self.ax)
self.assertIsInstance(expected_polycollection, PolyCollection)
def test_format_hypothesis_plot_adds_title_labels_ticks_and_set_plot_legends(self) -> None:
self.assertFalse(self.ax.get_title())
self.assertFalse(self.ax.get_xlabel())
self.assertFalse(self.ax.get_ylabel())
self.ax.legend = MagicMock()
self.plotter.format_hypothesis_plot(ax=self.ax)
self.ax.legend.assert_called_once()
self.assertTrue(self.ax.get_title())
self.assertTrue(self.ax.get_xlabel())
self.assertTrue(self.ax.get_ylabel())
for expected_label, text_object in zip(['H0', 'H1'], self.ax.get_yticklabels()):
actual_label = text_object.get_text()
with self.subTest(expected_label=expected_label, actual_label=actual_label):
self.assertEqual(expected_label, actual_label)
def test_invert_hypothesis_plot_y_axis_calls_ax_invert_yaxis(self) -> None:
self.mock_ax.invert_yaxis.assert_not_called()
self.plotter.invert_hypothesis_plot_y_axis(ax=self.mock_ax)
self.mock_ax.invert_yaxis.assert_called()
@patch('test_plotter.Plotter.plot_distributions')
@patch('test_plotter.Plotter.plot_group_divisions')
@patch('test_plotter.Plotter.format_histogram')
def test_plot_histogram_ax_calls_all_histogram_related_plot_functions(self, *histogram_functions) -> None:
self.plotter.plot_histogram_ax(ax=self.mock_ax)
for histogram_function in histogram_functions:
histogram_function.assert_called_with(ax=self.mock_ax)
@patch('src.plotter.distplot')
def test_plot_distributions_calls_seaborn_distplot(self, mock_seaborn_distplot) -> None:
self.plotter.plot_distributions(ax=self.mock_ax)
actual_args, actual_kwargs = mock_seaborn_distplot.call_args
self.assertArrayIn(self.plotter.hist_xs, actual_args)
self.assertArrayIn(self.plotter.hist_intervals, actual_kwargs.values())
self.assertIn(self.mock_ax, actual_kwargs.values())
def test_plot_group_divisions_adds_vertical_lines_based_on_breakpoints(self) -> None:
self.plotter.plot_group_divisions(ax=self.mock_ax)
actual_args, _ = self.mock_ax.vlines.call_args
np.testing.assert_allclose(self.plotter.hist_intervals, actual_args[0])
def test_plot_group_divisions_adds_vertical_lines_of_correct_colors(self) -> None:
self.plotter.plot_group_divisions(ax=self.mock_ax)
for interval, (_, actual_kwargs) in zip(self.plotter.hist_intervals, self.mock_ax.vlines.call_args_list):
with self.subTest(interval=interval, actual_kwargs=actual_kwargs):
self.assertIn(self.plotter.hist_interval_color, actual_kwargs.values())
def test_format_histogram_modifies_title_and_xy_labels(self) -> None:
self.assertFalse(self.ax.get_title())
self.assertFalse(self.ax.get_xlabel())
self.assertFalse(self.ax.get_ylabel())
self.plotter.format_histogram(ax=self.ax)
self.assertTrue(self.ax.get_title())
self.assertTrue(self.ax.get_xlabel())
self.assertTrue(self.ax.get_ylabel())
if __name__ == '__main__':
unittest.main()
```
#### File: dynafit/test/test_utils.py
```python
import unittest
from src.utils import *
class TestUtilsModule(unittest.TestCase):
"""Tests the utils.py module."""
def test_get_start_end_values_two_or_more_elements_in_array(self) -> None:
arr = np.array([1, 2])
return_value = get_start_end_values(array=arr)
self.assertEqual(len(return_value), 2)
self.assertEqual(arr[0], return_value[0])
self.assertEqual(arr[-1], return_value[1])
def test_get_start_end_values_only_one_elements_in_array(self) -> None:
arr = np.array([5])
return_value = get_start_end_values(array=arr)
self.assertEqual(len(return_value), 2)
self.assertEqual(arr[0], return_value[0])
self.assertEqual(arr[0], return_value[1])
def test_get_start_end_values_raises_index_error_with_empty_array(self) -> None:
arr = np.array([])
with self.assertRaises(IndexError):
get_start_end_values(array=arr)
def test_get_missing_coord_y_equals_x_line(self) -> None:
# line -> y = x
p1 = (0, 0)
x1, y1 = p1
p2 = (1, 1)
x2, expected_y2 = p2
actual_y2 = get_missing_coordinate(x1=x1, y1=y1, x2=x2, angular_coefficient=1.0)
self.assertEqual(expected_y2, actual_y2)
def test_get_missing_coord_y_equals_minus_two_x_plus_three_line(self) -> None:
# line -> y = -2x + 3
p1 = (0, 3)
x1, y1 = p1
p2 = (5, -7)
x2, expected_y2 = p2
actual_y2 = get_missing_coordinate(x1=x1, y1=y1, x2=x2, angular_coefficient=-2.0)
self.assertEqual(expected_y2, actual_y2)
def test_array_in_sequence_for_array_in_sequence(self) -> None:
array_01 = np.array([1, 2, 3])
array_02 = np.array([4, 5, 6])
sequence = (array_01, 'a', True, array_01, array_02, 2)
self.assertTrue(array_in_sequence(array_01, sequence))
self.assertTrue(array_in_sequence(array_02, sequence))
def test_array_in_sequence_for_array_not_sequence(self) -> None:
array_01 = np.array([1, 2, 3])
array_02 = np.array([4, 5, 6])
sequence = ('a', array_02, True, array_02, 2)
self.assertFalse(array_in_sequence(array_01, sequence))
self.assertFalse(array_in_sequence(array_01, (True, False, 'Sun', 'Moon', 10)))
self.assertFalse(array_in_sequence(array_01, 'This string is also a Python sequence.'))
if __name__ == '__main__':
unittest.main()
```
#### File: dynafit/test/test_worker.py
```python
import unittest
from typing import Any
from unittest.mock import MagicMock
from PySide2.QtCore import Slot
from src.worker import Worker, WorkerSignals
class TestWorkerModule(unittest.TestCase):
"""Tests the worker.py module."""
arg_a = 1
arg_b = 2
arbitrary_args = (3, 4)
kwarg_a = 5
kwarg_b = 6
arbitrary_kwargs = {'c': 7, 'd': 8}
def setUp(self) -> None:
"""Sets up the each unit test by creating a Worker instance."""
self.worker = Worker(self.add, self.arg_a, self.arg_b, *self.arbitrary_args, kwarg_a=self.kwarg_a,
kwarg_b=self.kwarg_b, **self.arbitrary_kwargs)
self.value = None
@staticmethod
def add(a: float, b: float, *args, **kwargs) -> float: # noqa
"""Returns the sum a + b. Accepts any arbitrary number of additional arguments and keyword arguments."""
return a + b
@Slot() # noqa
def slot_with_value(self, value: Any) -> None:
"""Slot for setting any value passed in from a Signal to self.value."""
self.value = value
@Slot() # noqa
def slot_without_value(self) -> Any:
"""Slot for receiving any signal emitted from a Signal without a value associated to it."""
self.value = None
def mock_worker_signals(self) -> None:
"""Replaces all Signals connected to self.worker by MagicMock instances."""
self.worker.signals.finished = MagicMock()
self.worker.signals.success = MagicMock()
self.worker.signals.error = MagicMock()
def test_worker_instance_accepts_arbitrary_args(self) -> None:
for arg in self.arbitrary_args:
with self.subTest(arg=arg):
self.assertIn(arg, self.worker.args)
def test_worker_instance_bundles_arguments_in_attribute_args(self) -> None:
expected_args = (self.arg_a, self.arg_b, *self.arbitrary_args)
self.assertEqual(expected_args, self.worker.args)
def test_worker_instance_accepts_arbitrary_kwargs(self) -> None:
for k, v in self.arbitrary_kwargs.items():
with self.subTest(k=k, v=v):
self.assertIn(k, self.worker.kwargs.keys())
self.assertIn(v, self.worker.kwargs.values())
def test_worker_instance_bundles_keyword_arguments_in_attribute_kwargs(self) -> None:
expected_kwargs = dict(kwarg_a=self.kwarg_a, kwarg_b=self.kwarg_b, **self.arbitrary_kwargs)
self.assertEqual(expected_kwargs, self.worker.kwargs)
def test_worker_signal_attribute_is_an_instance_of_worker_signals_class(self) -> None:
self.assertIsInstance(self.worker.signals, WorkerSignals)
def test_add_callbacks_adds_callback_signals_to_kwargs(self) -> None:
self.worker.add_callbacks()
expected_kwargs = dict(kwarg_a=self.kwarg_a, kwarg_b=self.kwarg_b,
progress_callback=self.worker.signals.progress,
warning_callback=self.worker.signals.warning, **self.arbitrary_kwargs)
self.assertEqual(expected_kwargs, self.worker.kwargs)
def test_run_calls_func_with_args_and_kwargs(self) -> None:
mock_func = MagicMock()
self.worker.func = mock_func
self.worker.run()
mock_func.assert_called_with(*self.worker.args, **self.worker.kwargs) # noqa
def test_run_emits_finished_and_success_signals_when_no_error_happens(self) -> None:
self.mock_worker_signals()
self.worker.run()
self.worker.signals.finished.emit.assert_called()
self.worker.signals.success.emit.assert_called()
self.worker.signals.error.emit.assert_not_called()
def test_run_emits_finished_and_error_signals_when_some_error_happens(self) -> None:
self.mock_worker_signals()
mock_func = MagicMock(side_effect=Exception)
self.worker.func = mock_func
self.worker.run()
self.worker.signals.finished.emit.assert_called()
self.worker.signals.success.emit.assert_not_called()
self.worker.signals.error.emit.assert_called()
def test_run_passes_progress_and_warning_signals_to_func(self) -> None:
self.worker.add_callbacks()
mock_func = MagicMock()
self.worker.func = mock_func
self.worker.run()
for downstream_signals in (self.worker.signals.progress, self.worker.signals.warning):
func_args, func_kwargs = mock_func.call_args
self.assertIn(downstream_signals, func_kwargs.values())
def test_worker_signals_progress_emits_an_integer(self) -> None:
signals = WorkerSignals(parent=None)
signals.progress.connect(self.slot_with_value)
value = 1
signals.progress.emit(value)
self.assertEqual(self.value, value)
def test_worker_signals_warning_emits_a_python_object(self) -> None:
signals = WorkerSignals(parent=None)
signals.warning.connect(self.slot_with_value)
value = object
signals.warning.emit(value)
self.assertEqual(self.value, value)
def test_worker_signals_finished_emits_nothing(self) -> None:
signals = WorkerSignals(parent=None)
signals.finished.connect(self.slot_without_value)
signals.finished.emit()
self.assertEqual(self.value, None)
def test_worker_signals_success_emits_a_python_object(self) -> None:
signals = WorkerSignals(parent=None)
signals.success.connect(self.slot_with_value)
value = object
signals.success.emit(value)
self.assertEqual(self.value, value)
def test_worker_signals_error_emits_a_python_object(self) -> None:
signals = WorkerSignals(parent=None)
signals.error.connect(self.slot_with_value)
value = object
signals.error.emit(value)
self.assertEqual(self.value, value)
if __name__ == '__main__':
unittest.main()
```
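For context, a worker like the one exercised above is normally handed to a Qt thread pool. The wiring below is a hypothetical usage sketch: the task function and the connected slots are placeholders, not part of the tested module.
```python
# Hypothetical usage sketch for the Worker/WorkerSignals pair tested above.
from PySide2.QtCore import QThreadPool

from src.worker import Worker


def long_task(a, b, progress_callback=None, warning_callback=None):
    """Placeholder task; the callback kwargs are only injected if add_callbacks() is used."""
    if progress_callback is not None:
        progress_callback.emit(50)  # report intermediate progress
    return a + b


worker = Worker(long_task, 1, 2)
worker.add_callbacks()  # adds progress_callback/warning_callback to the worker kwargs
worker.signals.success.connect(lambda *args: print("finished without errors"))
worker.signals.error.connect(lambda *args: print("an exception was raised"))
QThreadPool.globalInstance().start(worker)  # executes worker.run() on a pool thread
```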
|
{
"source": "jfaccioni/SCOUTS",
"score": 2
}
|
#### File: SCOUTS/src/violins.py
```python
import os
import sys
import traceback
from typing import Callable, Generator, List, Tuple
import pandas as pd
import seaborn as sns
from PySide2.QtCore import QEvent, QObject, QRunnable, QThreadPool, Qt, Signal, Slot
from PySide2.QtGui import QIcon, QPixmap
from PySide2.QtWidgets import (QApplication, QCheckBox, QComboBox, QDialog, QFileDialog, QFormLayout, QFrame, QLabel,
QLineEdit, QMainWindow, QMessageBox, QPushButton, QSizePolicy, QVBoxLayout, QWidget)
from matplotlib import use as set_backend
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavBar
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from xlrd import XLRDError
from src.utils import get_project_root
set_backend('Qt5Agg')
sns.set(style="whitegrid")
class ViolinGUI(QMainWindow):
"""Main Window Widget for ViolinGUI."""
style = {
'title': 'QLabel {font-size: 18pt; font-weight: 600}',
'header': 'QLabel {font-size: 12pt; font-weight: 520}',
'label': 'QLabel {font-size: 10pt}',
'button': 'QPushButton {font-size: 10pt}',
'run button': 'QPushButton {font-size: 18pt; font-weight: 600}',
'line edit': 'QLineEdit {font-size: 10pt}',
'checkbox': 'QCheckBox {font-size: 10pt}',
'drop down': 'QComboBox {font-size: 10pt}'
}
def __init__(self) -> None:
"""ViolinGUI Constructor. Defines all aspects of the GUI."""
# ## Setup section
# Inherits from QMainWindow
super().__init__()
self.rootdir = get_project_root()
# QMainWindow basic properties
self.setWindowTitle("SCOUTS - Violins")
self.setWindowIcon(QIcon(os.path.abspath(os.path.join(self.rootdir, 'src', 'scouts.ico'))))
# Creates QWidget as QMainWindow's central widget
self.page = QWidget(self)
self.setCentralWidget(self.page)
# Miscellaneous initialization values
self.threadpool = QThreadPool() # Threadpool for workers
self.population_df = None # DataFrame of whole population (raw data)
self.summary_df = None # DataFrame indicating which SCOUTS output corresponds to which rule
self.summary_path = None # path to all DataFrames generated by SCOUTS
self.main_layout = QVBoxLayout(self.page)
# Title section
# Title
self.title = QLabel(self.page)
self.title.setText('SCOUTS - Violins')
self.title.setStyleSheet(self.style['title'])
self.title.adjustSize()
self.main_layout.addWidget(self.title)
# ## Input section
# Input header
self.input_header = QLabel(self.page)
self.input_header.setText('Load data')
self.input_header.setStyleSheet(self.style['header'])
self.input_header.adjustSize()
self.main_layout.addWidget(self.input_header)
# Input/Output frame
self.input_frame = QFrame(self.page)
self.input_frame.setFrameShape(QFrame.StyledPanel)
self.input_frame.setLayout(QFormLayout())
self.main_layout.addWidget(self.input_frame)
# Raw data button
self.input_button = QPushButton(self.page)
self.input_button.setStyleSheet(self.style['button'])
self.set_icon(self.input_button, 'x-office-spreadsheet')
self.input_button.setObjectName('file')
self.input_button.setText(' Load raw data file')
self.input_button.setToolTip('Load raw data file (the file given to SCOUTS as the input file)')
self.input_button.clicked.connect(self.get_path)
# SCOUTS results button
self.output_button = QPushButton(self.page)
self.output_button.setStyleSheet(self.style['button'])
self.set_icon(self.output_button, 'folder')
self.output_button.setObjectName('folder')
self.output_button.setText(' Load SCOUTS results')
self.output_button.setToolTip('Load data from SCOUTS analysis '
'(the folder given to SCOUTS as the output folder)')
self.output_button.clicked.connect(self.get_path)
# Add widgets above to input frame Layout
self.input_frame.layout().addRow(self.input_button)
self.input_frame.layout().addRow(self.output_button)
# ## Samples section
# Samples header
self.samples_header = QLabel(self.page)
self.samples_header.setText('Select sample names')
self.samples_header.setStyleSheet(self.style['header'])
self.samples_header.adjustSize()
self.main_layout.addWidget(self.samples_header)
# Samples frame
self.samples_frame = QFrame(self.page)
self.samples_frame.setFrameShape(QFrame.StyledPanel)
self.samples_frame.setLayout(QFormLayout())
self.main_layout.addWidget(self.samples_frame)
# Samples label
self.samples_label = QLabel(self.page)
self.samples_label.setText('Write sample names delimited by semicolons below.\nEx: Control;Treat_01;Pac-03')
self.samples_label.setStyleSheet(self.style['label'])
# Sample names line edit
self.sample_names = QLineEdit(self.page)
self.sample_names.setStyleSheet(self.style['line edit'])
# Add widgets above to samples frame Layout
self.samples_frame.layout().addRow(self.samples_label)
self.samples_frame.layout().addRow(self.sample_names)
# ## Analysis section
# Analysis header
self.analysis_header = QLabel(self.page)
self.analysis_header.setText('Plot parameters')
self.analysis_header.setStyleSheet(self.style['header'])
self.analysis_header.adjustSize()
self.main_layout.addWidget(self.analysis_header)
# Analysis frame
self.analysis_frame = QFrame(self.page)
self.analysis_frame.setFrameShape(QFrame.StyledPanel)
self.analysis_frame.setLayout(QFormLayout())
self.main_layout.addWidget(self.analysis_frame)
# Analysis labels
self.analysis_label_01 = QLabel(self.page)
self.analysis_label_01.setText('Compare')
self.analysis_label_01.setStyleSheet(self.style['label'])
self.analysis_label_02 = QLabel(self.page)
self.analysis_label_02.setText('with')
self.analysis_label_02.setStyleSheet(self.style['label'])
self.analysis_label_03 = QLabel(self.page)
self.analysis_label_03.setText('for marker')
self.analysis_label_03.setStyleSheet(self.style['label'])
self.analysis_label_04 = QLabel(self.page)
self.analysis_label_04.setText('Outlier type')
self.analysis_label_04.setStyleSheet(self.style['label'])
# Analysis drop-down boxes
self.drop_down_01 = QComboBox(self.page)
self.drop_down_01.addItems(['whole population', 'non-outliers', 'top outliers', 'bottom outliers', 'none'])
self.drop_down_01.setStyleSheet(self.style['drop down'])
self.drop_down_01.setCurrentIndex(2)
self.drop_down_02 = QComboBox(self.page)
self.drop_down_02.addItems(['whole population', 'non-outliers', 'top outliers', 'bottom outliers', 'none'])
self.drop_down_02.setStyleSheet(self.style['drop down'])
self.drop_down_02.setCurrentIndex(0)
self.drop_down_03 = QComboBox(self.page)
self.drop_down_03.setStyleSheet(self.style['drop down'])
self.drop_down_04 = QComboBox(self.page)
self.drop_down_04.addItems(['OutS', 'OutR'])
self.drop_down_04.setStyleSheet(self.style['drop down'])
# Add widgets above to samples frame Layout
self.analysis_frame.layout().addRow(self.analysis_label_01, self.drop_down_01)
self.analysis_frame.layout().addRow(self.analysis_label_02, self.drop_down_02)
self.analysis_frame.layout().addRow(self.analysis_label_03, self.drop_down_03)
self.analysis_frame.layout().addRow(self.analysis_label_04, self.drop_down_04)
self.legend_checkbox = QCheckBox(self.page)
self.legend_checkbox.setText('Add legend to the plot')
self.legend_checkbox.setStyleSheet(self.style['checkbox'])
self.main_layout.addWidget(self.legend_checkbox)
# Plot button (stand-alone)
self.plot_button = QPushButton(self.page)
self.set_icon(self.plot_button, 'system-run')
self.plot_button.setText(' Plot')
self.plot_button.setToolTip('Plot data after loading the input data and selecting parameters')
self.plot_button.setStyleSheet(self.style['run button'])
self.plot_button.setEnabled(False)
self.plot_button.clicked.connect(self.run_plot)
self.main_layout.addWidget(self.plot_button)
# ## Secondary Window
# This is used to plot the violins only
self.secondary_window = QMainWindow(self)
self.secondary_window.resize(720, 720)
self.dynamic_canvas = DynamicCanvas(self.secondary_window, width=6, height=6, dpi=120)
self.secondary_window.setCentralWidget(self.dynamic_canvas)
self.secondary_window.addToolBar(NavBar(self.dynamic_canvas, self.secondary_window))
def set_icon(self, widget: QWidget, icon: str) -> None:
"""Associates an icon to a widget."""
i = QIcon()
i.addPixmap(QPixmap(os.path.abspath(os.path.join(self.rootdir, 'src', 'default_icons', f'{icon}.svg'))))
widget.setIcon(QIcon.fromTheme(icon, i))
def get_path(self) -> None:
"""Opens a dialog box and loads the corresponding data into memory, depending on the caller widget."""
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
query = None
func = None
if self.sender().objectName() == 'file':
query, _ = QFileDialog.getOpenFileName(self, "Select file", "", "All Files (*)", options=options)
func = self.load_scouts_input_data
elif self.sender().objectName() == 'folder':
query = QFileDialog.getExistingDirectory(self, "Select Directory", options=options)
func = self.load_scouts_results
if query:
self.load_data(query, func)
def load_data(self, query: str, func: Callable) -> None:
"""Loads input data into memory, while displaying a loading message as a separate worker."""
worker = Worker(func=func, query=query)
message = self.loading_message()
worker.signals.started.connect(message.show)
worker.signals.started.connect(self.page.setDisabled)
worker.signals.error.connect(self.generic_error_message)
worker.signals.error.connect(message.destroy)
worker.signals.failed.connect(self.plot_button.setDisabled)
worker.signals.success.connect(message.destroy)
worker.signals.success.connect(self.enable_plot)
worker.signals.finished.connect(self.page.setEnabled)
self.threadpool.start(worker)
def loading_message(self) -> QDialog:
"""Returns the message box to be displayed while the user waits for the input data to load."""
message = QDialog(self)
message.setWindowTitle('Loading')
message.resize(300, 50)
label = QLabel('loading DataFrame into memory...', message)
label.setStyleSheet(self.style['label'])
label.adjustSize()
label.setAlignment(Qt.AlignCenter)
label.move(int((message.width() - label.width())/2), int((message.height() - label.height())/2))
return message
def load_scouts_input_data(self, query: str) -> None:
"""Loads data for whole population prior to SCOUTS into memory (used for plotting the whole population)."""
try:
self.population_df = pd.read_excel(query, index_col=0)
except XLRDError:
self.population_df = pd.read_csv(query, index_col=0)
self.drop_down_03.clear()
self.drop_down_03.addItems(list(self.population_df.columns))
self.drop_down_03.setCurrentIndex(0)
def load_scouts_results(self, query: str) -> None:
"""Loads the SCOUTS summary file into memory, in order to dynamically locate SCOUTS output files later when
the user chooses which data to plot."""
self.summary_df = pd.read_excel(os.path.join(query, 'summary.xlsx'), index_col=None)
self.summary_path = query
def enable_plot(self) -> None:
"""Enables plot button if all necessary files are placed in memory."""
if isinstance(self.summary_df, pd.DataFrame) and isinstance(self.population_df, pd.DataFrame):
self.plot_button.setEnabled(True)
def run_plot(self) -> None:
"""Sets and starts the plot worker."""
worker = Worker(func=self.plot)
worker.signals.error.connect(self.generic_error_message)
worker.signals.success.connect(self.secondary_window.show)
self.threadpool.start(worker)
def plot(self) -> None:
"""Logic for plotting data based on user selection of populations, markers, etc."""
# Clear figure currently on plot
self.dynamic_canvas.axes.cla()
# Initialize values and get parameters from GUI
columns = ['sample', 'marker', 'population', 'expression']
samples = self.parse_sample_names()
pop_01 = self.drop_down_01.currentText()
pop_02 = self.drop_down_02.currentText()
pops_to_analyse = [pop_01, pop_02]
marker = self.drop_down_03.currentText()
cutoff_from_reference = True if self.drop_down_04.currentText() == 'OutR' else False
violin_df = pd.DataFrame(columns=columns)
# Start fetching data from files
# Whole population
for pop in pops_to_analyse:
if pop == 'whole population':
for partial_df in self.yield_violin_values(df=self.population_df, population='whole population',
samples=samples, marker=marker, columns=columns):
violin_df = violin_df.append(partial_df)
# Other comparisons
elif pop != 'none':
for file_number in self.yield_selected_file_numbers(summary_df=self.summary_df, population=pop,
cutoff_from_reference=cutoff_from_reference,
marker=marker):
df_path = os.path.join(self.summary_path, 'data', f'{"%04d" % file_number}.')
try:
sample_df = pd.read_excel(df_path + 'xlsx', index_col=0)
except FileNotFoundError:
sample_df = pd.read_csv(df_path + 'csv', index_col=0)
if not sample_df.empty:
for partial_df in self.yield_violin_values(df=sample_df, population=pop, samples=samples,
marker=marker, columns=columns):
violin_df = violin_df.append(partial_df)
# Plot data
pops_to_analyse = [p for p in pops_to_analyse if p != 'none']
violin_df = violin_df[violin_df['marker'] == marker]
for pop in pops_to_analyse:
pop_subset = violin_df.loc[violin_df['population'] == pop]
for sample in samples:
sample_subset = pop_subset.loc[pop_subset['sample'] == sample]
sat = 1.0 - samples.index(sample) / (len(samples) + 1)
self.dynamic_canvas.update_figure(subset_by_sample=sample_subset, pop=pop, sat=sat, samples=samples)
# Draw plotted data on canvas
if self.legend_checkbox.isChecked():
self.dynamic_canvas.add_legend()
self.dynamic_canvas.axes.set_title(f'{marker} expression - {self.drop_down_04.currentText()}')
self.dynamic_canvas.fig.canvas.draw()
def parse_sample_names(self) -> List[str]:
"""Parse sample names from the QLineEdit Widget."""
return self.sample_names.text().split(';')
def generic_error_message(self, error: Tuple[Exception, str]) -> None:
"""Error message box used to display any error message (including traceback) for any uncaught errors."""
name, trace = error
QMessageBox.critical(self, 'An error occurred!', f"Error: {str(name)}\n\nfull traceback:\n{trace}")
def closeEvent(self, event: QEvent) -> None:
"""Defines the message box for when the user wants to quit ViolinGUI."""
title = 'Quit Application'
mes = "Are you sure you want to quit?"
reply = QMessageBox.question(self, title, mes, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.setEnabled(False)
self.threadpool.waitForDone()
event.accept()
else:
event.ignore()
@staticmethod
def yield_violin_values(df: pd.DataFrame, population: str, samples: List[str], marker: str,
columns: List[str]) -> Generator[pd.DataFrame, None, None]:
"""Returns a DataFrame from expression values, along with information of sample, marker and population. This
DataFrame is appended to the violin plot DataFrame in order to simplify plotting the violins afterwards."""
for sample in samples:
series = df.loc[df.index.str.contains(sample)].loc[:, marker]
yield pd.DataFrame({'sample': sample, 'marker': marker, 'population': population, 'expression': series},
columns=columns)
@staticmethod
def yield_selected_file_numbers(summary_df: pd.DataFrame, population: str, cutoff_from_reference: bool,
marker: str) -> Generator[int, None, None]:
"""Yields file numbers from DataFrames resulting from SCOUTS analysis. DataFrames are yielded based on
global values, i.e. the comparisons the user wants to perform."""
cutoff = 'sample'
if cutoff_from_reference is True:
cutoff = 'reference'
for index, (file_number, cutoff_from, reference, outliers_for, category) in summary_df.iterrows():
if cutoff_from == cutoff and outliers_for == marker and category == population:
yield file_number
class DynamicCanvas(FigureCanvas):
"""Class for the plot canvas in the window independent from the main GUI window."""
colors = {
'top outliers': [0.988, 0.553, 0.384], # orange
'bottom outliers': [0.259, 0.455, 0.643], # blue
'non-outliers': [0.400, 0.761, 0.647], # green
'whole population': [0.600, 0.600, 0.600] # gray
}
def __init__(self, parent=None, width=5, height=4, dpi=100) -> None:
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def update_figure(self, subset_by_sample: pd.DataFrame, pop: str, sat: float, samples: List[str]) -> None:
"""Updates the figure shown based on the passed in as arguments."""
color = self.colors[pop]
sns.violinplot(ax=self.axes, data=subset_by_sample, x='sample', y='expression', color=color, saturation=sat,
order=samples)
def add_legend(self) -> None:
"""Adds legends to the figure (if the user chose to do so)."""
labels = {name: Line2D([], [], color=color, marker='s', linestyle='None')
for name, color in self.colors.items()}
self.axes.legend(labels.values(), labels.keys(), fontsize=8)
class Worker(QRunnable):
"""Worker thread for loading DataFrames and generating plots. Avoids unresponsive GUI."""
def __init__(self, func: Callable, *args, **kwargs) -> None:
super().__init__()
self.func = func
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
@Slot()
def run(self) -> None:
"""Runs the Worker thread."""
self.signals.started.emit(True)
try:
self.func(*self.args, **self.kwargs)
except Exception as error:
trace = traceback.format_exc()
self.signals.error.emit((error, trace))
self.signals.failed.emit()
else:
self.signals.success.emit()
finally:
self.signals.finished.emit(True)
class WorkerSignals(QObject):
"""Defines the signals available from a running worker thread. Supported signals are:
Started: Worker has started its job. Emits a boolean.
Error: an Exception was raised. Emits a tuple containing an Exception object and the traceback as a string.
Failed: Worker has not finished its job due to an error. Nothing is emitted.
Success: Worker has finished executing without errors. Nothing is emitted.
Finished: Worker has stopped working (either naturally or by raising an Exception). Emits a boolean."""
started = Signal(bool)
error = Signal(Exception)
failed = Signal()
success = Signal()
finished = Signal(bool)
def main() -> None:
"""Entry point function for ViolinGUI."""
app = QApplication(sys.argv)
violin_gui = ViolinGUI()
violin_gui.show()
sys.exit(app.exec_())
```
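The module defines `main()` but the excerpt above ends before any entry-point guard; presumably the script is started with something along these lines:
```python
# Presumed entry-point guard; not part of the excerpt above.
if __name__ == '__main__':
    main()
```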
|
{
"source": "Jfach/beer-for-python",
"score": 3
}
|
#### File: beer-for-python/pybeer/pybeer.py
```python
import re
from BeautifulSoup import BeautifulSoup as bs
import beer_data
import bad_beer as errors
class Beer:
def __init__(self, name):
try:
self.name = name.title()
#keep the raw html, just in case we want it
self._html = beer_data.beer_profile_html(name)
self._soup = bs(self._html)
except errors.Invalid_Beer as error:
print(error.args[0])
def __repr__(self):
return "{}(\"{}\")".format(
self.__class__.__name__,
self.name)
@property
def abv(self):
styleabv = self._soup.firstText("Style | ABV")
text = styleabv.parent.parent.getText()
abv = re.search(r'([0-9.]+%)ABV', text)
#what about beers with multiple styles? (TODO)
#NB: I haven't found an example of that yet
return abv.groups()[0]
@property
def style(self):
styleabv = self._soup.firstText("Style | ABV")
style = styleabv.findNext('b').getText()
return style
@property
def brewer(self):
brewed_by_text = self._soup.firstText("Brewed by:")
brewer = brewed_by_text.findNext('b').getText()
return brewer
@property
def score(self):
score = self._soup.find(attrs={"class": "BAscore_big ba-score"})
return score.getText()
@property
def score_text(self):
score_text = self._soup.find(attrs={"class": "ba-score_text"})
return score_text.getText()
@property
def description(self):
#is this ever not "No notes at this time."?
desc = self._soup.firstText("Notes & Commercial Description:")
all_text = desc.parent.parent.contents
#without a page that has something other than "No notes at this time.",
#it's pretty difficult to know how to handle this section if there's
#ever more than the one line
#if beeradvocate.com ever add in descriptions, this will need
#to be revisited (TODO, I guess)
return all_text[-1]
```
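Assuming `beer_data.beer_profile_html()` can resolve the requested name to a BeerAdvocate profile page, the class above would be used roughly as follows; the import path and the beer name are placeholders.
```python
# Hypothetical usage sketch; the import path and beer name are placeholders.
from pybeer.pybeer import Beer

beer = Beer("Example Pale Ale")
print(beer.brewer)                  # parsed from the "Brewed by:" block
print(beer.style, beer.abv)         # parsed from the "Style | ABV" block
print(beer.score, beer.score_text)  # BeerAdvocate score and its text label
```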
|
{
"source": "jfach/nautobot",
"score": 2
}
|
#### File: dummy_plugin/dummy_plugin/template_content.py
```python
from nautobot.extras.plugins import PluginTemplateExtension
class SiteContent(PluginTemplateExtension):
model = "dcim.site"
def left_page(self):
return "SITE CONTENT - LEFT PAGE"
def right_page(self):
return "SITE CONTENT - RIGHT PAGE"
def full_width_page(self):
return "SITE CONTENT - FULL WIDTH PAGE"
def buttons(self):
return "SITE CONTENT - BUTTONS"
template_extensions = [SiteContent]
```
#### File: core/graphql/__init__.py
```python
from django.db.models import JSONField, BigIntegerField
from django.db.models.fields import BinaryField
from django.test.client import RequestFactory
from nautobot.extras.context_managers import web_request_context
from nautobot.extras.models import GraphQLQuery
import graphene
from graphene.types import generic
from graphene_django.converter import convert_django_field
from graphene_django.settings import graphene_settings
from graphql import get_default_backend
from graphql.language import ast
@convert_django_field.register(JSONField)
def convert_json(field, registry=None):
"""Convert JSONField to GenericScalar."""
return generic.GenericScalar()
@convert_django_field.register(BinaryField)
def convert_binary(field, registry=None):
"""Convert BinaryField to String."""
return graphene.String()
def execute_query(query, variables=None, request=None, user=None):
"""Execute a query from the ORM.
Args:
- query (str): String with GraphQL query.
- variables (dict, optional): If the query has variables they need to be passed in as a dictionary.
- request (django.test.client.RequestFactory, optional): Used to authenticate.
- user (django.contrib.auth.models.User, optional): Used to authenticate.
Returns:
GraphQL Object: Result for query
"""
if not request and not user:
raise ValueError("Either request or username should be provided")
if not request:
request = RequestFactory().post("/graphql/")
request.user = user
backend = get_default_backend()
schema = graphene_settings.SCHEMA
document = backend.document_from_string(schema, query)
if variables:
return document.execute(context_value=request, variable_values=variables)
else:
return document.execute(context_value=request)
def execute_saved_query(saved_query_slug, **kwargs):
"""Execute saved query from the ORM.
Args:
- saved_query_slug (str): Slug of a saved GraphQL query.
- variables (dict, optional): If the query has variables they need to be passed in as a dictionary.
- request (django.test.client.RequestFactory, optional): Used to authenticate.
- user (django.contrib.auth.models.User, optional): Used to authenticate.
Returns:
GraphQL Object: Result for query
"""
query = GraphQLQuery.objects.get(slug=saved_query_slug)
return execute_query(query=query.query, **kwargs)
# See also:
# https://github.com/graphql-python/graphene-django/issues/241
# https://github.com/graphql-python/graphene/pull/1261 (graphene 3.0)
class BigInteger(graphene.types.Scalar):
"""An integer which, unlike GraphQL's native Int type, doesn't reject values outside (-2^31, 2^31-1).
Currently only used for ASNField, which goes up to 2^32-1 (i.e., unsigned 32-bit int); it's possible
that this approach may fail for values in excess of 2^53-1 (the largest integer value supported in JavaScript).
"""
serialize = int
parse_value = int
@staticmethod
def parse_literal(node):
if isinstance(node, ast.IntValue):
return int(node.value)
@convert_django_field.register(BigIntegerField)
def convert_biginteger(field, registry=None):
"""Convert BigIntegerField to BigInteger scalar."""
return BigInteger()
```
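Following the docstrings above, the two helpers can be exercised from the ORM roughly as shown below; the query text, the saved-query slug, and the user lookup are placeholders rather than part of the module.
```python
# Hypothetical usage sketch for execute_query()/execute_saved_query(); query text and slug are placeholders.
from django.contrib.auth import get_user_model

from nautobot.core.graphql import execute_query, execute_saved_query

user = get_user_model().objects.first()  # either a user or a pre-built request must be supplied

result = execute_query(query="query { sites { name } }", user=user)
print(result.data, result.errors)

saved_result = execute_saved_query("my-saved-query", user=user)
print(saved_result.data, saved_result.errors)
```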
#### File: core/tests/test_checks.py
```python
from django.test import TestCase
from django.test import override_settings
from nautobot.core import checks
class CheckCoreSettingsTest(TestCase):
@override_settings(
CACHEOPS_DEFAULTS={"timeout": 0},
)
def test_check_cache_timeout(self):
"""Error if CACHEOPS_DEFAULTS['timeout'] is 0."""
self.assertEqual(checks.check_cache_timeout(None), [checks.E001])
@override_settings(
AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.ModelBackend"],
)
def test_check_object_permissions_backend(self):
"""
Error if 'nautobot.core.authentication.ObjectPermissionBackend' not in AUTHENTICATION_BACKENDS.
"""
self.assertEqual(checks.check_object_permissions_backend(None), [checks.E002])
@override_settings(
RELEASE_CHECK_TIMEOUT=0,
)
def test_check_release_check_timeout(self):
"""Error if RELEASE_CHECK_TIMEOUT < 3600."""
self.assertEqual(checks.check_release_check_timeout(None), [checks.E003])
@override_settings(
RELEASE_CHECK_URL="bogus url://tom.horse",
)
def test_check_release_check_url(self):
"""Error if RELEASE_CHECK_URL is not a valid URL."""
self.assertEqual(checks.check_release_check_url(None), [checks.E004])
@override_settings(
STORAGE_BACKEND=None,
STORAGE_CONFIG={"test_key": "test_value"},
)
def test_check_storage_config_and_backend(self):
"""Warn if STORAGE_CONFIG and STORAGE_BACKEND aren't mutually set."""
self.assertEqual(checks.check_storage_config_and_backend(None), [checks.W005])
```
#### File: extras/api/serializers.py
```python
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from drf_yasg.utils import swagger_serializer_method
from graphene_django.settings import graphene_settings
from graphql import get_default_backend
from graphql.error import GraphQLSyntaxError
from rest_framework import serializers
from nautobot.core.api import (
ChoiceField,
ContentTypeField,
SerializedPKRelatedField,
ValidatedModelSerializer,
)
from nautobot.core.api.exceptions import SerializerNotFound
from nautobot.dcim.api.nested_serializers import (
NestedDeviceSerializer,
NestedDeviceRoleSerializer,
NestedPlatformSerializer,
NestedRackSerializer,
NestedRegionSerializer,
NestedSiteSerializer,
)
from nautobot.dcim.models import Device, DeviceRole, DeviceType, Platform, Rack, Region, Site
from nautobot.extras.choices import *
from nautobot.extras.datasources import get_datasource_content_choices
from nautobot.extras.models import (
ComputedField,
ConfigContext,
ConfigContextSchema,
CustomField,
CustomFieldChoice,
CustomLink,
ExportTemplate,
GitRepository,
GraphQLQuery,
ImageAttachment,
JobResult,
ObjectChange,
Relationship,
RelationshipAssociation,
Status,
Tag,
Webhook,
)
from nautobot.extras.api.fields import StatusSerializerField
from nautobot.extras.utils import FeatureQuery
from nautobot.tenancy.api.nested_serializers import (
NestedTenantSerializer,
NestedTenantGroupSerializer,
)
from nautobot.tenancy.models import Tenant, TenantGroup
from nautobot.users.api.nested_serializers import NestedUserSerializer
from nautobot.utilities.api import get_serializer_for_model
from nautobot.virtualization.api.nested_serializers import (
NestedClusterGroupSerializer,
NestedClusterSerializer,
)
from nautobot.virtualization.models import Cluster, ClusterGroup
from .customfields import CustomFieldModelSerializer
from .fields import MultipleChoiceJSONField
from .nested_serializers import *
#
# Custom fields
#
class CustomFieldSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:customfield-detail")
content_types = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("custom_fields").get_query()),
many=True,
)
type = ChoiceField(choices=CustomFieldTypeChoices)
filter_logic = ChoiceField(choices=CustomFieldFilterLogicChoices, required=False)
class Meta:
model = CustomField
fields = [
"id",
"url",
"content_types",
"type",
"name",
"label",
"description",
"required",
"filter_logic",
"default",
"weight",
"validation_minimum",
"validation_maximum",
"validation_regex",
]
class CustomFieldChoiceSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:customfieldchoice-detail")
field = NestedCustomFieldSerializer()
class Meta:
model = CustomFieldChoice
fields = ["id", "url", "field", "value", "weight"]
#
# Export templates
#
class ExportTemplateSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:exporttemplate-detail")
content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("export_templates").get_query()),
)
owner_content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("export_template_owners").get_query()),
required=False,
allow_null=True,
default=None,
)
owner = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ExportTemplate
fields = [
"id",
"url",
"content_type",
"owner_content_type",
"owner_object_id",
"owner",
"name",
"description",
"template_code",
"mime_type",
"file_extension",
]
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_owner(self, obj):
if obj.owner is None:
return None
serializer = get_serializer_for_model(obj.owner, prefix="Nested")
context = {"request": self.context["request"]}
return serializer(obj.owner, context=context).data
#
# Tags
#
class TagSerializer(CustomFieldModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:tag-detail")
tagged_items = serializers.IntegerField(read_only=True)
class Meta:
model = Tag
fields = [
"id",
"url",
"name",
"slug",
"color",
"description",
"tagged_items",
"custom_fields",
"created",
"last_updated",
]
class TaggedObjectSerializer(serializers.Serializer):
tags = NestedTagSerializer(many=True, required=False)
def create(self, validated_data):
tags = validated_data.pop("tags", None)
instance = super().create(validated_data)
if tags is not None:
return self._save_tags(instance, tags)
return instance
def update(self, instance, validated_data):
tags = validated_data.pop("tags", None)
# Cache tags on instance for change logging
instance._tags = tags or []
instance = super().update(instance, validated_data)
if tags is not None:
return self._save_tags(instance, tags)
return instance
def _save_tags(self, instance, tags):
if tags:
instance.tags.set(*[t.name for t in tags])
else:
instance.tags.clear()
return instance
#
# Git repositories
#
class GitRepositorySerializer(CustomFieldModelSerializer):
"""Git repositories defined as a data source."""
url = serializers.HyperlinkedIdentityField(view_name="extras-api:gitrepository-detail")
token = serializers.CharField(source="_token", write_only=True, required=False)
provided_contents = MultipleChoiceJSONField(
choices=get_datasource_content_choices("extras.gitrepository"),
allow_blank=True,
required=False,
)
class Meta:
model = GitRepository
fields = [
"id",
"url",
"name",
"slug",
"remote_url",
"branch",
"token",
"username",
"current_head",
"provided_contents",
"created",
"last_updated",
"custom_fields",
"computed_fields",
]
opt_in_fields = ["computed_fields"]
def validate(self, data):
"""
Add the originating Request as a parameter to be passed when creating/updating a GitRepository.
"""
data["request"] = self.context["request"]
return super().validate(data)
#
# Image attachments
#
class ImageAttachmentSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:imageattachment-detail")
content_type = ContentTypeField(queryset=ContentType.objects.all())
parent = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ImageAttachment
fields = [
"id",
"url",
"content_type",
"object_id",
"parent",
"name",
"image",
"image_height",
"image_width",
"created",
]
def validate(self, data):
# Validate that the parent object exists
try:
data["content_type"].get_object_for_this_type(id=data["object_id"])
except ObjectDoesNotExist:
raise serializers.ValidationError(
"Invalid parent object: {} ID {}".format(data["content_type"], data["object_id"])
)
# Enforce model validation
super().validate(data)
return data
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_parent(self, obj):
# Static mapping of models to their nested serializers
if isinstance(obj.parent, Device):
serializer = NestedDeviceSerializer
elif isinstance(obj.parent, Rack):
serializer = NestedRackSerializer
elif isinstance(obj.parent, Site):
serializer = NestedSiteSerializer
else:
raise Exception("Unexpected type of parent object for ImageAttachment")
return serializer(obj.parent, context={"request": self.context["request"]}).data
#
# Config contexts
#
class ConfigContextSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:configcontext-detail")
owner_content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("config_context_owners").get_query()),
required=False,
allow_null=True,
default=None,
)
owner = serializers.SerializerMethodField(read_only=True)
schema = NestedConfigContextSchemaSerializer(required=False, allow_null=True)
regions = SerializedPKRelatedField(
queryset=Region.objects.all(),
serializer=NestedRegionSerializer,
required=False,
many=True,
)
sites = SerializedPKRelatedField(
queryset=Site.objects.all(),
serializer=NestedSiteSerializer,
required=False,
many=True,
)
roles = SerializedPKRelatedField(
queryset=DeviceRole.objects.all(),
serializer=NestedDeviceRoleSerializer,
required=False,
many=True,
)
device_types = SerializedPKRelatedField(
queryset=DeviceType.objects.all(),
serializer=NestedDeviceRoleSerializer,
required=False,
many=True,
)
platforms = SerializedPKRelatedField(
queryset=Platform.objects.all(),
serializer=NestedPlatformSerializer,
required=False,
many=True,
)
cluster_groups = SerializedPKRelatedField(
queryset=ClusterGroup.objects.all(),
serializer=NestedClusterGroupSerializer,
required=False,
many=True,
)
clusters = SerializedPKRelatedField(
queryset=Cluster.objects.all(),
serializer=NestedClusterSerializer,
required=False,
many=True,
)
tenant_groups = SerializedPKRelatedField(
queryset=TenantGroup.objects.all(),
serializer=NestedTenantGroupSerializer,
required=False,
many=True,
)
tenants = SerializedPKRelatedField(
queryset=Tenant.objects.all(),
serializer=NestedTenantSerializer,
required=False,
many=True,
)
tags = serializers.SlugRelatedField(queryset=Tag.objects.all(), slug_field="slug", required=False, many=True)
class Meta:
model = ConfigContext
fields = [
"id",
"url",
"name",
"owner_content_type",
"owner_object_id",
"owner",
"weight",
"description",
"schema",
"is_active",
"regions",
"sites",
"roles",
"device_types",
"platforms",
"cluster_groups",
"clusters",
"tenant_groups",
"tenants",
"tags",
"data",
"created",
"last_updated",
]
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_owner(self, obj):
if obj.owner is None:
return None
serializer = get_serializer_for_model(obj.owner, prefix="Nested")
context = {"request": self.context["request"]}
return serializer(obj.owner, context=context).data
#
# Config context Schemas
#
class ConfigContextSchemaSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:configcontextschema-detail")
owner_content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("config_context_owners").get_query()),
required=False,
allow_null=True,
default=None,
)
owner = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ConfigContextSchema
fields = [
"id",
"url",
"name",
"slug",
"owner_content_type",
"owner_object_id",
"owner",
"description",
"data_schema",
"created",
"last_updated",
]
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_owner(self, obj):
if obj.owner is None:
return None
serializer = get_serializer_for_model(obj.owner, prefix="Nested")
context = {"request": self.context["request"]}
return serializer(obj.owner, context=context).data
#
# Job Results
#
class JobResultSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:jobresult-detail")
user = NestedUserSerializer(read_only=True)
status = ChoiceField(choices=JobResultStatusChoices, read_only=True)
obj_type = ContentTypeField(read_only=True)
class Meta:
model = JobResult
fields = [
"id",
"url",
"created",
"completed",
"name",
"obj_type",
"status",
"user",
"data",
"job_id",
]
#
# Jobs (fka Custom Scripts, Reports)
#
class JobSerializer(serializers.Serializer):
url = serializers.HyperlinkedIdentityField(
view_name="extras-api:job-detail",
lookup_field="class_path",
lookup_url_kwarg="class_path",
)
id = serializers.CharField(read_only=True, source="class_path")
name = serializers.CharField(max_length=255, read_only=True)
description = serializers.CharField(max_length=255, required=False, read_only=True)
test_methods = serializers.ListField(child=serializers.CharField(max_length=255))
vars = serializers.SerializerMethodField(read_only=True)
result = NestedJobResultSerializer()
def get_vars(self, instance):
return {k: v.__class__.__name__ for k, v in instance._get_vars().items()}
class JobDetailSerializer(JobSerializer):
result = JobResultSerializer()
class JobInputSerializer(serializers.Serializer):
data = serializers.JSONField(required=False, default="")
commit = serializers.BooleanField(required=False, default=None)
#
# Change logging
#
class ObjectChangeSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:objectchange-detail")
user = NestedUserSerializer(read_only=True)
action = ChoiceField(choices=ObjectChangeActionChoices, read_only=True)
changed_object_type = ContentTypeField(read_only=True)
changed_object = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ObjectChange
fields = [
"id",
"url",
"time",
"user",
"user_name",
"request_id",
"action",
"changed_object_type",
"changed_object_id",
"changed_object",
"object_data",
]
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_changed_object(self, obj):
"""
Serialize a nested representation of the changed object.
"""
if obj.changed_object is None:
return None
try:
serializer = get_serializer_for_model(obj.changed_object, prefix="Nested")
except SerializerNotFound:
return obj.object_repr
context = {"request": self.context["request"]}
data = serializer(obj.changed_object, context=context).data
return data
#
# ContentTypes
#
class ContentTypeSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:contenttype-detail")
display = serializers.SerializerMethodField()
class Meta:
model = ContentType
fields = ["id", "url", "app_label", "model", "display"]
@swagger_serializer_method(serializer_or_field=serializers.CharField)
def get_display(self, obj):
return obj.app_labeled_name
#
# Custom Links
#
class CustomLinkSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:customlink-detail")
content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("custom_links").get_query()).order_by("app_label", "model"),
)
class Meta:
model = CustomLink
fields = (
"id",
"url",
"target_url",
"name",
"content_type",
"text",
"weight",
"group_name",
"button_class",
"new_window",
)
#
# Webhook
#
class WebhookSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:webhook-detail")
content_types = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("webhooks").get_query()).order_by("app_label", "model"),
many=True,
)
class Meta:
model = Webhook
fields = [
"id",
"url",
"content_types",
"name",
"type_create",
"type_update",
"type_delete",
"payload_url",
"http_method",
"http_content_type",
"additional_headers",
"body_template",
"secret",
"ssl_verification",
"ca_file_path",
]
#
# Custom statuses
#
class StatusSerializer(CustomFieldModelSerializer):
"""Serializer for `Status` objects."""
url = serializers.HyperlinkedIdentityField(view_name="extras-api:status-detail")
content_types = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("statuses").get_query()),
many=True,
)
class Meta:
model = Status
fields = [
"id",
"url",
"content_types",
"name",
"slug",
"color",
"custom_fields",
"created",
"last_updated",
]
class StatusModelSerializerMixin(serializers.Serializer):
"""Mixin to add non-required `status` choice field to model serializers."""
status = StatusSerializerField(required=False, queryset=Status.objects.all())
#
# Relationship
#
class RelationshipSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:relationship-detail")
source_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("relationships").get_query()),
)
destination_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("relationships").get_query()),
)
class Meta:
model = Relationship
fields = [
"id",
"url",
"name",
"slug",
"description",
"type",
"source_type",
"source_label",
"source_hidden",
"source_filter",
"destination_type",
"destination_label",
"destination_hidden",
"destination_filter",
]
class RelationshipAssociationSerializer(serializers.ModelSerializer):
source_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("relationships").get_query()),
)
destination_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("relationships").get_query()),
)
relationship = NestedRelationshipSerializer()
class Meta:
model = RelationshipAssociation
fields = [
"id",
"relationship",
"source_type",
"source_id",
"destination_type",
"destination_id",
]
#
# GraphQL Queries
#
class GraphQLQuerySerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:graphqlquery-detail")
variables = serializers.DictField(allow_null=True, default={})
class Meta:
model = GraphQLQuery
fields = (
"id",
"url",
"name",
"slug",
"query",
"variables",
)
# Computed Fields
class ComputedFieldSerializer(ValidatedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="extras-api:computedfield-detail")
content_type = ContentTypeField(
queryset=ContentType.objects.filter(FeatureQuery("custom_fields").get_query()).order_by("app_label", "model"),
)
class Meta:
model = ComputedField
fields = (
"id",
"url",
"slug",
"label",
"description",
"content_type",
"template",
"fallback_value",
"weight",
)
```
#### File: extras/tests/test_jobs.py
```python
import os
import uuid
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
"""
Test basic jobs to ensure importing works.
"""
maxDiff = None
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_pass"
name = "TestPass"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_fail"
name = "TestFail"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_field_order"
name = "TestFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_no_field_order"
name = "TestNoFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_read_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_pass"
name = "TestReadOnlyPass"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_fail"
name = "TestReadOnlyFail"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
# Also ensure the standard log message about aborting the transaction is *not* present
self.assertNotEqual(
job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error."
)
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_no_commit_field"
name = "TestReadOnlyNoCommitField"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>""",
)
```
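The tests above load dummy modules such as `test_pass`/`TestPass` from `extras/tests/dummy_jobs`; those files are not shown here, but a minimal job of that shape would presumably look like the sketch below. The class body is an assumption based on the Job API imported elsewhere in this dump, not the actual dummy file.
```python
# Hypothetical sketch of a dummy job such as extras/tests/dummy_jobs/test_pass.py; not the actual file.
from nautobot.extras.jobs import Job


class TestPass(Job):
    """A job that does nothing and therefore always completes successfully."""

    class Meta:
        description = "Dummy job used to exercise the job runner"

    def run(self, data, commit):
        pass
```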
#### File: nautobot/extras/views.py
```python
import inspect
from django import template
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views.generic import View
from django_tables2 import RequestConfig
from jsonschema.validators import Draft7Validator
from nautobot.core.views import generic
from nautobot.dcim.models import Device
from nautobot.dcim.tables import DeviceTable
from nautobot.extras.utils import get_worker_count
from nautobot.utilities.paginator import EnhancedPaginator, get_paginate_count
from nautobot.utilities.utils import (
copy_safe_request,
count_related,
shallow_compare_dict,
)
from nautobot.utilities.tables import ButtonsColumn
from nautobot.utilities.views import ContentTypePermissionRequiredMixin
from nautobot.virtualization.models import VirtualMachine
from nautobot.virtualization.tables import VirtualMachineTable
from . import filters, forms, tables
from .choices import JobResultStatusChoices
from .models import (
ComputedField,
ConfigContext,
ConfigContextSchema,
CustomLink,
ExportTemplate,
GitRepository,
GraphQLQuery,
ImageAttachment,
ObjectChange,
JobResult,
Relationship,
RelationshipAssociation,
Status,
Tag,
TaggedItem,
Webhook,
)
from .jobs import get_job, get_jobs, run_job, Job
from .datasources import (
get_datasource_contents,
enqueue_pull_git_repository_and_refresh_data,
)
#
# Tags
#
class TagListView(generic.ObjectListView):
queryset = Tag.objects.annotate(items=count_related(TaggedItem, "tag"))
filterset = filters.TagFilterSet
filterset_form = forms.TagFilterForm
table = tables.TagTable
class TagView(generic.ObjectView):
queryset = Tag.objects.all()
def get_extra_context(self, request, instance):
tagged_items = TaggedItem.objects.filter(tag=instance).prefetch_related("content_type", "content_object")
# Generate a table of all items tagged with this Tag
items_table = tables.TaggedItemTable(tagged_items)
paginate = {
"paginator_class": EnhancedPaginator,
"per_page": get_paginate_count(request),
}
RequestConfig(request, paginate).configure(items_table)
return {
"items_count": tagged_items.count(),
"items_table": items_table,
}
class TagEditView(generic.ObjectEditView):
queryset = Tag.objects.all()
model_form = forms.TagForm
template_name = "extras/tag_edit.html"
class TagDeleteView(generic.ObjectDeleteView):
queryset = Tag.objects.all()
class TagBulkImportView(generic.BulkImportView):
queryset = Tag.objects.all()
model_form = forms.TagCSVForm
table = tables.TagTable
class TagBulkEditView(generic.BulkEditView):
queryset = Tag.objects.annotate(items=count_related(TaggedItem, "tag"))
table = tables.TagTable
form = forms.TagBulkEditForm
class TagBulkDeleteView(generic.BulkDeleteView):
queryset = Tag.objects.annotate(items=count_related(TaggedItem, "tag"))
table = tables.TagTable
#
# Config contexts
#
# TODO: disallow (or at least warn) user from manually editing config contexts that
# have an associated owner, such as a Git repository
class ConfigContextListView(generic.ObjectListView):
queryset = ConfigContext.objects.all()
filterset = filters.ConfigContextFilterSet
filterset_form = forms.ConfigContextFilterForm
table = tables.ConfigContextTable
action_buttons = ("add",)
class ConfigContextView(generic.ObjectView):
queryset = ConfigContext.objects.all()
def get_extra_context(self, request, instance):
# Determine user's preferred output format
if request.GET.get("format") in ["json", "yaml"]:
format = request.GET.get("format")
if request.user.is_authenticated:
request.user.set_config("extras.configcontext.format", format, commit=True)
elif request.user.is_authenticated:
format = request.user.get_config("extras.configcontext.format", "json")
else:
format = "json"
return {
"format": format,
}
class ConfigContextEditView(generic.ObjectEditView):
queryset = ConfigContext.objects.all()
model_form = forms.ConfigContextForm
template_name = "extras/configcontext_edit.html"
class ConfigContextBulkEditView(generic.BulkEditView):
queryset = ConfigContext.objects.all()
filterset = filters.ConfigContextFilterSet
table = tables.ConfigContextTable
form = forms.ConfigContextBulkEditForm
class ConfigContextDeleteView(generic.ObjectDeleteView):
queryset = ConfigContext.objects.all()
class ConfigContextBulkDeleteView(generic.BulkDeleteView):
queryset = ConfigContext.objects.all()
table = tables.ConfigContextTable
class ObjectConfigContextView(generic.ObjectView):
base_template = None
template_name = "extras/object_configcontext.html"
def get_extra_context(self, request, instance):
source_contexts = ConfigContext.objects.restrict(request.user, "view").get_for_object(instance)
# Determine user's preferred output format
if request.GET.get("format") in ["json", "yaml"]:
format = request.GET.get("format")
if request.user.is_authenticated:
request.user.set_config("extras.configcontext.format", format, commit=True)
elif request.user.is_authenticated:
format = request.user.get_config("extras.configcontext.format", "json")
else:
format = "json"
return {
"rendered_context": instance.get_config_context(),
"source_contexts": source_contexts,
"format": format,
"base_template": self.base_template,
"active_tab": "config-context",
}
#
# Config context schemas
#
# TODO: disallow (or at least warn) user from manually editing config context schemas that
# have an associated owner, such as a Git repository
class ConfigContextSchemaListView(generic.ObjectListView):
queryset = ConfigContextSchema.objects.all()
filterset = filters.ConfigContextSchemaFilterSet
filterset_form = forms.ConfigContextSchemaFilterForm
table = tables.ConfigContextSchemaTable
action_buttons = ("add",)
class ConfigContextSchemaView(generic.ObjectView):
queryset = ConfigContextSchema.objects.all()
def get_extra_context(self, request, instance):
# Determine user's preferred output format
if request.GET.get("format") in ["json", "yaml"]:
format = request.GET.get("format")
if request.user.is_authenticated:
request.user.set_config("extras.configcontextschema.format", format, commit=True)
elif request.user.is_authenticated:
format = request.user.get_config("extras.configcontextschema.format", "json")
else:
format = "json"
return {
"format": format,
"active_tab": "configcontextschema",
}
class ConfigContextSchemaObjectValidationView(generic.ObjectView):
"""
This view renders a detail tab that shows tables of objects that utilize the given schema object
and their validation state.
"""
queryset = ConfigContextSchema.objects.all()
template_name = "extras/configcontextschema/validation.html"
def get_extra_context(self, request, instance):
"""
Reuse the model tables for config context, device, and virtual machine but inject
the `ConfigContextSchemaValidationStateColumn` and an object edit action button.
"""
# Prep the validator with the schema so it can be reused for all records
validator = Draft7Validator(instance.data_schema)
# Config context table
config_context_table = tables.ConfigContextTable(
data=instance.configcontext_set.all(),
orderable=False,
extra_columns=[
(
"validation_state",
tables.ConfigContextSchemaValidationStateColumn(validator, "data", empty_values=()),
),
("actions", ButtonsColumn(model=ConfigContext, buttons=["edit"])),
],
)
paginate = {
"paginator_class": EnhancedPaginator,
"per_page": get_paginate_count(request),
}
RequestConfig(request, paginate).configure(config_context_table)
# Device table
device_table = DeviceTable(
data=instance.device_set.prefetch_related(
"tenant", "site", "rack", "device_type", "device_role", "primary_ip"
),
orderable=False,
extra_columns=[
(
"validation_state",
tables.ConfigContextSchemaValidationStateColumn(validator, "local_context_data", empty_values=()),
),
("actions", ButtonsColumn(model=Device, buttons=["edit"])),
],
)
paginate = {
"paginator_class": EnhancedPaginator,
"per_page": get_paginate_count(request),
}
RequestConfig(request, paginate).configure(device_table)
# Virtual machine table
virtual_machine_table = VirtualMachineTable(
data=instance.virtualmachine_set.prefetch_related("cluster", "role", "tenant", "primary_ip"),
orderable=False,
extra_columns=[
(
"validation_state",
tables.ConfigContextSchemaValidationStateColumn(validator, "local_context_data", empty_values=()),
),
("actions", ButtonsColumn(model=VirtualMachine, buttons=["edit"])),
],
)
paginate = {
"paginator_class": EnhancedPaginator,
"per_page": get_paginate_count(request),
}
RequestConfig(request, paginate).configure(virtual_machine_table)
return {
"config_context_table": config_context_table,
"device_table": device_table,
"virtual_machine_table": virtual_machine_table,
"active_tab": "validation",
}
class ConfigContextSchemaEditView(generic.ObjectEditView):
queryset = ConfigContextSchema.objects.all()
model_form = forms.ConfigContextSchemaForm
template_name = "extras/configcontextschema_edit.html"
class ConfigContextSchemaBulkEditView(generic.BulkEditView):
queryset = ConfigContextSchema.objects.all()
filterset = filters.ConfigContextSchemaFilterSet
table = tables.ConfigContextSchemaTable
form = forms.ConfigContextSchemaBulkEditForm
class ConfigContextSchemaDeleteView(generic.ObjectDeleteView):
queryset = ConfigContextSchema.objects.all()
class ConfigContextSchemaBulkDeleteView(generic.BulkDeleteView):
queryset = ConfigContextSchema.objects.all()
table = tables.ConfigContextSchemaTable
#
# Change logging
#
class ObjectChangeListView(generic.ObjectListView):
queryset = ObjectChange.objects.all()
filterset = filters.ObjectChangeFilterSet
filterset_form = forms.ObjectChangeFilterForm
table = tables.ObjectChangeTable
template_name = "extras/objectchange_list.html"
action_buttons = ("export",)
class ObjectChangeView(generic.ObjectView):
queryset = ObjectChange.objects.all()
def get_extra_context(self, request, instance):
related_changes = (
ObjectChange.objects.restrict(request.user, "view")
.filter(request_id=instance.request_id)
.exclude(pk=instance.pk)
)
related_changes_table = tables.ObjectChangeTable(data=related_changes[:50], orderable=False)
objectchanges = ObjectChange.objects.restrict(request.user, "view").filter(
changed_object_type=instance.changed_object_type,
changed_object_id=instance.changed_object_id,
)
next_change = objectchanges.filter(time__gt=instance.time).order_by("time").first()
prev_change = objectchanges.filter(time__lt=instance.time).order_by("-time").first()
if prev_change:
diff_added = shallow_compare_dict(
prev_change.object_data,
instance.object_data,
exclude=["last_updated"],
)
diff_removed = {x: prev_change.object_data.get(x) for x in diff_added}
else:
# No previous change; this is the initial change that added the object
diff_added = diff_removed = instance.object_data
return {
"diff_added": diff_added,
"diff_removed": diff_removed,
"next_change": next_change,
"prev_change": prev_change,
"related_changes_table": related_changes_table,
"related_changes_count": related_changes.count(),
}
class ObjectChangeLogView(View):
"""
Present a history of changes made to a particular object.
base_template: The name of the template to extend. If not provided, "<app>/<model>.html" will be used.
"""
base_template = None
def get(self, request, model, **kwargs):
# Handle QuerySet restriction of parent object if needed
if hasattr(model.objects, "restrict"):
obj = get_object_or_404(model.objects.restrict(request.user, "view"), **kwargs)
else:
obj = get_object_or_404(model, **kwargs)
# Gather all changes for this object (and its related objects)
content_type = ContentType.objects.get_for_model(model)
objectchanges = (
ObjectChange.objects.restrict(request.user, "view")
.prefetch_related("user", "changed_object_type")
.filter(
Q(changed_object_type=content_type, changed_object_id=obj.pk)
| Q(related_object_type=content_type, related_object_id=obj.pk)
)
)
objectchanges_table = tables.ObjectChangeTable(data=objectchanges, orderable=False)
# Apply the request context
paginate = {
"paginator_class": EnhancedPaginator,
"per_page": get_paginate_count(request),
}
RequestConfig(request, paginate).configure(objectchanges_table)
# Default to using "<app>/<model>.html" as the template, if it exists. Otherwise,
# fall back to using base.html.
if self.base_template is None:
self.base_template = f"{model._meta.app_label}/{model._meta.model_name}.html"
# TODO: This can be removed once an object view has been established for every model.
try:
template.loader.get_template(self.base_template)
except template.TemplateDoesNotExist:
self.base_template = "base.html"
return render(
request,
"extras/object_changelog.html",
{
"object": obj,
"table": objectchanges_table,
"base_template": self.base_template,
"active_tab": "changelog",
},
)
#
# Git repositories
#
class GitRepositoryListView(generic.ObjectListView):
queryset = GitRepository.objects.all()
# filterset = filters.GitRepositoryFilterSet
# filterset_form = forms.GitRepositoryFilterForm
table = tables.GitRepositoryTable
template_name = "extras/gitrepository_list.html"
def extra_context(self):
git_repository_content_type = ContentType.objects.get(app_label="extras", model="gitrepository")
# Get the newest results for each repository name
results = {
r.name: r
for r in JobResult.objects.filter(
obj_type=git_repository_content_type,
status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES,
)
.order_by("completed")
.defer("data")
}
return {
"job_results": results,
"datasource_contents": get_datasource_contents("extras.gitrepository"),
}
class GitRepositoryView(generic.ObjectView):
queryset = GitRepository.objects.all()
def get_extra_context(self, request, instance):
return {
"datasource_contents": get_datasource_contents("extras.gitrepository"),
}
class GitRepositoryEditView(generic.ObjectEditView):
queryset = GitRepository.objects.all()
model_form = forms.GitRepositoryForm
def alter_obj(self, obj, request, url_args, url_kwargs):
# A GitRepository needs to know the originating request when it's saved so that it can enqueue using it
obj.request = request
return super().alter_obj(obj, request, url_args, url_kwargs)
def get_return_url(self, request, obj):
if request.method == "POST":
return reverse("extras:gitrepository_result", kwargs={"slug": obj.slug})
return super().get_return_url(request, obj)
class GitRepositoryDeleteView(generic.ObjectDeleteView):
queryset = GitRepository.objects.all()
class GitRepositoryBulkImportView(generic.BulkImportView):
queryset = GitRepository.objects.all()
model_form = forms.GitRepositoryCSVForm
table = tables.GitRepositoryBulkTable
class GitRepositoryBulkEditView(generic.BulkEditView):
queryset = GitRepository.objects.all()
filterset = filters.GitRepositoryFilterSet
table = tables.GitRepositoryBulkTable
form = forms.GitRepositoryBulkEditForm
def alter_obj(self, obj, request, url_args, url_kwargs):
# A GitRepository needs to know the originating request when it's saved so that it can enqueue using it
obj.request = request
return super().alter_obj(obj, request, url_args, url_kwargs)
def extra_context(self):
return {
"datasource_contents": get_datasource_contents("extras.gitrepository"),
}
class GitRepositoryBulkDeleteView(generic.BulkDeleteView):
queryset = GitRepository.objects.all()
table = tables.GitRepositoryBulkTable
def extra_context(self):
return {
"datasource_contents": get_datasource_contents("extras.gitrepository"),
}
class GitRepositorySyncView(View):
def post(self, request, slug):
if not request.user.has_perm("extras.change_gitrepository"):
return HttpResponseForbidden()
repository = get_object_or_404(GitRepository.objects.all(), slug=slug)
# Allow execution only if a worker process is running.
if not get_worker_count(request):
messages.error(request, "Unable to run job: Celery worker process not running.")
else:
enqueue_pull_git_repository_and_refresh_data(repository, request)
return redirect("extras:gitrepository_result", slug=slug)
class GitRepositoryResultView(ContentTypePermissionRequiredMixin, View):
def get_required_permission(self):
return "extras.view_gitrepository"
def get(self, request, slug):
git_repository_content_type = ContentType.objects.get(app_label="extras", model="gitrepository")
git_repository = get_object_or_404(GitRepository.objects.all(), slug=slug)
job_result = (
JobResult.objects.filter(obj_type=git_repository_content_type, name=git_repository.name)
.order_by("-created")
.first()
)
return render(
request,
"extras/gitrepository_result.html",
{
"base_template": "extras/gitrepository.html",
"object": git_repository,
"result": job_result,
"active_tab": "result",
},
)
#
# Image attachments
#
class ImageAttachmentEditView(generic.ObjectEditView):
queryset = ImageAttachment.objects.all()
model_form = forms.ImageAttachmentForm
def alter_obj(self, imageattachment, request, args, kwargs):
if not imageattachment.present_in_database:
# Assign the parent object based on URL kwargs
model = kwargs.get("model")
imageattachment.parent = get_object_or_404(model, pk=kwargs["object_id"])
return imageattachment
def get_return_url(self, request, imageattachment):
return imageattachment.parent.get_absolute_url()
class ImageAttachmentDeleteView(generic.ObjectDeleteView):
queryset = ImageAttachment.objects.all()
def get_return_url(self, request, imageattachment):
return imageattachment.parent.get_absolute_url()
#
# Jobs
#
class JobListView(ContentTypePermissionRequiredMixin, View):
"""
Retrieve all of the available jobs from disk and the recorded JobResult (if any) for each.
"""
def get_required_permission(self):
return "extras.view_job"
def get(self, request):
jobs_dict = get_jobs()
job_content_type = ContentType.objects.get(app_label="extras", model="job")
# Get the newest results for each job name
results = {
r.name: r
for r in JobResult.objects.filter(
obj_type=job_content_type,
status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES,
)
.order_by("completed")
.defer("data")
}
# get_jobs() gives us a nested dict {grouping: {module: {"name": name, "jobs": [job, job, job]}}}
# But for presentation to the user we want to flatten this to {module_name: [job, job, job]}
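        # e.g. {"Jobs": {"my_module": {"name": "My Module", "jobs": {...}}}}
        #   becomes {"My Module": [job_instance, ...]}   (illustrative shapes only)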
modules_dict = {}
for grouping, modules in jobs_dict.items():
for module, entry in modules.items():
module_jobs = modules_dict.get(entry["name"], [])
for job_class in entry["jobs"].values():
job = job_class()
job.result = results.get(job.class_path, None)
module_jobs.append(job)
if module_jobs:
# TODO: should we sort module_jobs by job name? Currently they're in source code order
modules_dict[entry["name"]] = module_jobs
return render(
request,
"extras/job_list.html",
{
# Order the jobs listing by case-insensitive sorting of the module human-readable name
"jobs": sorted(modules_dict.items(), key=lambda kvpair: kvpair[0].lower()),
},
)
class JobView(ContentTypePermissionRequiredMixin, View):
"""
View the parameters of a Job and enqueue it if desired.
"""
def get_required_permission(self):
return "extras.view_job"
def get(self, request, class_path):
job_class = get_job(class_path)
if job_class is None:
raise Http404
job = job_class()
grouping, module, class_name = class_path.split("/", 2)
form = job.as_form(initial=request.GET)
return render(
request,
"extras/job.html",
{
"grouping": grouping,
"module": module,
"job": job,
"form": form,
},
)
def post(self, request, class_path):
if not request.user.has_perm("extras.run_job"):
return HttpResponseForbidden()
job_class = get_job(class_path)
if job_class is None:
raise Http404
job = job_class()
grouping, module, class_name = class_path.split("/", 2)
form = job.as_form(request.POST, request.FILES)
# Allow execution only if a worker process is running.
if not get_worker_count(request):
messages.error(request, "Unable to run job: Celery worker process not running.")
elif form.is_valid():
# Run the job. A new JobResult is created.
commit = form.cleaned_data.pop("_commit")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.enqueue_job(
run_job,
job.class_path,
job_content_type,
request.user,
data=form.cleaned_data,
request=copy_safe_request(request),
commit=commit,
)
return redirect("extras:job_jobresult", pk=job_result.pk)
return render(
request,
"extras/job.html",
{
"grouping": grouping,
"module": module,
"job": job,
"form": form,
},
)
class JobJobResultView(ContentTypePermissionRequiredMixin, View):
"""
Display a JobResult and its Job data.
"""
def get_required_permission(self):
return "extras.view_jobresult"
def get(self, request, pk):
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = get_object_or_404(JobResult.objects.all(), pk=pk, obj_type=job_content_type)
job_class = get_job(job_result.name)
job = job_class() if job_class else None
return render(
request,
"extras/job_jobresult.html",
{
"job": job,
"result": job_result,
},
)
#
# JobResult
#
class JobResultListView(generic.ObjectListView):
"""
List JobResults
"""
queryset = JobResult.objects.all()
filterset = filters.JobResultFilterSet
filterset_form = forms.JobResultFilterForm
table = tables.JobResultTable
action_buttons = ()
class JobResultDeleteView(generic.ObjectDeleteView):
queryset = JobResult.objects.all()
class JobResultBulkDeleteView(generic.BulkDeleteView):
queryset = JobResult.objects.all()
table = tables.JobResultTable
class JobResultView(ContentTypePermissionRequiredMixin, View):
"""
Display a JobResult and its data.
"""
def get_required_permission(self):
return "extras.view_jobresult"
def get(self, request, pk):
job_result = get_object_or_404(JobResult.objects.all(), pk=pk)
associated_record = None
job = None
related_object = job_result.related_object
if inspect.isclass(related_object) and issubclass(related_object, Job):
job = related_object()
elif related_object:
associated_record = related_object
return render(
request,
"extras/jobresult.html",
{
"associated_record": associated_record,
"job": job,
"object": job_result,
"result": job_result,
},
)
class ExportTemplateListView(generic.ObjectListView):
queryset = ExportTemplate.objects.all()
table = tables.ExportTemplateTable
filterset = filters.ExportTemplateFilterSet
filterset_form = forms.ExportTemplateFilterForm
action_buttons = ("add",)
class ExportTemplateView(generic.ObjectView):
queryset = ExportTemplate.objects.all()
class ExportTemplateEditView(generic.ObjectEditView):
queryset = ExportTemplate.objects.all()
model_form = forms.ExportTemplateForm
class ExportTemplateDeleteView(generic.ObjectDeleteView):
queryset = ExportTemplate.objects.all()
class ExportTemplateBulkDeleteView(generic.BulkDeleteView):
queryset = ExportTemplate.objects.all()
table = tables.ExportTemplateTable
class CustomLinkListView(generic.ObjectListView):
queryset = CustomLink.objects.all()
table = tables.CustomLinkTable
filterset = filters.CustomLinkFilterSet
filterset_form = forms.CustomLinkFilterForm
action_buttons = ("add",)
class CustomLinkView(generic.ObjectView):
queryset = CustomLink.objects.all()
class CustomLinkEditView(generic.ObjectEditView):
queryset = CustomLink.objects.all()
model_form = forms.CustomLinkForm
class CustomLinkDeleteView(generic.ObjectDeleteView):
queryset = CustomLink.objects.all()
class CustomLinkBulkDeleteView(generic.BulkDeleteView):
queryset = CustomLink.objects.all()
table = tables.CustomLinkTable
class WebhookListView(generic.ObjectListView):
queryset = Webhook.objects.all()
table = tables.WebhookTable
filterset = filters.WebhookFilterSet
filterset_form = forms.WebhookFilterForm
action_buttons = ("add",)
class WebhookView(generic.ObjectView):
queryset = Webhook.objects.all()
def get_extra_context(self, request, instance):
return {"content_types": instance.content_types.order_by("app_label", "model")}
class WebhookEditView(generic.ObjectEditView):
queryset = Webhook.objects.all()
model_form = forms.WebhookForm
class WebhookDeleteView(generic.ObjectDeleteView):
queryset = Webhook.objects.all()
class WebhookBulkDeleteView(generic.BulkDeleteView):
queryset = Webhook.objects.all()
table = tables.WebhookTable
#
# Custom statuses
#
class StatusListView(generic.ObjectListView):
"""List `Status` objects."""
queryset = Status.objects.all()
filterset = filters.StatusFilterSet
filterset_form = forms.StatusFilterForm
table = tables.StatusTable
class StatusEditView(generic.ObjectEditView):
"""Edit a single `Status` object."""
queryset = Status.objects.all()
model_form = forms.StatusForm
class StatusBulkEditView(generic.BulkEditView):
"""Edit multiple `Status` objects."""
queryset = Status.objects.all()
table = tables.StatusTable
form = forms.StatusBulkEditForm
class StatusBulkDeleteView(generic.BulkDeleteView):
"""Delete multiple `Status` objects."""
queryset = Status.objects.all()
table = tables.StatusTable
class StatusDeleteView(generic.ObjectDeleteView):
"""Delete a single `Status` object."""
queryset = Status.objects.all()
class StatusBulkImportView(generic.BulkImportView):
"""Bulk CSV import of multiple `Status` objects."""
queryset = Status.objects.all()
model_form = forms.StatusCSVForm
table = tables.StatusTable
class StatusView(generic.ObjectView):
"""Detail view for a single `Status` object."""
queryset = Status.objects.all()
def get_extra_context(self, request, instance):
"""Return ordered content types."""
return {"content_types": instance.content_types.order_by("app_label", "model")}
#
# Relationship
#
class RelationshipListView(generic.ObjectListView):
queryset = Relationship.objects.all()
filterset = filters.RelationshipFilterSet
filterset_form = forms.RelationshipFilterForm
table = tables.RelationshipTable
action_buttons = "add"
class RelationshipEditView(generic.ObjectEditView):
queryset = Relationship.objects.all()
model_form = forms.RelationshipForm
class RelationshipDeleteView(generic.ObjectDeleteView):
queryset = Relationship.objects.all()
class RelationshipAssociationListView(generic.ObjectListView):
queryset = RelationshipAssociation.objects.all()
filterset = filters.RelationshipAssociationFilterSet
filterset_form = forms.RelationshipAssociationFilterForm
table = tables.RelationshipAssociationTable
action_buttons = ()
class RelationshipAssociationDeleteView(generic.ObjectDeleteView):
queryset = RelationshipAssociation.objects.all()
class GraphQLQueryListView(generic.ObjectListView):
queryset = GraphQLQuery.objects.all()
table = tables.GraphQLQueryTable
filterset = filters.GraphQLQueryFilterSet
filterset_form = forms.GraphQLQueryFilterForm
action_buttons = ("add",)
class GraphQLQueryView(generic.ObjectView):
queryset = GraphQLQuery.objects.all()
class GraphQLQueryEditView(generic.ObjectEditView):
queryset = GraphQLQuery.objects.all()
model_form = forms.GraphQLQueryForm
class GraphQLQueryDeleteView(generic.ObjectDeleteView):
queryset = GraphQLQuery.objects.all()
class GraphQLQueryBulkDeleteView(generic.BulkDeleteView):
queryset = GraphQLQuery.objects.all()
table = tables.GraphQLQueryTable
class ComputedFieldListView(generic.ObjectListView):
queryset = ComputedField.objects.all()
table = tables.ComputedFieldTable
filterset = filters.ComputedFieldFilterSet
filterset_form = forms.ComputedFieldFilterForm
action_buttons = ("add",)
class ComputedFieldView(generic.ObjectView):
queryset = ComputedField.objects.all()
class ComputedFieldEditView(generic.ObjectEditView):
queryset = ComputedField.objects.all()
model_form = forms.ComputedFieldForm
class ComputedFieldDeleteView(generic.ObjectDeleteView):
queryset = ComputedField.objects.all()
class ComputedFieldBulkDeleteView(generic.BulkDeleteView):
queryset = ComputedField.objects.all()
table = tables.ComputedFieldTable
```
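The validation view above builds a single `Draft7Validator` from the schema and reuses it for every record. A minimal standalone sketch of that pattern (the schema and records below are invented placeholders, not Nautobot objects):

```python
from jsonschema.validators import Draft7Validator

# Placeholder schema standing in for ConfigContextSchema.data_schema
schema = {"type": "object", "properties": {"ntp_servers": {"type": "array"}}}
validator = Draft7Validator(schema)  # built once, reused for every row

records = [{"ntp_servers": ["10.1.1.1"]}, {"ntp_servers": "not-a-list"}]
for record in records:
    # is_valid() is the cheap per-record check a validation-state column can rely on
    print(validator.is_valid(record))  # True, then False
```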
#### File: ipam/tests/test_querysets.py
```python
import netaddr
from nautobot.ipam.models import Prefix, Aggregate, IPAddress, RIR
from nautobot.utilities.testing import TestCase
class AggregateQuerysetTestCase(TestCase):
queryset = Aggregate.objects.all()
@classmethod
def setUpTestData(cls):
rir = RIR.objects.create(name="RIR 1", slug="rir-1")
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.0.0/16"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.1.0/24"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.2.0/24"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.3.0/24"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.3.192/28"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.3.208/28"), rir=rir)
Aggregate.objects.create(prefix=netaddr.IPNetwork("192.168.3.224/28"), rir=rir)
def test_net_equals(self):
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16")).count(), 1)
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("172.16.17.32/16")).count(), 0)
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("192.168.127.12/32")).count(), 0)
def test_net_contained(self):
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.0.0.0/8")).count(), 7)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.0.0/16")).count(), 6)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.0/24")).count(), 3)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.1.0/24")).count(), 0)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.192/28")).count(), 0)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.192/32")).count(), 0)
def test_net_contained_or_equal(self):
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.0.0.0/8")).count(), 7)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.0.0/16")).count(), 7)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.0/24")).count(), 4)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.1.0/24")).count(), 1)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.192/28")).count(), 1)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.192/32")).count(), 0)
def test_net_contains(self):
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.0.0/8")).count(), 0)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.0.0/16")).count(), 0)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.0/24")).count(), 1)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/28")).count(), 2)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/30")).count(), 3)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/32")).count(), 3)
def test_net_contains_or_equals(self):
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.0.0/8")).count(), 0)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.0.0/16")).count(), 1)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.0/24")).count(), 2)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/28")).count(), 3)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/30")).count(), 3)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/32")).count(), 3)
def test_get_by_prefix(self):
prefix = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
self.assertEqual(self.queryset.get(prefix="192.168.0.0/16"), prefix)
def test_get_by_prefix_fails(self):
_ = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
with self.assertRaises(Aggregate.DoesNotExist):
self.queryset.get(prefix="192.168.3.0/16")
def test_filter_by_prefix(self):
prefix = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
self.assertEqual(self.queryset.filter(prefix="192.168.0.0/16")[0], prefix)
class IPAddressQuerySet(TestCase):
queryset = IPAddress.objects.all()
@classmethod
def setUpTestData(cls):
IPAddress.objects.create(address="10.0.0.1/24", vrf=None, tenant=None)
IPAddress.objects.create(address="10.0.0.2/24", vrf=None, tenant=None)
IPAddress.objects.create(address="10.0.0.3/24", vrf=None, tenant=None)
IPAddress.objects.create(address="10.0.0.4/24", vrf=None, tenant=None)
IPAddress.objects.create(address="10.0.0.1/25", vrf=None, tenant=None)
IPAddress.objects.create(address="2001:db8::1/64", vrf=None, tenant=None)
IPAddress.objects.create(address="2001:db8::2/64", vrf=None, tenant=None)
IPAddress.objects.create(address="2001:db8::3/64", vrf=None, tenant=None)
def test_ip_family(self):
self.assertEqual(self.queryset.ip_family(4).count(), 5)
self.assertEqual(self.queryset.ip_family(6).count(), 3)
def test_net_host_contained(self):
self.assertEqual(self.queryset.net_host_contained(netaddr.IPNetwork("10.0.0.0/24")).count(), 5)
self.assertEqual(self.queryset.net_host_contained(netaddr.IPNetwork("10.0.0.0/30")).count(), 4)
self.assertEqual(self.queryset.net_host_contained(netaddr.IPNetwork("10.0.0.0/31")).count(), 2)
self.assertEqual(self.queryset.net_host_contained(netaddr.IPNetwork("10.0.10.0/24")).count(), 0)
def test_net_in(self):
args = ["10.0.0.1/24"]
self.assertEqual(self.queryset.net_in(args).count(), 1)
args = ["10.0.0.1"]
self.assertEqual(self.queryset.net_in(args).count(), 2)
args = ["10.0.0.1/24", "10.0.0.1/25"]
self.assertEqual(self.queryset.net_in(args).count(), 2)
def test_get_by_address(self):
address = self.queryset.net_in(["10.0.0.1/24"])[0]
self.assertEqual(self.queryset.get(address="10.0.0.1/24"), address)
def test_filter_by_address(self):
address = self.queryset.net_in(["10.0.0.1/24"])[0]
self.assertEqual(self.queryset.filter(address="10.0.0.1/24")[0], address)
def test_string_search_parse_as_network_string(self):
"""
Tests that the parsing underlying `string_search` behaves as expected.
"""
tests = {
"10": "10.0.0.0/8",
"10.": "10.0.0.0/8",
"10.0": "10.0.0.0/16",
"10.0.0.4": "10.0.0.4/32",
"10.0.0": "10.0.0.0/24",
"10.0.0.4/24": "10.0.0.4/32",
"10.0.0.4/24": "10.0.0.4/32",
"2001": "2001::/16",
"2001:": "2001::/16",
"2001::": "2001::/16",
"2001:db8:": "2001:db8::/32",
"2001:0db8::": "2001:db8::/32",
"2001:db8:abcd:0012::0/64": "2001:db8:abcd:12::/128",
"2001:db8::1/65": "2001:db8::1/128",
"fe80": "fe80::/16",
"fe80::": "fe80::/16",
"fe80::46b:a212:1132:3615": "fe80::46b:a212:1132:3615/128",
"fe80::76:88e9:12aa:334d": "fe80::76:88e9:12aa:334d/128",
}
for test, expected in tests.items():
self.assertEqual(str(self.queryset._parse_as_network_string(test)), expected)
def test_string_search(self):
search_terms = {
"10": 5,
"10.0.0.1": 2,
"10.0.0.1/24": 2,
"10.0.0.1/25": 2,
"10.0.0.2": 1,
"11": 0,
"2001": 3,
"2001::": 3,
"2001:db8::": 3,
"2001:db8::1": 1,
"fe80::": 0,
}
for term, cnt in search_terms.items():
self.assertEqual(self.queryset.string_search(term).count(), cnt)
class PrefixQuerysetTestCase(TestCase):
queryset = Prefix.objects.all()
@classmethod
def setUpTestData(cls):
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.0.0/16"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.1.0/24"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.2.0/24"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.3.0/24"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.3.192/28"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.3.208/28"))
Prefix.objects.create(prefix=netaddr.IPNetwork("192.168.3.224/28"))
Prefix.objects.create(prefix=netaddr.IPNetwork("fd78:da4f:e596:c217::/64"))
Prefix.objects.create(prefix=netaddr.IPNetwork("fd78:da4f:e596:c217::/120"))
Prefix.objects.create(prefix=netaddr.IPNetwork("fd78:da4f:e596:c217::/122"))
def test_net_equals(self):
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16")).count(), 1)
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("172.16.17.32/16")).count(), 0)
self.assertEqual(self.queryset.net_equals(netaddr.IPNetwork("172.16.17.32/32")).count(), 0)
def test_net_contained(self):
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.0.0.0/8")).count(), 7)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.0.0/16")).count(), 6)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.0/24")).count(), 3)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.1.0/24")).count(), 0)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.192/28")).count(), 0)
self.assertEqual(self.queryset.net_contained(netaddr.IPNetwork("192.168.3.192/32")).count(), 0)
def test_net_contained_or_equal(self):
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.0.0.0/8")).count(), 7)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.0.0/16")).count(), 7)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.0/24")).count(), 4)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.1.0/24")).count(), 1)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.192/28")).count(), 1)
self.assertEqual(self.queryset.net_contained_or_equal(netaddr.IPNetwork("192.168.3.192/32")).count(), 0)
def test_net_contains(self):
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.0.0/8")).count(), 0)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.0.0/16")).count(), 0)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.0/24")).count(), 1)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/28")).count(), 2)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/30")).count(), 3)
self.assertEqual(self.queryset.net_contains(netaddr.IPNetwork("192.168.3.192/32")).count(), 3)
def test_net_contains_or_equals(self):
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.0.0/8")).count(), 0)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.0.0/16")).count(), 1)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.0/24")).count(), 2)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/28")).count(), 3)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/30")).count(), 3)
self.assertEqual(self.queryset.net_contains_or_equals(netaddr.IPNetwork("192.168.3.192/32")).count(), 3)
def test_annotate_tree(self):
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.0.0/16").parents, 0)
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.0.0/16").children, 6)
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.3.0/24").parents, 1)
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.3.0/24").children, 3)
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.3.224/28").parents, 2)
self.assertEqual(self.queryset.annotate_tree().get(prefix="192.168.3.224/28").children, 0)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/64").parents, 0)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/64").children, 2)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/120").parents, 1)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/120").children, 1)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/122").parents, 2)
self.assertEqual(self.queryset.annotate_tree().get(prefix="fd78:da4f:e596:c217::/122").children, 0)
def test_get_by_prefix(self):
prefix = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
self.assertEqual(self.queryset.get(prefix="192.168.0.0/16"), prefix)
def test_get_by_prefix_fails(self):
_ = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
with self.assertRaises(Prefix.DoesNotExist):
self.queryset.get(prefix="192.168.3.0/16")
def test_filter_by_prefix(self):
prefix = self.queryset.net_equals(netaddr.IPNetwork("192.168.0.0/16"))[0]
self.assertEqual(self.queryset.filter(prefix="192.168.0.0/16")[0], prefix)
```
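The `net_contained` / `net_contains` assertions above follow ordinary `netaddr` containment semantics. A tiny standalone sketch (no Nautobot models involved) showing which direction each term refers to:

```python
import netaddr

outer = netaddr.IPNetwork("192.168.0.0/16")
inner = netaddr.IPNetwork("192.168.3.0/24")

# inner is contained by outer, so net_contained(outer) matches inner ...
print(inner in outer)  # True
# ... while net_contains(inner) matches outer, not the reverse.
print(outer in inner)  # False
```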
#### File: tenancy/tests/test_api.py
```python
from django.urls import reverse
from nautobot.tenancy.models import Tenant, TenantGroup
from nautobot.utilities.testing import APITestCase, APIViewTestCases
class AppTest(APITestCase):
def test_root(self):
url = reverse("tenancy-api:api-root")
response = self.client.get("{}?format=api".format(url), **self.header)
self.assertEqual(response.status_code, 200)
class TenantGroupTest(APIViewTestCases.APIViewTestCase):
model = TenantGroup
brief_fields = ["_depth", "display", "id", "name", "slug", "tenant_count", "url"]
bulk_update_data = {
"description": "New description",
}
@classmethod
def setUpTestData(cls):
parent_tenant_groups = (
TenantGroup.objects.create(name="Parent Tenant Group 1", slug="parent-tenant-group-1"),
TenantGroup.objects.create(name="Parent Tenant Group 2", slug="parent-tenant-group-2"),
)
TenantGroup.objects.create(name="Tenant Group 1", slug="tenant-group-1", parent=parent_tenant_groups[0])
TenantGroup.objects.create(name="Tenant Group 2", slug="tenant-group-2", parent=parent_tenant_groups[0])
TenantGroup.objects.create(name="Tenant Group 3", slug="tenant-group-3", parent=parent_tenant_groups[0])
cls.create_data = [
{
"name": "Tenant Group 4",
"slug": "tenant-group-4",
"parent": parent_tenant_groups[1].pk,
},
{
"name": "Tenant Group 5",
"slug": "tenant-group-5",
"parent": parent_tenant_groups[1].pk,
},
{
"name": "Tenant Group 6",
"slug": "tenant-group-6",
"parent": parent_tenant_groups[1].pk,
},
]
class TenantTest(APIViewTestCases.APIViewTestCase):
model = Tenant
brief_fields = ["display", "id", "name", "slug", "url"]
bulk_update_data = {
"description": "New description",
}
@classmethod
def setUpTestData(cls):
tenant_groups = (
TenantGroup.objects.create(name="Tenant Group 1", slug="tenant-group-1"),
TenantGroup.objects.create(name="Tenant Group 2", slug="tenant-group-2"),
)
Tenant.objects.create(name="Tenant 1", slug="tenant-1", group=tenant_groups[0])
Tenant.objects.create(name="Tenant 2", slug="tenant-2", group=tenant_groups[0])
Tenant.objects.create(name="Tenant 3", slug="tenant-3", group=tenant_groups[0])
cls.create_data = [
{
"name": "Tenant 4",
"slug": "tenant-4",
"group": tenant_groups[1].pk,
},
{
"name": "Tenant 5",
"slug": "tenant-5",
"group": tenant_groups[1].pk,
},
{
"name": "Tenant 6",
"slug": "tenant-6",
"group": tenant_groups[1].pk,
},
]
```
#### File: utilities/testing/__init__.py
```python
import time
from celery.contrib.testing.worker import start_worker
from django.test import tag, TransactionTestCase as _TransactionTestCase
from nautobot.core.celery import app
from .api import *
from .utils import *
from .views import *
@tag("unit")
class TransactionTestCase(_TransactionTestCase):
"""
Base test case class using the TransactionTestCase for unit testing
"""
class CeleryTestCase(TransactionTestCase):
"""
Test class that provides a running Celery worker for the duration of the test case
"""
@classmethod
def setUpClass(cls):
"""Start a celery worker"""
super().setUpClass()
# Special namespace loading of methods needed by start_worker, per the celery docs
app.loader.import_module("celery.contrib.testing.tasks")
cls.clear_worker()
cls.celery_worker = start_worker(app, concurrency=1)
cls.celery_worker.__enter__()
@classmethod
def tearDownClass(cls):
"""Stop the celery worker"""
super().tearDownClass()
cls.celery_worker.__exit__(None, None, None)
@staticmethod
def clear_worker():
"""Purge any running or queued tasks"""
app.control.purge()
@classmethod
def wait_on_active_tasks(cls):
"""Wait on all active tasks to finish before returning"""
# TODO(john): admittedly, this is not great, but it seems the standard
# celery APIs for inspecting the worker, looping through all active tasks,
        # and calling `.get()` on them are not working when the worker is in solo mode.
# Needs more investigation and until then, these tasks run very quickly, so
# simply delaying the test execution provides enough time for them to complete.
time.sleep(1)
```
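A minimal sketch of how `CeleryTestCase` above is meant to be used; the enqueued task is a placeholder assumption, shown only to indicate where the helpers fit:

```python
# Hypothetical example -- `example_task` is a placeholder, not a real Nautobot task.
from nautobot.utilities.testing import CeleryTestCase


class ExampleWorkerTest(CeleryTestCase):
    def test_task_completes(self):
        # example_task.delay()           # enqueue work on the shared Celery app
        self.wait_on_active_tasks()      # crude wait for the worker (see TODO above)
        # ...assert on the task's side effects here...
```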
|
{
"source": "jfacoustic/MyTwitterBot",
"score": 3
}
|
#### File: src/agent/Bot.py
```python
import tweepy
import pandas as pd
import time
import numpy as np
try:
from key import ConsumerKey, ConsumerSecret
from key import AccessToken, AccessTokenSecret
except ImportError:
from agent.key import ConsumerKey, ConsumerSecret
from agent.key import AccessToken, AccessTokenSecret
import os
import sys
import inspect
from requests_oauthlib import OAuth1Session
import json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils import get_real_friends, get_date, get_date_and_time
from twitter.TweetGenerator import TweetGenerator
from twitter.functions import TweetValid
from text_processing.functions import file_len
class Bot():
"""
The autonomous agent behind the twitter account.
This class assumes that you have the file "key.py"
in the folder "agent".
In "key.py" I assume you have the variables:
"ConsumerKey" , "ConsumerSecret", "AccessToken"
and "AccessTokenSecret". For more info on how to get
the value of these variables go watch this video on
youtube https://www.youtube.com/watch?v=M7MqML2ZVOY
:type corpus: str
:type friends: list of str
    :type commentary: str
:type black_list: list
:type local: str
:type hashtag_search: None or list
"""
def __init__(self,
corpus,
friends=[],
commentary="None",
black_list=[],
local="world",
hashtag_search=None):
self.black_list = black_list
self.local = local
self.friends = friends
self.corpus = corpus
auth = tweepy.OAuthHandler(ConsumerKey, ConsumerSecret)
auth.set_access_token(AccessToken, AccessTokenSecret)
self.api = tweepy.API(auth)
entry = [("Date", [get_date()]),
("Followers", [len(self.api.followers_ids())]),
("Following", [len(self.api.friends_ids())]),
("Commentary", [commentary])]
self.df = pd.DataFrame.from_items(entry)
self.log()
if hashtag_search is None:
self.hashtag_search = self.get_trends(self.local)
else:
self.hashtag_search = hashtag_search + self.get_trends(self.local)
def clear_follow(self,
Realfriends=get_real_friends()):
"""
        Method to unfollow everyone the bot currently follows
        who is not in the list "Realfriends".
:type Realfriends: list of int
"""
friends = self.api.friends_ids()
for friend in friends:
if friend not in Realfriends:
self.api.destroy_friendship(friend)
def log(self):
"""
        Method to save the Twitter status in a csv file for
future reference.
"""
log_folder = os.path.join(os.getcwd(), "twitter_log")
csv_name = os.path.join(log_folder, "stats.csv")
if not os.path.exists(log_folder):
os.makedirs(log_folder)
try:
old_df = pd.read_csv(csv_name)
new_df = old_df.append(self.df, ignore_index=True)
new_df.to_csv(csv_name, index=False)
except OSError:
self.df.to_csv(csv_name, index=False)
def get_local_identifier(self):
"""
        Method to get the dict mapping local name to identifier.
        The identifier is a WOEID (Where On Earth IDentifier).
:rtype: dict
"""
WOEID = {"world": "1",
"EUA": "23424977",
"Brazil": "23424768"}
return WOEID
def get_trends(self, local):
"""
Method to get the trending hashtags.
:type local: str
:rtype: list of str
"""
session_string = "https://api.twitter.com/1.1/trends/place.json?id="
local_id = self.get_local_identifier()[local]
session_string += local_id
session = OAuth1Session(ConsumerKey,
ConsumerSecret,
AccessToken,
AccessTokenSecret)
response = session.get(session_string)
if response.__dict__['status_code'] == 200:
local_trends = json.loads(response.text)[0]["trends"]
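            # Each trend dict looks roughly like {"name": "#SomeTag", ...} (illustrative);
            # only the names starting with '#' are kept below.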
hashtags = [trend["name"]
for trend in local_trends if trend["name"][0] == '#']
else:
hashtags = []
return hashtags
def curator_writer(self,
num_tweets,
show_tweets=10,
num_hashtags=5):
"""
Method to write "num_tweets" tweets. Here I use a loop
to get an input to the user to choose one tweet.
At the end of the loop the method write a txt file with
all the tweets. We use the trending hashtags and the bot's
frieds to compose the tweet.
:type num_tweets: int
:type num_hashtags: int
:rtype: str
"""
saved_tweets = []
tg = TweetGenerator(text_path=self.corpus,
black_list=self.black_list,
train=False)
while len(saved_tweets) < num_tweets:
print(('=-=' * 5))
print("You have {} saved tweets so far.".format(len(saved_tweets)))
print("Type the beginning of a tweet")
print(('=-=' * 5))
first_part = input('> ')
if not TweetValid(first_part):
first_part = '<eos>'
print("Too long!!\nstarting text = <eos>")
hashtags = self.get_trends(self.local)
hashtags_and_friends = self.friends + hashtags
h_and_f_size = len(hashtags_and_friends)
if h_and_f_size < num_hashtags:
num_hashtags = max(len(hashtags_and_friends) - 1, 1)
print("Picking only {} hashtags".format(num_hashtags))
if h_and_f_size > 0:
choice = np.random.choice(h_and_f_size, num_hashtags)
my_hashtags = [hashtags_and_friends[i] for i in choice]
else:
my_hashtags = []
tweets = tg.generate_tweet_list(number_of_tweets=show_tweets,
starting_text=first_part,
hashtag_list=my_hashtags)
for i, tweet in enumerate(tweets):
print("{0}) {1}".format(i, tweet))
user_choice = -1
number_of_tweets = len(tweets)
while True:
print(('=-=' * 5))
print("Choose one tweet!")
print("Type a number from 0 to {}".format(number_of_tweets - 1))
print("Or type -99 to generate other tweets")
print(('=-=' * 5))
user_choice = input('> ')
try:
user_choice = int(user_choice)
except ValueError:
print("Oops! That was no valid number.")
if user_choice == -99 or user_choice in range(number_of_tweets):
break
if user_choice >= 0:
saved_tweets.append(tweets[user_choice])
draft_folder = os.path.join(os.getcwd(), "twitter_draft")
filename = os.path.join(draft_folder, get_date_and_time() + ".txt")
if not os.path.exists(draft_folder):
os.makedirs(draft_folder)
with open(filename, "w") as f:
for tweet in saved_tweets:
f.write(tweet + "\n")
return filename
def post_from_txt(self,
text_path,
minutes_paused=2,
num_tweets_to_see=51):
"""
        Method to post all the tweets from the txt file in "text_path".
        After each tweet is posted the bot starts liking tweets
        that carry the same hashtags as the ones in the list
        self.hashtag_search; it also retweets them and follows their
        authors. After that it pauses for "minutes_paused" minutes
        (default is 2 minutes).
:type text_path: str
:type minutes_paused: int
:type num_tweets_to_see: int
"""
seconds_pause = minutes_paused * 60
num_tweets = file_len(text_path)
with open(text_path) as file:
for i, tweet in enumerate(file):
if TweetValid(tweet):
print("Posting {0} from {1}".format(i, num_tweets))
self.api.update_status(tweet)
choice = np.random.choice(len(self.hashtag_search), 1)[0]
current_hashtag = self.hashtag_search[choice]
print("\ncurrent hashtag is {}".format(current_hashtag))
count = 0
for tweet in tweepy.Cursor(self.api.search,
q=current_hashtag).items():
print("\ncount = {}".format(count))
if count < num_tweets_to_see:
try:
# Favorite the tweet
tweet.favorite()
print('Favorited the tweet')
# Follow the user who tweeted
tweet.user.follow()
print('Followed the user')
if count % 25 == 0:
tweet.retweet()
print('Retweeted the tweet')
print("\nWaiting {} minutes".format(minutes_paused))
time.sleep(seconds_pause)
count += 1
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
print("No more tweets for the hashtag = {}".format(current_hashtag))
break
else:
print("\ncount = {}, above upper bound".format(count))
break
def write(self,
num_tweets,
first_part='<eos>',
num_hashtags=5,
minutes_pause=60,
publish=True):
"""
Method to write "num_tweets" tweets, using the string
"first part" as the begining of the tweet and
using "num_hashtags" hashtags
Each tweet is posted after a pause of
"minutes_pause" minutes (default is one hour).
:type num_tweets: int
:type num_hashtags: int
:type minutes_pause: int
:type publish: boolean
"""
seconds_pause = minutes_pause * 60
tg = TweetGenerator(text_path=self.corpus,
black_list=self.black_list,
train=False)
for i in range(num_tweets):
trends = self.api.trends_place(1)[0]['trends']
TrendsNames = [trend['name'] for trend in trends]
hashtags = [words for words in TrendsNames if words[0] == "#"]
if len(hashtags) < num_hashtags:
num_hashtags = max(len(hashtags) - 1, 1)
print("Picking only {} hashtags".format(num_hashtags))
choice = np.random.choice(len(hashtags), num_hashtags)
my_hashtags = [hashtags[i] for i in choice]
tweet = tg.generate_tweet_list(starting_text=first_part,
hashtag_list=my_hashtags)[0]
print("\nThe {} tweet is:\n".format(i), tweet)
if publish:
self.api.update_status(tweet)
print("Waiting {} minutes".format(minutes_pause))
time.sleep(seconds_pause)
```
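A hedged end-to-end sketch of driving the `Bot` class above; the corpus path and friend handle are placeholders, and valid credentials in `agent/key.py` are assumed:

```python
# Illustrative only -- paths and handles are assumptions, not repository defaults.
from agent.Bot import Bot

bot = Bot(corpus="data/",              # path to the text corpus used by TweetGenerator
          friends=["@a_real_friend"],  # handles mixed into the hashtag pool
          local="world")               # WOEID key: "world", "EUA" or "Brazil"

draft = bot.curator_writer(num_tweets=3)    # interactive curation, returns a txt path
bot.post_from_txt(draft, minutes_paused=2)  # post each line, then like/retweet/follow
```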
#### File: src/test/BotTest.py
```python
import unittest
import os
import sys
import inspect
import shutil
import pandas as pd
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils import run_test
from agent.Bot import Bot
class BotTest(unittest.TestCase):
"""
    Class that tests the Twitter Bot.
"""
@classmethod
def setUpClass(cls):
cls.csv_path = os.path.join(currentdir, "twitter_log", "stats.csv")
cls.data_path = os.path.join(parentdir, "data")
@classmethod
def tearDown(cls):
check_path = os.path.join(currentdir, "checkpoints")
logs_path = os.path.join(currentdir, "twitter_log")
if os.path.exists(check_path):
shutil.rmtree(check_path)
if os.path.exists(logs_path):
shutil.rmtree(logs_path)
def test_log(self):
"""
        Every time we create a bot it saves the
        Twitter status in a csv file. This test checks
        that the correct information is saved.
"""
Bot(corpus=BotTest.data_path)
self.assertTrue(os.path.exists(BotTest.csv_path),
msg="Not writing csv for the first time")
Bot(corpus=BotTest.data_path)
df = pd.read_csv(BotTest.csv_path)
self.assertEqual(df.shape, (2, 4),
msg="Wrong Shape\n {}".format(df))
if __name__ == "__main__":
key_path = os.path.join(parentdir, "agent", "key.py")
if os.path.exists(key_path):
run_test(BotTest,
"\n=== Running test for the Twitter Bot ===\n")
else:
print("No file in the path \n {}".format(key_path))
```
|
{
"source": "jfagn/pycaret",
"score": 3
}
|
#### File: pycaret/pycaret/nlp.py
```python
def setup(
data,
target=None,
custom_stopwords=None,
html=True,
session_id=None,
log_experiment=False,
experiment_name=None,
log_plots=False,
log_data=False,
verbose=True,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It takes
one mandatory parameter only: ``data``. All the other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
    data: pandas.DataFrame or list
        pandas.DataFrame with shape (n_samples, n_features) or a list.
    target: str
        When ``data`` is a pandas.DataFrame, name of the column containing text.
custom_stopwords: list, default = None
List of stopwords.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
verbose: bool, default = True
When set to False, Information grid is not printed.
Returns:
Global variables that can be changed using the ``set_config`` function.
Warnings
--------
- pycaret.nlp requires following language models:
``python -m spacy download en_core_web_sm``
``python -m textblob.download_corpora``
"""
# exception checking
import sys
from pycaret.utils import __version__
ver = __version__
import logging
# create logger
global logger
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("PyCaret NLP Module")
logger.info("version " + str(ver))
logger.info("Initializing setup()")
# generate USI for mlflow tracking
import secrets
global USI
USI = secrets.token_hex(nbytes=2)
logger.info("USI: " + str(USI))
try:
data_shape = data.shape
except:
data_shape = len(data)
logger.info(
"""setup(data={}, target={}, custom_stopwords={}, html={}, session_id={}, log_experiment={},
experiment_name={}, log_plots={}, log_data={}, verbose={})""".format(
str(data_shape),
str(target),
str(custom_stopwords),
str(html),
str(session_id),
str(log_experiment),
str(experiment_name),
str(log_plots),
str(log_data),
str(verbose),
)
)
# logging environment and libraries
logger.info("Checking environment")
from platform import python_version, platform, python_build, machine
try:
logger.info("python_version: " + str(python_version()))
except:
logger.warning("cannot find platform.python_version")
try:
logger.info("python_build: " + str(python_build()))
except:
logger.warning("cannot find platform.python_build")
try:
logger.info("machine: " + str(machine()))
except:
logger.warning("cannot find platform.machine")
try:
logger.info("platform: " + str(platform()))
except:
logger.warning("cannot find platform.platform")
try:
import psutil
logger.info("Memory: " + str(psutil.virtual_memory()))
logger.info("Physical Core: " + str(psutil.cpu_count(logical=False)))
logger.info("Logical Core: " + str(psutil.cpu_count(logical=True)))
except:
logger.warning(
"cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging. "
)
logger.info("Checking libraries")
try:
from pandas import __version__
logger.info("pd==" + str(__version__))
except:
logger.warning("pandas not found")
try:
from numpy import __version__
logger.info("numpy==" + str(__version__))
except:
logger.warning("numpy not found")
try:
import warnings
warnings.filterwarnings("ignore")
from gensim import __version__
logger.info("gensim==" + str(__version__))
except:
logger.warning("gensim not found")
try:
from spacy import __version__
logger.info("spacy==" + str(__version__))
except:
logger.warning("spacy not found")
try:
from nltk import __version__
logger.info("nltk==" + str(__version__))
except:
logger.warning("nltk not found")
try:
from textblob import __version__
logger.info("textblob==" + str(__version__))
except:
logger.warning("textblob not found")
try:
from pyLDAvis import __version__
logger.info("pyLDAvis==" + str(__version__))
except:
logger.warning("pyLDAvis not found")
try:
from wordcloud import __version__
logger.info("wordcloud==" + str(__version__))
except:
logger.warning("wordcloud not found")
try:
from mlflow.version import VERSION
import warnings
warnings.filterwarnings("ignore")
logger.info("mlflow==" + str(VERSION))
except:
logger.warning("mlflow not found")
logger.info("Checking Exceptions")
# run_time
import datetime, time
runtime_start = time.time()
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
"""
error handling starts here
"""
# checking data type
if hasattr(data, "shape") is False:
if type(data) is not list:
sys.exit(
"(Type Error): data passed must be of type pandas.DataFrame or list"
)
# if dataframe is passed then target is mandatory
if hasattr(data, "shape"):
if target is None:
sys.exit(
"(Type Error): When pandas.Dataframe is passed as data param. Target column containing text must be specified in target param."
)
# checking target parameter
if target is not None:
if target not in data.columns:
sys.exit(
"(Value Error): Target parameter doesnt exist in the data provided."
)
# custom stopwords checking
if custom_stopwords is not None:
if type(custom_stopwords) is not list:
sys.exit("(Type Error): custom_stopwords must be of list type.")
# checking session_id
if session_id is not None:
if type(session_id) is not int:
sys.exit("(Type Error): session_id parameter must be an integer.")
# check if spacy is loaded
try:
import spacy
sp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
except:
sys.exit(
"(Type Error): spacy english model is not yet downloaded. See the documentation of setup to see installation guide."
)
# html
if type(html) is not bool:
sys.exit("(Type Error): html parameter only accepts True or False.")
# log_experiment
if type(log_experiment) is not bool:
sys.exit("(Type Error): log_experiment parameter only accepts True or False.")
# log_plots
if type(log_plots) is not bool:
sys.exit("(Type Error): log_plots parameter only accepts True or False.")
# log_data
if type(log_data) is not bool:
sys.exit("(Type Error): log_data parameter only accepts True or False.")
# verbose
if type(verbose) is not bool:
sys.exit("(Type Error): verbose parameter only accepts True or False.")
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
# global html_param
global html_param
# create html_param
html_param = html
"""
generate monitor starts
"""
logger.info("Preparing display monitor")
# progress bar
max_steps = 11
total_steps = 9
progress = ipw.IntProgress(
value=0, min=0, max=max_steps, step=1, description="Processing: "
)
if verbose:
if html_param:
display(progress)
try:
max_sub = len(data[target].values.tolist())
except:
max_sub = len(data)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Loading Dependencies"],
[
"Step",
". . . . . . . . . . . . . . . . . .",
"Step 0 of " + str(total_steps),
],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(monitor, display_id="monitor")
"""
generate monitor end
"""
logger.info("Importing libraries")
# general dependencies
import numpy as np
import random
import spacy
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import spacy
import re
import secrets
# setting sklearn config to print all parameters including default
import sklearn
sklearn.set_config(print_changed_only=False)
logger.info("Declaring global variables")
# defining global variables
global text, id2word, corpus, data_, seed, target_, experiment__, exp_name_log, logging_param, log_plots_param
# create an empty list for pickling later.
try:
experiment__.append("dummy")
experiment__.pop()
except:
experiment__ = []
# converting to dataframe if list provided
if type(data) is list:
logger.info("Converting list into dataframe")
data = pd.DataFrame(data, columns=["en"])
target = "en"
# converting target column into list
try:
text = data[target].values.tolist()
target_ = str(target)
logger.info("Input provided : dataframe")
except:
text = data
target_ = "en"
logger.info("Input provided : list")
# generate seed to be used globally
if session_id is None:
seed = random.randint(150, 9000)
else:
seed = session_id
logger.info("session_id set to : " + str(seed))
logger.info("Copying training dataset")
# copying dataframe
if type(data) is list:
data_ = pd.DataFrame(data)
data_.columns = ["en"]
else:
data_ = data.copy()
# create logging parameter
logging_param = log_experiment
# create exp_name_log param incase logging is False
exp_name_log = "no_logging"
# create an empty log_plots_param
if log_plots:
log_plots_param = True
else:
log_plots_param = False
progress.value += 1
"""
DEFINE STOPWORDS
"""
try:
logger.info("Importing stopwords from nltk")
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
stop_words = stopwords.words("english")
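        # nltk.download() caches the corpus locally, so repeated setup() calls will not re-download the stopwords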
except:
logger.info(
"Importing stopwords from nltk failed .. loading pre-defined stopwords"
)
stop_words = [
"ourselves",
"hers",
"between",
"yourself",
"but",
"again",
"there",
"about",
"once",
"during",
"out",
"very",
"having",
"with",
"they",
"own",
"an",
"be",
"some",
"for",
"do",
"its",
"yours",
"such",
"into",
"of",
"most",
"itself",
"other",
"off",
"is",
"s",
"am",
"or",
"who",
"as",
"from",
"him",
"each",
"the",
"themselves",
"until",
"below",
"are",
"we",
"these",
"your",
"his",
"through",
"don",
"nor",
"me",
"were",
"her",
"more",
"himself",
"this",
"down",
"should",
"our",
"their",
"while",
"above",
"both",
"up",
"to",
"ours",
"had",
"she",
"all",
"no",
"when",
"at",
"any",
"before",
"them",
"same",
"and",
"been",
"have",
"in",
"will",
"on",
"does",
"yourselves",
"then",
"that",
"because",
"what",
"over",
"why",
"so",
"can",
"did",
"not",
"now",
"under",
"he",
"you",
"herself",
"has",
"just",
"where",
"too",
"only",
"myself",
"which",
"those",
"i",
"after",
"few",
"whom",
"t",
"being",
"if",
"theirs",
"my",
"against",
"a",
"by",
"doing",
"it",
"how",
"further",
"was",
"here",
"than",
]
if custom_stopwords is not None:
stop_words = stop_words + custom_stopwords
if custom_stopwords is None:
logger.info("No custom stopwords defined")
progress.value += 1
"""
TEXT PRE-PROCESSING STARTS HERE
"""
"""
STEP 1 - REMOVE NUMERIC CHARACTERS FROM THE LIST
"""
logger.info("Removing numeric characters from the text")
monitor.iloc[1, 1:] = "Removing Numeric Characters"
monitor.iloc[2, 1:] = "Step 1 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step1 = []
for i in range(0, len(text)):
review = re.sub("\d+", "", str(text[i]))
text_step1.append(review)
text = text_step1 # re-assigning
del text_step1
progress.value += 1
"""
STEP 2 - REGULAR EXPRESSIONS
"""
logger.info("Removing special characters from the text")
monitor.iloc[1, 1:] = "Removing Special Characters"
monitor.iloc[2, 1:] = "Step 2 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step2 = []
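    # the regex chain below replaces non-word characters with spaces, lowercases,
    # drops stray single-letter tokens, removes remaining digits, and collapses
    # repeated whitespace into a single space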
for i in range(0, len(text)):
review = re.sub(r"\W", " ", str(text[i]))
review = review.lower()
review = re.sub(r"\s+[a-z]\s+", " ", review)
review = re.sub(r"^[a-z]\s+", " ", review)
review = re.sub(r"\d+", " ", review)
review = re.sub(r"\s+", " ", review)
text_step2.append(review)
text = text_step2 # re-assigning
del text_step2
progress.value += 1
"""
STEP 3 - WORD TOKENIZATION
"""
logger.info("Tokenizing Words")
monitor.iloc[1, 1:] = "Tokenizing Words"
monitor.iloc[2, 1:] = "Step 3 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step3 = []
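    # gensim's simple_preprocess lowercases and tokenizes each document; with
    # deacc=True it also strips accent marks, and by default it drops tokens
    # shorter than 2 or longer than 15 characters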
for i in text:
review = gensim.utils.simple_preprocess(str(i), deacc=True)
text_step3.append(review)
text = text_step3
del text_step3
progress.value += 1
"""
STEP 4 - REMOVE STOPWORDS
"""
logger.info("Removing stopwords")
monitor.iloc[1, 1:] = "Removing Stopwords"
monitor.iloc[2, 1:] = "Step 4 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step4 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step4.append(ii)
text = text_step4
del text_step4
progress.value += 1
"""
STEP 5 - BIGRAM EXTRACTION
"""
logger.info("Extracting Bigrams")
monitor.iloc[1, 1:] = "Extracting Bigrams"
monitor.iloc[2, 1:] = "Step 5 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
bigram = gensim.models.Phrases(text, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
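    # token pairs that co-occur at least min_count times and score above the
    # threshold are merged into a single token, e.g. ['new', 'york'] may become
    # ['new_york'] (illustrative example; actual merges depend on the corpus)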
text_step5 = []
for i in text:
text_step5.append(bigram_mod[i])
text = text_step5
del text_step5
progress.value += 1
"""
STEP 6 - TRIGRAM EXTRACTION
"""
logger.info("Extracting Trigrams")
monitor.iloc[1, 1:] = "Extracting Trigrams"
monitor.iloc[2, 1:] = "Step 6 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
trigram = gensim.models.Phrases(bigram[text], threshold=100)
trigram_mod = gensim.models.phrases.Phraser(trigram)
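    # applying bigram_mod before trigram_mod lets an already-merged bigram
    # combine with a following token to form a trigram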
text_step6 = []
for i in text:
text_step6.append(trigram_mod[bigram_mod[i]])
text = text_step6
del text_step6
progress.value += 1
"""
STEP 7 - LEMMATIZATION USING SPACY
"""
logger.info("Lemmatizing tokens")
monitor.iloc[1, 1:] = "Lemmatizing"
monitor.iloc[2, 1:] = "Step 7 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
nlp.max_length = (
3000000 # increasing text length to 3000000 from default of 1000000
)
allowed_postags = ["NOUN", "ADJ", "VERB", "ADV"]
text_step7 = []
for i in text:
doc = nlp(" ".join(i))
text_step7.append(
[token.lemma_ for token in doc if token.pos_ in allowed_postags]
)
text = text_step7
del text_step7
progress.value += 1
"""
STEP 8 - CUSTOM STOPWORD REMOVER
"""
logger.info("Removing stopwords after lemmatizing")
monitor.iloc[1, 1:] = "Removing Custom Stopwords"
monitor.iloc[2, 1:] = "Step 8 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step8 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step8.append(ii)
text = text_step8
del text_step8
progress.value += 1
"""
    STEP 9 - CREATING CORPUS AND DICTIONARY
"""
logger.info("Creating corpus and dictionary")
monitor.iloc[1, 1:] = "Compiling Corpus"
monitor.iloc[2, 1:] = "Step 9 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# creating dictionary
id2word = corpora.Dictionary(text)
# creating corpus
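    # doc2bow encodes each document as sparse (token_id, count) pairs, e.g.
    # ['farm', 'loan', 'loan'] -> [(0, 1), (1, 2)] (ids depend on the fitted dictionary)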
corpus = []
for i in text:
d = id2word.doc2bow(i)
corpus.append(d)
progress.value += 1
"""
PROGRESS NOT YET TRACKED - TO BE CODED LATER
"""
logger.info("Compiling processed text")
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
data_[target_] = text_join
"""
Final display Starts
"""
if custom_stopwords is None:
csw = False
else:
csw = True
logger.info("Compiling information grid")
functions = pd.DataFrame(
[
["session_id", seed],
["Documents", len(corpus)],
["Vocab Size", len(id2word.keys())],
["Custom Stopwords", csw],
],
columns=["Description", "Value"],
)
functions_ = functions.style.hide_index()
"""
Final display Ends
"""
# log into experiment
experiment__.append(("Info", functions))
experiment__.append(("Dataset", data_))
experiment__.append(("Corpus", corpus))
experiment__.append(("Dictionary", id2word))
experiment__.append(("Text", text))
# end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
if logging_param:
logger.info("Creating MLFlow logs")
monitor.iloc[1, 1:] = "Creating Logs"
monitor.iloc[2, 1:] = "Final"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
import mlflow
from pathlib import Path
import os
if experiment_name is None:
exp_name_ = "nlp-default-name"
else:
exp_name_ = experiment_name
URI = secrets.token_hex(nbytes=4)
exp_name_log = exp_name_
try:
mlflow.create_experiment(exp_name_log)
except:
pass
# mlflow logging
mlflow.set_experiment(exp_name_log)
run_name_ = "Session Initialized " + str(USI)
mlflow.end_run()
mlflow.start_run(run_name=run_name_)
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
k = functions.copy()
k.set_index("Description", drop=True, inplace=True)
kdict = k.to_dict()
params = kdict.get("Value")
mlflow.log_params(params)
        # set run tags
        mlflow.set_tag("Source", "setup")
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log gensim id2word
id2word.save("id2word")
mlflow.log_artifact("id2word")
import os
os.remove("id2word")
# Log data
if log_data:
data_.to_csv("data.csv")
mlflow.log_artifact("data.csv")
os.remove("data.csv")
# Log plots
if log_plots:
logger.info(
"SubProcess plot_model() called =================================="
)
plot_model(plot="frequency", save=True, system=False)
mlflow.log_artifact("Word Frequency.html")
os.remove("Word Frequency.html")
plot_model(plot="bigram", save=True, system=False)
mlflow.log_artifact("Bigram.html")
os.remove("Bigram.html")
plot_model(plot="trigram", save=True, system=False)
mlflow.log_artifact("Trigram.html")
os.remove("Trigram.html")
plot_model(plot="pos", save=True, system=False)
mlflow.log_artifact("POS.html")
os.remove("POS.html")
logger.info(
"SubProcess plot_model() end =================================="
)
if verbose:
clear_output()
if html_param:
display(functions_)
else:
print(functions_.data)
logger.info("setup() succesfully completed......................................")
return (
text,
data_,
corpus,
id2word,
seed,
target_,
experiment__,
exp_name_log,
logging_param,
log_plots_param,
USI,
html_param,
)
def create_model(
model=None, multi_core=False, num_topics=None, verbose=True, system=True, **kwargs
):
"""
This function trains a given topic model. All the available models
can be accessed using the ``models`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
model: str, default = None
Models available in the model library (ID - Name):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: bool, default = False
True would utilize all CPU cores to parallelize and speed up model training.
Ignored when ``model`` is not 'lda'.
num_topics: int, default = 4
Number of topics to be created. If None, default is set to 4.
verbose: bool, default = True
Status update is not printed when verbose is set to False.
system: bool, default = True
        Must remain True at all times. Only to be changed by internal functions.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns:
Trained Model
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
        # create file handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing create_model()")
logger.info(
"""create_model(model={}, multi_core={}, num_topics={}, verbose={}, system={})""".format(
str(model), str(multi_core), str(num_topics), str(verbose), str(system)
)
)
logger.info("Checking exceptions")
# run_time
import datetime, time
runtime_start = time.time()
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
"""
error handling starts here
"""
# checking for model parameter
if model is None:
sys.exit(
"(Value Error): Model parameter Missing. Please see docstring for list of available models."
)
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if model not in allowed_models:
sys.exit(
"(Value Error): Model Not Available. Please see docstring for list of available models."
)
# checking multicore type:
if type(multi_core) is not bool:
sys.exit(
"(Type Error): multi_core parameter can only take argument as True or False."
)
# checking round parameter
if num_topics is not None:
if num_topics <= 1:
sys.exit("(Type Error): num_topics parameter only accepts integer value.")
# checking verbose parameter
if type(verbose) is not bool:
sys.exit(
"(Type Error): Verbose parameter can only take argument as True or False."
)
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import numpy as np
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
"""
monitor starts
"""
logger.info("Preparing display monitor")
# progress bar and monitor control
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(
value=0, min=0, max=4, step=1, description="Processing: "
)
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(progress)
display(monitor, display_id="monitor")
progress.value += 1
"""
    monitor ends
"""
logger.info("Defining topic model")
model_name_short = model
# define topic_model_name
if model == "lda":
topic_model_name = "Latent Dirichlet Allocation"
elif model == "lsi":
topic_model_name = "Latent Semantic Indexing"
elif model == "hdp":
topic_model_name = "Hierarchical Dirichlet Process"
elif model == "nmf":
topic_model_name = "Non-Negative Matrix Factorization"
elif model == "rp":
topic_model_name = "Random Projections"
logger.info("Model: " + str(topic_model_name))
# defining default number of topics
logger.info("Defining num_topics parameter")
if num_topics is None:
n_topics = 4
else:
n_topics = num_topics
logger.info("num_topics set to: " + str(n_topics))
# monitor update
monitor.iloc[1, 1:] = "Fitting Topic Model"
progress.value += 1
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
model_fit_start = time.time()
if model == "lda":
if multi_core:
logger.info("LDA multi_core enabled")
from gensim.models.ldamulticore import LdaMulticore
logger.info("LdaMulticore imported successfully")
model = LdaMulticore(
corpus=corpus,
num_topics=n_topics,
id2word=id2word,
workers=4,
random_state=seed,
chunksize=100,
passes=10,
alpha="symmetric",
per_word_topics=True,
**kwargs
)
logger.info("LdaMulticore trained successfully")
progress.value += 1
else:
from gensim.models.ldamodel import LdaModel
logger.info("LdaModel imported successfully")
model = LdaModel(
corpus=corpus,
num_topics=n_topics,
id2word=id2word,
random_state=seed,
update_every=1,
chunksize=100,
passes=10,
alpha="auto",
per_word_topics=True,
**kwargs
)
logger.info("LdaModel trained successfully")
progress.value += 1
elif model == "lsi":
from gensim.models.lsimodel import LsiModel
logger.info("LsiModel imported successfully")
model = LsiModel(corpus=corpus, num_topics=n_topics, id2word=id2word, **kwargs)
logger.info("LsiModel trained successfully")
progress.value += 1
elif model == "hdp":
from gensim.models import HdpModel
logger.info("HdpModel imported successfully")
model = HdpModel(
corpus=corpus,
id2word=id2word,
random_state=seed,
chunksize=100,
T=n_topics,
**kwargs
)
logger.info("HdpModel trained successfully")
progress.value += 1
elif model == "rp":
from gensim.models import RpModel
logger.info("RpModel imported successfully")
model = RpModel(corpus=corpus, id2word=id2word, num_topics=n_topics, **kwargs)
logger.info("RpModel trained successfully")
progress.value += 1
elif model == "nmf":
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
logger.info(
"CountVectorizer, TfidfTransformer, NMF, normalize imported successfully"
)
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer="word", max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
logger.info("CountVectorizer() Fit Successfully")
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
logger.info("TfidfTransformer() Fit Successfully")
xtfidf_norm = normalize(x_tfidf, norm="l1", axis=1)
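        # l1 normalization makes each document's tf-idf row sum to 1 before factorization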
model = NMF(n_components=n_topics, init="nndsvd", random_state=seed, **kwargs)
model.fit(xtfidf_norm)
logger.info("NMF() Trained Successfully")
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
progress.value += 1
# end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
# mlflow logging
if logging_param and system:
logger.info("Creating MLFLow Logs")
# Creating Logs message monitor
monitor.iloc[1, 1:] = "Creating Logs"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# import mlflow
import mlflow
from pathlib import Path
import os
mlflow.set_experiment(exp_name_log)
with mlflow.start_run(run_name=topic_model_name, nested=True) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
# Log model parameters
from copy import deepcopy
model_copied = deepcopy(model)
try:
params = model_copied.get_params()
except:
import inspect
params = inspect.getmembers(model_copied)[2][1]
for i in list(params):
v = params.get(i)
if len(str(v)) > 250:
params.pop(i)
mlflow.log_params(params)
            # set run tags
mlflow.set_tag("Source", "create_model")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log model and related artifacts
if model_name_short == "nmf":
logger.info(
"SubProcess save_model() called =================================="
)
save_model(model, "model", verbose=False)
logger.info(
"SubProcess save_model() end =================================="
)
mlflow.log_artifact("model.pkl")
size_bytes = Path("model.pkl").stat().st_size
os.remove("model.pkl")
elif model_name_short == "lda":
model.save("model")
mlflow.log_artifact("model")
mlflow.log_artifact("model.expElogbeta.npy")
mlflow.log_artifact("model.id2word")
mlflow.log_artifact("model.state")
size_bytes = (
Path("model").stat().st_size
+ Path("model.id2word").stat().st_size
+ Path("model.state").stat().st_size
)
os.remove("model")
os.remove("model.expElogbeta.npy")
os.remove("model.id2word")
os.remove("model.state")
elif model_name_short == "lsi":
model.save("model")
mlflow.log_artifact("model")
mlflow.log_artifact("model.projection")
size_bytes = (
Path("model").stat().st_size
+ Path("model.projection").stat().st_size
)
os.remove("model")
os.remove("model.projection")
elif model_name_short == "rp":
model.save("model")
mlflow.log_artifact("model")
size_bytes = Path("model").stat().st_size
os.remove("model")
elif model_name_short == "hdp":
model.save("model")
mlflow.log_artifact("model")
size_bytes = Path("model").stat().st_size
os.remove("model")
size_kb = np.round(size_bytes / 1000, 2)
mlflow.set_tag("Size KB", size_kb)
# Log training time in seconds
mlflow.log_metric("TT", model_fit_time)
try:
mlflow.log_metrics(model_results.to_dict().get("Metric"))
except:
pass
# storing into experiment
if verbose:
clear_output()
logger.info(str(model))
logger.info(
"create_model() succesfully completed......................................"
)
return model
def assign_model(model, verbose=True):
"""
This function assigns topic labels to the dataset for a given model.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> lda_df = assign_model(lda)
model: trained model object, default = None
Trained model object
verbose: bool, default = True
Status update is not printed when verbose is set to False.
Returns:
pandas.DataFrame
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
        # create file handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing assign_model()")
logger.info(
"""assign_model(model={}, verbose={})""".format(str(model), str(verbose))
)
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
logger.info("Determining model type")
# determine model type
if "LdaModel" in str(type(model)):
mod_type = "lda"
elif "LdaMulticore" in str(type(model)):
mod_type = "lda"
elif "LsiModel" in str(type(model)):
mod_type = "lsi"
elif "NMF" in str(type(model)):
mod_type = "nmf"
elif "HdpModel" in str(type(model)):
mod_type = "hdp"
elif "RpModel" in str(type(model)):
mod_type = "rp"
else:
mod_type = None
logger.info("model type: " + str(mod_type))
"""
error handling starts here
"""
logger.info("Checking exceptions")
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if mod_type not in allowed_models:
sys.exit(
"(Value Error): Model Not Recognized. Please see docstring for list of available models."
)
# checking verbose parameter
if type(verbose) is not bool:
sys.exit(
"(Type Error): Verbose parameter can only take argument as True or False."
)
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import numpy as np
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
# progress bar and monitor control
max_progress = len(text) + 5
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(
value=0, min=0, max=max_progress, step=1, description="Processing: "
)
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(progress)
display(monitor, display_id="monitor")
progress.value += 1
monitor.iloc[1, 1:] = "Extracting Topics from Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
progress.value += 1
# assignment starts here
if mod_type == "lda":
c = model.get_document_topics(corpus, minimum_probability=0)
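        # minimum_probability=0 returns a (topic_id, probability) pair for every
        # topic of every document, so each per-document list has the same length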
ls = []
for i in range(len(c)):
ls.append(c[i])
bb = []
for i in ls:
bs = []
for k in i:
progress.value += 1
bs.append(k[1])
bb.append(bs)
Dominant_Topic = []
for i in bb:
max_ = max(i)
max_ = i.index(max_)
Dominant_Topic.append("Topic " + str(max_))
pdt = []
for i in range(0, len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l, 2))
col_names = []
for i in range(len(model.show_topics(num_topics=999999))):
a = "Topic_" + str(i)
col_names.append(a)
progress.value += 1
bb = pd.DataFrame(bb, columns=col_names)
bb_ = pd.concat([data_, bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=["Dominant_Topic"])
bb_ = pd.concat([bb_, dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=["Perc_Dominant_Topic"])
bb_ = pd.concat([bb_, pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == "lsi":
col_names = []
for i in range(0, len(model.print_topics(num_topics=999999))):
a = "Topic_" + str(i)
col_names.append(a)
df_ = pd.DataFrame()
Dominant_Topic = []
for i in range(0, len(text)):
progress.value += 1
db = id2word.doc2bow(text[i])
db_ = model[db]
db_array = np.array(db_)
db_array_ = db_array[:, 1]
max_ = max(db_array_)
max_ = list(db_array_).index(max_)
Dominant_Topic.append("Topic " + str(max_))
db_df_ = pd.DataFrame([db_array_])
df_ = pd.concat([df_, db_df_])
progress.value += 1
df_.columns = col_names
df_["Dominant_Topic"] = Dominant_Topic
df_ = df_.reset_index(drop=True)
bb_ = pd.concat([data_, df_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == "hdp" or mod_type == "rp":
rate = []
for i in range(0, len(corpus)):
progress.value += 1
rate.append(model[corpus[i]])
topic_num = []
topic_weight = []
doc_num = []
counter = 0
for i in rate:
for k in i:
topic_num.append(k[0])
topic_weight.append(k[1])
doc_num.append(counter)
counter += 1
progress.value += 1
df = pd.DataFrame(
{"Document": doc_num, "Topic": topic_num, "Topic Weight": topic_weight}
).sort_values(by="Topic")
df = df.pivot(index="Document", columns="Topic", values="Topic Weight").fillna(
0
)
df.columns = ["Topic_" + str(i) for i in df.columns]
Dominant_Topic = []
for i in range(0, len(df)):
s = df.iloc[i].max()
d = list(df.iloc[i]).index(s)
v = df.columns[d]
v = v.replace("_", " ")
Dominant_Topic.append(v)
df["Dominant_Topic"] = Dominant_Topic
progress.value += 1
if verbose:
clear_output()
bb_ = pd.concat([data_, df], axis=1)
elif mod_type == "nmf":
"""
this section will go away in future release through better handling
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer="word", max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
xtfidf_norm = normalize(x_tfidf, norm="l1", axis=1)
"""
section ends
"""
bb = list(model.fit_transform(xtfidf_norm))
col_names = []
for i in range(len(bb[0])):
a = "Topic_" + str(i)
col_names.append(a)
Dominant_Topic = []
for i in bb:
progress.value += 1
max_ = max(i)
max_ = list(i).index(max_)
Dominant_Topic.append("Topic " + str(max_))
pdt = []
for i in range(0, len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l, 2))
progress.value += 1
bb = pd.DataFrame(bb, columns=col_names)
bb_ = pd.concat([data_, bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=["Dominant_Topic"])
bb_ = pd.concat([bb_, dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=["Perc_Dominant_Topic"])
bb_ = pd.concat([bb_, pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
logger.info(str(bb_.shape))
logger.info(
"assign_model() succesfully completed......................................"
)
return bb_
def plot_model(model=None, plot="frequency", topic_num=None, save=False, system=True, display_format = None):
"""
This function takes a trained model object (optional) and returns a plot based
on the inferred dataset by internally calling assign_model before generating a
plot. Where a model parameter is not passed, a plot on the entire dataset will
be returned instead of one at the topic level. As such, plot_model can be used
with or without model. All plots with a model parameter passed as a trained
model object will return a plot based on the first topic i.e. 'Topic 0'. This
can be changed using the topic_num param.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> plot_model(lda, plot = 'frequency')
model: object, default = none
Trained Model Object
plot: str, default = 'frequency'
List of available plots (ID - Name):
* Word Token Frequency - 'frequency'
* Word Distribution Plot - 'distribution'
* Bigram Frequency Plot - 'bigram'
* Trigram Frequency Plot - 'trigram'
* Sentiment Polarity Plot - 'sentiment'
* Part of Speech Frequency - 'pos'
* t-SNE (3d) Dimension Plot - 'tsne'
* Topic Model (pyLDAvis) - 'topic_model'
* Topic Infer Distribution - 'topic_distribution'
* Wordcloud - 'wordcloud'
* UMAP Dimensionality Plot - 'umap'
topic_num : str, default = None
Topic number to be passed as a string. If set to None, default generation will
be on 'Topic 0'
    save: string/bool, default = False
        Plot is saved in the current working directory when save is set to True.
        Plot is saved in the specified directory when a path to a directory is passed instead.
system: bool, default = True
        Must remain True at all times. Only to be changed by internal functions.
display_format: str, default = None
To display plots in Streamlit (https://www.streamlit.io/), set this to 'streamlit'.
Currently, not all plots are supported.
Returns:
None
Warnings
--------
    - 'pos' and 'umap' plots are not available at the model level. Hence the model parameter is
      ignored. The result will always be based on the entire training corpus.
    - 'topic_model' plot is based on the pyLDAvis implementation. Hence it is not available
      for model = 'lsi', 'rp' and 'nmf'.
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
        # create file handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing plot_model()")
logger.info(
"""plot_model(model={}, plot={}, topic_num={}, save={}, system={})""".format(
str(model), str(plot), str(topic_num), str(save), str(system)
)
)
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# setting default of topic_num
if model is not None and topic_num is None:
topic_num = "Topic 0"
logger.info("Topic selected. topic_num : " + str(topic_num))
"""
exception handling starts here
"""
# determine model type
if model is not None:
mod = str(type(model))
if "LdaModel" in mod:
mod_type = "lda"
elif "LdaMulticore" in str(type(model)):
mod_type = "lda"
elif "LsiModel" in str(type(model)):
mod_type = "lsi"
elif "NMF" in str(type(model)):
mod_type = "nmf"
elif "HdpModel" in str(type(model)):
mod_type = "hdp"
elif "RpModel" in str(type(model)):
mod_type = "rp"
logger.info("Checking exceptions")
# plot checking
allowed_plots = [
"frequency",
"distribution",
"bigram",
"trigram",
"sentiment",
"pos",
"tsne",
"topic_model",
"topic_distribution",
"wordcloud",
"umap",
]
if plot not in allowed_plots:
sys.exit(
"(Value Error): Plot Not Available. Please see docstring for list of available plots."
)
# plots without topic model
if model is None:
not_allowed_wm = ["tsne", "topic_model", "topic_distribution"]
if plot in not_allowed_wm:
sys.exit(
"(Type Error): Model parameter Missing. Plot not supported without specific model passed in as Model param."
)
# handle topic_model plot error
if plot == "topic_model":
not_allowed_tm = ["lsi", "rp", "nmf"]
if mod_type in not_allowed_tm:
sys.exit(
"(Type Error): Model not supported for plot = topic_model. Please see docstring for list of available models supported for topic_model."
)
# checking display_format parameter
plot_formats = [None, "streamlit"]
if display_format not in plot_formats:
raise ValueError("display_format can only be None or 'streamlit'.")
if display_format == "streamlit":
try:
import streamlit as st
except ImportError:
raise ImportError(
"It appears that streamlit is not installed. Do: pip install streamlit"
)
"""
error handling ends here
"""
logger.info("Importing libraries")
    # import dependencies
    import os
    import pandas as pd
    import numpy
# import cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
# save parameter
if save:
save_param = True
else:
save_param = False
logger.info("save_param set to " + str(save_param))
logger.info("plot type: " + str(plot))
if plot == "frequency":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_words(corpus, n=None):
vec = CountVectorizer()
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.fit_transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
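            # get_top_n_words sums raw token counts across the given corpus and
            # returns the n most frequent (word, count) pairs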
logger.info("Rendering Visual")
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_words(data_[target_], n=100)
df2 = pd.DataFrame(common_words, columns=["Text", "count"])
if display_format=="streamlit":
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 words after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 words after removing stop words",
asFigure=save_param,
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 words after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_words(filtered_df[target_], n=100)
df2 = pd.DataFrame(common_words, columns=["Text", "count"])
if display_format=="streamlit":
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param,
)
)
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Word Frequency.html")
else:
plot_filename = "Word Frequency.html"
logger.info(f"Saving '{plot_filename}'")
df3.write_html(plot_filename)
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "distribution":
try:
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
b = data_[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
if display_format=="streamlit":
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title="Word Count Distribution",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(b)
else:
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title="Word Count Distribution",
asFigure=save_param
)
else:
title = str(topic_num) + ": " + "Word Count Distribution"
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
b = filtered_df[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
if display_format=="streamlit":
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(b)
else:
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title=title,
asFigure=save_param
)
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Distribution.html")
else:
plot_filename = "Distribution.html"
logger.info(f"Saving '{plot_filename}'")
b.write_html(plot_filename)
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "bigram":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_bigram(corpus, n=None):
logger.info("Fitting CountVectorizer()")
vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_bigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 bigrams after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 bigrams after removing stop words",
asFigure=save_param
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 bigrams after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_bigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param
)
)
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Bigram.html")
else:
plot_filename = "Bigram.html"
logger.info(f"Saving '{plot_filename}'")
df3.write_html(plot_filename)
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "trigram":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_trigram(corpus, n=None):
vec = CountVectorizer(ngram_range=(3, 3)).fit(corpus)
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_trigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 trigrams after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 trigrams after removing stop words",
asFigure=save_param
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 trigrams after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_trigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param
)
)
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Trigram.html")
else:
plot_filename = "Trigram.html"
logger.info(f"Saving '{plot_filename}'")
df3.write_html(plot_filename)
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "sentiment":
try:
            # loading dependencies
import plotly.graph_objects as go
from textblob import TextBlob
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
sentiments = data_[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
if display_format=="streamlit":
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title="Sentiment Polarity Distribution",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(sentiments)
else:
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title="Sentiment Polarity Distribution",
asFigure=save_param
)
else:
title = str(topic_num) + ": " + "Sentiment Polarity Distribution"
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
sentiments = filtered_df[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
if display_format=="streamlit":
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(sentiments)
else:
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title=title,
asFigure=save_param
)
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Sentiments.html")
else:
plot_filename = "Sentiments.html"
logger.info(f"Saving '{plot_filename}'")
sentiments.write_html(plot_filename)
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "pos":
from textblob import TextBlob
b = list(id2word.token2id.keys())
logger.info("Fitting TextBlob()")
blob = TextBlob(str(b))
pos_df = pd.DataFrame(blob.tags, columns=["word", "pos"])
pos_df = pos_df.loc[pos_df["pos"] != "POS"]
pos_df = pos_df.pos.value_counts()[:20]
logger.info("Rendering Visual")
if display_format=="streamlit":
pos_df = pos_df.iplot(
kind="bar",
xTitle="POS",
yTitle="count",
title="Top 20 Part-of-speech tagging for review corpus",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(pos_df)
else:
pos_df = pos_df.iplot(
kind="bar",
xTitle="POS",
yTitle="count",
title="Top 20 Part-of-speech tagging for review corpus",
asFigure=save_param
)
logger.info("Visual Rendered Sucessfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "POS.html")
else:
plot_filename = "POS.html"
logger.info(f"Saving '{plot_filename}'")
pos_df.write_html(plot_filename)
elif plot == "tsne":
logger.info(
"SubProcess assign_model() called =================================="
)
b = assign_model(model, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
        b.dropna(axis=0, inplace=True)  # dropping rows where Dominant_Topic is blank
c = []
for i in b.columns:
if "Topic_" in i:
a = i
c.append(a)
bb = b[c]
from sklearn.manifold import TSNE
logger.info("Fitting TSNE()")
X_embedded = TSNE(n_components=3).fit_transform(bb)
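        # project each document's topic-weight vector down to 3 dimensions for the scatter plot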
logger.info("Sorting Dataframe")
X = pd.DataFrame(X_embedded)
X["Dominant_Topic"] = b["Dominant_Topic"]
X.sort_values(by="Dominant_Topic", inplace=True)
X.dropna(inplace=True)
logger.info("Rendering Visual")
import plotly.express as px
df = X
fig = px.scatter_3d(
df,
x=0,
y=1,
z=2,
color="Dominant_Topic",
title="3d TSNE Plot for Topic Model",
opacity=0.7,
width=900,
height=800,
)
if system:
if display_format=="streamlit":
st.write(fig)
else:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "TSNE.html")
else:
plot_filename = "TSNE.html"
logger.info(f"Saving '{plot_filename}'")
fig.write_html(plot_filename)
elif plot == "topic_model":
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import warnings
warnings.filterwarnings("ignore")
pyLDAvis.enable_notebook()
logger.info("Preparing pyLDAvis visual")
vis = pyLDAvis.gensim.prepare(model, corpus, id2word, mds="mmds")
display(vis)
logger.info("Visual Rendered Successfully")
elif plot == "topic_distribution":
try:
iter1 = len(model.show_topics(999999))
except:
try:
iter1 = model.num_topics
except:
iter1 = model.n_components_
topic_name = []
keywords = []
for i in range(0, iter1):
try:
s = model.show_topic(i, topn=10)
topic_name.append("Topic " + str(i))
kw = []
                for term in s:
                    kw.append(term[0])
keywords.append(kw)
except:
keywords.append("NA")
topic_name.append("Topic " + str(i))
keyword = []
for i in keywords:
b = ", ".join(i)
keyword.append(b)
kw_df = pd.DataFrame({"Topic": topic_name, "Keyword": keyword}).set_index(
"Topic"
)
logger.info(
"SubProcess assign_model() called =================================="
)
ass_df = assign_model(model, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
ass_df_pivot = ass_df.pivot_table(
index="Dominant_Topic", values="Topic_0", aggfunc="count"
)
df2 = ass_df_pivot.join(kw_df)
df2 = df2.reset_index()
df2.columns = ["Topic", "Documents", "Keyword"]
"""
sorting column starts
"""
logger.info("Sorting Dataframe")
topic_list = list(df2["Topic"])
s = []
for i in range(0, len(topic_list)):
a = int(topic_list[i].split()[1])
s.append(a)
df2["Topic"] = s
df2.sort_values(by="Topic", inplace=True)
df2.sort_values(by="Topic", inplace=True)
topic_list = list(df2["Topic"])
topic_list = list(df2["Topic"])
s = []
for i in topic_list:
a = "Topic " + str(i)
s.append(a)
df2["Topic"] = s
df2.reset_index(drop=True, inplace=True)
"""
sorting column ends
"""
logger.info("Rendering Visual")
import plotly.express as px
fig = px.bar(
df2,
x="Topic",
y="Documents",
hover_data=["Keyword"],
title="Document Distribution by Topics",
)
if system:
if display_format=="streamlit":
st.write(fig)
else:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, "Topic Distribution.html")
else:
plot_filename = "Topic Distribution.html"
logger.info(f"Saving '{plot_filename}'")
fig.write_html(plot_filename)
elif plot == "wordcloud":
try:
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
stopwords = set(STOPWORDS)
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
atext = " ".join(review for review in data_[target_])
else:
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
atext = " ".join(review for review in filtered_df[target_])
logger.info("Fitting WordCloud()")
wordcloud = WordCloud(
width=800,
height=800,
background_color="white",
stopwords=stopwords,
min_font_size=10,
).generate(atext)
# plot the WordCloud image
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
plt.savefig("Wordcloud.png")
else:
plt.savefig("Wordcloud.png")
plt.close()
logger.info("Saving 'Wordcloud.png' in current active directory")
else:
if display_format=="streamlit":
st.write(plt)
else:
plt.show()
logger.info("Visual Rendered Successfully")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "umap":
# warnings
from matplotlib.axes._axes import _log as matplotlib_axes_logger
matplotlib_axes_logger.setLevel("ERROR")
# loading dependencies
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from yellowbrick.text import UMAPVisualizer
import matplotlib.pyplot as plt
tfidf = TfidfVectorizer()
logger.info("Fitting TfidfVectorizer()")
docs = tfidf.fit_transform(data_[target_])
# Instantiate the clustering model
clusters = KMeans(n_clusters=5, random_state=seed)
logger.info("Fitting KMeans()")
clusters.fit(docs)
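        # the KMeans cluster labels are only used to colour the UMAP scatter;
        # the embedding itself is computed from the tf-idf document vectors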
plt.figure(figsize=(10, 6))
umap = UMAPVisualizer(random_state=seed)
logger.info("Fitting UMAP()")
umap.fit(docs, ["c{}".format(c) for c in clusters.labels_])
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
umap.show(outpath="UMAP.png")
else:
umap.show(outpath="UMAP.png", clear_figure=True)
logger.info("Saving 'UMAP.png' in current active directory")
else:
if display_format=="streamlit":
st.write(umap)
else:
umap.show()
logger.info("Visual Rendered Successfully")
logger.info(
"plot_model() succesfully completed......................................"
)
def tune_model(
model=None,
multi_core=False,
supervised_target=None,
estimator=None,
optimize=None,
custom_grid=None,
auto_fe=True,
fold=10,
verbose=True,
):
"""
This function tunes the ``num_topics`` parameter of a given model.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> tuned_lda = tune_model(model = 'lda', supervised_target = 'status')
model: str, default = None
Enter ID of the models available in model library (ID - Model):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: bool, default = False
True would utilize all CPU cores to parallelize and speed up model
training. Ignored when ``model`` is not 'lda'.
supervised_target: str
Name of the target column for supervised learning. If None, the model
coherence value is used as the objective function.
estimator: str, default = None
Classification (ID - Name):
* 'lr' - Logistic Regression (Default)
* 'knn' - K Nearest Neighbour
* 'nb' - Naive Bayes
* 'dt' - Decision Tree Classifier
* 'svm' - SVM - Linear Kernel
* 'rbfsvm' - SVM - Radial Kernel
* 'gpc' - Gaussian Process Classifier
* 'mlp' - Multi Level Perceptron
* 'ridge' - Ridge Classifier
* 'rf' - Random Forest Classifier
* 'qda' - Quadratic Discriminant Analysis
* 'ada' - Ada Boost Classifier
* 'gbc' - Gradient Boosting Classifier
* 'lda' - Linear Discriminant Analysis
* 'et' - Extra Trees Classifier
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Classifier
Regression (ID - Name):
* 'lr' - Linear Regression (Default)
* 'lasso' - Lasso Regression
* 'ridge' - Ridge Regression
* 'en' - Elastic Net
* 'lar' - Least Angle Regression
* 'llar' - Lasso Least Angle Regression
* 'omp' - Orthogonal Matching Pursuit
* 'br' - Bayesian Ridge
        * 'ard' - Automatic Relevance Determination
* 'par' - Passive Aggressive Regressor
* 'ransac' - Random Sample Consensus
* 'tr' - TheilSen Regressor
* 'huber' - Huber Regressor
* 'kr' - Kernel Ridge
* 'svm' - Support Vector Machine
* 'knn' - K Neighbors Regressor
* 'dt' - Decision Tree
* 'rf' - Random Forest
* 'et' - Extra Trees Regressor
* 'ada' - AdaBoost Regressor
* 'gbr' - Gradient Boosting
* 'mlp' - Multi Level Perceptron
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Regressor
optimize: str, default = None
For Classification tasks:
Accuracy, AUC, Recall, Precision, F1, Kappa (default = 'Accuracy')
For Regression tasks:
MAE, MSE, RMSE, R2, RMSLE, MAPE (default = 'R2')
custom_grid: list, default = None
        By default, a pre-defined number of topics is iterated over to
        optimize the supervised objective. To override the default iterations,
        pass a list of num_topics to iterate over in the custom_grid param.
auto_fe: bool, default = True
        Automatic text feature engineering. When set to True, it will generate
        text-based features such as polarity, subjectivity, and word counts. Ignored
when ``supervised_target`` is None.
fold: int, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
verbose: bool, default = True
Status update is not printed when verbose is set to False.
Returns:
Trained Model with optimized ``num_topics`` parameter.
Warnings
--------
    - Random Projections ('rp') and Non-Negative Matrix Factorization ('nmf')
      are not available for unsupervised tuning. An error is raised when 'rp'
      or 'nmf' is passed without supervised_target.
    - Estimators using kernel-based methods such as Kernel Ridge Regressor,
      Automatic Relevance Determination, Gaussian Process Classifier, Radial
      Basis Support Vector Machine and Multi Level Perceptron may have longer
      training times.
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing tune_model()")
logger.info(
"""tune_model(model={}, multi_core={}, supervised_target={}, estimator={}, optimize={}, custom_grid={}, auto_fe={}, fold={}, verbose={})""".format(
str(model),
str(multi_core),
str(supervised_target),
str(estimator),
str(optimize),
str(custom_grid),
str(auto_fe),
str(fold),
str(verbose),
)
)
logger.info("Checking exceptions")
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
import sys
# checking for model parameter
if model is None:
sys.exit(
"(Value Error): Model parameter Missing. Please see docstring for list of available models."
)
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if model not in allowed_models:
sys.exit(
"(Value Error): Model Not Available. Please see docstring for list of available models."
)
# checking multicore type:
if type(multi_core) is not bool:
sys.exit(
"(Type Error): multi_core parameter can only take argument as True or False."
)
# check supervised target:
if supervised_target is not None:
all_col = list(data_.columns)
target = target_
all_col.remove(target)
if supervised_target not in all_col:
sys.exit(
"(Value Error): supervised_target not recognized. It can only be one of the following: "
+ str(all_col)
)
# supervised target exception handling
if supervised_target is None:
models_not_allowed = ["rp", "nmf"]
if model in models_not_allowed:
sys.exit(
"(Type Error): Model not supported for unsupervised tuning. Either supervised_target param has to be passed or different model has to be used. Please see docstring for available models."
)
# checking estimator:
if estimator is not None:
available_estimators = [
"lr",
"knn",
"nb",
"dt",
"svm",
"rbfsvm",
"gpc",
"mlp",
"ridge",
"rf",
"qda",
"ada",
"gbc",
"lda",
"et",
"lasso",
"ridge",
"en",
"lar",
"llar",
"omp",
"br",
"ard",
"par",
"ransac",
"tr",
"huber",
"kr",
"svm",
"knn",
"dt",
"rf",
"et",
"ada",
"gbr",
"mlp",
"xgboost",
"lightgbm",
"catboost",
]
if estimator not in available_estimators:
sys.exit(
"(Value Error): Estimator Not Available. Please see docstring for list of available estimators."
)
# checking optimize parameter
if optimize is not None:
available_optimizers = [
"MAE",
"MSE",
"RMSE",
"R2",
"ME",
"Accuracy",
"AUC",
"Recall",
"Precision",
"F1",
"Kappa",
]
if optimize not in available_optimizers:
sys.exit(
"(Value Error): optimize parameter Not Available. Please see docstring for list of available parameters."
)
# checking auto_fe:
if type(auto_fe) is not bool:
sys.exit(
"(Type Error): auto_fe parameter can only take argument as True or False."
)
# checking fold parameter
if type(fold) is not int:
sys.exit("(Type Error): Fold parameter only accepts integer value.")
"""
exception handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import ipywidgets as ipw
from ipywidgets import Output
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
# progress bar
if custom_grid is None:
max_steps = 25
else:
max_steps = 10 + len(custom_grid)
progress = ipw.IntProgress(
value=0, min=0, max=max_steps, step=1, description="Processing: "
)
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Loading Dependencies"],
["Step", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
monitor_out = Output()
if verbose:
if html_param:
display(monitor_out)
if verbose:
if html_param:
with monitor_out:
display(monitor, display_id="monitor")
logger.info("Importing libraries")
# General Dependencies
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
import numpy as np
import plotly.express as px
# setting up cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
progress.value += 1
# define the problem
if supervised_target is None:
problem = "unsupervised"
logger.info("Objective : Unsupervised")
elif data_[supervised_target].value_counts().count() == 2:
problem = "classification"
logger.info("Objective : Classification")
else:
problem = "regression"
logger.info("Objective : Regression")
# define topic_model_name
logger.info("Defining model name")
if model == "lda":
topic_model_name = "Latent Dirichlet Allocation"
elif model == "lsi":
topic_model_name = "Latent Semantic Indexing"
elif model == "hdp":
topic_model_name = "Hierarchical Dirichlet Process"
elif model == "nmf":
topic_model_name = "Non-Negative Matrix Factorization"
elif model == "rp":
topic_model_name = "Random Projections"
logger.info("Topic Model Name: " + str(topic_model_name))
# defining estimator:
logger.info("Defining supervised estimator")
if problem == "classification" and estimator is None:
estimator = "lr"
elif problem == "regression" and estimator is None:
estimator = "lr"
else:
estimator = estimator
logger.info("Estimator: " + str(estimator))
# defining optimizer:
logger.info("Defining Optimizer")
if optimize is None and problem == "classification":
optimize = "Accuracy"
elif optimize is None and problem == "regression":
optimize = "R2"
else:
optimize = optimize
logger.info("Optimize: " + str(optimize))
progress.value += 1
# creating sentiments
if problem == "classification" or problem == "regression":
logger.info("Problem : Supervised")
if auto_fe:
logger.info("auto_fe param set to True")
monitor.iloc[1, 1:] = "Feature Engineering"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
from textblob import TextBlob
monitor.iloc[2, 1:] = "Extracting Polarity"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Polarity")
polarity = data_[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
monitor.iloc[2, 1:] = "Extracting Subjectivity"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Subjectivity")
subjectivity = data_[target_].map(
lambda text: TextBlob(text).sentiment.subjectivity
)
monitor.iloc[2, 1:] = "Extracting Wordcount"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Wordcount")
word_count = [len(i) for i in text]
progress.value += 1
# defining tuning grid
logger.info("Defining Tuning Grid")
if custom_grid is not None:
logger.info("Custom Grid used")
param_grid = custom_grid
else:
logger.info("Pre-defined Grid used")
param_grid = [2, 4, 8, 16, 32, 64, 100, 200, 300, 400]
master = []
master_df = []
monitor.iloc[1, 1:] = "Creating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
for i in param_grid:
logger.info("Fitting Model with num_topics = " + str(i))
progress.value += 1
monitor.iloc[2, 1:] = "Fitting Model With " + str(i) + " Topics"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# create and assign the model to dataset d
logger.info(
"SubProcess create_model() called =================================="
)
m = create_model(
model=model, multi_core=multi_core, num_topics=i, verbose=False
)
logger.info("SubProcess create_model() end ==================================")
logger.info(
"SubProcess assign_model() called =================================="
)
d = assign_model(m, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
if problem in ["classification", "regression"] and auto_fe:
d["Polarity"] = polarity
d["Subjectivity"] = subjectivity
d["word_count"] = word_count
master.append(m)
master_df.append(d)
# topic model creation end's here
if problem == "unsupervised":
logger.info("Problem : Unsupervised")
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
from gensim.models import CoherenceModel
logger.info("CoherenceModel imported successfully")
coherence = []
metric = []
counter = 0
for i in master:
logger.info("Evaluating Coherence with num_topics: " + str(i))
progress.value += 1
monitor.iloc[2, 1:] = (
"Evaluating Coherence With " + str(param_grid[counter]) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
model = CoherenceModel(
model=i, texts=text, dictionary=id2word, coherence="c_v"
)
model_coherence = model.get_coherence()
coherence.append(model_coherence)
metric.append("Coherence")
counter += 1
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame(
{"# Topics": param_grid, "Score": coherence, "Metric": metric}
)
df.columns = ["# Topics", "Score", "Metric"]
sorted_df = df.sort_values(by="Score", ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info("Rendering Visual")
fig = px.line(
df,
x="# Topics",
y="Score",
line_shape="linear",
title="Coherence Value and # of Topics",
color="Metric",
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
fig.show()
logger.info("Visual Rendered Successfully")
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)["Score"])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ "Coherence: "
+ str(best_m)
)
print(p)
elif problem == "classification":
logger.info("Importing untrained Classifier")
"""
defining estimator
"""
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
if estimator == "lr":
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=seed)
full_name = "Logistic Regression"
elif estimator == "knn":
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
full_name = "K Nearest Neighbours"
elif estimator == "nb":
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
full_name = "Naive Bayes"
elif estimator == "dt":
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=seed)
full_name = "Decision Tree"
elif estimator == "svm":
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(max_iter=1000, tol=0.001, random_state=seed)
full_name = "Support Vector Machine"
elif estimator == "rbfsvm":
from sklearn.svm import SVC
model = SVC(
gamma="auto", C=1, probability=True, kernel="rbf", random_state=seed
)
full_name = "RBF SVM"
elif estimator == "gpc":
from sklearn.gaussian_process import GaussianProcessClassifier
model = GaussianProcessClassifier(random_state=seed)
full_name = "Gaussian Process Classifier"
elif estimator == "mlp":
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(max_iter=500, random_state=seed)
full_name = "Multi Level Perceptron"
elif estimator == "ridge":
from sklearn.linear_model import RidgeClassifier
model = RidgeClassifier(random_state=seed)
full_name = "Ridge Classifier"
elif estimator == "rf":
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10, random_state=seed)
full_name = "Random Forest Classifier"
elif estimator == "qda":
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
model = QuadraticDiscriminantAnalysis()
full_name = "Quadratic Discriminant Analysis"
elif estimator == "ada":
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier(random_state=seed)
full_name = "AdaBoost Classifier"
elif estimator == "gbc":
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(random_state=seed)
full_name = "Gradient Boosting Classifier"
elif estimator == "lda":
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
full_name = "Linear Discriminant Analysis"
elif estimator == "et":
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(random_state=seed)
full_name = "Extra Trees Classifier"
elif estimator == "xgboost":
from xgboost import XGBClassifier
model = XGBClassifier(random_state=seed, n_jobs=-1, verbosity=0)
full_name = "Extreme Gradient Boosting"
elif estimator == "lightgbm":
import lightgbm as lgb
model = lgb.LGBMClassifier(random_state=seed)
full_name = "Light Gradient Boosting Machine"
elif estimator == "catboost":
from catboost import CatBoostClassifier
model = CatBoostClassifier(
random_state=seed, silent=True
) # Silent is True to suppress CatBoost iteration results
full_name = "CatBoost Classifier"
logger.info(str(full_name) + " Imported Successfully")
progress.value += 1
"""
start model building here
"""
acc = []
auc = []
recall = []
prec = []
kappa = []
f1 = []
for i in range(0, len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info(
"Training supervised model with num_topics: " + str(param_grid_val)
)
monitor.iloc[2, 1:] = (
"Evaluating Classifier With " + str(param_grid_val) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# prepare the dataset for supervised problem
d = master_df[i]
            d.dropna(axis=0, inplace=True)  # dropping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
# split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
# fit the model
logger.info("Fitting Model")
model.fit(X, y)
# generate the prediction and evaluate metric
logger.info("Generating Cross Val Predictions")
pred = cross_val_predict(model, X, y, cv=fold, method="predict")
acc_ = metrics.accuracy_score(y, pred)
acc.append(acc_)
recall_ = metrics.recall_score(y, pred)
recall.append(recall_)
precision_ = metrics.precision_score(y, pred)
prec.append(precision_)
kappa_ = metrics.cohen_kappa_score(y, pred)
kappa.append(kappa_)
f1_ = metrics.f1_score(y, pred)
f1.append(f1_)
if hasattr(model, "predict_proba"):
pred_ = cross_val_predict(model, X, y, cv=fold, method="predict_proba")
pred_prob = pred_[:, 1]
auc_ = metrics.roc_auc_score(y, pred_prob)
auc.append(auc_)
else:
auc.append(0)
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame(
{
"# Topics": param_grid,
"Accuracy": acc,
"AUC": auc,
"Recall": recall,
"Precision": prec,
"F1": f1,
"Kappa": kappa,
}
)
sorted_df = df.sort_values(by=optimize, ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
progress.value += 1
logger.info("Rendering Visual")
sd = pd.melt(
df,
id_vars=["# Topics"],
value_vars=["Accuracy", "AUC", "Recall", "Precision", "F1", "Kappa"],
var_name="Metric",
value_name="Score",
)
fig = px.line(
sd,
x="# Topics",
y="Score",
color="Metric",
line_shape="linear",
range_y=[0, 1],
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
title = str(full_name) + " Metrics and # of Topics"
fig.update_layout(
title={
"text": title,
"y": 0.95,
"x": 0.45,
"xanchor": "center",
"yanchor": "top",
}
)
fig.show()
logger.info("Visual Rendered Successfully")
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ str(optimize)
+ " : "
+ str(best_m)
)
print(p)
elif problem == "regression":
logger.info("Importing untrained Regressor")
"""
defining estimator
"""
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
if estimator == "lr":
from sklearn.linear_model import LinearRegression
model = LinearRegression()
full_name = "Linear Regression"
elif estimator == "lasso":
from sklearn.linear_model import Lasso
model = Lasso(random_state=seed)
full_name = "Lasso Regression"
elif estimator == "ridge":
from sklearn.linear_model import Ridge
model = Ridge(random_state=seed)
full_name = "Ridge Regression"
elif estimator == "en":
from sklearn.linear_model import ElasticNet
model = ElasticNet(random_state=seed)
full_name = "Elastic Net"
elif estimator == "lar":
from sklearn.linear_model import Lars
model = Lars()
full_name = "Least Angle Regression"
elif estimator == "llar":
from sklearn.linear_model import LassoLars
model = LassoLars()
full_name = "Lasso Least Angle Regression"
elif estimator == "omp":
from sklearn.linear_model import OrthogonalMatchingPursuit
model = OrthogonalMatchingPursuit()
full_name = "Orthogonal Matching Pursuit"
elif estimator == "br":
from sklearn.linear_model import BayesianRidge
model = BayesianRidge()
full_name = "Bayesian Ridge Regression"
elif estimator == "ard":
from sklearn.linear_model import ARDRegression
model = ARDRegression()
full_name = "Automatic Relevance Determination"
elif estimator == "par":
from sklearn.linear_model import PassiveAggressiveRegressor
model = PassiveAggressiveRegressor(random_state=seed)
full_name = "Passive Aggressive Regressor"
elif estimator == "ransac":
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor(random_state=seed)
full_name = "Random Sample Consensus"
elif estimator == "tr":
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor(random_state=seed)
full_name = "TheilSen Regressor"
elif estimator == "huber":
from sklearn.linear_model import HuberRegressor
model = HuberRegressor()
full_name = "Huber Regressor"
elif estimator == "kr":
from sklearn.kernel_ridge import KernelRidge
model = KernelRidge()
full_name = "Kernel Ridge"
elif estimator == "svm":
from sklearn.svm import SVR
model = SVR()
full_name = "Support Vector Regression"
elif estimator == "knn":
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor()
full_name = "Nearest Neighbors Regression"
elif estimator == "dt":
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=seed)
full_name = "Decision Tree Regressor"
elif estimator == "rf":
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(random_state=seed)
full_name = "Random Forest Regressor"
elif estimator == "et":
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor(random_state=seed)
full_name = "Extra Trees Regressor"
elif estimator == "ada":
from sklearn.ensemble import AdaBoostRegressor
model = AdaBoostRegressor(random_state=seed)
full_name = "AdaBoost Regressor"
elif estimator == "gbr":
from sklearn.ensemble import GradientBoostingRegressor
model = GradientBoostingRegressor(random_state=seed)
full_name = "Gradient Boosting Regressor"
elif estimator == "mlp":
from sklearn.neural_network import MLPRegressor
model = MLPRegressor(random_state=seed)
full_name = "MLP Regressor"
elif estimator == "xgboost":
from xgboost import XGBRegressor
model = XGBRegressor(random_state=seed, n_jobs=-1, verbosity=0)
full_name = "Extreme Gradient Boosting Regressor"
elif estimator == "lightgbm":
import lightgbm as lgb
model = lgb.LGBMRegressor(random_state=seed)
full_name = "Light Gradient Boosting Machine"
elif estimator == "catboost":
from catboost import CatBoostRegressor
model = CatBoostRegressor(random_state=seed, silent=True)
full_name = "CatBoost Regressor"
logger.info(str(full_name) + " Imported Successfully")
progress.value += 1
"""
start model building here
"""
score = []
metric = []
for i in range(0, len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info(
"Training supervised model with num_topics: " + str(param_grid_val)
)
monitor.iloc[2, 1:] = (
"Evaluating Regressor With " + str(param_grid_val) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# prepare the dataset for supervised problem
d = master_df[i]
            d.dropna(axis=0, inplace=True)  # dropping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
# split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
# fit the model
logger.info("Fitting Model")
model.fit(X, y)
# generate the prediction and evaluate metric
logger.info("Generating Cross Val Predictions")
pred = cross_val_predict(model, X, y, cv=fold, method="predict")
if optimize == "R2":
r2_ = metrics.r2_score(y, pred)
score.append(r2_)
elif optimize == "MAE":
mae_ = metrics.mean_absolute_error(y, pred)
score.append(mae_)
elif optimize == "MSE":
mse_ = metrics.mean_squared_error(y, pred)
score.append(mse_)
elif optimize == "RMSE":
mse_ = metrics.mean_squared_error(y, pred)
rmse_ = np.sqrt(mse_)
score.append(rmse_)
elif optimize == "ME":
max_error_ = metrics.max_error(y, pred)
score.append(max_error_)
metric.append(str(optimize))
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame({"# Topics": param_grid, "Score": score, "Metric": metric})
df.columns = ["# Topics", optimize, "Metric"]
# sorting to return best model
if optimize == "R2":
sorted_df = df.sort_values(by=optimize, ascending=False)
else:
sorted_df = df.sort_values(by=optimize, ascending=True)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info("Rendering Visual")
fig = px.line(
df,
x="# Topics",
y=optimize,
line_shape="linear",
title=str(full_name) + " Metrics and # of Topics",
color="Metric",
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
progress.value += 1
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
fig.show()
logger.info("Visual Rendered Successfully")
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ str(optimize)
+ " : "
+ str(best_m)
)
print(p)
logger.info(str(best_model))
logger.info(
"tune_model() succesfully completed......................................"
)
return best_model
def evaluate_model(model):
"""
This function displays a user interface for analyzing performance of a trained
model. It calls the ``plot_model`` function internally.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> evaluate_model(lda)
    model: object, default = None
A trained model object should be passed.
Returns:
None
"""
from ipywidgets import widgets
from ipywidgets.widgets import interact, fixed, interact_manual
import numpy as np
"""
generate sorted list
"""
try:
n_topic_assigned = len(model.show_topics())
except:
try:
n_topic_assigned = model.num_topics
except:
n_topic_assigned = model.n_components
final_list = []
for i in range(0, n_topic_assigned):
final_list.append("Topic " + str(i))
a = widgets.ToggleButtons(
options=[
("Frequency Plot", "frequency"),
("Bigrams", "bigram"),
("Trigrams", "trigram"),
("Sentiment Polarity", "sentiment"),
("Word Cloud", "wordcloud"),
],
description="Plot Type:",
disabled=False,
button_style="", # 'success', 'info', 'warning', 'danger' or ''
icons=[""],
)
b = widgets.Dropdown(options=final_list, description="Topic #:", disabled=False)
d = interact_manual(
plot_model,
model=fixed(model),
plot=a,
topic_num=b,
save=fixed(False),
system=fixed(True),
display_format=fixed(None),
)
def save_model(model, model_name, verbose=True, **kwargs):
"""
This function saves the trained model object into the current active
directory as a pickle file for later use.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> save_model(lda, 'saved_lda_model')
model: object
A trained model object should be passed.
model_name: str
Name of pickle file to be passed as a string.
verbose: bool, default = True
When set to False, success message is not printed.
**kwargs:
Additional keyword arguments to pass to joblib.dump().
Returns:
Tuple of the model object and the filename.
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing save_model()")
logger.info(
"""save_model(model={}, model_name={}, verbose={})""".format(
str(model), str(model_name), str(verbose)
)
)
import joblib
model_name = model_name + ".pkl"
joblib.dump(model, model_name, **kwargs)
if verbose:
print("Model Succesfully Saved")
logger.info(str(model))
logger.info(
"save_model() succesfully completed......................................"
)
return (model, model_name)
def load_model(model_name, verbose=True):
"""
This function loads a previously saved model.
Example
-------
>>> from pycaret.nlp import load_model
>>> saved_lda = load_model('saved_lda_model')
model_name: str
Name of pickle file to be passed as a string.
verbose: bool, default = True
When set to False, success message is not printed.
Returns:
Trained Model
"""
import joblib
model_name = model_name + ".pkl"
if verbose:
print("Model Sucessfully Loaded")
return joblib.load(model_name)
def models():
"""
    Returns a table of the models available in the model library.
Example
-------
>>> from pycaret.nlp import models
>>> all_models = models()
Returns:
pandas.DataFrame
"""
import pandas as pd
model_id = ["lda", "lsi", "hdp", "rp", "nmf"]
model_name = [
"Latent Dirichlet Allocation",
"Latent Semantic Indexing",
"Hierarchical Dirichlet Process",
"Random Projections",
"Non-Negative Matrix Factorization",
]
model_ref = [
"gensim/models/ldamodel",
"gensim/models/lsimodel",
"gensim/models/hdpmodel",
"gensim/models/rpmodel",
"sklearn.decomposition.NMF",
]
df = pd.DataFrame({"ID": model_id, "Name": model_name, "Reference": model_ref})
df.set_index("ID", inplace=True)
return df
def get_logs(experiment_name=None, save=False):
"""
Returns a table of experiment logs. Only works when ``log_experiment``
is True when initializing the ``setup`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en', log_experiment = True)
>>> lda = create_model('lda')
>>> exp_logs = get_logs()
experiment_name: str, default = None
        When None, the current active run is used.
save: bool, default = False
        When set to True, a CSV file is saved in the current working directory.
Returns:
pandas.DataFrame
"""
import sys
if experiment_name is None:
exp_name_log_ = exp_name_log
else:
exp_name_log_ = experiment_name
import mlflow
from mlflow.tracking import MlflowClient
client = MlflowClient()
if client.get_experiment_by_name(exp_name_log_) is None:
sys.exit(
"No active run found. Check logging parameter in setup or to get logs for inactive run pass experiment_name."
)
exp_id = client.get_experiment_by_name(exp_name_log_).experiment_id
runs = mlflow.search_runs(exp_id)
if save:
file_name = str(exp_name_log_) + "_logs.csv"
runs.to_csv(file_name, index=False)
return runs
def get_config(variable):
"""
This function retrieves the global variables created when initializing the
``setup`` function. Following variables are accessible:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> text = get_config('text')
Returns:
Global variable
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing get_config()")
logger.info("""get_config(variable={})""".format(str(variable)))
if variable == "text":
global_var = text
if variable == "data_":
global_var = data_
if variable == "corpus":
global_var = corpus
if variable == "id2word":
global_var = id2word
if variable == "seed":
global_var = seed
if variable == "target_":
global_var = target_
if variable == "html_param":
global_var = html_param
if variable == "exp_name_log":
global_var = exp_name_log
if variable == "logging_param":
global_var = logging_param
if variable == "log_plots_param":
global_var = log_plots_param
if variable == "USI":
global_var = USI
logger.info("Global variable: " + str(variable) + " returned")
logger.info(
"get_config() succesfully completed......................................"
)
return global_var
def set_config(variable, value):
"""
This function resets the global variables. Following variables are
accessible:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> set_config('seed', 123)
Returns:
None
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing set_config()")
logger.info(
"""set_config(variable={}, value={})""".format(str(variable), str(value))
)
if variable == "text":
global text
text = value
if variable == "data_":
global data_
data_ = value
if variable == "corpus":
global corpus
corpus = value
if variable == "id2word":
global id2word
id2word = value
if variable == "seed":
global seed
seed = value
if variable == "html_param":
global html_param
html_param = value
if variable == "exp_name_log":
global exp_name_log
exp_name_log = value
if variable == "logging_param":
global logging_param
logging_param = value
if variable == "log_plots_param":
global log_plots_param
log_plots_param = value
if variable == "USI":
global USI
USI = value
logger.info("Global variable: " + str(variable) + " updated")
logger.info(
"set_config() succesfully completed......................................"
)
def get_topics(data, text, model=None, num_topics=4):
"""
    Gets topics on a new dataset; callable from any external environment
    without requiring prior setup initialization (it runs ``setup``,
    ``create_model``, and ``assign_model`` internally).
"""
if model is None:
model = "lda"
s = setup(data=data, target=text)
c = create_model(model=model, num_topics=num_topics, verbose=False)
dataset = assign_model(c, verbose=False)
return dataset
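# Editorial usage sketch (hedged; not in the original module): a one-shot call
# that avoids manual setup. The 'kiva'/'en' names mirror the docstring examples
# above; the num_topics value is arbitrary.
#
#   from pycaret.datasets import get_data
#   kiva = get_data('kiva')
#   topic_df = get_topics(data=kiva, text='en', model='nmf', num_topics=6)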
```
|
{
"source": "jfahlen/isofit",
"score": 2
}
|
#### File: examples/py-hypertrace/hypertrace.py
```python
import copy
import pathlib
import json
import shutil
import logging
import numpy as np
import spectral as sp
from scipy.io import loadmat
from scipy.interpolate import interp1d
from isofit.core.isofit import Isofit
from isofit.utils import empirical_line, segment, extractions
from isofit.utils.apply_oe import write_modtran_template
logger = logging.getLogger(__name__)
def do_hypertrace(isofit_config, wavelength_file, reflectance_file,
rtm_template_file,
lutdir, outdir,
surface_file="./data/prior.mat",
noisefile=None, snr=300,
aod=0.1, h2o=1.0, atmosphere_type="ATM_MIDLAT_WINTER",
atm_aod_h2o=None,
solar_zenith=0, observer_zenith=0,
solar_azimuth=0, observer_azimuth=0,
observer_altitude_km=99.9,
dayofyear=200,
latitude=34.15, longitude=-118.14,
localtime=10.0,
elevation_km=0.01,
inversion_mode="inversion",
use_empirical_line=False,
calibration_uncertainty_file=None,
n_calibration_draws=1,
calibration_scale=1,
create_lut=True,
overwrite=False):
"""One iteration of the hypertrace workflow.
Required arguments:
isofit_config: dict of isofit configuration options
`wavelength_file`: Path to ASCII space delimited table containing two
columns, wavelength and full width half max (FWHM); both in nanometers.
`reflectance_file`: Path to input reflectance file. Note that this has
to be an ENVI-formatted binary reflectance file, and this path is to the
associated header file (`.hdr`), not the image file itself (following
the convention of the `spectral` Python library, which will be used to
read this file).
rtm_template_file: Path to the atmospheric RTM template. For LibRadtran,
note that this is slightly different from the Isofit template in that
the Isofit fields are surrounded by two sets of `{{` while a few
additional options related to geometry are surrounded by just `{` (this
is because Hypertrace does an initial pass at formatting the files).
`lutdir`: Directory where look-up tables will be stored. Will be created
if missing.
`outdir`: Directory where outputs will be stored. Will be created if
missing.
Keyword arguments:
surface_file: Matlab (`.mat`) file containing a multicomponent surface
prior. See Isofit documentation for details.
noisefile: Parametric instrument noise file. See Isofit documentation for
details. Default = `None`
snr: Instrument signal-to-noise ratio. Ignored if `noisefile` is present.
Default = 300
aod: True aerosol optical depth. Default = 0.1
h2o: True water vapor content. Default = 1.0
atmosphere_type: LibRadtran or Modtran atmosphere type. See RTM
manuals for details. Default = `ATM_MIDLAT_WINTER`
atm_aod_h2o: A list containing three elements: The atmosphere type, AOD,
and H2O. This provides a way to iterate over specific known atmospheres
that are combinations of the three previous variables. If this is set, it
overrides the three previous arguments. Default = `None`
solar_zenith, observer_zenith: Solar and observer zenith angles,
respectively (0 = directly overhead, 90 = horizon). These are in degrees
off nadir. Default = 0 for both. (Note that off-nadir angles make
LibRadtran run _much_ more slowly, so be prepared if you need to generate
those LUTs). (Note: For `modtran` and `modtran_simulator`, `solar_zenith`
is calculated from the `gmtime` and location, so this parameter is ignored.)
solar_azimuth, observer_azimuth: Solar and observer azimuth angles,
respectively, in degrees. Observer azimuth is the sensor _position_ (so
180 degrees off from view direction) relative to N, rotating
counterclockwise; i.e., 0 = Sensor in N, looking S; 90 = Sensor in W,
looking E (this follows the LibRadtran convention). Default = 0 for both.
Note: For `modtran` and `modtran_simulator`, `observer_azimuth` is used as
`to_sensor_azimuth`; i.e., the *relative* azimuth of the sensor. The true
solar azimuth is calculated from lat/lon and time, so `solar_azimuth` is ignored.
observer_altitude_km: Sensor altitude in km. Must be less than 100. Default = 99.9.
(`modtran` and `modtran_simulator` only)
dayofyear: Julian date of observation. Default = 200
(`modtran` and `modtran_simulator` only)
latitude, longitude: Decimal degree coordinates of observation. Default =
34.15, -118.14 (Pasadena, CA).
(`modtran` and `modtran_simulator` only)
localtime: Local time, in decimal hours (0-24). Default = 10.0
(`modtran` and `modtran_simulator` only)
elevation_km: Target elevation above sea level, in km. Default = 0.01
(`modtran` and `modtran_simulator` only)
inversion_mode: Inversion algorithm to use. Must be either "inversion"
(default) for standard optimal estimation, or "mcmc_inversion" for MCMC.
use_empirical_line: (boolean, default = `False`) If `True`, perform
atmospheric correction on a segmented image and then resample using the
empirical line method. If `False`, run Isofit pixel-by-pixel.
overwrite: (boolean, default = `False`) If `False` (default), skip steps
where output files already exist. If `True`, run the full workflow
regardless of existing files.
"""
outdir = mkabs(outdir)
outdir.mkdir(parents=True, exist_ok=True)
assert observer_altitude_km < 100, "Isofit 6S does not support altitude >= 100km"
isofit_common = copy.deepcopy(isofit_config)
# NOTE: All of these settings are *not* copied, but referenced. So these
# changes propagate to the `forward_settings` object below.
forward_settings = isofit_common["forward_model"]
instrument_settings = forward_settings["instrument"]
# NOTE: This also propagates to the radiative transfer engine
instrument_settings["wavelength_file"] = str(mkabs(wavelength_file))
surface_settings = forward_settings["surface"]
surface_settings["surface_file"] = str(mkabs(surface_file))
if noisefile is not None:
noisetag = f"noise_{pathlib.Path(noisefile).stem}"
if "SNR" in instrument_settings:
instrument_settings.pop("SNR")
instrument_settings["parametric_noise_file"] = str(mkabs(noisefile))
if "integrations" not in instrument_settings:
instrument_settings["integrations"] = 1
elif snr is not None:
noisetag = f"snr_{snr}"
instrument_settings["SNR"] = snr
priortag = f"prior_{pathlib.Path(surface_file).stem}__" +\
f"inversion_{inversion_mode}"
if atm_aod_h2o is not None:
atmosphere_type = atm_aod_h2o[0]
aod = atm_aod_h2o[1]
h2o = atm_aod_h2o[2]
atmtag = f"aod_{aod:.3f}__h2o_{h2o:.3f}"
if calibration_uncertainty_file is not None:
caltag = f"cal_{pathlib.Path(calibration_uncertainty_file).stem}__" +\
f"draw_{n_calibration_draws}__" +\
f"scale_{calibration_scale}"
else:
caltag = "cal_NONE__draw_0__scale_0"
if create_lut:
lutdir = mkabs(lutdir)
lutdir.mkdir(parents=True, exist_ok=True)
vswir_conf = forward_settings["radiative_transfer"]["radiative_transfer_engines"]["vswir"]
atmospheric_rtm = vswir_conf["engine_name"]
if atmospheric_rtm == "libradtran":
lrttag = f"atm_{atmosphere_type}__" +\
f"szen_{solar_zenith:.2f}__" +\
f"ozen_{observer_zenith:.2f}__" +\
f"saz_{solar_azimuth:.2f}__" +\
f"oaz_{observer_azimuth:.2f}"
lutdir2 = lutdir / lrttag
lutdir2.mkdir(parents=True, exist_ok=True)
lrtfile = lutdir2 / "lrt-template.inp"
with open(rtm_template_file, "r") as f:
fs = f.read()
open(lrtfile, "w").write(fs.format(
atmosphere=atmosphere_type, solar_azimuth=solar_azimuth,
solar_zenith=solar_zenith,
cos_observer_zenith=np.cos(observer_zenith * np.pi / 180.0),
observer_azimuth=observer_azimuth
))
open(lutdir2 / "prescribed_geom", "w").write(f"99:99:99 {solar_zenith} {solar_azimuth}")
elif atmospheric_rtm in ("modtran", "sRTMnet"):
loctag = f"atm_{atmosphere_type}__" +\
f"alt_{observer_altitude_km:.2f}__" +\
f"doy_{dayofyear:.0f}__" +\
f"lat_{latitude:.3f}__lon_{longitude:.3f}"
angtag = f"az_{observer_azimuth:.2f}__" +\
f"zen_{180 - observer_zenith:.2f}__" +\
f"time_{localtime:.2f}__" +\
f"elev_{elevation_km:.2f}"
lrttag = loctag + "/" + angtag
lutdir2 = lutdir / lrttag
lutdir2.mkdir(parents=True, exist_ok=True)
lrtfile = lutdir2 / "modtran-template-h2o.json"
mt_params = {
"atmosphere_type": atmosphere_type,
"fid": "hypertrace",
"altitude_km": observer_altitude_km,
"dayofyear": dayofyear,
"latitude": latitude,
"longitude": longitude,
"to_sensor_azimuth": observer_azimuth,
"to_sensor_zenith": 180 - observer_zenith,
"gmtime": localtime,
"elevation_km": elevation_km,
"output_file": lrtfile,
"ihaze_type": "AER_NONE"
}
write_modtran_template(**mt_params)
mt_params["ihaze_type"] = "AER_RURAL"
mt_params["output_file"] = lutdir2 / "modtran-template.json"
write_modtran_template(**mt_params)
vswir_conf["modtran_template_path"] = str(mt_params["output_file"])
if atmospheric_rtm == "sRTMnet":
vswir_conf["interpolator_base_path"] = str(lutdir2 / "sRTMnet_interpolator")
# These need to be absolute file paths
for path in ["emulator_aux_file", "emulator_file",
"earth_sun_distance_file", "irradiance_file"]:
vswir_conf[path] = str(mkabs(vswir_conf[path]))
else:
raise ValueError(f"Invalid atmospheric rtm {atmospheric_rtm}")
vswir_conf["lut_path"] = str(lutdir2)
vswir_conf["template_file"] = str(lrtfile)
outdir2 = outdir / lrttag / noisetag / priortag / atmtag / caltag
outdir2.mkdir(parents=True, exist_ok=True)
# Observation file, which describes the geometry
# Angles follow LibRadtran conventions
obsfile = outdir2 / "obs.txt"
geomvec = [
-999, # path length; not used
observer_azimuth, # Degrees 0-360; 0 = Sensor in N, looking S; 90 = Sensor in W, looking E
observer_zenith, # Degrees 0-90; 0 = directly overhead, 90 = horizon
solar_azimuth, # Degrees 0-360; 0 = Sun in S; 90 = Sun in W.
solar_zenith, # Same units as observer zenith
180.0 - abs(observer_zenith), # MODTRAN OBSZEN -- t
observer_azimuth - solar_azimuth + 180.0, # MODTRAN relative azimuth
observer_azimuth, # MODTRAN azimuth
        np.cos(observer_zenith * np.pi / 180.0)  # LibRadtran cos observer zenith
]
np.savetxt(obsfile, np.array([geomvec]))
isofit_common["input"] = {"obs_file": str(obsfile)}
isofit_fwd = copy.deepcopy(isofit_common)
isofit_fwd["input"]["reflectance_file"] = str(mkabs(reflectance_file))
isofit_fwd["implementation"]["mode"] = "simulation"
isofit_fwd["implementation"]["inversion"]["simulation_mode"] = True
fwd_surface = isofit_fwd["forward_model"]["surface"]
fwd_surface["surface_category"] = "surface"
# Check that prior and wavelength file have the same dimensions
prior = loadmat(mkabs(surface_file))
prior_wl = prior["wl"][0]
prior_nwl = len(prior_wl)
file_wl = np.loadtxt(wavelength_file)
file_nwl = file_wl.shape[0]
assert prior_nwl == file_nwl, \
f"Mismatch between wavelength file ({file_nwl}) " +\
f"and prior ({prior_nwl})."
fwd_surface["wavelength_file"] = str(wavelength_file)
radfile = outdir2 / "toa-radiance"
isofit_fwd["output"] = {"simulated_measurement_file": str(radfile)}
fwd_state = isofit_fwd["forward_model"]["radiative_transfer"]["statevector"]
fwd_state["AOT550"]["init"] = aod
fwd_state["H2OSTR"]["init"] = h2o
# Also set the LUT grid to only target state. We don't want to interpolate
# over the LUT for our forward simulations!
fwd_lut = isofit_fwd["forward_model"]["radiative_transfer"]["lut_grid"]
fwd_lut["AOT550"] = [aod]
fwd_lut["H2OSTR"] = [h2o]
# Also have to create a one-off LUT directory for the forward run, to avoid
# using an (incorrect) previously cached one.
fwd_lutdir = outdir2 / "fwd_lut"
fwd_lutdir.mkdir(parents=True, exist_ok=True)
fwd_vswir = (isofit_fwd["forward_model"]
["radiative_transfer"]
["radiative_transfer_engines"]
["vswir"])
fwd_vswir["lut_path"] = str(fwd_lutdir)
fwd_vswir["interpolator_base_path"] = str(fwd_lutdir)
if radfile.exists() and not overwrite:
logger.info("Skipping forward simulation because file exists.")
else:
fwdfile = outdir2 / "forward.json"
json.dump(isofit_fwd, open(fwdfile, "w"), indent=2)
logger.info("Starting forward simulation.")
Isofit(fwdfile).run()
logger.info("Forward simulation complete.")
isofit_inv = copy.deepcopy(isofit_common)
if inversion_mode == "simple":
# Special case! Use the optimal estimation code, but set `max_nfev` to 1.
inversion_mode = "inversion"
imp_inv = isofit_inv["implementation"]["inversion"]
if "least_squares_params" not in imp_inv:
imp_inv["least_squares_params"] = {}
imp_inv["least_squares_params"]["max_nfev"] = 1
isofit_inv["implementation"]["mode"] = inversion_mode
isofit_inv["input"]["measured_radiance_file"] = str(radfile)
est_refl_file = outdir2 / "estimated-reflectance"
post_unc_path = outdir2 / "posterior-uncertainty"
# Inverse mode
est_state_file = outdir2 / "estimated-state"
atm_coef_file = outdir2 / "atmospheric-coefficients"
post_unc_file = outdir2 / "posterior-uncertainty"
isofit_inv["output"] = {"estimated_reflectance_file": str(est_refl_file),
"estimated_state_file": str(est_state_file),
"atmospheric_coefficients_file": str(atm_coef_file),
"posterior_uncertainty_file": str(post_unc_file)}
# Run the workflow
if calibration_uncertainty_file is not None:
# Apply calibration uncertainty here
calmat = loadmat(calibration_uncertainty_file)
cov = calmat["Covariance"]
cov_l = np.linalg.cholesky(cov)
cov_wl = np.squeeze(calmat["wavelengths"])
rad_img = sp.open_image(str(radfile) + ".hdr")
rad_wl = rad_img.bands.centers
del rad_img
for ical in range(n_calibration_draws):
icalp1 = ical + 1
radfile_cal = f"{str(radfile)}-{icalp1:02d}"
reflfile_cal = f"{str(est_refl_file)}-{icalp1:02d}"
statefile_cal = f"{str(est_state_file)}-{icalp1:02d}"
atmfile_cal = f"{str(atm_coef_file)}-{icalp1:02d}"
uncfile_cal = f"{str(post_unc_file)}-{icalp1:02d}"
if pathlib.Path(reflfile_cal).exists() and not overwrite:
logger.info("Skipping calibration %d/%d because output exists",
icalp1, n_calibration_draws)
                continue  # skip this draw; output already exists
logger.info("Applying calibration uncertainty (%d/%d)", icalp1, n_calibration_draws)
sample_calibration_uncertainty(radfile, radfile_cal, cov_l, cov_wl, rad_wl,
bias_scale=calibration_scale)
logger.info("Starting inversion (calibration %d/%d)", icalp1, n_calibration_draws)
do_inverse(
copy.deepcopy(isofit_inv), radfile_cal, reflfile_cal,
statefile_cal, atmfile_cal, uncfile_cal,
overwrite=overwrite, use_empirical_line=use_empirical_line
)
logger.info("Inversion complete (calibration %d/%d)", icalp1, n_calibration_draws)
else:
if est_refl_file.exists() and not overwrite:
logger.info("Skipping inversion because output exists.")
else:
logger.info("Starting inversion.")
do_inverse(
copy.deepcopy(isofit_inv), radfile, est_refl_file,
est_state_file, atm_coef_file, post_unc_file,
overwrite=overwrite, use_empirical_line=use_empirical_line
)
logger.info("Inversion complete.")
logger.info("Workflow complete!")
##################################################
def do_inverse(isofit_inv: dict,
radfile: pathlib.Path,
est_refl_file: pathlib.Path,
est_state_file: pathlib.Path,
atm_coef_file: pathlib.Path,
post_unc_file: pathlib.Path,
overwrite: bool,
use_empirical_line: bool):
if use_empirical_line:
# Segment first, then run on segmented file
SEGMENTATION_SIZE = 40
CHUNKSIZE = 256
lbl_working_path = radfile.parent / str(radfile).replace("toa-radiance", "segmentation")
        # Path.with_suffix() rejects suffixes that do not start with a dot, so
        # build the "-subs" sibling paths by string concatenation instead.
        rdn_subs_path = pathlib.Path(str(radfile) + "-subs")
        rfl_subs_path = pathlib.Path(str(est_refl_file) + "-subs")
        state_subs_path = pathlib.Path(str(est_state_file) + "-subs")
        atm_subs_path = pathlib.Path(str(atm_coef_file) + "-subs")
        unc_subs_path = pathlib.Path(str(post_unc_file) + "-subs")
isofit_inv["input"]["measured_radiance_file"] = str(rdn_subs_path)
isofit_inv["output"] = {
"estimated_reflectance_file": str(rfl_subs_path),
"estimated_state_file": str(state_subs_path),
"atmospheric_coefficients_file": str(atm_subs_path),
"posterior_uncertainty_file": str(unc_subs_path)
}
if not overwrite and lbl_working_path.exists() and rdn_subs_path.exists():
logger.info("Skipping segmentation and extraction because files exist.")
else:
logger.info("Fixing any radiance values slightly less than zero...")
rad_img = sp.open_image(str(radfile) + ".hdr")
rad_m = rad_img.open_memmap(writable=True)
nearzero = np.logical_and(rad_m < 0, rad_m > -2)
rad_m[nearzero] = 0.0001
del rad_m
del rad_img
logger.info("Segmenting...")
segment(spectra=(str(radfile), str(lbl_working_path)),
flag=-9999, npca=5, segsize=SEGMENTATION_SIZE, nchunk=CHUNKSIZE)
logger.info("Extracting...")
extractions(inputfile=str(radfile), labels=str(lbl_working_path),
output=str(rdn_subs_path), chunksize=CHUNKSIZE, flag=-9999)
else:
# Run Isofit directly
isofit_inv["input"]["measured_radiance_file"] = str(radfile)
isofit_inv["output"] = {
"estimated_reflectance_file": str(est_refl_file),
"estimated_state_file": str(est_state_file),
"atmospheric_coefficients_file": str(atm_coef_file),
"posterior_uncertainty_file": str(post_unc_file)
}
if not overwrite and pathlib.Path(isofit_inv["output"]["estimated_reflectance_file"]).exists():
logger.info("Skipping inversion because output file exists.")
else:
invfile = radfile.parent / (str(radfile).replace("toa-radiance", "inverse") + ".json")
json.dump(isofit_inv, open(invfile, "w"), indent=2)
Isofit(invfile).run()
if use_empirical_line:
if not overwrite and est_refl_file.exists():
logger.info("Skipping empirical line because output exists.")
else:
logger.info("Applying empirical line...")
empirical_line(reference_radiance_file=str(rdn_subs_path),
reference_reflectance_file=str(rfl_subs_path),
reference_uncertainty_file=str(unc_subs_path),
reference_locations_file=None,
segmentation_file=str(lbl_working_path),
input_radiance_file=str(radfile),
input_locations_file=None,
output_reflectance_file=str(est_refl_file),
output_uncertainty_file=str(post_unc_file),
isofit_config=str(invfile))
def mkabs(path):
"""Make a path absolute."""
path2 = pathlib.Path(path)
return path2.expanduser().resolve()
def sample_calibration_uncertainty(input_file: pathlib.Path,
output_file: pathlib.Path,
cov_l: np.ndarray,
cov_wl: np.ndarray,
rad_wl: np.ndarray,
bias_scale=1.0):
input_file_hdr = str(input_file) + ".hdr"
output_file_hdr = str(output_file) + ".hdr"
shutil.copy(input_file, output_file)
shutil.copy(input_file_hdr, output_file_hdr)
img = sp.open_image(str(output_file_hdr))
img_m = img.open_memmap(writable=True)
# Here, we assume that the calibration bias is constant across the entire
# image (i.e., the same bias is added to all pixels).
z = np.random.normal(size=cov_l.shape[0], scale=bias_scale)
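    # cov_l is the Cholesky factor of the calibration covariance computed by the
    # caller, so cov_l @ z is a zero-mean Gaussian draw with that covariance;
    # adding 1 turns it into a per-band multiplicative bias.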
Az = 1.0 + cov_l @ z
# Resample the added noise vector to match the wavelengths of the target
# image.
Az_resampled = interp1d(cov_wl, Az, fill_value="extrapolate")(rad_wl)
img_m *= Az_resampled
return output_file
```
#### File: examples/py-hypertrace/summarize.py
```python
from pathlib import Path
import re
import json
import copy
import sys
import numpy as np
import pandas as pd
import spectral as sp
import matplotlib.pyplot as plt
assert len(sys.argv) > 1, "Please specify a JSON config file."
configfile = sys.argv[1]
with open(configfile, "r") as f:
config = json.load(f)
outdir = Path(config["outdir"])
reflfiles = list(outdir.glob("**/estimated-reflectance"))
assert len(reflfiles) > 0, f"No reflectance files found in directory {outdir}"
true_refl_file = Path(config["reflectance_file"]).expanduser()
true_reflectance = sp.open_image(str(true_refl_file) + ".hdr")
true_waves = np.array(true_reflectance.metadata["wavelength"], dtype=float)
true_refl_m = true_reflectance.open_memmap()
windows = config["isofit"]["implementation"]["inversion"]["windows"]
def parse_dir(ddir):
grps = {"directory": [str(ddir)]}
for key in ["atm", "noise", "prior", "inversion"]:
pat = f".*{key}_(.+?)" + r"(__|/|\Z)"
match = re.match(pat, str(ddir))
if match is not None:
match = match.group(1)
grps[key] = [match]
for key in ["szen", "ozen", "zen",
"saz", "oaz", "az",
"time", "elev",
"snr", "aod", "h2o"]:
pat = f".*{key}_([0-9.]+)" + r"(__|/|\Z)"
match = re.match(pat, str(ddir))
if match is not None:
match = float(match.group(1))
grps[key] = [match]
return pd.DataFrame(grps, index=[0])
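# Editorial example (hedged): for a hypothetical output directory such as
#   outdir/atm_ATM_MIDLAT_WINTER__szen_0.00__ozen_0.00__saz_0.00__oaz_0.00/
#     snr_300/prior_surface__inversion_inversion/aod_0.100__h2o_1.000/...
# parse_dir() returns a one-row DataFrame whose columns ('atm', 'szen', 'snr',
# 'prior', 'inversion', 'aod', 'h2o', ...) are parsed out of those tags.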
info = pd.concat([parse_dir(x.parent) for x in reflfiles])\
.reset_index(drop=True)
def mask_windows(data, waves, windows):
inside_l = []
for w in windows:
inside_l.append(np.logical_and(waves >= w[0],
waves <= w[1]))
inside = np.logical_or.reduce(inside_l)
d2 = copy.copy(data)
d2[:, :, np.logical_not(inside)] = np.nan
return d2
info["rmse"] = np.nan
info["bias"] = np.nan
info["rel_bias"] = np.nan
for i in range(info.shape[0]):
ddir = Path(info["directory"][i])
est_refl_file = ddir / "estimated-reflectance"
est_refl = sp.open_image(str(est_refl_file) + ".hdr")
est_refl_waves = np.array(est_refl.metadata["wavelength"], dtype=float)
est_refl_m = est_refl.open_memmap()
if est_refl_m.shape != true_refl_m.shape:
true_resample = np.zeros_like(est_refl_m)
for r in range(true_resample.shape[0]):
for c in range(true_resample.shape[1]):
true_resample[r, c, :] = np.interp(
est_refl_waves,
true_waves,
true_refl_m[r, c, :]
)
else:
true_resample = true_refl_m
est_refl_m2 = mask_windows(est_refl_m, est_refl_waves, windows)
bias = est_refl_m2 - true_resample
rmse = np.sqrt(np.nanmean(bias**2))
mean_bias = np.nanmean(bias)
rel_bias = bias / true_resample
mean_rel_bias = np.nanmean(rel_bias)
info.loc[i, "rmse"] = rmse
info.loc[i, "bias"] = mean_bias
info.loc[i, "rel_bias"] = mean_rel_bias
# Bias by wavelength
bias_wl = np.nanmean(bias, axis=(0, 1))
bias_wl_q = np.nanquantile(bias, (0.05, 0.95), axis=(0, 1))
plt.axhline(y=0, color="gray")
plt.plot(est_refl_waves, bias_wl, "k-")
plt.plot(est_refl_waves, np.transpose(bias_wl_q), "k--")
plt.xlabel("Wavelength (nm)")
plt.ylabel("Bias (Estimate - True; 90% CI)")
plt.savefig(ddir / "bias.png")
print("Simulations sorted by RMSE (lowest first)")
print(info.sort_values("rmse"))
info.to_csv(outdir / "summary.csv")
```
#### File: isofit/utils/interpolate_atmosphere.py
```python
from scipy.linalg import inv
from isofit.core.instrument import Instrument
from spectral.io import envi
from scipy.spatial import KDTree
import numpy as np
import logging
import time
import matplotlib
import pylab as plt
from isofit.configs import configs
import multiprocessing
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from sklearn.decomposition import PCA
plt.switch_backend("Agg")
def _write_bil_chunk(dat: np.array, outfile: str, line: int, shape: tuple, dtype: str = 'float32') -> None:
"""
Write a chunk of data to a binary, BIL formatted data cube.
Args:
dat: data to write
outfile: output file to write to
line: line of the output file to write to
shape: shape of the output file
dtype: output data type
Returns:
None
"""
outfile = open(outfile, 'rb+')
outfile.seek(line * shape[1] * shape[2] * np.dtype(dtype).itemsize)
outfile.write(dat.astype(dtype).tobytes())
outfile.close()
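# Editorial note (hedged reading of the offset arithmetic above): the seek
# advances line * shape[1] * shape[2] elements, i.e. `shape` is interpreted as
# the full (lines, bands, samples) shape of the BIL cube being written.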
def _run_chunk(start_line: int, stop_line: int, reference_radiance_file: str, reference_atm_file: str,
reference_locations_file: str, input_radiance_file: str,
input_locations_file: str, segmentation_file: str, isofit_config: dict, output_reflectance_file: str,
output_uncertainty_file: str, radiance_factors: np.array, nneighbors: int,
nodata_value: float) -> None:
"""
Args:
start_line: line to start empirical line run at
stop_line: line to stop empirical line run at
reference_radiance_file: source file for radiance (interpolation built from this)
reference_atm_file: source file for atmosphere coefficients (interpolation built from this)
reference_locations_file: source file for file locations (lon, lat, elev), (interpolation built from this)
input_radiance_file: input radiance file (interpolate over this)
input_locations_file: input location file (interpolate over this)
segmentation_file: input file noting the per-pixel segmentation used
        isofit_config: dictionary-style isofit configuration
output_reflectance_file: location to write output reflectance to
output_uncertainty_file: location to write output uncertainty to
radiance_factors: radiance adjustment factors
nneighbors: number of neighbors to use for interpolation
nodata_value: nodata value of input and output
Returns:
None
"""
# Load reference images
reference_radiance_img = envi.open(reference_radiance_file + '.hdr', reference_radiance_file)
reference_atm_img = envi.open(reference_atm_file + '.hdr', reference_atm_file)
reference_locations_img = envi.open(reference_locations_file + '.hdr', reference_locations_file)
n_reference_lines, n_radiance_bands, n_reference_columns = [int(reference_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
# Load input images
input_radiance_img = envi.open(input_radiance_file + '.hdr', input_radiance_file)
n_input_lines, n_input_bands, n_input_samples = [int(input_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
wl = np.array([float(w) for w in input_radiance_img.metadata['wavelength']])
input_locations_img = envi.open(input_locations_file + '.hdr', input_locations_file)
n_location_bands = int(input_locations_img.metadata['bands'])
# Load output images
output_reflectance_img = envi.open(output_reflectance_file + '.hdr', output_reflectance_file)
output_uncertainty_img = envi.open(output_uncertainty_file + '.hdr', output_uncertainty_file)
n_output_reflectance_bands = int(output_reflectance_img.metadata['bands'])
n_output_uncertainty_bands = int(output_uncertainty_img.metadata['bands'])
# Load reference data
reference_locations_mm = reference_locations_img.open_memmap(interleave='source', writable=False)
reference_locations = np.array(reference_locations_mm[:, :, :]).reshape((n_reference_lines, n_location_bands))
reference_radiance_mm = reference_radiance_img.open_memmap(interleave='source', writable=False)
reference_radiance = np.array(reference_radiance_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands))
reference_atm_mm = reference_atm_img.open_memmap(interleave='source', writable=False)
reference_atm = np.array(reference_atm_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands*5))
rhoatm = reference_atm[:,:n_radiance_bands]
sphalb = reference_atm[:,n_radiance_bands:(n_radiance_bands*2)]
transm = reference_atm[:,(n_radiance_bands*2):(n_radiance_bands*3)]
solirr = reference_atm[:,(n_radiance_bands*3):(n_radiance_bands*4)]
coszen = reference_atm[:,(n_radiance_bands*4):(n_radiance_bands*5)]
# Load segmentation data
if segmentation_file:
segmentation_img = envi.open(segmentation_file + '.hdr', segmentation_file)
segmentation_img = segmentation_img.read_band(0)
else:
segmentation_img = None
# Prepare instrument model, if available
if isofit_config is not None:
config = configs.create_new_config(isofit_config)
instrument = Instrument(config)
logging.info('Loading instrument')
else:
instrument = None
# Load radiance factors
if radiance_factors is None:
radiance_adjustment = np.ones(n_radiance_bands, )
else:
radiance_adjustment = np.loadtxt(radiance_factors)
# PCA coefficients
rdn_pca = PCA(n_components=2)
reference_pca = rdn_pca.fit_transform(reference_radiance * radiance_adjustment)
# Create the tree to find nearest neighbor segments.
# Assume (heuristically) that, for distance purposes, 1 m vertically is
# comparable to 10 m horizontally, and that there are 100 km per latitude
# degree. This is all approximate of course. Elevation appears in the
    # third element, and the first two are latitude/longitude coordinates.
# The fourth and fifth elements are "spectral distance" determined by the
# top principal component coefficients
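    # As a worked example of the scaling below: a 0.001 degree (~100 m) horizontal
    # offset, a 10 m elevation difference and a 1-unit shift in a PCA coefficient
    # all map to 100 scaled units, i.e. they are treated as comparable distances.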
loc_scaling = np.array([1e5, 1e5, 10, 100, 100])
scaled_ref_loc = np.concatenate((reference_locations,reference_pca),axis=1) * loc_scaling
tree = KDTree(scaled_ref_loc)
# Fit GP parameters on transmissivity of an H2O feature, in the
# first 400 datapoints
use = np.arange(min(len(rhoatm),400))
h2oband = np.argmin(abs(wl-940))
scale = (500,500,500,500,500)
bounds = ((100,2000),(100,2000),(100,2000),(100,2000),(100,2000))
kernel = RBF(length_scale=scale, length_scale_bounds=bounds) +\
WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 0.1))
gp = GaussianProcessRegressor(kernel=kernel, alpha=0.0, normalize_y=True)
gp = gp.fit(scaled_ref_loc[use,:], transm[use,h2oband])
kernel = gp.kernel_
# Iterate through image. Each segment has its own GP, stored in a
# hash table indexed by location in the segmentation map
hash_table = {}
for row in np.arange(start_line, stop_line):
# Load inline input data
input_radiance_mm = input_radiance_img.open_memmap(
interleave='source', writable=False)
input_radiance = np.array(input_radiance_mm[row, :, :])
if input_radiance_img.metadata['interleave'] == 'bil':
input_radiance = input_radiance.transpose((1, 0))
input_radiance = input_radiance * radiance_adjustment
input_locations_mm = input_locations_img.open_memmap(
interleave='source', writable=False)
input_locations = np.array(input_locations_mm[row, :, :])
if input_locations_img.metadata['interleave'] == 'bil':
input_locations = input_locations.transpose((1, 0))
output_reflectance_row = np.zeros(input_radiance.shape) + nodata_value
output_uncertainty_row = np.zeros(input_radiance.shape) + nodata_value
nspectra, start = 0, time.time()
for col in np.arange(n_input_samples):
# Get radiance, pca coordinates, physical location for this datum
my_rdn = input_radiance[col, :]
my_pca = rdn_pca.transform(my_rdn[np.newaxis,:])
my_loc = np.r_[input_locations[col, :], my_pca[0,:]] * loc_scaling
if np.all(np.isclose(my_rdn, nodata_value)):
output_reflectance_row[col, :] = nodata_value
output_uncertainty_row[col, :] = nodata_value
continue
# Retrieve or build the GP
gp_rhoatm, gp_sphalb, gp_transm, irr = None, None, None, None
hash_idx = segmentation_img[row, col]
if hash_idx in hash_table:
gp_rhoatm, gp_sphalb, gp_transm, irr = hash_table[hash_idx]
else:
# There is no GP for this segment, so we build one from
# the atmospheric coefficients from closest neighbors
dists, nn = tree.query(my_loc, nneighbors)
neighbor_rhoatm = rhoatm[nn, :]
neighbor_transm = transm[nn, :]
neighbor_sphalb = sphalb[nn, :]
neighbor_coszen = coszen[nn, :]
neighbor_solirr = solirr[nn, :]
neighbor_locs = scaled_ref_loc[nn, :]
# Create a new GP using the optimized parameters as a fixed kernel
gp_rhoatm = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
normalize_y=True, optimizer=None)
gp_rhoatm.fit(neighbor_locs, neighbor_rhoatm)
gp_sphalb = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
normalize_y=True, optimizer=None)
gp_sphalb.fit(neighbor_locs, neighbor_sphalb)
gp_transm = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
normalize_y=True, optimizer=None)
gp_transm.fit(neighbor_locs, neighbor_transm)
irr = solirr[1,:]*coszen[1,:]
irr[irr<1e-8] = 1e-8
hash_table[hash_idx] = (gp_rhoatm, gp_sphalb, gp_transm, irr)
my_rhoatm = gp_rhoatm.predict(my_loc[np.newaxis,:])
my_sphalb = gp_sphalb.predict(my_loc[np.newaxis,:])
my_transm = gp_transm.predict(my_loc[np.newaxis,:])
my_rho = (my_rdn * np.pi) / irr
my_rfl = 1.0 / (my_transm / (my_rho - my_rhoatm) + my_sphalb)
output_reflectance_row[col, :] = my_rfl
# Calculate uncertainties. Sy approximation rather than Seps for
# speed, for now... but we do take into account instrument
# radiometric uncertainties
#output_uncertainty_row[col, :] = np.zeros()
#if instrument is None:
#else:
# Sy = instrument.Sy(x, geom=None)
# calunc = instrument.bval[:instrument.n_chan]
# output_uncertainty_row[col, :] = np.sqrt(
# np.diag(Sy) + pow(calunc * x, 2)) * bhat[:, 1]
# if loglevel == 'DEBUG':
# plot_example(xv, yv, bhat)
nspectra = nspectra + 1
elapsed = float(time.time() - start)
logging.info('row {}/{}, ({}/{} local), {} spectra per second'.format(row, n_input_lines, int(row - start_line),
int(stop_line - start_line),
round(float(nspectra) / elapsed, 2)))
del input_locations_mm
del input_radiance_mm
output_reflectance_row = output_reflectance_row.transpose((1, 0))
output_uncertainty_row = output_uncertainty_row.transpose((1, 0))
shp = output_reflectance_row.shape
output_reflectance_row = output_reflectance_row.reshape((1, shp[0], shp[1]))
shp = output_uncertainty_row.shape
output_uncertainty_row = output_uncertainty_row.reshape((1, shp[0], shp[1]))
_write_bil_chunk(output_reflectance_row, output_reflectance_file, row,
(n_input_lines, n_output_reflectance_bands, n_input_samples))
_write_bil_chunk(output_uncertainty_row, output_uncertainty_file, row,
(n_input_lines, n_output_uncertainty_bands, n_input_samples))
def interpolate_atmosphere(reference_radiance_file: str, reference_atm_file: str,
reference_locations_file: str, segmentation_file: str, input_radiance_file: str,
input_locations_file: str, output_reflectance_file: str, output_uncertainty_file: str,
nneighbors: int = 15, nodata_value: float = -9999.0, level: str = 'INFO',
radiance_factors: np.array = None, isofit_config: dict = None, n_cores: int = -1) -> None:
"""
Perform a Gaussian process interpolation of atmospheric parameters. It relies on precalculated
atmospheric coefficients at a subset of spatial locations stored in a file. The file has
each coefficient defined for every radiance channel, appearing in the order: (1) atmospheric
path reflectance; (2) spherical sky albedo; (3) total diffuse and direct transmittance of the
two-part downwelling and upwelling path; (4) extraterrestrial solar irradiance; (5) cosine of solar
zenith angle.
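    For example, with B radiance channels each reference pixel stores 5*B values,
    laid out as five consecutive blocks of length B in the order listed above
    (path reflectance, sky albedo, transmittance, irradiance, cosine of zenith).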
Args:
reference_radiance_file: source file for radiance (interpolation built from this)
reference_atm_file: source file for atmospheric coefficients (interpolation from this)
reference_locations_file: source file for file locations (lon, lat, elev), (interpolation from this)
segmentation_file: input file noting the per-pixel segmentation used
input_radiance_file: input radiance file (interpolate over this)
input_locations_file: input location file (interpolate over this)
output_reflectance_file: location to write output reflectance
output_uncertainty_file: location to write output uncertainty
nneighbors: number of neighbors to use for interpolation
nodata_value: nodata value of input and output
level: logging level
radiance_factors: radiance adjustment factors
        isofit_config: dictionary-style isofit configuration
n_cores: number of cores to run on
Returns:
None
"""
loglevel = level
logging.basicConfig(format='%(message)s', level=loglevel)
# Open input data to check that band formatting is correct
# Load reference set radiance
reference_radiance_img = envi.open(reference_radiance_file + '.hdr', reference_radiance_file)
n_reference_lines, n_radiance_bands, n_reference_columns = [int(reference_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
if n_reference_columns != 1:
raise IndexError("Reference data should be a single-column list")
# Load reference set atmospheric coefficients
reference_atm_img = envi.open(reference_atm_file + '.hdr', reference_atm_file)
nrefa, nba, srefa = [int(reference_atm_img.metadata[n]) for n in ('lines', 'bands', 'samples')]
if nrefa != n_reference_lines or srefa != n_reference_columns:
raise IndexError("Reference file dimension mismatch (atmosphere)")
if nba != (n_radiance_bands * 5):
raise IndexError("Reference atmosphere file has incorrect dimensioning")
# Load reference set locations
reference_locations_img = envi.open(reference_locations_file + '.hdr', reference_locations_file)
nrefl, lb, ls = [int(reference_locations_img.metadata[n]) for n in ('lines', 'bands', 'samples')]
if nrefl != n_reference_lines or lb != 3:
raise IndexError("Reference file dimension mismatch (locations)")
input_radiance_img = envi.open(input_radiance_file + '.hdr', input_radiance_file)
n_input_lines, n_input_bands, n_input_samples = [int(input_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
if n_radiance_bands != n_input_bands:
msg = 'Number of channels mismatch: input (%i) vs. reference (%i)'
raise IndexError(msg % (n_input_bands, n_radiance_bands))
input_locations_img = envi.open(input_locations_file + '.hdr', input_locations_file)
nll, nlb, nls = [int(input_locations_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
if nll != n_input_lines or nlb != 3 or nls != n_input_samples:
raise IndexError('Input location dimension mismatch')
# Create output files
output_metadata = input_radiance_img.metadata
output_metadata['interleave'] = 'bil'
output_reflectance_img = envi.create_image(output_reflectance_file + '.hdr', ext='',
metadata=output_metadata, force=True)
output_uncertainty_img = envi.create_image(output_uncertainty_file + '.hdr', ext='',
metadata=output_metadata, force=True)
    # Now clean up the inputs and outputs; the worker chunks write to the output files dynamically
del output_reflectance_img, output_uncertainty_img
del reference_atm_img, reference_locations_img, input_radiance_img, input_locations_img
# Determine the number of cores to use
if n_cores == -1:
n_cores = multiprocessing.cpu_count()
n_cores = min(n_cores, n_input_lines)
# Break data into sections
line_sections = np.linspace(0, n_input_lines, num=n_cores + 1, dtype=int)
# Set up our pool
pool = multiprocessing.Pool(processes=n_cores)
start_time = time.time()
logging.info('Beginning atmospheric interpolation inversions using {} cores'.format(n_cores))
# Run the pool (or run serially)
results = []
for l in range(len(line_sections) - 1):
args = (line_sections[l], line_sections[l + 1], reference_radiance_file, reference_atm_file,
reference_locations_file, input_radiance_file,
input_locations_file, segmentation_file, isofit_config, output_reflectance_file,
output_uncertainty_file, radiance_factors, nneighbors, nodata_value,)
if n_cores != 1:
results.append(pool.apply_async(_run_chunk, args))
else:
_run_chunk(*args)
pool.close()
pool.join()
total_time = time.time() - start_time
    logging.info('Parallel atmospheric interpolation inversions complete. {} s total, {} spectra/s, {} spectra/s/core'.format(
total_time, line_sections[-1] * n_input_samples / total_time,
line_sections[-1] * n_input_samples / total_time / n_cores))
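# Illustrative invocation (not part of the original module); every file name below
# is a hypothetical placeholder for an ENVI image pair (<name> plus <name>.hdr):
if __name__ == '__main__':
    interpolate_atmosphere(reference_radiance_file='subs_rdn',
                           reference_atm_file='subs_atm',
                           reference_locations_file='subs_loc',
                           segmentation_file='segmentation',
                           input_radiance_file='full_rdn',
                           input_locations_file='full_loc',
                           output_reflectance_file='full_rfl',
                           output_uncertainty_file='full_uncert',
                           nneighbors=15,
                           n_cores=4)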
```
|
{
"source": "jfahrg/augentbot",
"score": 3
}
|
#### File: scripts/pymarkovchain_dynamic/DynamicMarkovChain.py
```python
import random
from .MarkovChain import MarkovChain, _wordIter, _db_factory
class DynamicMarkovChain(MarkovChain):
def generateDatabase(self, textSample, sentenceSep='[.!?\n]', n=2):
self.textSample = _wordIter(textSample, sentenceSep)
# get an array for the 'sentences'
self.sentenceSep = sentenceSep
self.n = n
self.db = _db_factory()
def _temporaryDatabase(self, textSample):
""" Generate word probability database from iterable of sentences """
# I'm using the database to temporarily store word counts
# We're using '' as special symbol for the beginning
# of a sentence
self.db[('',)][''] = 0.0
for line in textSample:
words = line.strip().split() # split words in line
if len(words) == 0:
continue
# first word follows a sentence end
self.db[("",)][words[0]] += 1
for order in range(1, self.n + 1):
for i in range(len(words) - 1):
if i + order >= len(words):
continue
word = tuple(words[i:i + order])
self.db[word][words[i + order]] += 1
# last word precedes a sentence end
self.db[tuple(words[len(words) - order:len(words)])][""] += 1
# We've now got the self.db filled with parametrized word counts
# We still need to normalize this to represent probabilities
for word in self.db:
wordsum = 0
for nextword in self.db[word]:
wordsum += self.db[word][nextword]
if wordsum != 0:
for nextword in self.db[word]:
self.db[word][nextword] /= wordsum
def _relevantSentences(self, seedwords):
for sentence in self.textSample:
for seedword in seedwords:
if seedword in sentence:
yield sentence
def _databaseFromSeed(self, seedwords):
return self._temporaryDatabase(self._relevantSentences(seedwords))
def generateStringWithSeed(self, seed):
""" Generate a "sentence" with the database and a given word """
        # using str.split here means we're constructing the list in memory
# but as the generated sentence only depends on the last word of the seed
# I'm assuming seeds tend to be rather short.
words = seed.split()
return self._accumulateWithSeed(words)
def _accumulateWithSeed(self, seed):
self._databaseFromSeed(seed)
return MarkovChain._accumulateWithSeed(self, seed)
def _nextWord(self, lastwords):
lastwords = tuple(lastwords)
if lastwords != ('',):
while lastwords not in self.db:
lastwords = lastwords[1:]
if not lastwords:
return ''
probmap = self.db[lastwords]
sample = random.random()
# since rounding errors might make us miss out on some words
maxprob = 0.0
maxprobword = ""
for candidate in probmap:
# remember which word had the highest probability
            # this is the word we'll default to if we can't find anything else
if probmap[candidate] > maxprob:
maxprob = probmap[candidate]
maxprobword = candidate
if sample > probmap[candidate]:
sample -= probmap[candidate]
else:
return candidate
# getting here means we haven't found a matching word. :(
return maxprobword
def flushDatabase(self):
""" Deletes the current state of the database to free up memory """
self.db = _db_factory()
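# Hedged usage sketch (the MarkovChain base class comes from the sibling module and
# its constructor arguments are not shown here, so they are elided):
#   chain = DynamicMarkovChain(...)
#   chain.generateDatabase(open('corpus.txt').read())
#   chain.generateStringWithSeed('market')   # builds a per-seed database on demand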
```
#### File: augentbot/scripts/timestamps.py
```python
import _io
import datetime
from typing import Iterable
def add_timestamp(entry: str) -> str:
timestamp = str(tuple(datetime.datetime.now().timetuple())[:6])
timestamp = '{0}{1} '.format(' ' * (25 - len(timestamp)), timestamp)
entry = entry.replace('\n', '\n'+timestamp)
return timestamp + entry
def remove_timestamp(entry: str) -> str:
return entry[27:]
def get_timestamp(entry: str) -> str:
return entry[:27].strip()
def write_with_timestamps(file: _io.TextIOWrapper, entries: Iterable[str]) -> None:
for e in entries:
file.write(add_timestamp(e))
def read_wo_timestamps(entries: Iterable[str]) -> Iterable[str]:
return [remove_timestamp(e) for e in entries]
def timetuple(entry: str):
time = datetime.datetime.strptime(get_timestamp(entry), '(%Y, %m, %d, %H, %M, %S)')
return entry, time
```
|
{
"source": "jfairf01/OrgoWebsite",
"score": 3
}
|
#### File: app/main/views.py
```python
from flask import render_template, jsonify
from sqlalchemy import func
from . import main
from .. import db
from ..models import User, HighScore
@main.route('/highScore')
def highScore():
highScore = HighScore.query.order_by(HighScore.username).first()
if highScore is None:
return jsonify(username="None", score=0)
return jsonify(username=highScore.username, score=highScore.highScore)
@main.route('/newhighScore/<userName>/<newScore>')
def newhighScore(userName, newScore):
HighScore.query.delete()
newHighScore = HighScore(username=userName, highScore=newScore)
db.session.add(newHighScore)
db.session.commit()
return jsonify(username=newHighScore.username, score=newHighScore.highScore)
@main.route('/')
def index():
return render_template('main/index.html')
@main.route('/Categories')
def Categories():
return render_template('main/Categories.html')
@main.route('/FourthPage')
def fourthPage():
return render_template('main/FourthPage.html')
@main.route('/FifthPage')
def fifthPage():
return render_template('main/FifthPage.html')
```
#### File: app/SnEMechs/errors.py
```python
from flask import render_template
from . import SnEMechs
@SnEMechs.app_errorhandler(403)
def forbidden(_):
return render_template('errors/403.html'), 403
@SnEMechs.app_errorhandler(404)
def page_not_found(_):
return render_template('errors/404.html'), 404
@SnEMechs.app_errorhandler(500)
def internal_server_error(_):
return render_template('errors/500.html'), 500
```
#### File: providers/misc/__init__.py
```python
from __future__ import unicode_literals
import hashlib
import string
import uuid
import os
from faker.generator import random
from faker.providers.date_time import Provider as DatetimeProvider
from .. import BaseProvider
class Provider(BaseProvider):
# Locales supported by Linux Mint from `/usr/share/i18n/SUPPORTED`
language_locale_codes = {
'aa': ('DJ', 'ER', 'ET'), 'af': ('ZA',), 'ak': ('GH',), 'am': ('ET',),
'an': ('ES',), 'apn': ('IN',),
'ar': ('AE', 'BH', 'DZ', 'EG', 'IN', 'IQ', 'JO', 'KW', 'LB', 'LY',
'MA', 'OM', 'QA', 'SA', 'SD', 'SS', 'SY', 'TN', 'YE'),
'as': ('IN',), 'ast': ('ES',), 'ayc': ('PE',), 'az': ('AZ', 'IN'),
'be': ('BY',), 'bem': ('ZM',), 'ber': ('DZ', 'MA'), 'bg': ('BG',),
'bhb': ('IN',), 'bho': ('IN',), 'bn': ('BD', 'IN'), 'bo': ('CN', 'IN'),
'br': ('FR',), 'brx': ('IN',), 'bs': ('BA',), 'byn': ('ER',),
'ca': ('AD', 'ES', 'FR', 'IT'), 'ce': ('RU',), 'ckb': ('IQ',),
'cmn': ('TW',), 'crh': ('UA',), 'cs': ('CZ',), 'csb': ('PL',),
'cv': ('RU',), 'cy': ('GB',), 'da': ('DK',),
'de': ('AT', 'BE', 'CH', 'DE', 'LI', 'LU'), 'doi': ('IN',),
'dv': ('MV',), 'dz': ('BT',), 'el': ('GR', 'CY'),
'en': ('AG', 'AU', 'BW', 'CA', 'DK', 'GB', 'HK', 'IE', 'IN', 'NG',
'NZ', 'PH', 'SG', 'US', 'ZA', 'ZM', 'ZW'),
'eo': ('US',),
'es': ('AR', 'BO', 'CL', 'CO', 'CR', 'CU', 'DO', 'EC', 'ES', 'GT',
'HN', 'MX', 'NI', 'PA', 'PE', 'PR', 'PY', 'SV', 'US', 'UY', 'VE'
), 'et': ('EE',), 'eu': ('ES', 'FR'), 'fa': ('IR',),
'ff': ('SN',), 'fi': ('FI',), 'fil': ('PH',), 'fo': ('FO',),
'fr': ('CA', 'CH', 'FR', 'LU'), 'fur': ('IT',), 'fy': ('NL', 'DE'),
'ga': ('IE',), 'gd': ('GB',), 'gez': ('ER', 'ET'), 'gl': ('ES',),
'gu': ('IN',), 'gv': ('GB',), 'ha': ('NG',), 'hak': ('TW',),
'he': ('IL',), 'hi': ('IN',), 'hne': ('IN',), 'hr': ('HR',),
'hsb': ('DE',), 'ht': ('HT',), 'hu': ('HU',), 'hy': ('AM',),
'ia': ('FR',), 'id': ('ID',), 'ig': ('NG',), 'ik': ('CA',),
'is': ('IS',), 'it': ('CH', 'IT'), 'iu': ('CA',), 'iw': ('IL',),
'ja': ('JP',), 'ka': ('GE',), 'kk': ('KZ',), 'kl': ('GL',),
'km': ('KH',), 'kn': ('IN',), 'ko': ('KR',), 'kok': ('IN',),
'ks': ('IN',), 'ku': ('TR',), 'kw': ('GB',), 'ky': ('KG',),
'lb': ('LU',), 'lg': ('UG',), 'li': ('BE', 'NL'), 'lij': ('IT',),
'ln': ('CD',), 'lo': ('LA',), 'lt': ('LT',), 'lv': ('LV',),
'lzh': ('TW',), 'mag': ('IN',), 'mai': ('IN',), 'mg': ('MG',),
'mhr': ('RU',), 'mi': ('NZ',), 'mk': ('MK',), 'ml': ('IN',),
'mn': ('MN',), 'mni': ('IN',), 'mr': ('IN',), 'ms': ('MY',),
'mt': ('MT',), 'my': ('MM',), 'nan': ('TW',), 'nb': ('NO',),
'nds': ('DE', 'NL'), 'ne': ('NP',), 'nhn': ('MX',),
'niu': ('NU', 'NZ'), 'nl': ('AW', 'BE', 'NL'), 'nn': ('NO',),
'nr': ('ZA',), 'nso': ('ZA',), 'oc': ('FR',), 'om': ('ET', 'KE'),
'or': ('IN',), 'os': ('RU',), 'pa': ('IN', 'PK'),
'pap': ('AN', 'AW', 'CW'), 'pl': ('PL',), 'ps': ('AF',),
'pt': ('BR', 'PT'), 'quz': ('PE',), 'raj': ('IN',), 'ro': ('RO',),
'ru': ('RU', 'UA'), 'rw': ('RW',), 'sa': ('IN',), 'sat': ('IN',),
'sc': ('IT',), 'sd': ('IN', 'PK'), 'se': ('NO',), 'shs': ('CA',),
'si': ('LK',), 'sid': ('ET',), 'sk': ('SK',), 'sl': ('SI',),
'so': ('DJ', 'ET', 'KE', 'SO'), 'sq': ('AL', 'ML'), 'sr': ('ME', 'RS'),
'ss': ('ZA',), 'st': ('ZA',), 'sv': ('FI', 'SE'), 'sw': ('KE', 'TZ'),
'szl': ('PL',), 'ta': ('IN', 'LK'), 'tcy': ('IN',), 'te': ('IN',),
'tg': ('TJ',), 'th': ('TH',), 'the': ('NP',), 'ti': ('ER', 'ET'),
'tig': ('ER',), 'tk': ('TM',), 'tl': ('PH',), 'tn': ('ZA',),
'tr': ('CY', 'TR'), 'ts': ('ZA',), 'tt': ('RU',), 'ug': ('CN',),
'uk': ('UA',), 'unm': ('US',), 'ur': ('IN', 'PK'), 'uz': ('UZ',),
've': ('ZA',), 'vi': ('VN',), 'wa': ('BE',), 'wae': ('CH',),
'wal': ('ET',), 'wo': ('SN',), 'xh': ('ZA',), 'yi': ('US',),
'yo': ('NG',), 'yue': ('HK',), 'zh': ('CN', 'HK', 'SG', 'TW'),
'zu': ('ZA',)
}
@classmethod
def boolean(cls, chance_of_getting_true=50):
return random.randint(1, 100) <= chance_of_getting_true
@classmethod
def null_boolean(cls):
return {
0: None,
1: True,
-1: False
}[random.randint(-1, 1)]
@classmethod
def binary(cls, length=(1 * 1024 * 1024)):
""" Returns random binary blob.
Default blob size is 1 Mb.
"""
return os.urandom(length)
@classmethod
def md5(cls, raw_output=False):
"""
Calculates the md5 hash of a given string
:example 'cfcd208495d565ef66e7dff9f98764da'
"""
res = hashlib.md5(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha1(cls, raw_output=False):
"""
Calculates the sha1 hash of a given string
:example 'b5d86317c2a144cd04d0d7c03b2b02666fafadf2'
"""
res = hashlib.sha1(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha256(cls, raw_output=False):
"""
Calculates the sha256 hash of a given string
:example '85086017559ccc40638fcde2fecaf295e0de7ca51b7517b6aebeaaf75b4d4654'
"""
res = hashlib.sha256(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def locale(cls):
language_code = cls.language_code()
return language_code + '_' + cls.random_element(
cls.language_locale_codes[language_code]
)
@classmethod
def country_code(cls):
return cls.random_element(DatetimeProvider.countries)['code']
@classmethod
def language_code(cls):
return cls.random_element(cls.language_locale_codes.keys())
@classmethod
def uuid4(cls):
"""
Generates a random UUID4 string.
"""
return str(uuid.uuid4())
@classmethod
def password(cls, length=10, special_chars=True, digits=True, upper_case=True, lower_case=True):
"""
Generates a random password.
@param length: Integer. Length of a password
@param special_chars: Boolean. Whether to use special characters !@#$%^&*()_+
@param digits: Boolean. Whether to use digits
@param upper_case: Boolean. Whether to use upper letters
@param lower_case: Boolean. Whether to use lower letters
@return: String. Random password
"""
choices = ""
required_tokens = []
if special_chars:
required_tokens.append(random.choice("!@#$%^&*()_+"))
choices += "!@#$%^&*()_+"
if digits:
required_tokens.append(random.choice(string.digits))
choices += string.digits
if upper_case:
required_tokens.append(random.choice(string.ascii_uppercase))
choices += string.ascii_uppercase
if lower_case:
required_tokens.append(random.choice(string.ascii_lowercase))
choices += string.ascii_lowercase
assert len(required_tokens) <= length, "Required length is shorter than required characters"
# Generate a first version of the password
chars = [random.choice(choices) for x in range(length)]
# Pick some unique locations
random_indexes = set()
while len(random_indexes) < len(required_tokens):
random_indexes.add(random.randint(0, len(chars) - 1))
# Replace them with the required characters
for i, index in enumerate(random_indexes):
chars[index] = required_tokens[i]
return ''.join(chars)
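    # Hedged usage note: in application code this provider is normally reached
    # through a Faker() instance (e.g. fake.password(length=12)); calling the
    # classmethod directly is equivalent for illustration:
    #   Provider.password(length=12, special_chars=True, digits=True,
    #                     upper_case=True, lower_case=True)
    # The result always contains at least one character from each enabled class.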
```
|
{
"source": "jfajkowski/asr-utils",
"score": 3
}
|
#### File: asr-utils/scripts/clean.py
```python
import argparse
import os
import re
import sys
import yaml
from abc import ABC, abstractmethod
LOCALE_YML = os.path.dirname(os.path.realpath(__file__)) + '/locale.yml'
class Cleaner(ABC):
@abstractmethod
def clean(self, text):
pass
class HyperlinksCleaner(Cleaner):
    PATTERN = r'(?:https?:\/\/)(?:www)?\.?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?'
def __init__(self):
self.regex = re.compile(HyperlinksCleaner.PATTERN)
def clean(self, text):
        # re.split yields None for capturing groups that did not take part in a
        # match; drop those entries before joining to avoid a TypeError.
        return ' '.join(p for p in self.regex.split(text) if p is not None)
class EmoticonsCleaner(Cleaner):
SIDEWAYS = [r'[;:xX][-]?[dDoOsSpP]+?']
UPRIGHT = [r'[*;^][_,][*;^]']
def __init__(self):
self.sideways_regex = re.compile(r'\b' + r'\b|\b'.join(EmoticonsCleaner.SIDEWAYS) + r'\b|'
r'' + r'|\b'.join(EmoticonsCleaner.SIDEWAYS) + r'\b')
self.upright_regex = re.compile(r'\b' + r'\b|\b'.join(EmoticonsCleaner.UPRIGHT) + r'\b|'
r'' + r'|\b'.join(EmoticonsCleaner.UPRIGHT) + r'\b')
def clean(self, text):
text = self.sideways_regex.sub(' ', text)
text = self.upright_regex.sub(' ', text)
return text
class AlphanumericCleaner(Cleaner):
SPLIT_PATTERN = r'(\W+)'
def __init__(self):
self.regex = re.compile(AlphanumericCleaner.SPLIT_PATTERN)
def clean(self, text):
words = self.regex.split(text)
filtered = filter(lambda w: w.isalnum() and (w.isalpha() or w.isnumeric()), words)
return ' '.join(filtered)
class CharacterCleaner(Cleaner):
def __init__(self, characters):
self.regex = re.compile(r'[^' + characters + ']')
def clean(self, text):
return self.regex.sub(' ', text)
class SeparatorCleaner(Cleaner):
def __init__(self, separators):
self.separators = separators
self.regex = re.compile(r'[' + separators + ']+')
def clean(self, text):
return self.regex.sub(' ', text).strip(self.separators)
class TyposCleaner(Cleaner):
MAX_IN_ROW = 3
def clean(self, text):
cleaned = ''
prev_letter = ''
buffer = ''
for curr_letter in text:
if curr_letter != prev_letter:
cleaned += buffer if len(buffer) <= TyposCleaner.MAX_IN_ROW else prev_letter
buffer = ''
buffer += curr_letter
prev_letter = curr_letter
        cleaned += buffer if len(buffer) <= TyposCleaner.MAX_IN_ROW else prev_letter
return cleaned
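    # Illustrative behaviour (sketch, not part of the original module): with
    # MAX_IN_ROW = 3, runs longer than three identical characters collapse to a
    # single character, e.g. TyposCleaner().clean('heeeelloooo') -> 'hello',
    # while shorter repetitions such as 'happy' are left untouched.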
def parse_args():
parser = argparse.ArgumentParser(description='filters characters that are out of whitelist')
    parser.add_argument('locale', metavar='LOCALE', nargs='?', default='pl-PL')
parser.add_argument('files', metavar='FILES', default='-', nargs='*')
parser.add_argument('-d', '--delimiter', dest='delimiter', type=str, default='\t')
parser.add_argument('-f', '--field', dest='field', type=int, default=1)
return parser.parse_args()
def clean(files, locale, delimiter='\t', field=1, locale_yml=LOCALE_YML):
characters, separators = read_locale_config(locale, locale_yml)
pipeline = [
HyperlinksCleaner(),
EmoticonsCleaner(),
AlphanumericCleaner(),
CharacterCleaner(characters),
SeparatorCleaner(separators),
TyposCleaner()
]
for file in files:
with sys.stdin if file == '-' else open(file) as f_in:
for line in f_in:
line = line.rstrip('\n').split(delimiter)
for cleaner in pipeline:
line[field - 1] = cleaner.clean(line[field - 1])
line = delimiter.join(line)
print(line)
def read_locale_config(locale, locale_yml):
with open(locale_yml) as c_in:
        config = yaml.safe_load(c_in)
language_code = re.search('[a-z]{2}', locale)[0]
region_code = re.search('[A-Z]{2}', locale)[0]
characters = config[language_code][region_code]['characters']
separators = config[language_code][region_code]['separators']
return characters, separators
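# Hedged sketch of the locale.yml layout implied by read_locale_config (keys follow
# the lookups above; the character and separator values are purely illustrative):
# pl:
#   PL:
#     characters: "a-zA-Z0-9ąćęłńóśźżĄĆĘŁŃÓŚŹŻ"
#     separators: " "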
def main():
args = parse_args()
clean(args.files, args.locale, args.delimiter, args.field)
if __name__ == '__main__':
main()
```
#### File: asr-utils/scripts/mix.py
```python
import argparse
import logging
import random
def parse_args():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-f', '--file', dest='files', action='append', nargs=2, metavar=('CORPUS', 'RATIO'), default=[])
group.add_argument('-b', '--bytes', dest='bytes', type=int)
group.add_argument('-c', '--chars', dest='chars', type=int)
group.add_argument('-l', '--lines', dest='lines', type=int)
group.add_argument('-w', '--words', dest='words', type=int)
return parser.parse_args()
def normalize(files):
normalized = []
ratio_sum = sum([float(i[1]) for i in files])
for corpus, ratio in files:
normalized.append((corpus, float(ratio) / ratio_sum))
return normalized
def mix(files, max_count, count_method):
count = 0
results = []
logging.info('Selecting ~{} {}'.format(max_count, count_method_unit(count_method)))
for corpus, ratio in files:
max_corpus_count = int(max_count * ratio)
corpus_count = select_from_corpus(corpus, max_corpus_count, count_method)
count += corpus_count
results.append((corpus, corpus_count))
logging.info('Selected ~{} {}'.format(count, count_method_unit(count_method)))
return results
def select_from_corpus(corpus, max_corpus_count, count_method):
iteration = 0
corpus_count = 0
logging.info('Selecting ~{} {} from {}'.format(max_corpus_count, count_method_unit(count_method), corpus))
while corpus_count < max_corpus_count:
with open(corpus, encoding='UTF-8') as c_in:
lines, iteration_count = random_lines(c_in, max_corpus_count - corpus_count, count_method)
for line in lines:
print(line.rstrip('\n'))
iteration += 1
corpus_count += iteration_count
logging.info(
'Selected {} {} from {} in {} iteration(s)'.format(corpus_count, count_method_unit(count_method),
corpus, iteration))
return corpus_count
def random_lines(file, max_count, count_method):
count = 0
selected = []
for i, line in enumerate(file):
if count < max_count:
selected.append(line)
count += count_method(line)
else:
m = random.randint(0, i)
if m < len(selected):
count -= count_method(selected[m])
selected[m] = line
count += count_method(selected[m])
return selected, count
def count_bytes(line):
return len(line.encode('UTF-8'))
def count_chars(line):
return len(line)
def count_lines(line):
return 1 if line else 0
def count_words(line):
return len(line.split(' '))
def count_method_unit(count_method):
return count_method.__name__.replace('count_', '')
def main():
logging.basicConfig(format='[%(asctime)s][%(levelname)s] %(name)s: %(message)s', level=logging.INFO)
args = parse_args()
files = normalize(args.files)
logging.info('Desired ratio: {}'.format(','.join([str(f) for f in files])))
if args.bytes:
files = mix(files, args.bytes, count_bytes)
elif args.chars:
files = mix(files, args.chars, count_chars)
elif args.lines:
files = mix(files, args.lines, count_lines)
elif args.words:
files = mix(files, args.words, count_words)
files = normalize(files)
logging.info('Achieved ratio: {}'.format(','.join([str(f) for f in files])))
if __name__ == '__main__':
main()
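# Hedged CLI sketch (corpus names and counts are illustrative):
#   python mix.py -f corpus_a.txt 3 -f corpus_b.txt 1 --lines 100000 > mixed.txt
# mixes the two corpora at roughly a 3:1 line ratio until about 100k lines are written.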
```
|
{
"source": "jfajkowski/PyCheckers",
"score": 3
}
|
#### File: PyCheckers/pycheckers/config_read.py
```python
import yaml
from elements import Color
from strategies import RandomGameStrategy, MinMaxGameStrategy, AlphaBetaGameStrategy, ManualGameStrategy
from heuristics import light_pieces_dark_pieces_difference_heuristic, dark_pieces_light_pieces_difference_heuristic, light_pieces_maximizing_heuristic, dark_pieces_maximizing_heuristic
def read_players():
with open('../game_config.yaml', 'r') as f:
        game_config = yaml.safe_load(f)
return read_light_player(game_config), read_dark_player(game_config)
def read_light_player(game_config):
return read_player("light_player", Color.LIGHT_PIECE, game_config)
def read_dark_player(game_config):
return read_player("dark_player", Color.DARK_PIECE, game_config)
def read_player(player_name, color, game_config):
player = None
strategy = game_config[player_name]["strategy"]
if strategy == 'manual':
player = ManualGameStrategy(color)
elif strategy == 'random':
player = RandomGameStrategy(color)
else:
heuristic = get_heuristic_from_string(game_config[player_name]["heuristic"])
depth = int(game_config[player_name]["depth"])
if strategy == 'alpha_beta':
player = AlphaBetaGameStrategy(color, heuristic, depth)
elif strategy == 'min_max':
player = MinMaxGameStrategy(color, heuristic, depth)
else:
print("wrong strategy error")
return player
def get_heuristic_from_string(heuristic_string):
if heuristic_string == 'dark_pieces_maximizing':
return dark_pieces_maximizing_heuristic
elif heuristic_string == 'light_pieces_maximizing':
return light_pieces_maximizing_heuristic
elif heuristic_string == 'dark_pieces_light_pieces_difference':
return dark_pieces_light_pieces_difference_heuristic
elif heuristic_string == 'light_pieces_dark_pieces_difference':
return light_pieces_dark_pieces_difference_heuristic
else:
print("wrong heuristic error")
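# Hedged sketch of the ../game_config.yaml structure consumed above (the strategy and
# heuristic names are the ones this module recognises; the values are illustrative):
# light_player:
#   strategy: alpha_beta
#   heuristic: light_pieces_dark_pieces_difference
#   depth: 4
# dark_player:
#   strategy: random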
```
#### File: PyCheckers/pycheckers/moves.py
```python
import logging
import numpy as np
from copy import copy
from elements import State, Color, Pawn, King, Board
class PawnMove:
'''Move from one position to another without beating'''
def __init__(self, state: State, piece_position, target_position):
self._state = copy(state)
self.piece_position = piece_position
self.target_position = target_position
self.final_position = target_position
self.piece = state.get_piece(*self.piece_position)
self.transform = False
@property
def state(self):
return copy(self._state)
def is_valid(self):
if self.is_forward():
return True
return False
def is_forward(self):
delta_y = self.target_position[1] - self.piece_position[1]
if self.piece.color == Color.DARK_PIECE and delta_y > 0:
return True
elif self.piece.color == Color.LIGHT_PIECE and delta_y < 0:
return True
return False
def execute(self):
next_state = self.state
next_state.remove(*self.piece_position)
next_state.add(self.final_position[0], self.final_position[1], self.piece)
if self.transform:
next_state.transform_into_king(*self.final_position)
return next_state
def is_to_last_position(self):
if self.piece.color == Color.DARK_PIECE and self.final_position[1] == self._state.rows - 1:
return True
elif self.piece.color == Color.LIGHT_PIECE and self.final_position[1] == 0:
return True
return False
    # Get all possible positions where the piece can land after beating a piece.
@staticmethod
def calculate_final_positions(piece_position, target_position):
final_positions = []
y = target_position[1]
x = target_position[0]
sign_y = np.sign(y - piece_position[1])
sign_x = np.sign(x - piece_position[0])
for j in range(-Board.COLS, Board.COLS):
for i in range(-Board.COLS, Board.COLS):
# every field after the beating piece (in the context of attacking piece)
if -1 < (x + sign_x * i) < Board.COLS and -1 < (y + sign_y * j) < Board.COLS:
# fields on diagonal in the right direction
if i * i == j * j and j > 0 and i > 0:
final_positions.append(((x + sign_x * i), (y + sign_y * j)))
return final_positions
    # Check whether any other piece stands on the path between start_position and aim_position.
def is_path_empty(self, start_position, aim_position, including_aim):
param = 1 if including_aim else 0
y = aim_position[1]
x = aim_position[0]
sign_y = np.sign(y - start_position[1])
sign_x = np.sign(x - start_position[0])
for j in range(-Board.COLS, Board.COLS):
for i in range(-Board.COLS, Board.COLS):
# every field after the beating piece (in the context of attacking piece)
if -1 < (x + sign_x * i) < Board.COLS and -1 < (y + sign_y * j) < Board.COLS:
# fields between attacking piece and the aim (including the aim position or not)
if i * i == j * j and j < param and i < param:
if sign_x * (x + sign_x * i) > sign_x * start_position[0] \
and sign_y * (y + sign_y * j) > sign_y * start_position[1]:
if self.state.is_occupied((x + sign_x * i), (y + sign_y * j)):
return False
return True
def __str__(self):
return '[{}] ({},{}) -> ({},{})'.format(self.__class__.__name__,
self.piece_position[0], self.piece_position[1],
self.target_position[0], self.target_position[1])
class PawnBeat(PawnMove):
def __init__(self, state: State, piece_position, target_position):
super().__init__(state, piece_position, target_position)
self.beat_piece = self.state.get_piece(*target_position)
self.next_beats = []
self.final_position = self.calculate_final_position()
def is_valid(self):
final_position = self.calculate_final_position()
return self.state.is_in_bounds(*final_position) \
and not self.state.is_occupied(*final_position) \
and self.piece.color != self.beat_piece.color
def execute(self):
next_state = self.state
next_state.remove(*self.piece_position)
next_state.remove(*self.target_position)
next_state.add(self.final_position[0], self.final_position[1], self.piece)
if self.transform:
next_state.transform_into_king(*self.final_position)
return next_state
    # Could be replaced by the calculate_final_positions() method; the final
    # position would then be set in the constructor or via a setter.
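    # Illustrative example: a pawn at (2, 2) beating a piece at (3, 3) lands on
    # (2*3 - 2, 2*3 - 2) = (4, 4), i.e. the square mirrored over the beaten piece.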
def calculate_final_position(self):
return (2 * self.target_position[0] - self.piece_position[0],
2 * self.target_position[1] - self.piece_position[1])
def to_list(self):
return self.unfold(self)
@staticmethod
def unfold(beat):
if not beat.next_beats:
return [[beat]]
else:
sequences = []
for next_beat in beat.next_beats:
for sequence in PawnBeat.unfold(next_beat):
sequences.append([beat] + sequence)
return sequences
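    # Illustrative shape: a beat whose next_beats holds two follow-ups unfolds into
    # [[beat, follow_up_1], [beat, follow_up_2]], i.e. one flat list per beat chain.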
def __str__(self):
return '[{}] ({},{}) -> ({},{}) -> ({},{})'.format(self.__class__.__name__,
self.piece_position[0], self.piece_position[1],
self.target_position[0], self.target_position[1],
self.final_position[0], self.final_position[1])
class KingMove(PawnMove):
    # Valid only when there are no pieces on the path.
def is_valid(self):
return self.is_path_empty(self.piece_position, self.target_position, True)
class KingBeat(PawnBeat):
def __init__(self, state: State, piece_position, target_position, final_position):
super().__init__(state, piece_position, target_position)
self.final_position = final_position
    # Valid when there is no piece on the path between the attacking piece and the
    # target, nor between the target and the final position (final position included).
def is_valid(self):
if self.piece.color != self.beat_piece.color:
if self.is_path_empty(self.piece_position, self.target_position, False) \
and self.is_path_empty(self.target_position, self.final_position, True):
return True
return False
return False
```
#### File: PyCheckers/pycheckers/strategies.py
```python
import math
import random
import pygame
from abc import ABC, abstractmethod
from typing import Tuple
from elements import State, Piece, Pawn, King, Color, Board
from heuristics import light_pieces_dark_pieces_difference_heuristic
from moves import PawnMove, PawnBeat, KingMove, KingBeat
class GameStrategy(ABC):
def __init__(self, color):
self._color = color
@property
def color(self):
return self._color
@abstractmethod
def move(self, state: State):
pass
def _calculate_all_moves(self, state: State, color: Tuple[int, int, int]):
beats = []
for piece_position in state.piece_positions(color):
beats += self._calculate_valid_beats(piece_position, state)
if beats:
for beat in beats:
if isinstance(beat[-1], PawnBeat) and beat[-1].is_to_last_position():
beat[-1].transform = True
return beats
moves = []
for piece_position in state.piece_positions(color):
moves += self._calculate_valid_moves(piece_position, state)
if moves:
for move in moves:
if isinstance(move[-1], PawnMove) and move[-1].is_to_last_position():
move[-1].transform = True
return moves
return []
def _calculate_valid_moves(self, piece_position, state: State):
moves = []
piece = state.get_piece(*piece_position)
target_positions = piece.target_positions(*piece_position)
for target_position in target_positions:
if state.is_in_bounds(*target_position) and not state.is_occupied(*target_position):
move = KingMove(state, piece_position, target_position) if isinstance(piece, King) \
else PawnMove(state, piece_position, target_position)
if move.is_valid():
moves.append([move])
return moves
def _calculate_valid_beats(self, piece_position, state: State, previous_beat: PawnBeat = None):
beats = []
if piece_position is None:
return beats
piece = state.get_piece(*piece_position)
target_positions = piece.target_positions(*piece_position)
for target_position in target_positions:
if state.is_in_bounds(*target_position) and state.is_occupied(*target_position) \
and state.get_color(*target_position) != piece.color:
sub_beats = []
if isinstance(piece, King):
final_positions = KingBeat.calculate_final_positions(piece_position, target_position)
for final_position in final_positions:
sub_beats.append(KingBeat(state, piece_position, target_position, final_position))
else:
sub_beats.append(PawnBeat(state, piece_position, target_position))
for sub_beat in sub_beats:
if sub_beat.is_valid():
next_state = sub_beat.execute()
beats += self._calculate_valid_beats(sub_beat.final_position, next_state, sub_beat)
if previous_beat:
previous_beat.next_beats.append(sub_beat)
else:
beats += sub_beat.to_list()
return beats
class AlphaBetaGameStrategy(GameStrategy):
def __init__(self, color, heuristic=light_pieces_dark_pieces_difference_heuristic, depth=10):
super().__init__(color)
self._heuristic = heuristic
self._depth = depth
def move(self, state: State):
# alpha for maximizer, beta for minimizer
alpha, beta = -math.inf, math.inf
best_move, best_value = None, -math.inf
for move in self._calculate_all_moves(state, self._color):
initial_state = move[-1].execute()
value = self.alpha_beta(initial_state, Color.opposite(self._color), alpha, beta, self._depth - 1)
if value > best_value:
best_value, best_move = value, move
return best_move, False
def alpha_beta(self, state, color: Tuple[int, int, int], alpha, beta, depth):
if depth == 0 or state.is_ending():
heuristic = self._heuristic(state)
return heuristic
if color == self._color:
for move in self._calculate_all_moves(state, color):
next_state = move[-1].execute()
alpha = max(alpha, self.alpha_beta(next_state, Color.opposite(color), alpha, beta, depth - 1))
if beta <= alpha:
return beta
return alpha
else:
for move in self._calculate_all_moves(state, color):
next_state = move[-1].execute()
beta = min(beta, self.alpha_beta(next_state, Color.opposite(color), alpha, beta, depth - 1))
if beta <= alpha:
return alpha
return beta
class ManualGameStrategy(GameStrategy):
def __init__(self, color):
super().__init__(color)
self._next_beat_piece = None
def move(self, state: State):
beats = []
for piece_position in state.piece_positions(self.color):
beats += self._calculate_valid_beats(piece_position, state)
if beats:
for beat in beats:
if isinstance(beat[-1], PawnBeat) and beat[-1].is_to_last_position():
beat[-1].transform = True
moves = []
for piece_position in state.piece_positions(self.color):
moves += self._calculate_valid_moves(piece_position, state)
if moves:
for move in moves:
if isinstance(move[-1], PawnMove) and move[-1].is_to_last_position():
move[-1].transform = True
while True:
click_up = None
click_down = None
move_clicked = False
while not move_clicked:
ev = pygame.event.get()
for event in ev:
if event.type == pygame.MOUSEBUTTONUP:
x, y = pygame.mouse.get_pos()
x = int(x / (640 / Board.ROWS)) # board width
y = int(y / (640 / Board.COLS)) # board height
click_up = (x, y)
move_clicked = True
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = pygame.mouse.get_pos()
x = int(x / (640 / Board.ROWS))
y = int(y / (640 / Board.COLS))
click_down = (x, y)
if click_up == click_down:
continue
if self._next_beat_piece and self._next_beat_piece != click_down:
continue
if beats:
current_beat = None
for b in beats:
if b[0].piece_position == click_down and b[0].final_position == click_up:
if current_beat is None or len(current_beat[0].next_beats) < len(b[0].next_beats):
current_beat = b
if current_beat is None:
continue
piece = state.get_piece(*click_down)
beat_to_return = None
if isinstance(piece, King):
beat_to_return = KingBeat(state, current_beat[0].piece_position, current_beat[0].target_position, current_beat[0].final_position)
else:
beat_to_return = PawnBeat(state, current_beat[0].piece_position, current_beat[0].target_position)
if len(current_beat[0].next_beats) > 0:
self._next_beat_piece = current_beat[0].final_position
return [beat_to_return], True
else:
self._next_beat_piece = None
return [beat_to_return], False
if moves and not beats:
for m in moves:
if m[0].piece_position == click_down and m[0].target_position == click_up:
return m, False
class MinMaxGameStrategy(GameStrategy):
def __init__(self, color, heuristic=light_pieces_dark_pieces_difference_heuristic, depth=4):
super().__init__(color)
self._heuristic = heuristic
self._depth = depth
def move(self, state: State):
best_move, best_value = None, -math.inf
for move in self._calculate_all_moves(state, self._color):
next_state = move[-1].execute()
value = self.min_max(next_state, Color.opposite(self._color), self._depth - 1)
if value > best_value:
best_move, best_value = move, value
return best_move, False
def min_max(self, state: State, color: Tuple[int, int, int], depth: int):
if depth == 0 or state.is_ending():
return self._heuristic(state)
if color == self._color:
best_value = -math.inf
for move in self._calculate_all_moves(state, color):
next_state = move[-1].execute()
value = self.min_max(next_state, Color.opposite(color), depth - 1)
best_value = max(best_value, value)
return best_value
else:
best_value = math.inf
for move in self._calculate_all_moves(state, color):
next_state = move[-1].execute()
value = self.min_max(next_state, Color.opposite(color), depth - 1)
best_value = min(best_value, value)
return best_value
class RandomGameStrategy(GameStrategy):
def move(self, state: State):
beats = []
for piece in state.piece_positions(self._color):
beats += self._calculate_valid_beats(piece, state)
if beats:
return random.choice(beats), False
moves = []
for piece in state.piece_positions(self._color):
moves += self._calculate_valid_moves(piece, state)
if moves:
return random.choice(moves), False
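# Hedged usage sketch (State construction lives in the elements module and is not
# shown here; the call pattern mirrors how the strategies above are used):
#   strategy = AlphaBetaGameStrategy(Color.LIGHT_PIECE, depth=4)
#   move_sequence, still_beating = strategy.move(state)
#   next_state = move_sequence[-1].execute()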
```
|
{
"source": "jfajkowski/stock-market-forecasting",
"score": 3
}
|
#### File: scripts/data/clean_corpus.py
```python
import copy
import re
import num2words as n2w
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
# %% Load corpus data
df = pd.read_csv('./data/interim/Classes_Changed.csv')
corpus = df.loc[:, 'Top1':'Top25']
# %% Remove newlines, bounding apostrophes/quotation marks and ensure that there are only single non-trailing spaces
def extract(document):
if not isinstance(document, str):
return ''
document = document.replace('\n', ' ')
document = re.sub(r" +", " ", document)
match = re.match(r'^b\'(.+?)\'$|^b\"(.+?)\"$|(.+)', document)
return next(g for g in match.groups() if g is not None) if match else ''
corpus = corpus.applymap(extract)
# %% Process all documents and abbreviate adequate words
class Trie():
class Node():
def __init__(self, value=None):
self._children = dict()
self._value = value
@property
def value(self):
return self._value
def insert(self, key, value, default=None):
if len(key) > 0:
if key[0] not in self._children:
self._children[key[0]] = Trie.Node(value=copy.deepcopy(default))
self._children[key[0]].insert(key[1:], value, default)
else:
self._value = value
def get(self, key, longest_match=False):
if len(key) > 0:
if key[0] not in self._children:
if longest_match:
return self._value
else:
raise KeyError()
value = self._children[key[0]].get(key[1:], longest_match)
return value if value else self._value
else:
return self._value
def __contains__(self, key):
if len(key) > 0:
if key[0] not in self._children:
return False
return self._children[key[0]].__contains__(key[1:])
else:
return True
def is_leaf(self, key):
if len(key) > 0:
if key[0] not in self._children:
raise KeyError()
return self._children[key[0]].is_leaf(key[1:])
else:
return not self._children
def __init__(self, default=None, longest_match=False):
self._default = default
self._longest_match = longest_match
self._root = Trie.Node(default)
def __setitem__(self, key, value):
self._root.insert(key, value, self._default)
def __getitem__(self, key):
return self._root.get(key, self._longest_match)
def __contains__(self, key):
return self._root.__contains__(key)
def is_leaf(self, key):
return self._root.is_leaf(key)
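# Illustrative behaviour of the longest-match lookup (sketch, not from the original):
#   t = Trie(longest_match=True)
#   t[['new', 'york']] = 'NY'
#   t[['new', 'york', 'city']]   # -> 'NY', the value of the longest stored prefix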
SPLIT_PATTERN = re.compile(r'(\W+)')
JOIN_PATTERN = ''
TRIE = Trie(longest_match=True)
with open('./data/external/abbreviations.csv', encoding="utf8") as f_in:
for line in f_in:
line = line.rstrip('\n')
phrase, abbreviation = list(map(SPLIT_PATTERN.split, line.split(',')))
TRIE[phrase] = (phrase, abbreviation)
def abbreviate(text):
wrong_words = SPLIT_PATTERN.split(text)
correct_words = []
i = 0
while i < len(wrong_words):
result = TRIE[wrong_words[i:]]
if result:
current_wrong_words, current_correct_words = result
i += len(current_wrong_words)
else:
current_correct_words = wrong_words[i]
i += 1
correct_words += current_correct_words
return JOIN_PATTERN.join(correct_words)
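# Illustrative behaviour (hedged; it depends on the entries in abbreviations.csv):
# if the file maps "United States of America" to "USA", then
#   abbreviate("the United States of America won") -> "the USA won"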
# corpus = corpus.applymap(abbreviate)
# %% Replace currency symbols with words
currency_dict = {}
currency_regex = ''
SPACE_JOIN_PATTERN = ' '
with open('./data/external/currencies.csv', encoding="utf8") as f_in:
lines = (line.rstrip('\n') for line in f_in if line.rstrip('\n'))
symbols = []
for line in lines:
symbol, currency = line.split(',')
currency_dict[symbol] = currency
symbols.append(symbol)
currency_regex = "([" + '|'.join(symbols) + "])"
def currenciate(text):
extracted_currencies = re.split(currency_regex, text)
translated = [currency_dict[word] if word in currency_dict else word for word in extracted_currencies]
joined = SPACE_JOIN_PATTERN.join(translated)
return re.sub(r" +", " ", joined)
corpus = corpus.applymap(currenciate)
# %% Change existing numbers to words
def numbers_to_words(text):
    numbers = re.findall(r'[-+]?\d*\.\d+|\d*,\d+|\d+', text)
for n in numbers:
convertible = n.replace(",", ".")
text = text.replace(n, " " + n2w.num2words(convertible) + " ", 1)
return re.sub(r" +", " ", text)
# corpus = corpus.applymap(numbers_to_words)
# %% Tokenize documents using NLTK
corpus = corpus.applymap(lambda document: word_tokenize(document))
# %% Lowercase documents
corpus = corpus.applymap(lambda document: [word.lower() for word in document])
# %% Remove stopwords
stoplist = stopwords.words('english')
corpus = corpus.applymap(lambda document: [word for word in document if word not in stoplist])
# %% Use lemmatizer to reduce dimensionality
lemmatizer = WordNetLemmatizer()
corpus = corpus.applymap(lambda document: [lemmatizer.lemmatize(word) for word in document])
# %% Remove non alphanumeric characters
sanitize = lambda word: re.sub(r'[^0-9A-Za-z ]+', ' ', word).strip()
corpus = corpus.applymap(lambda document: [sanitize(word) for word in document])
# %% Remove tokens shorter than 3
corpus = corpus.applymap(lambda document: [word for word in document if len(word) > 3])
# %% Print corpus statistics
def stats(corpus):
document_lengths = [len(document) for document in corpus]
tokens = [token for document in corpus for token in document]
unique_tokens = set(tokens)
print("Number of documents: {}".format(len(corpus)))
print("Document length mean: {:.2f}".format(np.mean(document_lengths)))
print("Document length variance: {:.2f}".format(np.var(document_lengths)))
print("Number of tokens: {}".format(len(tokens)))
print("Number of token types: {}".format(len(unique_tokens)))
print("Type-Token Ratio: {:.2%}".format(len(unique_tokens) / len(tokens)))
print()
stats(corpus.values.flatten())
# %% Persist cleaned data
df.loc[:, 'Top1':'Top25'] = corpus.applymap(lambda x: ' '.join(x))
df.to_csv(path_or_buf='./data/interim/Corpus_Cleaned.csv', index=False)
```
|
{
"source": "jfalcou/infra",
"score": 2
}
|
#### File: bin/lib/binary_info.py
```python
import re
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import Dict, Any, Iterable
SYMBOLLINE_RE = re.compile(r'^\s*(\d*):\s[0-9a-f]*\s*(\d*)\s(\w*)\s*(\w*)\s*(\w*)\s*([\w|\d]*)\s?([\w\.]*)?$',
re.MULTILINE)
SO_STRANGE_SYMLINK = re.compile(r'INPUT \((\S*)\)')
ELF_CLASS_RE = re.compile(r'^\s*Class:\s*(.*)$', re.MULTILINE)
ELF_OSABI_RE = re.compile(r'^\s*OS\/ABI:\s*(.*)$', re.MULTILINE)
ELF_MACHINE_RE = re.compile(r'^\s*Machine:\s*(.*)$', re.MULTILINE)
sym_grp_num = 0
sym_grp_val = 1
sym_grp_type = 2
sym_grp_bind = 3
sym_grp_vis = 4
sym_grp_ndx = 5
sym_grp_name = 6
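# The indices above address the capture groups of SYMBOLLINE_RE, which follow the
# column order of a `readelf -W -s` symbol line (number, size, type, binding,
# visibility, section index and name); only the Ndx and Name groups are used below.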
class BinaryInfo:
def __init__(self, logger, buildfolder, filepath):
self.logger = logger
self.buildfolder = Path(buildfolder)
self.filepath = Path(filepath)
self.readelf_header_details = ''
self.readelf_symbols_details = ''
self.ldd_details = ''
self._follow_and_readelf()
self._read_symbols_from_binary()
def _follow_and_readelf(self) -> None:
self.logger.debug('Readelf on %s', self.filepath)
if not self.filepath.exists():
return
try:
self.readelf_header_details = subprocess.check_output(
['readelf', '-h', str(self.filepath)]).decode('utf-8', 'replace')
self.readelf_symbols_details = subprocess.check_output(
['readelf', '-W', '-s', str(self.filepath)]).decode('utf-8', 'replace')
if ".so" in self.filepath.name:
self.ldd_details = subprocess.check_output(['ldd', str(self.filepath)]).decode('utf-8', 'replace')
except subprocess.CalledProcessError:
match = SO_STRANGE_SYMLINK.match(Path(self.filepath).read_text(encoding='utf-8'))
if match:
self.filepath = self.buildfolder / match[1]
self._follow_and_readelf()
def _read_symbols_from_binary(self) -> None:
self.required_symbols = set()
self.implemented_symbols = set()
symbollinematches = SYMBOLLINE_RE.findall(self.readelf_symbols_details)
if symbollinematches:
for line in symbollinematches:
if len(line) == 7 and line[sym_grp_name]:
if line[sym_grp_ndx] == 'UND':
self.required_symbols.add(line[sym_grp_name])
else:
self.implemented_symbols.add(line[sym_grp_name])
@staticmethod
def symbol_maybe_cxx11abi(symbol: str) -> bool:
return 'cxx11' in symbol
def set_maybe_cxx11abi(self, symbolset: Iterable[str]) -> bool:
return any(self.symbol_maybe_cxx11abi(s) for s in symbolset)
def cxx_info_from_binary(self) -> Dict[str, Any]:
info: Dict[str, Any] = defaultdict(lambda: [])
info['has_personality'] = {'__gxx_personality_v0'}.issubset(self.required_symbols)
info['has_exceptions'] = {'_Unwind_Resume'}.issubset(self.required_symbols)
info['has_maybecxx11abi'] = self.set_maybe_cxx11abi(self.implemented_symbols)
return info
def arch_info_from_binary(self) -> Dict[str, Any]:
info: Dict[str, Any] = defaultdict(lambda: [])
info['elf_class'] = ''
info['elf_osabi'] = ''
info['elf_machine'] = ''
matches = ELF_CLASS_RE.findall(self.readelf_header_details)
for match in matches:
info['elf_class'] = match
break
matches = ELF_OSABI_RE.findall(self.readelf_header_details)
for match in matches:
info['elf_osabi'] = match
break
matches = ELF_MACHINE_RE.findall(self.readelf_header_details)
for match in matches:
info['elf_machine'] = match
break
return info
```
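A rough usage sketch for the helper above; it assumes `readelf` (and `ldd` for shared objects) is available on the host, the build folder and library path are placeholders, and the import path is inferred from the `bin/lib/` layout.
```python
import logging
from lib.binary_info import BinaryInfo
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("binary-info-demo")
# Inspect a (placeholder) shared object produced under a build folder.
info = BinaryInfo(logger, "/tmp/build", "/tmp/build/lib/libexample.so")
print(info.arch_info_from_binary())  # ELF class, OS/ABI and machine strings
print(info.cxx_info_from_binary())   # personality / exception / cxx11-ABI heuristics
```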
#### File: bin/lib/ce_utils.py
```python
import itertools
import json
import logging
import time
from typing import Optional, Union, Set, List
import click
from lib.amazon import get_current_key, release_for, get_releases, get_events_file, save_event_file
from lib.env import Config
from lib.instance import Instance
from lib.releases import Hash, Release
logger = logging.getLogger(__name__)
def sizeof_fmt(num: Union[int, float], suffix='B') -> str:
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def describe_current_release(cfg: Config) -> str:
current = get_current_key(cfg)
if not current:
return "none"
r = release_for(get_releases(), current)
if r:
return str(r)
else:
return "non-standard release with s3 key '{}'".format(current)
def wait_for_autoscale_state(instance: Instance, state: str) -> None:
logger.info("Waiting for %s to reach autoscale lifecycle '%s'...", instance, state)
while True:
autoscale = instance.describe_autoscale()
if not autoscale:
logger.error("Instance is not longer in an ASG: stopping")
return
cur_state = autoscale['LifecycleState']
logger.debug("State is %s", cur_state)
if cur_state == state:
logger.info("...done")
return
time.sleep(5)
def get_events(cfg: Config) -> dict:
events = json.loads(get_events_file(cfg))
if 'ads' not in events:
events['ads'] = []
if 'decorations' not in events:
events['decorations'] = []
if 'motd' not in events:
events['motd'] = ''
return events
def save_events(cfg: Config, events) -> None:
save_event_file(cfg, json.dumps(events))
def update_motd(cfg: Config, motd: str) -> str:
events = get_events(cfg)
old_motd = events['motd']
events['motd'] = motd
save_events(cfg, events)
return old_motd
def are_you_sure(name: str, cfg: Optional[Config] = None) -> bool:
env_name = cfg.env.value if cfg else 'global'
while True:
typed = input(
f'Confirm operation: "{name}" in env {env_name}\nType the name of the environment to proceed: ')
if typed == env_name:
return True
def display_releases(current: Union[str, Hash], filter_branches: Set[str], releases: List[Release]) -> None:
max_branch_len = max(10, max((len(release.branch) for release in releases), default=10))
release_format = '{: <5} {: <' + str(max_branch_len) + '} {: <10} {: <10} {: <14}'
click.echo(release_format.format('Live', 'Branch', 'Version', 'Size', 'Hash'))
for _, grouped_releases in itertools.groupby(releases, lambda r: r.branch):
for release in grouped_releases:
if not filter_branches or release.branch in filter_branches:
click.echo(
release_format.format(
' -->' if (release.key == current or release.hash == current) else '',
release.branch, str(release.version), sizeof_fmt(release.size), str(release.hash))
)
def confirm_branch(branch: str) -> bool:
while True:
typed = input('Confirm build branch "{}"\nType the name of the branch: '.format(branch))
if typed == branch:
return True
def confirm_action(description: str) -> bool:
typed = input('{}: [Y/N]\n'.format(description))
return typed.upper() == 'Y'
```
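For orientation, a couple of quick calls to the size-formatting helper above; the expected outputs follow directly from the `%3.1f` format used in `sizeof_fmt`.
```python
from lib.ce_utils import sizeof_fmt
print(sizeof_fmt(512))            # 512.0B
print(sizeof_fmt(1536))           # 1.5KiB
print(sizeof_fmt(3 * 1024 ** 3))  # 3.0GiB
```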
#### File: bin/lib/config_safe_loader.py
```python
import yaml
# With thanks to:
# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
class ConfigSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
"""
Remove implicit resolvers for a particular tag
Takes care not to modify resolvers in super classes.
"""
if 'yaml_implicit_resolvers' not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [(tag, regexp)
for tag, regexp in mappings
if tag != tag_to_remove]
ConfigSafeLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')
```
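To illustrate why the timestamp resolver is stripped: with a stock `SafeLoader`, date-like scalars come back as `datetime.date` objects, whereas `ConfigSafeLoader` keeps them as plain strings. A minimal sketch, assuming the module is importable as `lib.config_safe_loader`:
```python
import yaml
from lib.config_safe_loader import ConfigSafeLoader
doc = "release: 2021-03-01"
print(yaml.load(doc, Loader=yaml.SafeLoader))    # {'release': datetime.date(2021, 3, 1)}
print(yaml.load(doc, Loader=ConfigSafeLoader))   # {'release': '2021-03-01'}
```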
#### File: infra/test/remote-test.py
```python
import glob
import json
import os
import requests
from argparse import ArgumentParser
from difflib import unified_diff
import re
import sys
parser = ArgumentParser()
parser.add_argument('url')
parser.add_argument('directory')
parser.add_argument('--update-compilers', action='store_true')
parser.add_argument('--disabled-by-default', action='store_true')
parser.add_argument('--bless', action='store_true')
FILTERS = [
['binary', 'labels', 'directives', 'commentOnly', 'intel'],
['binary', 'labels', 'directives', 'commentOnly'],
['labels', 'directives', 'commentOnly', 'intel'],
['labels', 'directives', 'commentOnly'],
]
def get(session, url, compiler, options, source, filters):
    r = session.post(url + 'api/compiler/' + compiler + '/compile', json={
'source': source,
'options': options,
'filters': {key: True for key in filters},
}, headers={'Accept': 'application/json'})
r.raise_for_status()
def fixup(obj):
try:
if 'text' in obj:
obj['text'] = re.sub(r'/tmp/compiler-explorer-[^/]+', '/tmp', obj['text'])
return obj
except:
print("Issues with obj '{}'".format(obj))
raise
result = r.json()
if 'asm' not in result:
result['asm'] = []
result['asm'] = [fixup(obj) for obj in result['asm']]
return result
def get_compilers(url):
r = requests.get(url + 'api/compilers', headers={'Accept': 'application/json'})
r.raise_for_status()
return list(sorted([url['id'] for url in r.json()]))
def main(args):
compilers = get_compilers(args.url)
compiler_json = os.path.join(args.directory, 'compilers.json')
compiler_map = {}
if os.path.exists(compiler_json):
compiler_map = json.load(open(compiler_json))
if args.update_compilers:
for compiler in compilers:
if compiler not in compiler_map:
print("New compiler: " + compiler)
compiler_map[compiler] = not args.disabled_by_default
for compiler in list(compiler_map):
if compiler not in compilers:
print("Compiler removed: " + compiler)
del compiler_map[compiler]
with open(compiler_json, 'w') as f:
f.write(json.dumps(compiler_map, indent=2))
print("Compilers updated to " + compiler_json)
return 0
else:
compilers = list(sorted(compilers))
expected = list(sorted(compiler_map.keys()))
if expected != compilers:
raise RuntimeError('Compiler list changed:\n{}'.format(
"\n".join(list(unified_diff(compilers, expected, fromfile="got", tofile="expected")))))
with requests.Session() as session:
for test_dir in glob.glob(os.path.join(args.directory, '*')):
if not os.path.isdir(test_dir):
continue
print('Testing ' + test_dir)
source_name = glob.glob(os.path.join(test_dir, 'test.*'))[0]
source = open(source_name).read()
options = open(os.path.join(test_dir, 'options')).read()
            for compiler, enabled in compiler_map.items():
if not enabled:
print(' Skipping compiler ' + compiler)
continue
print(' Compiler ' + compiler)
for filter_set in FILTERS:
print(' Filters ' + '; '.join(filter_set))
expected_filename = [compiler]
expected_filename.extend(sorted(filter_set))
expected_filename.append('json')
expected_file = os.path.join(test_dir, ".".join(expected_filename))
result = get(session, args.url, compiler, options, source, filter_set)
if args.bless:
with open(expected_file, 'w') as f:
f.write(json.dumps(result, indent=2))
else:
expected = json.load(open(expected_file))
if expected != result:
with open('/tmp/got.json', 'w') as f:
f.write(json.dumps(result, indent=2))
raise RuntimeError('Differences in {}'.format(expected_file))
if __name__ == '__main__':
sys.exit(main(parser.parse_args()))
```
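For reference, the expected-output fixtures the test reads and writes are named after the compiler id plus the sorted filter names; a small sketch of that derivation (the compiler id here is a placeholder):
```python
filter_set = ['binary', 'labels', 'directives', 'commentOnly', 'intel']
expected_filename = ['gcc830']
expected_filename.extend(sorted(filter_set))
expected_filename.append('json')
print(".".join(expected_filename))  # gcc830.binary.commentOnly.directives.intel.labels.json
```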
|
{
"source": "jfalcou/tts",
"score": 3
}
|
#### File: tts/src/embed.py
```python
import re
import sys
import os.path
import argparse
# defaults paths to look for includes
default_includes_path = ['.']
# pp tokens regexp
r_escape_line = re.compile(r'^.*\\\n$')
r_empty_line = re.compile('^[ \t]*\n$')
r_pp_include = re.compile(r'^\s*#\s*include\s+["|<](.*)["|>]$')
r_pp_ifndef = re.compile(r'^\s*#\s*ifndef\s+(.*)\s*$')
r_pp_if_defined = re.compile(r'^\s*#\s*if\s+defined\(\s*(.*)\s*\)\s*$')
r_pp_if = re.compile(r'^\s*#\s*if.*$')
r_pp_endif = re.compile(r'^\s*#\s*endif.*$')
r_pp_define = re.compile(r'^\s*#\s*define\s+(.*)\s*$')
r_pp_pragma_once = re.compile(r'^\s*#\s*pragma\s+once\s*$')
r_C_one_line_comment = re.compile(r'^(.*?)\s*//.*$')
r_C_one_line_block_comment = re.compile(r'^(.*)/\*.*\*/(.*)$')
r_C_block_begin_comment = re.compile(r'(.*)/\*.*')
r_C_block_end_comment = re.compile(r'.*\*/(.*)')
# globals
will_escape = False
guard_stack = []
included_files = []
keep_guard = True
in_C_block_comments = False
"""
Parse options. Here is the list of supported options:
see `help`(s)
"""
def parse_opts(args):
p = argparse.ArgumentParser(
description='Aggregates headers into one single file')
p.add_argument('-I', type=str, action='append', metavar='<dir>', default=[], dest='includes_path',
help='Add the specified directory to the search path for include files')
p.add_argument('-v', dest='verbose', action='store_true', default=False,
help='Enable verbose mode, useful for debugging')
p.add_argument('--include-match', type=str, metavar='<regexp>', default='.*', dest='r_include',
help='The regexp to match includes that will be expanded')
p.add_argument('--guard-match', type=str, metavar='<regexp>', default='^.*_INCLUDED$', dest='r_guard',
help='The regexp to match includes guard')
p.add_argument('-o', type=argparse.FileType('w'), metavar='<output-file>', dest='output', default=sys.stdout,
help='The output file')
p.add_argument('filename', type=str, metavar='<input-file>',
help='The file to preprocess')
opts = p.parse_args(args)
opts.r_guard = re.compile(opts.r_guard)
opts.r_include = re.compile(opts.r_include)
return opts
"""
Print only if 'verbose' option is enabled
"""
def vprint(opts, what):
if opts.verbose:
sys.stderr.write('verbose: {}\n'.format(what))
"""
Try to find a valid path for the given filename, if not found then exit
"""
def get_path_for(f, opts):
for path in opts.includes_path:
path = os.path.join(path, f)
vprint(opts, 'try to include: {}'.format(path))
if os.path.isfile(path):
return path
sys.stderr.write('{}: file not found! aborting.\n'.format(f))
sys.exit(1)
"""
Preprocess a single line
"""
def pp_line(line, output, opts):
global will_escape
global keep_guard
global in_C_block_comments
is_escaped = will_escape
will_escape = False
if r_empty_line.match(line):
# skip empty lines
return
# we do not want to remove comments before the first guard as
# its content may be a license or whatever else important
if not keep_guard:
# C comments (one line) '//'
m = r_C_one_line_comment.match(line)
if m:
line = m.group(1)
# C (block) comments (one line) '/* */'
m = r_C_one_line_block_comment.match(line)
if m:
line = m.group(1) + m.group(2)
# C (block) comments '/*'
m = r_C_block_begin_comment.match(line)
if m:
in_C_block_comments = True
line = m.group(1)
# C (block) comments '*/'
m = r_C_block_end_comment.match(line)
if m:
in_C_block_comments = False
line = m.group(1)
return pp_line(line, output, opts)
# in C (block) comments
if in_C_block_comments:
return
# #include
m = r_pp_include.match(line)
if m and opts.r_include.match(m.group(1)):
keep_guard = False
path = get_path_for(m.group(1), opts)
pp_file(path, output, opts)
return
# we keep the guard only for the very first preprocessed file
if not keep_guard:
# #pragma once
if r_pp_pragma_once.match(line):
return
# #ifndef / #if defined(...)
m = r_pp_ifndef.match(line) or r_pp_if_defined.match(line)
if m and opts.r_guard.match(m.group(1)):
# we've found a new include guard, add a new counter to
# keep track of it!
guard_stack.append(1)
return
# #{if,ifndef} / #endif (pairs)
if len(guard_stack):
if r_pp_if.match(line):
guard_stack[-1] += 1
elif r_pp_endif.match(line):
guard_stack[-1] -= 1
if guard_stack[-1] < 0:
sys.stderr.write('non matching #endif found! aborting.')
sys.exit(1)
if guard_stack[-1] == 0:
# we hit the 'endif' part of the guard, so pop it from
# the stack counter and do not print it!
guard_stack.pop()
return
# define (guard)
# we do check this one only if we have a valid stack counter
# DO NOT PUT IT OUTSIDE OF THE `if len(guard_stack):`
m = r_pp_define.match(line)
if m and opts.r_guard.match(m.group(1)):
return
# add missing '\n' if needed, for example:
#
# '/* foo */\n'
# gets turned into:
# ''
#
# also:
#
# #define foo\
# /**/
# gets turned into:
# #define foo
# '\n'
#
if (is_escaped or len(line) > 0) and '\n' not in line:
line += '\n'
# check if the current line escape the next one
if r_escape_line.match(line):
will_escape = True
# everything has been checked now! so we can print the current line
output.write(line)
"""
Preprocess the file here!
"""
def pp_file(f, output, opts):
# use the absolute version of the filename to always get the same path
# e.g. ./foo.h == foo.h == bar/../foo.h
f = os.path.abspath(f)
if f in included_files:
# if included, then do not process it!
return
included_files.append(f)
vprint(opts, 'preprocessing: {}'.format(f))
try:
with open(f, 'r') as f:
dirname = os.path.dirname(f.name)
opts.includes_path.append(dirname)
for line in f.readlines():
pp_line(line, output, opts)
opts.includes_path.remove(dirname)
except IOError as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
"""
The top-level function
"""
def embed():
args = sys.argv[1:]
for path in default_includes_path:
args.append('-I')
args.append(path)
opts = parse_opts(args)
pp_file(opts.filename, opts.output, opts)
# The entry point!
if __name__ == '__main__':
embed()
```
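A minimal sketch of driving the embedder programmatically rather than via `sys.argv`; the include directory and input/output paths are placeholders, and it assumes the module is importable as `embed`:
```python
from embed import parse_opts, pp_file
# Roughly equivalent to: python embed.py -I include -o single_header.hpp include/tts/tts.hpp
opts = parse_opts(['-I', 'include', '-o', 'single_header.hpp', 'include/tts/tts.hpp'])
pp_file(opts.filename, opts.output, opts)
```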
|
{
"source": "jfald/AppDynamics-API-Client-Server",
"score": 3
}
|
#### File: jfald/AppDynamics-API-Client-Server/appDynamicsAPIClient.py
```python
import json
import requests
from xml.dom.minidom import parse, parseString
import xml.dom.minidom
class appDynamicsAPIClient:
def __init__(self,server,port,user,pw):
self.server = server
self.port = port
self.user = user
self.pw = pw
self.applications = []
# get list of application names
def getApplicationNames(self):
url = "http://"+self.server+":"+self.port+"/controller/rest/applications?output=JSON"
response = requests.get(url, auth=(self.user, self.pw))
data = response.json()
returnAppNames=[]
for d in data:
returnAppNames.append(str(d['name']))
return returnAppNames
    # Using the appName, get the list of nodes for that application.
# Returns the machineName, id, node name, tier name, agent type, agent version
def getNodes(self, appName):
response = requests.get('http://'+self.server+':'+self.port+'/controller/rest/applications/'+appName+'/nodes?output=JSON', auth=(self.user, self.pw))
nodes = response.json()
nodeList = []
for node in nodes:
nodeList.append([str(node['machineName']),str(node['machineId']),str(node['name']),str(node['tierName']),str(node['agentType']), str(node['appAgentVersion'])])
return nodeList
    # Using the appName, get the list of business transactions for that application.
def getBT(self, appName):
response = requests.get('http://'+self.server+':'+self.port+'/controller/rest/applications/'+appName+'/business-transactions?output=JSON', auth=(self.user, self.pw))
nodes = response.json()
nodeList = []
for node in nodes:
nodeList.append([str(node['tierName']),str(node['entryPointType']),str(node['internalName']),str(node['name'])])
return nodeList
    # Using the appName, get the list of tiers for that application.
# Returns the tier name, type, agentType, number of nodes
def getTiers(self, appName):
url = 'http://'+self.server+':'+self.port+'/controller/rest/applications/'+appName+'/tiers?output=JSON'
response = requests.get(url, auth=(self.user, self.pw))
tiers = response.json()
tiersList = []
for tier in tiers:
tiersList.append([str(tier['name']),str(tier['type']),str(tier['agentType']),str(tier['numberOfNodes'])])
return tiersList
# Using the appName and tierName, get a list of nodes for that tier
    # Returns the node name, type, agentType, appAgentVersion
def getNodesInTier(self, appName, tierName):
response = requests.get('http://'+self.server+':'+self.port+'/controller/rest/applications/'+appName+'/tiers/'+tierName+'/nodes?output=JSON', auth=(self.user, self.pw))
tiers = response.json()
tiersList = []
for tier in tiers:
tiersList.append([str(tier['name']),str(tier['type']),str(tier['agentType']),str(tier['appAgentVersion'])])
return tiersList
# Using the appName get the health rules
# This is only available as XML data
    # Data returned is something like-- name, type, enabled, duration-min, wait-time-min, condition-value-type, condition-value, operator, logical-metric-name
def getHealthRules(self,appName):
url = 'http://'+self.server+':'+self.port+'/controller/healthrules/'+appName
response = requests.get(url,auth=(self.user, self.pw))
DOMTree = xml.dom.minidom.parseString(response.text)
collection = DOMTree.documentElement
healthRules = collection.getElementsByTagName("health-rule")
hrList = []
for hr in healthRules:
name = hr.getElementsByTagName('name')[0]
ruleType = hr.getElementsByTagName('type')[0]
enabled = hr.getElementsByTagName('enabled')[0]
durationMin = hr.getElementsByTagName('duration-min')[0]
waitTimeMin = hr.getElementsByTagName('wait-time-min')[0]
conditionValueType = hr.getElementsByTagName('condition-value-type')[0]
conditionValue = hr.getElementsByTagName('condition-value')[0]
operator = hr.getElementsByTagName('operator')[0]
logicalMetricName = hr.getElementsByTagName('logical-metric-name')[0]
hrList.append([name.childNodes[0].data, ruleType.childNodes[0].data, enabled.childNodes[0].data, durationMin.childNodes[0].data, waitTimeMin.childNodes[0].data, conditionValueType.childNodes[0].data, conditionValue.childNodes[0].data,operator.childNodes[0].data,logicalMetricName.childNodes[0].data])
return hrList
    # Using the appName, get the policies
    # The response is parsed as JSON data
    # Data returned is something like-- enabled, name, action tags, event types, entity types
def getPolicies(self,appName):
url = 'http://'+self.server+':'+self.port+'/controller/policies/'+appName
response = requests.get(url,auth=("restapi@customer1", "Restful"))
try:
policies = response.json()
except:
return
policyList = []
awt = []
eft = []
enft = []
for po in policies:
for tempawt in po['actionWrapperTemplates']:
awt.append(str(tempawt['actionTag']))
for tempeft in po['eventFilterTemplate']['eventTypes']:
#print tempeft
eft.append(tempeft)
for tempenft in po['entityFilterTemplates']:
enft.append(str(tempenft['entityType']))
policyList.append([str(po['enabled']),str(po['name']),", ".join(awt),", ".join(eft),", ".join(enft)]);
awt = []
eft = []
enft = []
return policyList
if __name__ == "__main__":
print "Running locally."
apiclient=appDynamicsAPIClient("controllername","8090","username@customer1","password")
# Get AppNames
print "App Names"
appNames=apiclient.getApplicationNames()
for ap in appNames:
print ap
# Get NodeNames
print "Node Names"
nodeNames=apiclient.getNodes("appName")
for node in nodeNames:
print node
# Get TierNames
print "Tier Names"
tierNames=apiclient.getTiers("appName")
for tier in tierNames:
print tier
# Get NodesInTier
print "Nodes In Tier"
nodeNames=apiclient.getNodesInTier("appName","Tier Name")
for node in nodeNames:
print node
    # Get Policies
print "Policies"
policiesList=apiclient.getPolicies("appName")
for po in policiesList:
print po
```
#### File: AppDynamics-API-Client-Server/misc/CreateReport.py
```python
import json
import requests
import datetime
import zipfile
import os
##Get incoming variables of
# time-range-type
# And duration-in-mins
# OR start-time
# OR end-time
class CreateReport:
def __init__(self, user="username@customer1", pw="password", appName="app", interval="60"):
self.user = user
self.pw = pw
self.apiClient = None
self.interval = interval
self.appName = appName
#Read the file that has the urls
#Add time to url
    #Call writeToOutFile as appropriate
def getData(self):
#Pass user and password too?
#Read from file each line should be a url to retrieve
nbrLines=0
nodeName = ""
with open("urls/"+self.appName) as f, open(self.appName+".csv", 'a') as o:
o.write("Node Name,Metric Name,TimeStamp,min,max,sum,value,current\n")
for urlline in f:
urlline = urlline.strip()
#If it is not a comment then it better be a url
if urlline == "":
pass
elif not urlline.startswith('#'):
# Check for rollup and output
# rollup=false&output=JSON
index = urlline.index('?') + 1
if "rollup=" not in urlline:
urlline = urlline[:index] + "rollup=false&" + urlline[index:]
if "output=" not in urlline:
urlline = urlline[:index] + "output=JSON&" + urlline[index:]
# Check if number is at the end of the line
index = urlline.rfind('=') + 1
numberCheck = urlline[index:]
if numberCheck.isdigit() :
urlline = urlline[:index]
print urlline
urlline = urlline + self.interval
nbrLines += 1
response = requests.get(urlline, auth=(self.user, self.pw))
data = response.json()
self.writeToOutFile(nodeName,data,o)
                #If the line starts with #S then a node name is expected; it is written as the first column of each output row.
elif urlline.startswith('#S'):
nodeName = urlline[2:]
#End of if elif
# end of for urlline in F:
#end of with open...
print "Number of URLs:" + str(nbrLines)
    #Write one CSV row per metric value to the open output file handle 'o'.
def writeToOutFile(self,nodeName,data,o):
        #Should I have it in its own method?
for ob in data:
mn = str(ob['metricName'])
for metric in ob['metricValues']:
tstamp=str(metric['startTimeInMillis'])
fd = datetime.datetime.fromtimestamp(int(tstamp[:10])).strftime('%Y-%m-%d %H:%M:%S')
o.write(nodeName + "," + mn + "," + fd + "," + str(metric['min']) + "," + str(metric['max']) + "," + str(metric['sum']) + "," + str(metric['value']) + "," + str(metric['current'])+ "\n")
    #Same as writeToOutFile, but writes to stdout instead of a file.
def writeToPrintOut(self,nodeName,data):
for ob in data:
mn = str(ob['metricName'])
for metric in ob['metricValues']:
tstamp=str(metric['startTimeInMillis'])
fd = datetime.datetime.fromtimestamp(int(tstamp[:10])).strftime('%Y-%m-%d %H:%M:%S')
print nodeName + "," + mn + "," + fd + "," + str(metric['min']) + "," + str(metric['max']) + "," + str(metric['sum']) + "," + str(metric['value']) + "," + str(metric['current'])
#Zip up the output
def zipOutput(self):
now = datetime.datetime.now()
pnow = now.strftime("%Y%m%d%H%M%S")
zipFileName="archive/"+self.appName+"/"+self.appName+"OutData"+pnow+".zip"
outFileName=self.appName+".csv"
with zipfile.ZipFile(zipFileName, 'w') as myzip:
myzip.write(outFileName)
os.remove(outFileName)
return zipFileName
def createNewReport(self):
self.getData();
return self.zipOutput()
if __name__ == "__main__":
doit = CreateReport("username@customer1", "password", "template", "60")
doit.getData();
doit.zipOutput()
```
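For clarity, a hypothetical `urls/<appName>` input file consumed by `getData()` above; the controller host, application and metric path are placeholders. Lines starting with `#S` set the node name written in the first CSV column, other `#` lines are comments, blank lines are skipped, and the trailing `duration-in-mins` value is replaced with the interval passed to `CreateReport`:
```python
EXAMPLE_URLS_FILE = """\
# Overall average response time, reported under the node name below
#SNode01
http://controllername:8090/controller/rest/applications/appName/metric-data?metric-path=Overall%20Application%20Performance%7CAverage%20Response%20Time%20%28ms%29&time-range-type=BEFORE_NOW&duration-in-mins=60
"""
```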
|
{
"source": "jfalkner/github_project_management",
"score": 3
}
|
#### File: github_project_management/github_project_management/export.py
```python
from collections import Counter
from datetime import datetime
from datetime import timedelta
from datetime import date
import pytz
import github3
from github_project_management import constants as GPMC
from github_project_management import list_issues
from github_project_management import milestone_url
def export(
gh_user,
gh_password,
gh_api_url,
configs,
# Defaults: only display the GH issue and format dates in ISO style.
date_format=lambda x: x.strftime('%Y-%m-%d')):
# Track all issues in the timeframe.
tz = pytz.timezone('US/Pacific')
today = tz.localize(datetime.today())
recent_end_date = today - timedelta(seconds=(today.hour * 60 * 60 + today.minute * 60 + today.second), microseconds=today.microsecond)
recent_start_date = recent_end_date - timedelta(days=6)
# Iterate through all the issues that match the configs.
from collections import defaultdict
key2projects = defaultdict(list)
rows = []
for row in list_issues(gh_user, gh_password, gh_api_url, configs, recent_start_date, recent_end_date):
key = (row[GPMC.REPO_USER], row[GPMC.REPO_NAME], row[GPMC.ISSUE_NUMBER])
# Only keep one copy of the row's data. Issues can be in multiple groups.
        if key not in key2projects:
rows.append(row)
# Keep track of all groups that the issue was part of.
if row[GPMC.GROUPING_TITLE]:
key2projects[key].append(row[GPMC.GROUPING_TITLE])
import csv
from github_project_management.constants import HEADER
filename = 'projects.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(HEADER)
for row in rows:
key = (row[GPMC.REPO_USER], row[GPMC.REPO_NAME], row[GPMC.ISSUE_NUMBER])
row[GPMC.GROUPING_TITLE] = ','.join(key2projects[key])
try:
writer.writerow([row.get(col, None) for col in HEADER])
except Exception as ex:
print row
print ex
def main():
"""Runs the weekly update code
Optional parameters. Will be prompted for unless set.
-gh_user = GitHub login name. Can also set as env variable of same name.
-gh_pass = GitHub password. Can also set as env variable of same name.
Required parameters.
-gh_api = GitHub URL for the enterprise instance being used.
-template = Markdown template for the weekly.
-config = JSON formatted configuration.
"""
    import os
    import sys
    import argparse
    from getpass import getpass
parser = argparse.ArgumentParser()
parser.add_argument('-gh_user', action="store", dest='gh_user', help='GitHub login name. Can also set as env variable of same name.')
parser.add_argument('-gh_pass', action="store", dest='gh_pass', help='GitHub password. Can also set as env variable of same name.')
parser.add_argument('-gh_api', action="store", dest='gh_api', help='GitHub URL for the enterprise instance being used.')
parser.add_argument('-config', action="store", dest='config', help='JSON formatted configuration.')
args = parser.parse_args(sys.argv[1:])
print "Running weekly code"
# Expected arguments.
gh_user = None
if args.gh_user:
gh_user = args.gh_user
    elif 'gh_user' in os.environ:
        gh_user = os.environ['gh_user']
else:
gh_user = raw_input('GitHub login:')
gh_pass = None
if args.gh_pass:
gh_pass = args.gh_pass
    elif 'gh_pass' in os.environ:
        gh_pass = os.environ['gh_pass']
else:
gh_pass = getpass('GitHub password:')
gh_api = args.gh_api
# Parse all the other config from the JSON. Should have the template in there too.
import json
config_json = None
with open(args.config, 'r') as jf:
config_json = json.load(jf)
configs = config_json['projects']
# Run the weekly update.
export(
gh_user,
gh_pass,
gh_api,
configs)
if __name__ == "__main__":
main()
```
#### File: github_project_management/github_project_management/utils.py
```python
import csv
from github_project_management.constants import HEADER
def save_as_csv(rows, filename):
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(HEADER)
for row in rows:
writer.writerow([row[col] for col in HEADER])
```
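A minimal sketch of calling the helper above; rows are expected to be dicts keyed by the column names listed in `HEADER` (placeholder values shown):
```python
from github_project_management.constants import HEADER
from github_project_management.utils import save_as_csv
rows = [{col: '' for col in HEADER}]  # one placeholder row
save_as_csv(rows, 'issues.csv')
```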
|
{
"source": "jfallaire/generator-ps-boilerplate-project",
"score": 2
}
|
#### File: extensionRunner/cdf/blob_store.py
```python
from attr import attrib, attrs
from enum import auto
from typing import List, Optional as Opt, Union
from .root import CASING, CoveoInterface, ExceptionBase, JidEnumFlag, JidType, MultiOut, api
@attrs(kw_only=True, auto_attribs=True)
class BlobStoreException(ExceptionBase, hint="Coveo.BlobStoreService.BlobStoreException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidBlobIdException(BlobStoreException, hint="Coveo.BlobStoreService.InvalidBlobIdException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class OutOfRangeException(BlobStoreException, hint="Coveo.BlobStoreService.OutOfRangeException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class BlobIdExistsException(BlobStoreException, hint="Coveo.BlobStoreService.BlobIdExistsException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class IncompleteBlobException(BlobStoreException, hint="Coveo.BlobStoreService.IncompleteBlobException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidChunkSizeException(BlobStoreException, hint="Coveo.BlobStoreService.InvalidChunkSizeException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InternalServerException(BlobStoreException, hint="Coveo.BlobStoreService.InternalServerException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ResendBlobException(BlobStoreException, hint="Coveo.BlobStoreService.ResendBlobException"):
def __init__(self) -> None:
...
class ExpirationPolicyType(JidEnumFlag):
"""Defines garbage collection policy types.
Attributes:
Perennial: No expiration. Blob must be deleted explicitly.
TTLAfterCreation: Blob will be deleted when its lifespan reaches the specified threshold.
"""
Perennial: int = auto()
TTLAfterCreation: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class ExpirationPolicy(JidType, hint="Coveo.BlobStoreService.ExpirationPolicy"):
"""Defines a garbage collection policy.
Attributes:
type_: Type of expiration policy (See ExpirationPolicyType).
life_span_s: Desired lifespan of the object based on the policy type (in seconds). Ignored when using 'Perennial' policy type.
"""
type_: Opt[ExpirationPolicyType] = attrib(default=None, metadata={CASING: "Type"})
life_span_s: Opt[int] = attrib(default=None, metadata={CASING: "LifeSpan_s"})
def __init__(
self,
*,
type_: Opt[ExpirationPolicyType] = attrib(default=None, metadata={CASING: "Type"}),
life_span_s: Opt[int] = attrib(default=None, metadata={CASING: "LifeSpan_s"}),
) -> None:
"""
Parameters:
type_: Type of expiration policy (See ExpirationPolicyType).
life_span_s: Desired lifespan of the object based on the policy type (in seconds). Ignored when using 'Perennial' policy type.
"""
class IBlobStore(CoveoInterface):
"""The blob store API exposes methods to interact with this blob store. It can be used to store and retrieve any type of data."""
@api("POST/blobs")
def add_blob(
self,
*,
data: Union[str, bytes],
is_data_complete: bool,
expiration_policy: ExpirationPolicy,
total_size: int = 0,
) -> str:
"""Adds a new blob (or part of it) to the store.
Parameters:
data: The data to store.
is_data_complete: Indicates if the blob is data complete. When passing false, the 'AppendData' method must be used to complete the data.
            expiration_policy: The expiration policy to use. Passing null is equivalent to passing ExpirationPolicyType.Perennial.
total_size: The complete blob size
"""
@api("PUT/blobs/{blob_id}")
def add_blob_with_id(
self,
*,
blob_id: str,
data: Union[str, bytes],
is_data_complete: bool,
expiration_policy: ExpirationPolicy,
total_size: int = 0,
) -> None:
"""Adds a new blob (or part of it) to the store using a custom Id.
Parameters:
blob_id: A custom and unique Id that identifies the blob.
data: The data to store.
is_data_complete: Indicates if the blob is data complete. When passing false, the 'AppendData' method must be used to complete the data.
            expiration_policy: The expiration policy to use. Passing null is equivalent to passing ExpirationPolicyType.Perennial.
total_size: The complete blob size
"""
@api("POST/blobs/{blob_id}/data")
def append_data(self, *, blob_id: str, data: Union[str, bytes], is_data_complete: bool) -> None:
"""Appends bytes to an existing, incomplete blob.
Parameters:
blob_id: The Id of a blob.
data: The data to append.
is_data_complete: Indicates if the blob is now data complete.
"""
@api("GET/blobs/{blob_id}/size")
def get_blob_size(self, *, blob_id: str) -> int:
"""Returns the current size of a blob from the store.
Parameters:
blob_id: The Id of a blob.
"""
@api("GET/blobs/{blob_id}")
def get_blob(self, *, blob_id: str, start_pos: int, size: int) -> MultiOut:
"""Returns a blob (or part of it) from the store.
Parameters:
blob_id: The Id of a blob.
start_pos: The byte position where reading should begin.
size: The amount of bytes to read.
"""
@api("DELETE/blobs/{blob_id}")
def delete_blob(self, *, blob_id: str) -> None:
"""Deletes a blob from the store.
Parameters:
blob_id: The Id of a blob.
"""
@api("DELETE/blobs")
def delete_blobs(self, *, blob_ids: List[str]) -> None:
"""Deletes a batch of blobs from the blob store.
Parameters:
blob_ids: A batch of blob Ids.
"""
@api("DELETE/blobs?prefix={Prefix}")
def delete_blobs_where_id_starts_with(self, *, prefix: str) -> None:
"""Deletes all blobs where the BlobId starts with 'Prefix'.
Parameters:
prefix: A prefix used to locate blobs.
"""
@api("GET/blobs/{blob_id}?exist}")
def blob_exists(self, *, blob_id: str) -> bool:
"""Indicates if a blob exists in the store. Remark: A blob exists even when it's not data complete.
Parameters:
blob_id: The Id of a blob.
"""
@api("POST/blobs/duplicate")
def duplicate_blobs(self, *, original_blob_ids: List[str]) -> List[str]:
"""Duplicates a list of blobs in the store."""
```
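A minimal sketch of the data types above. How `IBlobStore` gets bound to a concrete transport lives outside this module, so `blob_store` below stands in for a hypothetical client object implementing the interface, and the import path is assumed from the file layout:
```python
from extensionRunner.cdf.blob_store import ExpirationPolicy, ExpirationPolicyType
# Keep the blob for one hour after creation.
policy = ExpirationPolicy(type_=ExpirationPolicyType.TTLAfterCreation, life_span_s=3600)
# blob_id = blob_store.add_blob(data=b"hello", is_data_complete=True,
#                               expiration_policy=policy, total_size=5)
```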
#### File: extensionRunner/cdf/data.py
```python
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import Any, List, Optional as Opt
from .root import CASING, CoveoInterface, ExceptionBase, JidEnumFlag, JidType
class ExprOp(JidEnumFlag):
Add: int = auto()
Sub: int = auto()
Mul: int = auto()
Div: int = auto()
Mod: int = auto()
Power: int = auto()
ShiftL: int = auto()
ShiftR: int = auto()
Neg: int = auto()
Eq: int = auto()
Ne: int = auto()
Lt: int = auto()
Le: int = auto()
Gt: int = auto()
Ge: int = auto()
Not: int = auto()
AndAlso: int = auto()
OrElse: int = auto()
BitAnd: int = auto()
BitOr: int = auto()
BitNot: int = auto()
BitXor: int = auto()
Contains: int = auto()
Field: int = auto()
Cte: int = auto()
Alias: int = auto()
New: int = auto()
Select: int = auto()
SelectMany: int = auto()
Table: int = auto()
Join: int = auto()
Where: int = auto()
OrderByA: int = auto()
OrderByD: int = auto()
ThenByA: int = auto()
ThenByD: int = auto()
Min: int = auto()
Max: int = auto()
Sum: int = auto()
Avg: int = auto()
Count: int = auto()
Skip: int = auto()
Take: int = auto()
Union: int = auto()
Intersect: int = auto()
Diff: int = auto()
Distinct: int = auto()
GroupBy: int = auto()
GroupJoin: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class Expr(JidType, hint="Coveo.Data.Expr"):
op: Opt[ExprOp] = None
def __init__(self, *, op: Opt[ExprOp] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprAlias(Expr, hint="Coveo.Data.ExprAlias"):
number: Opt[int] = None
def __init__(self, *, number: Opt[int] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprTable(Expr, hint="Coveo.Data.ExprTable"):
name: Opt[str] = None
alias: Opt[ExprAlias] = None
def __init__(self, *, name: Opt[str] = None, alias: Opt[ExprAlias] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprJoin(Expr, hint="Coveo.Data.ExprJoin"):
outer: Opt[Expr] = None
inner: Opt[Expr] = None
outer_key_selector: Opt[Expr] = None
inner_key_selector: Opt[Expr] = None
def __init__(
self,
*,
outer: Opt[Expr] = None,
inner: Opt[Expr] = None,
outer_key_selector: Opt[Expr] = None,
inner_key_selector: Opt[Expr] = None,
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprSelect(Expr, hint="Coveo.Data.ExprSelect"):
source: Opt[Expr] = None
selector: Opt[Expr] = None
def __init__(self, *, source: Opt[Expr] = None, selector: Opt[Expr] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprWhere(Expr, hint="Coveo.Data.ExprWhere"):
source: Opt[Expr] = None
filter_: Opt[Expr] = attrib(default=None, metadata={CASING: "Filter"})
def __init__(
self, *, source: Opt[Expr] = None, filter_: Opt[Expr] = attrib(default=None, metadata={CASING: "Filter"})
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprBinary(Expr, hint="Coveo.Data.ExprBinary"):
left: Opt[Expr] = None
right: Opt[Expr] = None
def __init__(self, *, left: Opt[Expr] = None, right: Opt[Expr] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprUnary(Expr, hint="Coveo.Data.ExprUnary"):
expr: Opt[Expr] = None
def __init__(self, *, expr: Opt[Expr] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprField(Expr, hint="Coveo.Data.ExprField"):
expr: Opt[Expr] = None
name: Opt[str] = None
def __init__(self, *, expr: Opt[Expr] = None, name: Opt[str] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprCte(Expr, hint="Coveo.Data.ExprCte"):
value: Opt[Any] = None
def __init__(self, *, value: Opt[Any] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprNew(Expr, hint="Coveo.Data.ExprNew"):
exprs: Opt[List[Expr]] = None
def __init__(self, *, exprs: Opt[List[Expr]] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprContains(Expr, hint="Coveo.Data.ExprContains"):
left: Opt[Expr] = None
right: Opt[Expr] = None
def __init__(self, *, left: Opt[Expr] = None, right: Opt[Expr] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprSelectMany(Expr, hint="Coveo.Data.ExprSelectMany"):
source1: Opt[Expr] = None
source2: Opt[Expr] = None
alias2: Opt[ExprAlias] = None
def __init__(self, *, source1: Opt[Expr] = None, source2: Opt[Expr] = None, alias2: Opt[ExprAlias] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprOrderBy(Expr, hint="Coveo.Data.ExprOrderBy"):
source: Opt[Expr] = None
key: Opt[Expr] = None
def __init__(self, *, source: Opt[Expr] = None, key: Opt[Expr] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExprSkipOrTake(Expr, hint="Coveo.Data.ExprSkipOrTake"):
source: Opt[Expr] = None
how_many: Opt[int] = None
def __init__(self, *, source: Opt[Expr] = None, how_many: Opt[int] = None) -> None:
...
class DbRowState(JidEnumFlag):
Added: int = auto()
Updated: int = auto()
Deleted: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class DbRow(JidType, hint="Coveo.Data.DbRow"):
row_id: Opt[int] = None
row_state: Opt[DbRowState] = None
delta_id: Opt[int] = None
old_delta_id: Opt[int] = None
def __init__(
self,
*,
row_id: Opt[int] = None,
row_state: Opt[DbRowState] = None,
delta_id: Opt[int] = None,
old_delta_id: Opt[int] = None,
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DbLink(DbRow, hint="Coveo.Data.DbLink"):
from_row_id: Opt[int] = None
to_row_id: Opt[int] = None
def __init__(self, *, from_row_id: Opt[int] = None, to_row_id: Opt[int] = None) -> None:
...
class DbErrorKind(JidEnumFlag):
Appl: int = auto()
Unexpected: int = auto()
ConcurrentUpdate: int = auto()
Duplicate: int = auto()
BadFk: int = auto()
MissingValue: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class DbError(JidType, hint="Coveo.Data.DbError"):
row_id: Opt[str] = None
kind: Opt[DbErrorKind] = None
msg: Opt[str] = None
def __init__(self, *, row_id: Opt[str] = None, kind: Opt[DbErrorKind] = None, msg: Opt[str] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DbErrorField(DbError, hint="Coveo.Data.DbErrorField"):
field_name: Opt[str] = None
field_value: Opt[Any] = None
def __init__(self, *, field_name: Opt[str] = None, field_value: Opt[Any] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class OldToNewRowId(JidType, hint="Coveo.Data.OldToNewRowId"):
old_id: Opt[int] = None
new_id: Opt[int] = None
def __init__(self, *, old_id: Opt[int] = None, new_id: Opt[int] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DbDelta(JidType, hint="Coveo.Data.DbDelta"):
what: Opt[List[DbRow]] = None
who: Opt[str] = None
when: Opt[datetime] = None
why: Opt[str] = None
delta_id: Opt[int] = None
errors: Opt[List[DbError]] = None
row_id_map: Opt[List[OldToNewRowId]] = None
def __init__(
self,
*,
what: Opt[List[DbRow]] = None,
who: Opt[str] = None,
when: Opt[datetime] = None,
why: Opt[str] = None,
delta_id: Opt[int] = None,
errors: Opt[List[DbError]] = None,
row_id_map: Opt[List[OldToNewRowId]] = None,
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DataException(ExceptionBase, hint="Coveo.Data.DataException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class CommitException(DataException, hint="Coveo.Data.CommitException"):
errors: Opt[List[DbError]] = None
def __init__(self, *, errors: Opt[List[DbError]] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DataTupleField(JidType, hint="Coveo.Data.DataTupleField"):
name: Opt[str] = None
value: Opt[Any] = None
def __init__(self, *, name: Opt[str] = None, value: Opt[Any] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DataTuple(JidType, hint="Coveo.Data.DataTuple"):
tuple_id: Opt[str] = None
tuple_type: Opt[str] = None
tuple_fields: Opt[List[DataTupleField]] = None
def __init__(
self, *, tuple_id: Opt[str] = None, tuple_type: Opt[str] = None, tuple_fields: Opt[List[DataTupleField]] = None
) -> None:
...
class IDbServer(CoveoInterface):
...
```
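A minimal sketch of composing the expression nodes above into a query tree, roughly `Documents WHERE State = 42`; the import path is assumed from the file layout, and setting the `op` tag explicitly on every node (as done here) is an assumption about how producers are expected to use these types:
```python
from extensionRunner.cdf.data import (
    ExprAlias, ExprBinary, ExprCte, ExprField, ExprOp, ExprTable, ExprWhere,
)
docs = ExprTable(op=ExprOp.Table, name="Documents", alias=ExprAlias(op=ExprOp.Alias, number=1))
predicate = ExprBinary(
    op=ExprOp.Eq,
    left=ExprField(op=ExprOp.Field, expr=docs.alias, name="State"),
    right=ExprCte(op=ExprOp.Cte, value=42),
)
query = ExprWhere(op=ExprOp.Where, source=docs, filter_=predicate)
```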
#### File: extensionRunner/cdf/diagnostics.py
```python
from enum import auto
from .root import CoveoInterface, JidEnumFlag, api
class DumpType(JidEnumFlag):
"""Defines dump types."""
Mini: int = auto()
Normal: int = auto()
Full: int = auto()
class IDiagnostics(CoveoInterface):
@api("POST/dump")
def dump_by_name(self, *, name: str, user_name: str, dump_type: DumpType = DumpType.Full) -> None:
"""Dumps a process identified by a name.
Parameters:
name: The name used to identify the process.
user_name: The user that made the request.
dump_type: The type of dump.
"""
@api("POST/dump_pid/{process_id}")
def dump_by_pid(self, *, process_id: int, user_name: str, dump_type: DumpType = DumpType.Full) -> None:
"""Dumps a process identified by a PID.
Parameters:
process_id: The process id.
user_name: The user that made the request.
dump_type: The type of dump.
"""
@api("POST/crash")
def crash(self) -> None:
...
class IAgentDiagnosticsManager(CoveoInterface):
"""The diagnostics API for agents."""
@api("POST/agents/{agent_name}/dump")
def dump_agent(self, *, agent_name: str, user_name: str, dump_type: DumpType = DumpType.Full) -> None:
"""Dumps an agent process.
Parameters:
agent_name: The id of the agent.
user_name: The user that made the request.
dump_type: The type of dump.
"""
@api("POST/agents/{agent_name}/dump_instance/{instance_id}")
def dump_instance(
self, *, agent_name: str, instance_id: str, user_name: str, dump_type: DumpType = DumpType.Full
) -> None:
"""Dumps an instance process.
Parameters:
agent_name: The id of the agent.
instance_id: The id of the instance.
user_name: The user that made the request.
dump_type: The type of dump.
"""
```
#### File: extensionRunner/cdf/document_definition.py
```python
from attr import attrib, attrs
from enum import auto
from typing import Any, Dict, List, Optional as Opt, Union
from .root import CASING, JidEnumFlag, JidType
class CompressionType(JidEnumFlag):
"""
Attributes:
Uncompressed: Document is uncompressed
ZLib: Data is compressed with zlib
GZip: Data is compressed with GZip
LZMA: Data is compressed with LZMA (e.g. 7-zip)
Deflate: Data is compressed with Zlib (No Header, e.g. DeflateStream from .Net)
"""
Uncompressed: int = auto()
ZLib: int = auto()
GZip: int = auto()
LZMA: int = auto()
Deflate: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class BlobEntry(JidType, hint="Coveo.BlobEntry"):
"""A structure that represents a blob entry from a store.
Attributes:
id_: The Id of the blob.
inline_blob: The blob content when inline.
compression: The compression method on the blob
"""
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"})
inline_blob: Opt[Union[str, bytes]] = None
compression: Opt[CompressionType] = None
def __init__(
self,
*,
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"}),
inline_blob: Opt[Union[str, bytes]] = None,
compression: Opt[CompressionType] = None,
) -> None:
"""
Parameters:
id_: The Id of the blob.
inline_blob: The blob content when inline.
compression: The compression method on the blob
"""
@attrs(kw_only=True, auto_attribs=True)
class LocalBlobEntry(BlobEntry, hint="Coveo.LocalBlobEntry"):
"""Blob entry that is stored locally
Attributes:
file_name: the local filename to access the blob from
"""
file_name: Opt[str] = None
def __init__(self, *, file_name: Opt[str] = None) -> None:
"""
Parameters:
file_name: the local filename to access the blob from
"""
class PermissionIdentityType(JidEnumFlag):
"""Defines permission identity types.
Attributes:
Unknown: Represents a standard, or undefined identity.
User: Represents a 'User' identity.
Group: Represents a 'Group' identity.
VirtualGroup: Represents a 'VirtualGroup' identity.
"""
Unknown: int = auto()
User: int = auto()
Group: int = auto()
VirtualGroup: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class Permission(JidType, hint="Coveo.Permission"):
"""A structure that represents a single permission.
Attributes:
identity_type: The type of identity.
security_provider: The name of the security provider.
identity: The identity, as defined by the specified security provider.
additional_info: The additional information
"""
identity_type: Opt[PermissionIdentityType] = None
security_provider: Opt[str] = None
identity: Opt[str] = None
additional_info: Opt[Dict[str, str]] = None
def __init__(
self,
*,
identity_type: Opt[PermissionIdentityType] = None,
security_provider: Opt[str] = None,
identity: Opt[str] = None,
additional_info: Opt[Dict[str, str]] = None,
) -> None:
"""
Parameters:
identity_type: The type of identity.
security_provider: The name of the security provider.
identity: The identity, as defined by the specified security provider.
additional_info: The additional information
"""
@attrs(kw_only=True, auto_attribs=True)
class SecurityIdentity(JidType, hint="Coveo.SecurityIdentity"):
"""A structure that represents a single security identity. Also known as a declarator.
Attributes:
identity_type: The type of security identity
provider: Security provider associated with the identity.
name: Name of the security identity.
additional_info: Additional information associated with the security identity as key-value pairs.
"""
identity_type: Opt[PermissionIdentityType] = None
provider: Opt[str] = None
name: Opt[str] = None
additional_info: Opt[Dict[str, str]] = None
def __init__(
self,
*,
identity_type: Opt[PermissionIdentityType] = None,
provider: Opt[str] = None,
name: Opt[str] = None,
additional_info: Opt[Dict[str, str]] = None,
) -> None:
"""
Parameters:
identity_type: The type of security identity
provider: Security provider associated with the identity.
name: Name of the security identity.
additional_info: Additional information associated with the security identity as key-value pairs.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionSet(JidType, hint="Coveo.PermissionSet"):
"""A structure that represents a collection of allowed and denied permissions.
Attributes:
allow_anonymous: Indicates if anonymous users (i.e.: everyone) are allowed.
allowed_permissions: The list of allowed permissions.
denied_permissions: The list of denied permissions.
name: An optional permission set name.
"""
allow_anonymous: Opt[bool] = None
allowed_permissions: Opt[List[Permission]] = None
denied_permissions: Opt[List[Permission]] = None
name: Opt[str] = None
def __init__(
self,
*,
allow_anonymous: Opt[bool] = None,
allowed_permissions: Opt[List[Permission]] = None,
denied_permissions: Opt[List[Permission]] = None,
name: Opt[str] = None,
) -> None:
"""
Parameters:
allow_anonymous: Indicates if anonymous users (i.e.: everyone) are allowed.
allowed_permissions: The list of allowed permissions.
denied_permissions: The list of denied permissions.
name: An optional permission set name.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionLevel(JidType, hint="Coveo.PermissionLevel"):
"""A structure that represents a level of permission where multiple permission sets can be specified.
Attributes:
name: An optional permission level name.
"""
permission_sets: Opt[List[PermissionSet]] = None
name: Opt[str] = None
def __init__(self, *, permission_sets: Opt[List[PermissionSet]] = None, name: Opt[str] = None) -> None:
"""
Parameters:
name: An optional permission level name.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionModel(JidType, hint="Coveo.PermissionModel"):
"""A structure that represent a permissions model that contains one or many permission levels."""
permission_levels: Opt[List[PermissionLevel]] = None
def __init__(self, *, permission_levels: Opt[List[PermissionLevel]] = None) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class SummarySentence(JidType, hint="Coveo.SummarySentence"):
"""
Attributes:
text: The summary sentence text.
word_count: The number of words in the summary sentence.
score: The score of the sentence.
"""
text: Opt[str] = None
word_count: Opt[int] = None
score: Opt[int] = None
def __init__(self, *, text: Opt[str] = None, word_count: Opt[int] = None, score: Opt[int] = None) -> None:
"""
Parameters:
text: The summary sentence text.
word_count: The number of words in the summary sentence.
score: The score of the sentence.
"""
@attrs(kw_only=True, auto_attribs=True)
class DataStreamValue(JidType, hint="Coveo.DataStreamValue"):
"""A structure that represents a data stream.
Attributes:
value: The blob entry containing the data.
origin: The name of the component that created this data.
"""
value: Opt[BlobEntry] = None
origin: Opt[str] = None
def __init__(self, *, value: Opt[BlobEntry] = None, origin: Opt[str] = None) -> None:
"""
Parameters:
value: The blob entry containing the data.
origin: The name of the component that created this data.
"""
@attrs(kw_only=True, auto_attribs=True)
class MetaDataValue(JidType, hint="Coveo.MetaDataValue"):
"""A structure that represents a collection of meta data from the same origin
Attributes:
values: The map of meta data.
origin: The origin of the meta data.
"""
values: Opt[Dict[str, List[Any]]] = None
origin: Opt[str] = None
def __init__(self, *, values: Opt[Dict[str, List[Any]]] = None, origin: Opt[str] = None) -> None:
"""
Parameters:
values: The map of meta data.
origin: The origin of the meta data.
"""
class OperationType(JidEnumFlag):
"""Defines document operation types.
Attributes:
Add: Add the document.
Delete: Delete a specific document.
DeleteOlderThan: Delete documents that are older than /OperationId/.
DeleteAndChildren: Delete the document and its children.
"""
Add: int = auto()
Delete: int = auto()
DeleteOlderThan: int = auto()
DeleteAndChildren: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class Document(JidType, hint="Coveo.Document"):
"""A structure that represents a document.
Attributes:
operation_id: An Id used to order operations.
index_identifier: An identifier used to identify the index this document should go to.
source_operation_id: The Id of the source operation that last processed this document.
type_: The operation to perform on this document.
source_key: The Id of the source that contains this document.
source_id: The unique Id of the source that contains this document.
        organization_id: The Id of the organization to which this document belongs.
id_: The Id of this document.
parent_id: The Id of the parent document.
top_parent_id: The Id of the top parent document.
permissions: The permissions of this document.
meta_data: The meta data values pertaining to this document.
data_streams: The data streams (blobs) associated with this document.
attachments: The collection of children documents.
attachment_ids: The collection of children ids.
"""
operation_id: Opt[int] = None
index_identifier: Opt[str] = None
source_operation_id: Opt[str] = None
type_: Opt[OperationType] = attrib(default=None, metadata={CASING: "Type"})
source_key: Opt[str] = None
source_id: Opt[str] = None
organization_id: Opt[str] = None
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"})
parent_id: Opt[str] = None
top_parent_id: Opt[str] = None
permissions: Opt[List[PermissionLevel]] = None
meta_data: Opt[List[MetaDataValue]] = None
data_streams: Opt[Dict[str, List[DataStreamValue]]] = None
attachments: "Opt[List[Document]]" = None
attachment_ids: Opt[List[str]] = None
def __init__(
self,
*,
operation_id: Opt[int] = None,
index_identifier: Opt[str] = None,
source_operation_id: Opt[str] = None,
type_: Opt[OperationType] = attrib(default=None, metadata={CASING: "Type"}),
source_key: Opt[str] = None,
source_id: Opt[str] = None,
organization_id: Opt[str] = None,
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"}),
parent_id: Opt[str] = None,
top_parent_id: Opt[str] = None,
permissions: Opt[List[PermissionLevel]] = None,
meta_data: Opt[List[MetaDataValue]] = None,
data_streams: Opt[Dict[str, List[DataStreamValue]]] = None,
attachments: "Opt[List[Document]]" = None,
attachment_ids: Opt[List[str]] = None,
) -> None:
"""
Parameters:
operation_id: An Id used to order operations.
index_identifier: An identifier used to identify the index this document should go to.
source_operation_id: The Id of the source operation that last processed this document.
type_: The operation to perform on this document.
source_key: The Id of the source that contains this document.
source_id: The unique Id of the source that contains this document.
            organization_id: The Id of the organization to which this document belongs.
id_: The Id of this document.
parent_id: The Id of the parent document.
top_parent_id: The Id of the top parent document.
permissions: The permissions of this document.
meta_data: The meta data values pertaining to this document.
data_streams: The data streams (blobs) associated with this document.
attachments: The collection of children documents.
attachment_ids: The collection of children ids.
"""
```
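A minimal sketch of assembling a `Document` with one metadata block, one permission level and an inline body stream; the import path is assumed from the file layout, and the identifiers, provider name and stream name are placeholders:
```python
from extensionRunner.cdf.document_definition import (
    BlobEntry, CompressionType, DataStreamValue, Document, MetaDataValue,
    OperationType, Permission, PermissionIdentityType, PermissionLevel, PermissionSet,
)
doc = Document(
    type_=OperationType.Add,
    id_="file://share/readme.txt",
    source_id="my-source-id",
    organization_id="my-org-id",
    meta_data=[MetaDataValue(origin="crawler", values={"title": ["Read me"]})],
    permissions=[PermissionLevel(name="Level1", permission_sets=[PermissionSet(
        allow_anonymous=False,
        allowed_permissions=[Permission(identity_type=PermissionIdentityType.User,
                                        security_provider="Email Security Provider",
                                        identity="user@example.com")],
    )])],
    data_streams={"body_text": [DataStreamValue(
        origin="crawler",
        value=BlobEntry(inline_blob=b"Hello, world.",
                        compression=CompressionType.Uncompressed),
    )]},
)
```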
#### File: extensionRunner/cdf/document_processor_script.py
```python
from attr import attrs
from typing import Any, Dict, List, Optional as Opt, Union
from .root import CoveoInterface, JidType, api
from .document_processor import DocumentProcessorException
from .document_definition import BlobEntry, CompressionType, DataStreamValue, MetaDataValue, PermissionLevel
from .script_store import ScriptPackage
from .logger import LogEntry
from .document_config_definition import DocumentProcessorScriptParameters
@attrs(kw_only=True, auto_attribs=True)
class DocumentProcessorScriptException(DocumentProcessorException, hint="Coveo.DocumentProcessorScriptException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class MissingDocumentProcessorConfigException(
DocumentProcessorScriptException, hint="Coveo.MissingDocumentProcessorConfigException"
):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidDocumentProcessorParametersException(
DocumentProcessorScriptException, hint="Coveo.InvalidDocumentProcessorParametersException"
):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class UnknownScriptLanguageException(DocumentProcessorScriptException, hint="Coveo.UnknownScriptLanguageException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptCompilationException(DocumentProcessorScriptException, hint="Coveo.ScriptCompilationException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class UnknownIdException(DocumentProcessorScriptException, hint="Coveo.UnknownIdException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptingEngineReturnedErrorException(
DocumentProcessorScriptException, hint="Coveo.ScriptingEngineReturnedErrorException"
):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptIsDisabledException(DocumentProcessorScriptException, hint="Coveo.ScriptIsDisabledException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class PythonScriptException(DocumentProcessorScriptException, hint="Coveo.PythonScriptException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class PythonPackageException(DocumentProcessorScriptException, hint="Coveo.PythonPackageException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptSkippedException(DocumentProcessorScriptException, hint="Coveo.ScriptSkippedException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class RejectedFromScriptException(DocumentProcessorScriptException, hint="Coveo.RejectedFromScriptException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptingEngine(JidType, hint="Coveo.ScriptingEngine"):
"""
Attributes:
executable: The program to execute
command_line: The command line to pass to the executable
stream_data_path: Where the engine will save the stream created by the script
recycle_interval: Number of scripts to execute before recycling the engine
environment_variables: Additional environment variables for the script engine process
        number_to_keep_hot: Number of engines to keep ready for execution
connection_string: Connection string to the engine
org_isolation: Whether or not to have an engine for each organisation
        number_in_standby: Number of engines to keep in standby for execution
"""
executable: Opt[str] = None
command_line: Opt[str] = None
stream_data_path: Opt[str] = None
recycle_interval: Opt[int] = None
environment_variables: Opt[List[str]] = None
number_to_keep_hot: int = 10
connection_string: Opt[str] = None
org_isolation: bool = True
number_in_standby: int = 2
def __init__(
self,
*,
executable: Opt[str] = None,
command_line: Opt[str] = None,
stream_data_path: Opt[str] = None,
recycle_interval: Opt[int] = None,
environment_variables: Opt[List[str]] = None,
number_to_keep_hot: int = 10,
connection_string: Opt[str] = None,
org_isolation: bool = True,
number_in_standby: int = 2,
) -> None:
"""
Parameters:
executable: The program to execute
command_line: The command line to pass to the executable
stream_data_path: Where the engine will save the stream created by the script
recycle_interval: Number of scripts to execute before recycling the engine
environment_variables: Additional environment variables for the script engine process
number_to_keep_hot: Number of engines to keep ready for execution
connection_string: Connection string to the engine
org_isolation: Whether or not to have an engine for each organisation
number_in_standby: Number of engines to keep in standby for execution
"""
@attrs(kw_only=True, auto_attribs=True)
class DocumentProcessorScriptConfiguration(JidType, hint="Coveo.DocumentProcessorScriptConfiguration"):
"""
Attributes:
engines: The engine definitions
maximum_blob_size_to_return: Maximum inline stream size (in MB) that we can send to the engine
compiled_code_cache_size: The size of compiled code to keep in cache
compiled_code_cache_expiration_millis: The time in millis to keep a compiled code blob cached
output_engine_logs_to_own_log: Whether to output the logs produced by the engines to our log
blob_compression_type: The compression type to use when compressing output blobs
"""
engines: Opt[Dict[str, ScriptingEngine]] = None
maximum_blob_size_to_return: int = 10
compiled_code_cache_size: int = 10485760
compiled_code_cache_expiration_millis: int = 30000
output_engine_logs_to_own_log: Opt[bool] = None
blob_compression_type: CompressionType = CompressionType.ZLib
def __init__(
self,
*,
engines: Opt[Dict[str, ScriptingEngine]] = None,
maximum_blob_size_to_return: int = 10,
compiled_code_cache_size: int = 10485760,
compiled_code_cache_expiration_millis: int = 30000,
output_engine_logs_to_own_log: Opt[bool] = None,
blob_compression_type: CompressionType = CompressionType.ZLib,
) -> None:
"""
Parameters:
engines: The engine definitions
maximum_blob_size_to_return: Maximum inline stream size (in MB) that we can send to the engine
compiled_code_cache_size: The size of compiled code to keep in cache
compiled_code_cache_expiration_millis: The time in millis to keep a compiled code blob cached
output_engine_logs_to_own_log: Whether to output the logs produced by the engines to our log
blob_compression_type: The compression type to use when compressing output blobs
"""
class IScriptAdmin(CoveoInterface):
"""The API to change a script blade configuration."""
@api("POST/config")
def set_config(self, *, config: DocumentProcessorScriptConfiguration) -> None:
"""Change the script configuration.
Parameters:
config: The new configuration.
"""
@api("GET/config")
def get_config(self) -> DocumentProcessorScriptConfiguration:
"""Get the script blade configuration."""
@api("POST/prepare")
def prepare_packages(
self, *, organization_id: str, packages: List[str], language: str, location: str, merge: bool
) -> List[ScriptPackage]:
"""
Parameters:
organization_id: The organization identifier.
packages: The list of packages to prepare
language: The language of the packages, ex: python
location: Where to put the zip package in S3.
merge: Whether to merge all packages together in one zip.
"""
@api("POST/resume")
def resume_scripting_engines(self) -> None:
"""Resume all paused scripting engines and disables all future pausing."""
@attrs(kw_only=True, auto_attribs=True)
class ScriptExecutionResult(JidType, hint="Coveo.ScriptExecutionResult"):
"""
Attributes:
meta_data: The meta data to update the document with
permissions: The new permissions for document.
log_entries: The log entries we got while executing the script
data_streams: The new data streams to add on the document
system_log_entries: The system-related log entries we got while executing the script
"""
meta_data: Opt[Dict[str, List[Any]]] = None
permissions: Opt[List[PermissionLevel]] = None
log_entries: Opt[List[LogEntry]] = None
data_streams: Opt[Dict[str, BlobEntry]] = None
system_log_entries: Opt[List[LogEntry]] = None
def __init__(
self,
*,
meta_data: Opt[Dict[str, List[Any]]] = None,
permissions: Opt[List[PermissionLevel]] = None,
log_entries: Opt[List[LogEntry]] = None,
data_streams: Opt[Dict[str, BlobEntry]] = None,
system_log_entries: Opt[List[LogEntry]] = None,
) -> None:
"""
Parameters:
meta_data: The meta data to update the document with
permissions: The new permissions for document.
log_entries: The log entries we got while executing the script
data_streams: The new data streams to add on the document
system_log_entries: The system-related log entries we got while executing the script
"""
class IScriptingEngine(CoveoInterface):
@api("POST/compile")
def compile(self, *, script_id: str, code: str) -> Union[str, bytes]:
"""
Parameters:
script_id: The id of the script to compile
code: The code to compile
"""
@api("POST/execute", id_="Id")
def execute(
self,
*,
parameters: DocumentProcessorScriptParameters,
id_: str,
meta_data: List[MetaDataValue],
meta_data_file: str,
permissions: List[PermissionLevel],
data_streams: Dict[str, List[DataStreamValue]],
package_paths: List[str],
) -> ScriptExecutionResult:
"""
Parameters:
parameters: The script parameters.
id_: The Id of this document.
meta_data: The meta data values pertaining to this document.
meta_data_file: File containing the document meta data
permissions: The permissions of this document.
data_streams: The requested data streams
package_paths: Additional folders to load packages from.
"""
@api("POST/prepare")
def prepare_packages(self, *, packages: List[str], working_path: str, merge: bool) -> List[ScriptPackage]:
"""
Parameters:
packages: The list of packages to prepare
working_path: The working path
merge: Whether to merge all packages together in one zip.
"""
@api("GET/logs")
def get_last_log_entries(self) -> List[LogEntry]:
...
```
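The configuration types above are plain `attrs` data classes, so a blade configuration can be assembled directly with keyword arguments. Below is a minimal, hypothetical sketch; the import path is inferred from the file headers in this document, and the engine values are placeholders rather than a recommended setup.

```python
# Hypothetical import path; the actual module name for this file is not shown here.
from extensionRunner.cdf.document_processor_script import (
    DocumentProcessorScriptConfiguration,
    ScriptingEngine,
)
from extensionRunner.cdf.document_definition import CompressionType

# Describe one Python scripting engine.
python_engine = ScriptingEngine(
    executable="python",             # the program to execute
    command_line="-m engine_host",   # illustrative command line only
    recycle_interval=100,            # recycle after 100 executed scripts
    number_to_keep_hot=10,
    org_isolation=True,
)

# Wrap it in the blade configuration that IScriptAdmin.set_config accepts.
config = DocumentProcessorScriptConfiguration(
    engines={"python": python_engine},
    maximum_blob_size_to_return=10,              # MB
    blob_compression_type=CompressionType.ZLib,
)
# admin.set_config(config=config)  # where `admin` is an IScriptAdmin client
```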
#### File: extensionRunner/cdf/document_tracker.py
```python
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import Any, Dict, Optional as Opt
from .root import CASING, CoveoInterface, JidEnumFlag, JidType, api
class TaskType(JidEnumFlag):
Streaming: int = auto()
Consuming: int = auto()
Crawling: int = auto()
Processing: int = auto()
Mapping: int = auto()
Extension: int = auto()
Indexing: int = auto()
Detection: int = auto()
class OperationType(JidEnumFlag):
Add: int = auto()
Delete: int = auto()
AddByReference: int = auto()
class ResultType(JidEnumFlag):
Completed: int = auto()
Warning: int = auto()
Rejected: int = auto()
Error: int = auto()
Skipped: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class TrackingInformation(JidType, hint="Coveo.DocumentTracker.TrackingInformation"):
uri: Opt[str] = attrib(default=None, metadata={CASING: "URI"})
organization_id: Opt[str] = None
source_id: Opt[str] = None
request_id: Opt[str] = None
date_time: Opt[datetime] = None
task: Opt[TaskType] = None
operation: Opt[OperationType] = None
result: Opt[ResultType] = None
meta: Opt[Dict[str, Any]] = None
resource_id: Opt[str] = None
def __init__(
self,
*,
uri: Opt[str] = attrib(default=None, metadata={CASING: "URI"}),
organization_id: Opt[str] = None,
source_id: Opt[str] = None,
request_id: Opt[str] = None,
date_time: Opt[datetime] = None,
task: Opt[TaskType] = None,
operation: Opt[OperationType] = None,
result: Opt[ResultType] = None,
meta: Opt[Dict[str, Any]] = None,
resource_id: Opt[str] = None,
) -> None:
...
class IDocumentTracker(CoveoInterface):
@api("POST/add")
def add(self, *, information: TrackingInformation) -> None:
...
```
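For illustration, a `TrackingInformation` record for a completed extension step could be built as follows. This is a sketch only: the import path follows the file header above, the values are placeholders, and how an `IDocumentTracker` client is obtained is not shown in this file.

```python
from datetime import datetime, timezone

from extensionRunner.cdf.document_tracker import (
    OperationType,
    ResultType,
    TaskType,
    TrackingInformation,
)

# Track one document that went through the Extension task successfully.
info = TrackingInformation(
    uri="https://example.com/doc/42",   # placeholder document URI
    organization_id="myorg",
    source_id="mysource",
    request_id="req-123",
    date_time=datetime.now(timezone.utc),
    task=TaskType.Extension,
    operation=OperationType.Add,
    result=ResultType.Completed,
    meta={"duration_ms": 12},
)
# tracker.add(information=info)  # where `tracker` implements IDocumentTracker
```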
#### File: extensionRunner/cdf/index_service.py
```python
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import Dict, List, Optional as Opt
from .root import CASING, CoveoInterface, ExceptionBase, JidEnumFlag, JidType, MultiOut, api
from .indexer_config import (
CollaborativeRanking,
Collection,
Field,
HighlightTag,
PhysicalIndex,
QueryHighlighter,
Ranking,
ResultsPreviewer,
SearchCertificate,
Slice,
Source,
System,
TagField,
)
from .index_tracking import IndexStatus
from .document_definition import PermissionModel, PermissionSet
from .security import EffectivePermissionsListingOptions, PermissionModelInformation
from .security_provider import SID
@attrs(kw_only=True, auto_attribs=True)
class IndexException(ExceptionBase, hint="Coveo.IndexService.IndexException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidBinaryVersionException(IndexException, hint="Coveo.IndexService.InvalidBinaryVersionException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class OutOfRangeException(ExceptionBase, hint="Coveo.IndexService.OutOfRangeException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidDocumentKeyException(IndexException, hint="Coveo.IndexService.InvalidDocumentKeyException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DocumentNotFoundException(IndexException, hint="Coveo.IndexService.DocumentNotFoundException"):
def __init__(self) -> None:
...
class BladeState(JidEnumFlag):
Created: int = auto()
Initialized: int = auto()
Starting: int = auto()
Running: int = auto()
WaitingForConfig: int = auto()
OutOfSync: int = auto()
ShuttingDown: int = auto()
Synchronizing: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class IndexerConfig(JidType, hint="Coveo.IndexService.IndexerConfig"):
"""Some global configuration for the indexer blade
Attributes:
tagging_manager_uri: The URI to the tagging manager
security_server_uri: The security server URI
mirror_name: The mirror name of this index
index_path: The physical path for the index
realtime_indexing_path: The physical path for the realtime indexing files.
slice_paths: The physical paths for each slice.
index_identifier: The index identifier.
config_cache_page_size: The b-tree page size for the config cache.
config_cache_size: The b-tree cache size for the config cache.
"""
tagging_manager_uri: Opt[str] = attrib(default=None, metadata={CASING: "TaggingManagerURI"})
security_server_uri: Opt[str] = attrib(default=None, metadata={CASING: "SecurityServerURI"})
mirror_name: Opt[str] = None
index_path: Opt[str] = None
realtime_indexing_path: Opt[str] = None
slice_paths: Opt[Dict[str, str]] = None
index_identifier: Opt[str] = None
config_cache_page_size: int = 1048576
config_cache_size: int = 67108864
def __init__(
self,
*,
tagging_manager_uri: Opt[str] = attrib(default=None, metadata={CASING: "TaggingManagerURI"}),
security_server_uri: Opt[str] = attrib(default=None, metadata={CASING: "SecurityServerURI"}),
mirror_name: Opt[str] = None,
index_path: Opt[str] = None,
realtime_indexing_path: Opt[str] = None,
slice_paths: Opt[Dict[str, str]] = None,
index_identifier: Opt[str] = None,
config_cache_page_size: int = 1048576,
config_cache_size: int = 67108864,
) -> None:
"""
Parameters:
tagging_manager_uri: The URI to the tagging manager
security_server_uri: The security server URI
mirror_name: The mirror name of this index
index_path: The physical path for the index
realtime_indexing_path: The physical path for the realtime indexing files.
slice_paths: The physical paths for each slice.
index_identifier: The index identifier.
config_cache_page_size: The b-tree page size for the config cache.
config_cache_size: The b-tree cache size for the config cache.
"""
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchConnection(JidType, hint="Coveo.IndexService.ElasticSearchConnection"):
"""Elasticsearch connection.
Attributes:
host: The URI of the elasticsearch host (like 'host.adomain.com').
port: The port to use (default is 9200).
username: The user name for http_auth.
password: The password for http_auth.
url_prefix: The URL prefix.
use_ssl: Whether we use SSL or not.
verify_certs: Whether we check the certificates or not.
ca_certs: Optional path to CA bundle on disk.
client_cert: Path to the file containing the private key and the certificate, or the cert only if using ClientKey.
client_key: Path to the file containing the private key if using separate cert and key files (client_cert will contain only the cert).
aws_access_key: The AWS access key.
aws_secret_key: The AWS secret key.
aws_region: The AWS region.
aws_service_name: The AWS service name.
"""
host: Opt[str] = None
port: int = 9200
username: Opt[str] = None
password: Opt[str] = None
url_prefix: Opt[str] = attrib(default=None, metadata={CASING: "URLPrefix"})
use_ssl: Opt[bool] = attrib(default=None, metadata={CASING: "UseSSL"})
verify_certs: Opt[bool] = None
ca_certs: Opt[str] = attrib(default=None, metadata={CASING: "CACerts"})
client_cert: Opt[str] = None
client_key: Opt[str] = None
aws_access_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSAccessKey"})
aws_secret_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSSecretKey"})
aws_region: Opt[str] = attrib(default=None, metadata={CASING: "AWSRegion"})
aws_service_name: Opt[str] = attrib(default=None, metadata={CASING: "AWSServiceName"})
def __init__(
self,
*,
host: Opt[str] = None,
port: int = 9200,
username: Opt[str] = None,
password: Opt[str] = None,
url_prefix: Opt[str] = attrib(default=None, metadata={CASING: "URLPrefix"}),
use_ssl: Opt[bool] = attrib(default=None, metadata={CASING: "UseSSL"}),
verify_certs: Opt[bool] = None,
ca_certs: Opt[str] = attrib(default=None, metadata={CASING: "CACerts"}),
client_cert: Opt[str] = None,
client_key: Opt[str] = None,
aws_access_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSAccessKey"}),
aws_secret_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSSecretKey"}),
aws_region: Opt[str] = attrib(default=None, metadata={CASING: "AWSRegion"}),
aws_service_name: Opt[str] = attrib(default=None, metadata={CASING: "AWSServiceName"}),
) -> None:
"""
Parameters:
host: The URI of the elasticsearch host (like 'host.adomain.com').
port: The port to use (default is 9200).
username: The user name for http_auth.
password: The password for http_auth.
url_prefix: The URL prefix.
use_ssl: Whether we use SSL or not.
verify_certs: Whether we check the certificates or not.
ca_certs: Optional path to CA bundle on disk.
client_cert: Path to the file containing the private key and the certificate, or the cert only if using ClientKey.
client_key: Path to the file containing the private key if using separate cert and key files (client_cert will contain only the cert).
aws_access_key: The AWS access key.
aws_secret_key: The AWS secret key.
aws_region: The AWS region.
aws_service_name: The AWS service name.
"""
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchBladeConfig(JidType, hint="Coveo.IndexService.ElasticSearchBladeConfig"):
"""Some global configuration for the elasticsearch indexer blade
Attributes:
message_store: Optional folder where to keep a copy of the add_document messages.
logger_config: Optional logger configuration. Format to be defined.
"""
message_store: Opt[str] = None
logger_config: Opt[str] = None
def __init__(self, *, message_store: Opt[str] = None, logger_config: Opt[str] = None) -> None:
"""
Parameters:
message_store: Optional folder where to keep a copy of the add_document messages.
logger_config: Optional logger configuration. Format to be defined.
"""
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchConfig(JidType, hint="Coveo.IndexService.ElasticSearchConfig"):
"""Some global configuration for the elasticsearch indexer blade and search API.
Attributes:
connection_v: List of elasticsearch connections.
indexer_config: Indexer blade config.
"""
connection_v: Opt[List[ElasticSearchConnection]] = None
indexer_config: Opt[ElasticSearchBladeConfig] = None
def __init__(
self,
*,
connection_v: Opt[List[ElasticSearchConnection]] = None,
indexer_config: Opt[ElasticSearchBladeConfig] = None,
) -> None:
"""
Parameters:
connection_v: List of elasticsearch connections.
indexer_config: Indexer blade config.
"""
@attrs(kw_only=True, auto_attribs=True)
class IndexState(JidType, hint="Coveo.IndexService.IndexState"):
"""Internal state for the index.
Attributes:
inconsistent_index: Whether the index is in an inconsistent state.
inconsistent_config: Whether the index config is in an inconsistent state.
"""
inconsistent_index: Opt[bool] = None
inconsistent_config: Opt[bool] = None
def __init__(self, *, inconsistent_index: Opt[bool] = None, inconsistent_config: Opt[bool] = None) -> None:
"""
Parameters:
inconsistent_index: Whether the index is in an inconsistent state.
inconsistent_config: Whether the index config is in an inconsistent state.
"""
class IIndexAdmin(CoveoInterface):
"""Main interface used to control an index node."""
@api("GET/ranking")
def get_ranking(self) -> Ranking:
"""Get ranking configuration for the index."""
@api("PUT/ranking")
def update_ranking(self, *, ranking: Ranking) -> None:
"""Update ranking configuration in the index.
Parameters:
ranking: The updated configuration for ranking.
"""
@api("GET/system")
def get_system(self) -> System:
"""Get system configuration for the index."""
@api("PUT/system", system="system")
def update_system(self, *, system: System) -> None:
"""Update system configuration in the index.
Parameters:
system: The updated configuration for system.
"""
@api("GET/query_highlighter")
def get_query_highlighter(self) -> QueryHighlighter:
"""Get query highlighter configuration for the index."""
@api("PUT/query_highlighter")
def update_query_highlighter(self, *, query_highlighter: QueryHighlighter) -> None:
"""Update query highlighter configuration in the index.
Parameters:
query_highlighter: The updated configuration for query highlighter.
"""
@api("GET/query_highlighter/highlight_tags")
def get_highlight_tags(self) -> List[HighlightTag]:
"""Get all the highlight tags in the index."""
@api("GET/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")
def get_highlight_tag(self, *, highlight_tag_id: int) -> HighlightTag:
"""Get a highlight tag from the index.
Parameters:
highlight_tag_id: The id of the highlight tag.
"""
@api("POST/query_highlighter/highlight_tags")
def add_highlight_tag(self, *, highlight_tag: HighlightTag) -> None:
"""Add a highlight tag to the index.
Parameters:
highlight_tag: The new highlight tag.
"""
@api("PUT/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")
def update_highlight_tag(self, *, highlight_tag_id: int, highlight_tag: HighlightTag) -> None:
"""Update a highlight tag in the index.
Parameters:
highlight_tag_id: The id of the highlight tag.
highlight_tag: The updated highlight tag.
"""
@api("DELETE/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")
def delete_highlight_tag(self, *, highlight_tag_id: int) -> None:
"""Delete a highlight tag contained in the index.
Parameters:
highlight_tag_id: The id of the highlight tag.
"""
@api("GET/slices")
def get_slices(self) -> List[Slice]:
"""Get all the slices in the index."""
@api("GET/slices/{slice_id}", slice_id="SliceID")
def get_slice(self, *, slice_id: int) -> Slice:
"""Get a slice from the index.
Parameters:
slice_id: The id of the slice.
"""
@api("POST/slices", slice_="Slice")
def add_slice(self, *, slice_: Slice) -> None:
"""Add a slice to the index.
Parameters:
slice_: The new slice.
"""
@api("PUT/slices/{slice_id}", slice_id="SliceID", slice_="Slice")
def update_slice(self, *, slice_id: int, slice_: Slice) -> None:
"""Update a slice in the index.
Parameters:
slice_id: The id of the slice.
slice_: The updated slice.
"""
@api("DELETE/slices/{slice_id}", slice_id="SliceID")
def delete_slice(self, *, slice_id: int) -> None:
"""Delete a slice from the index.
Parameters:
slice_id: The id of the slice.
"""
@api("GET/results_previewers")
def get_results_previewers(self) -> List[ResultsPreviewer]:
"""Get all the results previewers in the index."""
@api("GET/results_previewers/{results_previewer_id}", results_previewer_id="ResultsPreviewerID")
def get_results_previewer(self, *, results_previewer_id: int) -> ResultsPreviewer:
"""Get a results previewer from the index.
Parameters:
results_previewer_id: The id of the results previewer.
"""
@api("POST/results_previewers")
def add_results_previewer(self, *, results_previewer: ResultsPreviewer) -> None:
"""Add a results previewer to the index.
Parameters:
results_previewer: The new results previewer.
"""
@api("PUT/results_previewers/{results_previewer_id}", results_previewer_id="ResultsPreviewerID")
def update_results_previewer(self, *, results_previewer_id: int, results_previewer: ResultsPreviewer) -> None:
"""Update a results previewer in the index.
Parameters:
results_previewer_id: The id of the results previewer.
results_previewer: The updated results previewer.
"""
@api("DELETE/results_previewers/{results_previewer_id}", results_previewer_id="ResultsPreviewerID")
def delete_results_previewer(self, *, results_previewer_id: int) -> None:
"""Delete a results previewer from the index.
Parameters:
results_previewer_id: The id of the results previewer.
"""
@api("GET/tag_fields")
def get_tag_fields(self) -> List[TagField]:
"""Get all the tag fields in the index."""
@api("GET/tag_fields/{tag_field_id}", tag_field_id="TagFieldID")
def get_tag_field(self, *, tag_field_id: int) -> TagField:
"""Get a tag field from the index.
Parameters:
tag_field_id: The id of the tag field.
"""
@api("POST/tag_fields")
def add_tag_field(self, *, tag_field: TagField) -> None:
"""Add a tag field to the index.
Parameters:
tag_field: The new tag field.
"""
@api("PUT/tag_fields/{tag_field_id}", tag_field_id="TagFieldID")
def update_tag_field(self, *, tag_field_id: int, tag_field: TagField) -> None:
"""Update a tag field in the index.
Parameters:
tag_field_id: The id of the tag field.
tag_field: The updated tag field.
"""
@api("DELETE/tag_fields/{tag_field_id}", tag_field_id="TagFieldID")
def delete_tag_field(self, *, tag_field_id: int) -> None:
"""Delete a tag field from the index.
Parameters:
tag_field_id: The id of the tag field.
"""
@api("GET/collaborative_ranking")
def get_collaborative_ranking(self) -> CollaborativeRanking:
"""Get collaborative ranking configuration for the index."""
@api("PUT/collaborative_ranking")
def update_collaborative_ranking(self, *, collaborative_ranking: CollaborativeRanking) -> None:
"""Update collaborative ranking configuration in the index.
Parameters:
collaborative_ranking: The updated configuration for collaborative ranking.
"""
@api("GET/physical_index")
def get_physical_index(self) -> PhysicalIndex:
"""Get physical index configuration for the index."""
@api("PUT/physical_index")
def update_physical_index(self, *, physical_index: PhysicalIndex) -> None:
"""Update physical index configuration in the index.
Parameters:
physical_index: The updated configuration for physical index.
"""
@api("GET/search_certificates")
def get_search_certificates(self) -> List[SearchCertificate]:
"""Get the search certificates for the index."""
@api("GET/search_certificates/{search_certificate_id}", search_certificate_id="SearchCertificateID")
def get_search_certificate(self, *, search_certificate_id: int) -> SearchCertificate:
"""Get a search certificate from the index.
Parameters:
search_certificate_id: The id of the search certificate.
"""
@api("POST/search_certificates")
def add_search_certificate(self, *, search_certificate: SearchCertificate) -> None:
"""Add a new search certificate to the index.
Parameters:
search_certificate: The new search certificate
"""
@api("PUT/search_certificates/{search_certificate_id}", search_certificate_id="SearchCertificateID")
def update_search_certificate(self, *, search_certificate_id: int, search_certificate: SearchCertificate) -> None:
"""Update a search certificate in the index.
Parameters:
search_certificate_id: The id of the search certificate.
search_certificate: The updated search certificate.
"""
@api("DELETE/search_certificates/{search_certificate_id}", search_certificate_id="SearchCertificateID")
def delete_search_certificate(self, *, search_certificate_id: int) -> None:
"""Delete a search certificate from the index.
Parameters:
search_certificate_id: The id of the search certificate.
"""
@api("PUT/index")
def set_config(self, *, configuration: IndexerConfig) -> None:
"""Set the indexer configuration
Parameters:
configuration: The new configuration.
"""
@api("GET/index")
def get_config(self) -> IndexerConfig:
"""Get the indexer configuration"""
@api("GET/status")
def get_state(self) -> BladeState:
"""Get the state of the index."""
@api("DELETE/index")
def delete_index_data(self, *, delete_config: bool) -> None:
"""Delete all files used by an index node.
Parameters:
delete_config: True to delete the config folder.
"""
@api("GET/creation_date")
def get_creation_date(self) -> datetime:
"""Get the index creation date. Will be set to the source's value on the destination when synchronizing."""
@api("GET/readonly")
def is_in_read_only_mode(self) -> bool:
...
@api("PUT/readonly")
def set_read_only_mode(self, *, read_only_mode: bool) -> None:
...
@api("POST/restore_security_module")
def restore_security_module(self) -> None:
...
@api("POST/check_integrity")
def check_index_integrity(self) -> None:
...
@api("POST/cancel_check_integrity")
def cancel_index_integrity_check(self) -> None:
...
@api("GET/check_integrity_in_progress")
def is_checking_integrity(self) -> bool:
...
@api("POST/commit_transaction")
def commit_current_transaction(self, *, wait_for_documents: bool = False) -> None:
...
@api("POST/flush")
def flush(self) -> None:
...
@api("POST/export_tags")
def export_tags(self, *, output_file_name: str) -> None:
...
@api("DELETE/groupby_cache")
def clear_group_by_cache(self) -> None:
...
@api("POST/rebuild_wcl")
def rebuild_wcl(self) -> None:
...
@api("DELETE/stem_classes_correlation")
def decorrelate_stem_classes(self) -> None:
...
@api("DELETE/profilings_logs")
def delete_old_profiling_logs(self, *, days_to_keep_profiling_logs: int) -> None:
...
@api("GET/statistics")
def get_indexer_statistics(self) -> IndexStatus:
...
@api("GET/document_keys")
def get_document_keys(self, *, starting_document_key: str, page_size: int = 1000) -> List[str]:
"""Gets a list of indexed document keys, starting from the specified position.
Parameters:
starting_document_key: The key used to specify where to start the document keys listing.
page_size: The maximum number of document keys to list.
"""
@api("POST/document_permission_model")
def get_document_permission_model(self, *, document_key: str) -> PermissionModel:
...
@api("POST/document_permission_model_info")
def get_document_permission_model_information(
self, *, document_key: str, filter_effective_permissions_duplicates: bool = True
) -> PermissionModelInformation:
...
@api("POST/document_effective_permissions")
def get_document_effective_permissions(
self, *, document_key: str, beautify_effective_permissions: bool = True
) -> PermissionSet:
...
@api("POST/document_effective_permissions_info")
def get_document_effective_permissions_information(
self, *, document_key: str, listing_options: EffectivePermissionsListingOptions
) -> MultiOut:
...
@api("POST/dump_all_unique_permission_models_to_file")
def dump_all_unique_permission_models_to_file(self) -> None:
...
@api("POST/are_security_identities_used_in_document_permissions")
def are_security_identities_used_in_document_permissions(self, *, security_identities: List[SID]) -> List[bool]:
...
class IIndexingConfig(CoveoInterface):
"""Interface used to modify configurations that affect document indexing."""
@api("GET/collections")
def get_collections(self) -> List[Collection]:
"""Get all the collections in the index."""
@api("GET/collections/{collection_id}", collection_id="CollectionID")
def get_collection(self, *, collection_id: int) -> Collection:
"""Get the config of a collection from the index.
Parameters:
collection_id: The id of the collection.
"""
@api("PUT/collections")
def set_collections(self, *, collections: List[Collection]) -> None:
"""Set the collections in the index.
Parameters:
collections: The collections for the index.
"""
@api("POST/collections")
def add_collection(self, *, collection: Collection) -> None:
"""Add a collection to the index.
Parameters:
collection: The configuration for the new collection.
"""
@api("PUT/collections/{collection_id}", collection_id="CollectionID")
def update_collection(self, *, collection_id: int, collection: Collection) -> None:
"""Update a collection in the index.
Parameters:
collection_id: The id of the collection.
collection: The updated configuration for the collection.
"""
@api("DELETE/collections/{collection_id}", collection_id="CollectionID")
def delete_collection(self, *, collection_id: int) -> None:
"""Delete a collection from the index.
Parameters:
collection_id: The id of the collection.
"""
@api("GET/collections/{collection_id}/sources", collection_id="CollectionID")
def get_sources(self, *, collection_id: int) -> List[Source]:
"""Get all the sources for a collection.
Parameters:
collection_id: The id of the parent collection.
"""
@api("GET/collections/{collection_id}/sources/{source_id}", collection_id="CollectionID", source_id="SourceID")
def get_source(self, *, collection_id: int, source_id: int) -> Source:
"""Get the config of a source for a collection.
Parameters:
collection_id: The id of the parent collection.
source_id: The id of the source.
"""
@api("PUT/collections/{collection_id}/sources", collection_id="CollectionID")
def set_sources(self, *, collection_id: int, sources: List[Source]) -> None:
"""Set the sources for a collection in the index.
Parameters:
collection_id: The id of the parent collection.
sources: The sources for the collection in the index.
"""
@api("POST/collections/{collection_id}/sources", collection_id="CollectionID")
def add_source(self, *, collection_id: int, source: Source) -> None:
"""Add a source to the index.
Parameters:
collection_id: The id of the parent collection.
source: The configuration for the new source.
"""
@api("PUT/collections/{collection_id}/sources/{source_id}", collection_id="CollectionID", source_id="SourceID")
def update_source(self, *, collection_id: int, source_id: int, source: Source) -> None:
"""Update a source in the index.
Parameters:
collection_id: The id of the parent collection.
source_id: The id of the source.
source: The updated configuration for the source.
"""
@api("DELETE/collections/{collection_id}/sources/{source_id}", collection_id="CollectionID", source_id="SourceID")
def delete_source(self, *, collection_id: int, source_id: int) -> None:
"""Delete a source contained in the index.
Parameters:
collection_id: The id of the parent collection.
source_id: The id of the source.
"""
@api("GET/fields")
def get_fields(self) -> List[Field]:
"""Get all fields in the index."""
@api("PUT/fields/id")
def set_fields_by_id(self, *, fields: List[Field]) -> None:
"""Set the fields in the index.
Parameters:
fields: The fields for the index.
"""
@api("PUT/fields/name")
def set_fields_by_name(self, *, fields: List[Field]) -> None:
"""Set the fields in the index.
Parameters:
fields: The fields for the index.
"""
@api("GET/fields/id/{field_id}", field_id="FieldID")
def get_field_by_id(self, *, field_id: int) -> Field:
"""Get the config of a field from the index.
Parameters:
field_id: The id of the field.
"""
@api("GET/fields/name/{field_name}")
def get_field_by_name(self, *, field_name: str) -> Field:
"""Get the config of a field from the index.
Parameters:
field_name: The name of the field.
"""
@api("POST/fields/batch")
def add_fields(self, *, fields: List[Field]) -> None:
"""Add a batch of fields to the index.
Parameters:
fields: The configurations for the new fields.
"""
@api("PUT/fields/batch/id")
def update_fields_by_id(self, *, fields: List[Field]) -> None:
"""Update a batch fields in the index.
Parameters:
fields: The updated configurations for the fields.
"""
@api("PUT/fields/batch/name")
def update_fields_by_name(self, *, fields: List[Field]) -> None:
"""Update a batch of fields in the index.
Parameters:
fields: The updated configurations for the fields.
"""
@api("DELETE/fields/batch/id", field_ids="FieldIDs")
def delete_fields_by_id(self, *, field_ids: List[int]) -> None:
"""Delete a batch of field contained in the index.
Parameters:
field_ids: The ids of the fields.
"""
@api("DELETE/fields/batch/name")
def delete_fields_by_name(self, *, field_names: List[str]) -> None:
"""Delete a batch of fields contained in the index.
Parameters:
field_names: The names of the fields.
"""
@api("POST/fields")
def add_field(self, *, field: Field) -> None:
"""Add a field to the index.
Parameters:
field: The configuration for the new field.
"""
@api("PUT/fields/id/{field_id}", field_id="FieldID")
def update_field_by_id(self, *, field_id: int, field: Field) -> None:
"""Update a field in the index.
Parameters:
field_id: The id of the field.
field: The updated configuration for the field.
"""
@api("PUT/fields/name/{field_name}")
def update_field_by_name(self, *, field_name: str, field: Field) -> None:
"""Update a field in the index.
Parameters:
field_name: The name of the field.
field: The updated configuration for the field.
"""
@api("DELETE/fields/id/{field_id}", field_id="FieldID")
def delete_field_by_id(self, *, field_id: int) -> None:
"""Delete a field contained in the index.
Parameters:
field_id: The id of the field.
"""
@api("DELETE/fields/name/{field_name}")
def delete_field_by_name(self, *, field_name: str) -> None:
"""Delete a field contained in the index.
Parameters:
field_name: The name of the field.
"""
class IElasticSearchIndexAdmin(CoveoInterface):
"""Main interface used to control an index node."""
@api("PUT/elasticsearch")
def set_elastic_search_config(self, *, configuration: ElasticSearchConfig) -> None:
"""Set the elasticsearch configuration
Parameters:
configuration: The new configuration.
"""
@api("GET/elasticsearch")
def get_elastic_search_config(self) -> ElasticSearchConfig:
"""Get the elasticsearch configuration"""
```
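As a small usage sketch, an elasticsearch configuration for `IElasticSearchIndexAdmin.set_elastic_search_config` could be assembled as follows. The import path is inferred from the file header, and the host and options are placeholder values only.

```python
from extensionRunner.cdf.index_service import (
    ElasticSearchBladeConfig,
    ElasticSearchConfig,
    ElasticSearchConnection,
)

# One HTTPS connection to an elasticsearch host, plus blade-level options.
connection = ElasticSearchConnection(
    host="es.example.com",   # placeholder host
    port=9200,
    use_ssl=True,
    verify_certs=True,
)

config = ElasticSearchConfig(
    connection_v=[connection],
    indexer_config=ElasticSearchBladeConfig(),
)
# admin.set_elastic_search_config(configuration=config)
# where `admin` is an IElasticSearchIndexAdmin client
```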
#### File: extensionRunner/cdf/index_tracking.py
```python
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import List, Optional as Opt
from .root import CASING, JidEnumFlag, JidType
from .tracking import MetricEntry, StatusEntry
@attrs(kw_only=True, auto_attribs=True)
class IndexSourceStatus(JidType, hint="Coveo.IndexTracking.IndexSourceStatus"):
"""Status of a single source in the index
Attributes:
collection_id: ID of collection containing the source
source_id: Source ID
document_count: Number of documents in the source
document_total_size: Total size of documents in the source
pending_docs_to_add: Number of documents waiting to be added in transactions for this source
pending_docs_to_update: Number of documents waiting to be updated in transactions for this source
pending_docs_to_delete: Number of documents waiting to be deleted in transactions for this source
"""
collection_id: Opt[int] = None
source_id: Opt[int] = None
document_count: Opt[int] = None
document_total_size: Opt[int] = None
pending_docs_to_add: Opt[int] = None
pending_docs_to_update: Opt[int] = None
pending_docs_to_delete: Opt[int] = None
def __init__(
self,
*,
collection_id: Opt[int] = None,
source_id: Opt[int] = None,
document_count: Opt[int] = None,
document_total_size: Opt[int] = None,
pending_docs_to_add: Opt[int] = None,
pending_docs_to_update: Opt[int] = None,
pending_docs_to_delete: Opt[int] = None,
) -> None:
"""
Parameters:
collection_id: ID of collection containing the source
source_id: Source ID
document_count: Number of documents in the source
document_total_size: Total size of documents in the source
pending_docs_to_add: Number of documents waiting to be added in transactions for this source
pending_docs_to_update: Number of documents waiting to be updated in transactions for this source
pending_docs_to_delete: Number of documents waiting to be deleted in transactions for this source
"""
@attrs(kw_only=True, auto_attribs=True)
class LexiconMemoryBreakdown(JidType, hint="Coveo.IndexTracking.LexiconMemoryBreakdown"):
"""All the memory used by the lexicon
Attributes:
b_tree_caches: Memory used by the BTree caches
facets_cache: Memory used by the facets cache
terms_cache: Memory used by the terms cache
term_ids_cache: Memory used by the string to identifier mapping cache
sort_cache_num_fields: Memory used by the numerical fields sort cache
sort_cache_string_fields: Memory used by the string fields sort cache
sort_string_table: Memory used by the sort cache string table
evaluator_long_fields: Memory used by the long field evaluators
evaluator_long64fields: Memory used by the 64 bits long field evaluators
evaluator_date_fields: Memory used by the date field evaluators
evaluator_double_fields: Memory used by the double field evaluators
word_corrector_lexicon: Memory used by the word corrector lexicon structure
facets: Memory used by the facets structure
stem_expansion_map: Memory used by the stem expansion map structure
total: Total memory used by the lexicon
"""
b_tree_caches: Opt[int] = None
facets_cache: Opt[int] = None
terms_cache: Opt[int] = None
term_ids_cache: Opt[int] = attrib(default=None, metadata={CASING: "TermIDsCache"})
sort_cache_num_fields: Opt[int] = None
sort_cache_string_fields: Opt[int] = None
sort_string_table: Opt[int] = None
evaluator_long_fields: Opt[int] = None
evaluator_long64fields: Opt[int] = attrib(default=None, metadata={CASING: "EvaluatorLong64Fields"})
evaluator_date_fields: Opt[int] = None
evaluator_double_fields: Opt[int] = None
word_corrector_lexicon: Opt[int] = None
facets: Opt[int] = None
stem_expansion_map: Opt[int] = None
total: Opt[int] = None
def __init__(
self,
*,
b_tree_caches: Opt[int] = None,
facets_cache: Opt[int] = None,
terms_cache: Opt[int] = None,
term_ids_cache: Opt[int] = attrib(default=None, metadata={CASING: "TermIDsCache"}),
sort_cache_num_fields: Opt[int] = None,
sort_cache_string_fields: Opt[int] = None,
sort_string_table: Opt[int] = None,
evaluator_long_fields: Opt[int] = None,
evaluator_long64fields: Opt[int] = attrib(default=None, metadata={CASING: "EvaluatorLong64Fields"}),
evaluator_date_fields: Opt[int] = None,
evaluator_double_fields: Opt[int] = None,
word_corrector_lexicon: Opt[int] = None,
facets: Opt[int] = None,
stem_expansion_map: Opt[int] = None,
total: Opt[int] = None,
) -> None:
"""
Parameters:
b_tree_caches: Memory used by the BTree caches
facets_cache: Memory used by the facets cache
terms_cache: Memory used by the terms cache
term_ids_cache: Memory used by the string to identifier mapping cache
sort_cache_num_fields: Memory used by the numerical fields sort cache
sort_cache_string_fields: Memory used by the string fields sort cache
sort_string_table: Memory used by the sort cache string table
evaluator_long_fields: Memory used by the long field evaluators
evaluator_long64fields: Memory used by the 64 bits long field evaluators
evaluator_date_fields: Memory used by the date field evaluators
evaluator_double_fields: Memory used by the double field evaluators
word_corrector_lexicon: Memory used by the word corrector lexicon structure
facets: Memory used by the facets structure
stem_expansion_map: Memory used by the stem expansion map structure
total: Total memory used by the lexicon
"""
@attrs(kw_only=True, auto_attribs=True)
class MemoryBreakdown(JidType, hint="Coveo.IndexTracking.MemoryBreakdown"):
"""All the memory used by indexing structures
Attributes:
facet_lookup_cache: Memory used by the facet lookup cache
expression_cache: Memory used by the expressions cache
documents_cache: Memory used by the documents cache
transaction_writer: Memory used by the transaction writer
transaction_optimizer: Memory used by the transaction optimizer
transaction_reader: Memory used by the transaction reader
lexicon: Memory used by the lexicon (including facets)
authorization_manager: Memory used by the authorization manager
indexed_documents: Memory used by the indexed documents structure
collections: Memory used by the collections structure
file_security: Memory used by the file security structure and cache
ranking: Memory used by the ranking engine
total: Total memory used by the index structures
"""
facet_lookup_cache: Opt[int] = None
expression_cache: Opt[int] = None
documents_cache: Opt[int] = None
transaction_writer: Opt[int] = None
transaction_optimizer: Opt[int] = None
transaction_reader: Opt[int] = None
lexicon: Opt[LexiconMemoryBreakdown] = None
authorization_manager: Opt[int] = None
indexed_documents: Opt[int] = None
collections: Opt[int] = None
file_security: Opt[int] = None
ranking: Opt[int] = None
total: Opt[int] = None
def __init__(
self,
*,
facet_lookup_cache: Opt[int] = None,
expression_cache: Opt[int] = None,
documents_cache: Opt[int] = None,
transaction_writer: Opt[int] = None,
transaction_optimizer: Opt[int] = None,
transaction_reader: Opt[int] = None,
lexicon: Opt[LexiconMemoryBreakdown] = None,
authorization_manager: Opt[int] = None,
indexed_documents: Opt[int] = None,
collections: Opt[int] = None,
file_security: Opt[int] = None,
ranking: Opt[int] = None,
total: Opt[int] = None,
) -> None:
"""
Parameters:
facet_lookup_cache: Memory used by the facet lookup cache
expression_cache: Memory used by the expressions cache
documents_cache: Memory used by the documents cache
transaction_writer: Memory used by the transaction writer
transaction_optimizer: Memory used by the transaction optimizer
transaction_reader: Memory used by the transaction reader
lexicon: Memory used by the lexicon (including facets)
authorization_manager: Memory used by the authorization manager
indexed_documents: Memory used by the indexed documents structure
collections: Memory used by the collections structure
file_security: Memory used by the file security structure and cache
ranking: Memory used by the ranking engine
total: Total memory used by the index structures
"""
@attrs(kw_only=True, auto_attribs=True)
class IndexSliceStatus(JidType, hint="Coveo.IndexTracking.IndexSliceStatus"):
"""Status of an index slice
Attributes:
slice_id: Slice ID
last_transactions_application: Date/time of last application of transactions in the slice
visibility_delay: Time in seconds between creating and applying the most recent transaction
documents_fragmentation: Percentage between 0 and 100 of fragmentation for document-based structures
"""
slice_id: Opt[int] = None
last_transactions_application: Opt[datetime] = None
visibility_delay: Opt[int] = None
documents_fragmentation: Opt[int] = None
def __init__(
self,
*,
slice_id: Opt[int] = None,
last_transactions_application: Opt[datetime] = None,
visibility_delay: Opt[int] = None,
documents_fragmentation: Opt[int] = None,
) -> None:
"""
Parameters:
slice_id: Slice ID
last_transactions_application: Date/time of last application of transactions in the slice
visibility_delay: Time in seconds between creating and applying the most recent transaction
documents_fragmentation: Percentage between 0 and 100 of fragmentation for document-based structures
"""
@attrs(kw_only=True, auto_attribs=True)
class IndexStatus(StatusEntry, hint="Coveo.IndexTracking.IndexStatus"):
"""Status entry sent by the Indexer blade
Attributes:
pending_pre_transactions: Total number of pending pre-transactions waiting to be optimized
pending_transactions: Total number of pending transactions waiting to be applied
disk_space_used: Total disk space used by the index for all slices
remaining_disk_space: The remaining disk space on the index drive.
total_memory_used: Total memory used by all the index structures (excluding realtime indexing)
document_count: Total number of documents in the index
document_total_size: Total size of documents in the index
pending_docs_to_add: Total number of documents waiting to be added in transactions
pending_docs_to_update: Total number of documents waiting to be updated in transactions
pending_docs_to_delete: Total number of documents waiting to be deleted in transactions
visibility_delay: Time in seconds between creating and applying the most recent transaction
realtime_pending_pre_transactions: Total number of pending pre-transactions waiting to be optimized in the realtime portion of the index
realtime_pending_transactions: Total number of pending transactions waiting to be applied in the realtime portion of the index
realtime_disk_space_used: Total disk space used by the realtime portion of the index
realtime_total_memory_used: Total memory used by all the realtime index structures
realtime_document_count: Total number of documents in the realtime portion of the index
realtime_document_total_size: Total size of documents in the realtime portion of the index
realtime_pending_docs_to_add: Total number of documents waiting to be added in transactions in the realtime portion of the index
realtime_pending_docs_to_update: Total number of documents waiting to be updated in transactions in the realtime portion of the index
realtime_pending_docs_to_delete: Total number of documents waiting to be deleted in transactions in the realtime portion of the index
realtime_visibility_delay: Time in seconds between creating and applying the most recent transaction in the realtime portion of the index
fragmentation_level: Index fragmentation level
last_commit: Date/time of last Commit operation in Indexer blade (for pre-transactions)
sources: Status of each index source
slices: Status of each index slice
resident_set_size: Resident set size for the process
virtual_memory_size: Virtual memory size for the process
peak_resident_set_size: Peak resident set size for the process
peak_virtual_memory_size: Peak virtual memory size for the process
total_physical_memory: Total physical memory on the server
total_disk_space: Total disk space on the index drive
total_ocr_pages: The total number of pages that were extracted by the OCR module for documents
documents_fragmentation: Percentage between 0 and 100 of fragmentation for document-based structures
"""
pending_pre_transactions: Opt[int] = None
pending_transactions: Opt[int] = None
disk_space_used: Opt[int] = None
remaining_disk_space: Opt[int] = None
total_memory_used: Opt[MemoryBreakdown] = None
document_count: Opt[int] = None
document_total_size: Opt[int] = None
pending_docs_to_add: Opt[int] = None
pending_docs_to_update: Opt[int] = None
pending_docs_to_delete: Opt[int] = None
visibility_delay: Opt[int] = None
realtime_pending_pre_transactions: Opt[int] = None
realtime_pending_transactions: Opt[int] = None
realtime_disk_space_used: Opt[int] = None
realtime_total_memory_used: Opt[MemoryBreakdown] = None
realtime_document_count: Opt[int] = None
realtime_document_total_size: Opt[int] = None
realtime_pending_docs_to_add: Opt[int] = None
realtime_pending_docs_to_update: Opt[int] = None
realtime_pending_docs_to_delete: Opt[int] = None
realtime_visibility_delay: Opt[int] = None
fragmentation_level: Opt[int] = None
last_commit: Opt[datetime] = None
sources: Opt[List[IndexSourceStatus]] = None
slices: Opt[List[IndexSliceStatus]] = None
resident_set_size: Opt[int] = None
virtual_memory_size: Opt[int] = None
peak_resident_set_size: Opt[int] = None
peak_virtual_memory_size: Opt[int] = None
total_physical_memory: Opt[int] = None
total_disk_space: Opt[int] = None
total_ocr_pages: Opt[int] = None
documents_fragmentation: Opt[int] = None
def __init__(
self,
*,
pending_pre_transactions: Opt[int] = None,
pending_transactions: Opt[int] = None,
disk_space_used: Opt[int] = None,
remaining_disk_space: Opt[int] = None,
total_memory_used: Opt[MemoryBreakdown] = None,
document_count: Opt[int] = None,
document_total_size: Opt[int] = None,
pending_docs_to_add: Opt[int] = None,
pending_docs_to_update: Opt[int] = None,
pending_docs_to_delete: Opt[int] = None,
visibility_delay: Opt[int] = None,
realtime_pending_pre_transactions: Opt[int] = None,
realtime_pending_transactions: Opt[int] = None,
realtime_disk_space_used: Opt[int] = None,
realtime_total_memory_used: Opt[MemoryBreakdown] = None,
realtime_document_count: Opt[int] = None,
realtime_document_total_size: Opt[int] = None,
realtime_pending_docs_to_add: Opt[int] = None,
realtime_pending_docs_to_update: Opt[int] = None,
realtime_pending_docs_to_delete: Opt[int] = None,
realtime_visibility_delay: Opt[int] = None,
fragmentation_level: Opt[int] = None,
last_commit: Opt[datetime] = None,
sources: Opt[List[IndexSourceStatus]] = None,
slices: Opt[List[IndexSliceStatus]] = None,
resident_set_size: Opt[int] = None,
virtual_memory_size: Opt[int] = None,
peak_resident_set_size: Opt[int] = None,
peak_virtual_memory_size: Opt[int] = None,
total_physical_memory: Opt[int] = None,
total_disk_space: Opt[int] = None,
total_ocr_pages: Opt[int] = None,
documents_fragmentation: Opt[int] = None,
) -> None:
"""
Parameters:
pending_pre_transactions: Total number of pending pre-transactions waiting to be optimized
pending_transactions: Total number of pending transactions waiting to be applied
disk_space_used: Total disk space used by the index for all slices
remaining_disk_space: The remaining disk space on the index drive.
total_memory_used: Total memory used by all the index structures (excluding realtime indexing)
document_count: Total number of documents in the index
document_total_size: Total size of documents in the index
pending_docs_to_add: Total number of documents waiting to be added in transactions
pending_docs_to_update: Total number of documents waiting to be updated in transactions
pending_docs_to_delete: Total number of documents waiting to be deleted in transactions
visibility_delay: Time in seconds between creating and applying the most recent transaction
realtime_pending_pre_transactions: Total number of pending pre-transactions waiting to be optimized in the realtime portion of the index
realtime_pending_transactions: Total number of pending transactions waiting to be applied in the realtime portion of the index
realtime_disk_space_used: Total disk space used by the realtime portion of the index
realtime_total_memory_used: Total memory used by all the realtime index structures
realtime_document_count: Total number of documents in the realtime portion of the index
realtime_document_total_size: Total size of documents in the realtime portion of the index
realtime_pending_docs_to_add: Total number of documents waiting to be added in transactions in the realtime portion of the index
realtime_pending_docs_to_update: Total number of documents waiting to be updated in transactions in the realtime portion of the index
realtime_pending_docs_to_delete: Total number of documents waiting to be deleted in transactions in the realtime portion of the index
realtime_visibility_delay: Time in seconds between creating and applying the most recent transaction in the realtime portion of the index
fragmentation_level: Index fragmentation level
last_commit: Date/time of last Commit operation in Indexer blade (for pre-transactions)
sources: Status of each index source
slices: Status of each index slice
resident_set_size: Resident set size for the process
virtual_memory_size: Virtual memory size for the process
peak_resident_set_size: Peak resident set size for the process
peak_virtual_memory_size: Peak virtual memory size for the process
total_physical_memory: Total physical memory on the server
total_disk_space: Total disk space on the index drive
total_ocr_pages: The total number of pages that were extracted by the OCR module for documents
documents_fragmentation: Percentage between 0 and 100 of fragmentation for document-based structures
"""
class IndexMetricOperation(JidEnumFlag):
"""Type of operation that can send metrics in the Indexer blade
Attributes:
Added: One or more documents were added
Updated: One or more documents were updated
Deleted: One or more documents were deleted
"""
Added: int = auto()
Updated: int = auto()
Deleted: int = auto()
class IndexMetricStatus(JidEnumFlag):
"""Status of an Indexer blade metric, depending on where it is sent in the indexing process
Attributes:
Received: Operation was received by the Indexer blade and added to a transaction
Finished: Operation has been applied to the index and is now completed
"""
Received: int = auto()
Finished: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class BaseIndexerMetric(MetricEntry, hint="Coveo.IndexTracking.BaseIndexerMetric"):
"""Base metric entry class used for all metric entries sent by the Indexer blade
Attributes:
instance_id: ID of the Indexer instance sending the metric
"""
instance_id: Opt[str] = None
def __init__(self, *, instance_id: Opt[str] = None) -> None:
"""
Parameters:
instance_id: ID of the Indexer instance sending the metric
"""
@attrs(kw_only=True, auto_attribs=True)
class IndexMetric(BaseIndexerMetric, hint="Coveo.IndexTracking.IndexMetric"):
"""Metric entry sent by the Indexer blade during indexing operations
Attributes:
source_operation_id: ID of source operation bound to the metric
slice_id: ID of slice that received the operation
collection_id: ID of collection containing affected source
source_id: ID of affected source
operation: Type of operation performed
status: Status of the operation (depending on where it is sent in the indexing process)
count: Number of affected documents
size: Total size of affected documents
error: Error message if the operation failed
"""
source_operation_id: Opt[str] = None
slice_id: Opt[int] = None
collection_id: Opt[int] = None
source_id: Opt[int] = None
operation: Opt[IndexMetricOperation] = None
status: Opt[IndexMetricStatus] = None
count: Opt[int] = None
size: Opt[int] = None
error: Opt[str] = None
def __init__(
self,
*,
source_operation_id: Opt[str] = None,
slice_id: Opt[int] = None,
collection_id: Opt[int] = None,
source_id: Opt[int] = None,
operation: Opt[IndexMetricOperation] = None,
status: Opt[IndexMetricStatus] = None,
count: Opt[int] = None,
size: Opt[int] = None,
error: Opt[str] = None,
) -> None:
"""
Parameters:
source_operation_id: ID of source operation bound to the metric
slice_id: ID of slice that received the operation
collection_id: ID of collection containing affected source
source_id: ID of affected source
operation: Type of operation performed
status: Status of the operation (depending on where it is sent in the indexing process)
count: Number of affected documents
size: Total size of affected documents
error: Error message if the operation failed
"""
@attrs(kw_only=True, auto_attribs=True)
class QueryMetric(BaseIndexerMetric, hint="Coveo.IndexTracking.QueryMetric"):
"""Metric entry sent by the Indexer blade during queries
Attributes:
matches: Number of matching documents
filtered_matches: Number of matching documents once filtering has been performed
duration: Total query duration, in seconds
query_cpu_time: Duration of the actual query execution, excluding waiting time before being processed
"""
matches: Opt[int] = None
filtered_matches: Opt[int] = None
duration: Opt[float] = None
query_cpu_time: Opt[float] = attrib(default=None, metadata={CASING: "QueryCPUTime"})
def __init__(
self,
*,
matches: Opt[int] = None,
filtered_matches: Opt[int] = None,
duration: Opt[float] = None,
query_cpu_time: Opt[float] = attrib(default=None, metadata={CASING: "QueryCPUTime"}),
) -> None:
"""
Parameters:
matches: Number of matching documents
filtered_matches: Number of matching documents once filtering has been performed
duration: Total query duration, in seconds
query_cpu_time: Duration of the actual query execution, excluding waiting time before being processed
"""
@attrs(kw_only=True, auto_attribs=True)
class IntervalQueryMetric(BaseIndexerMetric, hint="Coveo.IndexTracking.IntervalQueryMetric"):
"""Query metrics for an interval of time
Attributes:
nb_query: Number of queries for that time period
avg_duration: Average query duration, in seconds
"""
nb_query: Opt[int] = None
avg_duration: Opt[float] = None
def __init__(self, *, nb_query: Opt[int] = None, avg_duration: Opt[float] = None) -> None:
"""
Parameters:
nb_query: Number of queries for that time period
avg_duration: Average query duration, in seconds
"""
```
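The tracking types are likewise plain data classes. A hypothetical `IndexMetric` reporting that a batch of documents was received for a source might look like this (the import path follows the file header and all identifiers are placeholders):

```python
from extensionRunner.cdf.index_tracking import (
    IndexMetric,
    IndexMetricOperation,
    IndexMetricStatus,
)

# 25 documents (1 MiB in total) received for source 3 of collection 1.
metric = IndexMetric(
    instance_id="indexer-01",   # placeholder indexer instance id
    collection_id=1,
    source_id=3,
    operation=IndexMetricOperation.Added,
    status=IndexMetricStatus.Received,
    count=25,
    size=1_048_576,
)
```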
#### File: extensionRunner/cdf/job_service.py
```python
from attr import attrib, attrs
from enum import auto
from typing import Dict, Optional as Opt
from .root import CASING, CoveoInterface, JidEnumFlag, JidType, api
from .config_definition import Parameter
class JobStatus(JidEnumFlag):
"""Possible status of the Jobs.
Attributes:
Ready: The Job is still pending for execution.
Running: The Job is currently running.
Done: The Job is completed.
Error: The Job failed to be executed.
Interrupted: A resumable Job was interrupted.
"""
Ready: int = auto()
Running: int = auto()
Done: int = auto()
Error: int = auto()
Interrupted: int = auto()
class JobPriority(JidEnumFlag):
"""Priority level associated to a job."""
Highest: int = auto()
High: int = auto()
Normal: int = auto()
Low: int = auto()
Lowest: int = auto()
class JobInterrupt(JidEnumFlag):
"""Possible interruptions that can be received after a heatbeat.
Attributes:
Continue: Continue current operation
Stop: Stop
Pause: Pause
Dump: Dump
"""
Continue: int = auto()
Stop: int = auto()
Pause: int = auto()
Dump: int = auto()
class JobHandlerType(JidEnumFlag):
"""Possible interruptions that can be received after a heatbeat.
Attributes:
Clustered: Hosted in the cloud cluster
Dedicated: Hosted in a dedicated cloud instance
OnPremises: Hosted on-premises
ClusteredTask: Hosted in the cloud cluster in task mode
"""
Clustered: int = auto()
Dedicated: int = auto()
OnPremises: int = auto()
ClusteredTask: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class JobHandlerInformation(JidType, hint="Coveo.JobService.JobHandlerInformation"):
"""Job Handler Information. Contains informations on the physical machine the handler is running on.
Attributes:
name: The instance name of the Job Handler.
host_name: The name of the host machine the job handler is running on.
assembly_version: The version of the Job Handler's DLLs.
"""
name: Opt[str] = None
host_name: Opt[str] = None
assembly_version: Opt[str] = None
def __init__(self, *, name: Opt[str] = None, host_name: Opt[str] = None, assembly_version: Opt[str] = None) -> None:
"""
Parameters:
name: The instance name of the Job Handler.
host_name: The name of the host machine the job handler is running on.
assembly_version: The version of the Job Handler's DLLs.
"""
@attrs(kw_only=True, auto_attribs=True)
class JobHeartbeat(JidType, hint="Coveo.JobService.JobHeartbeat"):
"""A structure that represents a Job Heartbeat.
Attributes:
job_handler_information: Information about the Job Handler currently running the job.
status: The status of the job.
metrics: Job metrics collected by the job handler.
"""
job_handler_information: Opt[JobHandlerInformation] = None
status: Opt[JobStatus] = None
metrics: Opt[Dict[str, str]] = None
def __init__(
self,
*,
job_handler_information: Opt[JobHandlerInformation] = None,
status: Opt[JobStatus] = None,
metrics: Opt[Dict[str, str]] = None,
) -> None:
"""
Parameters:
job_handler_information: Information about the Job Handler currently running the job.
status: The status of the job.
metrics: Job metrics collected by the job handler.
"""
@attrs(kw_only=True, auto_attribs=True)
class JobHandlerConfig(JidType, hint="Coveo.JobService.JobHandlerConfig"):
"""Job Handler Configuration
Attributes:
organization_id: The organization Id that this Job Handler is dedicated to, if any.
instance_id: The instance Id that this Job Handler is dedicated to, if any.
service_uri: The Uri of the service the Job Handler must use.
node_agent_uri: The Uri of the Node Agent used to create crawler instances.
node_agent_blob_store_uri: The Uri of the BlobStore set on the Node Agent to download packages.
node_manager_uri: The Uri of the Node Manager used to update packages.
api_key: The API Key used to authenticate to the service.
polling_interval: Interval, in seconds, at which the handler polls for a pending job to execute.
heartbeat_interval: Interval, in seconds, at which the handler reports status of a job being executed.
parameters: Additional parameters
"""
organization_id: Opt[str] = None
instance_id: Opt[str] = None
service_uri: Opt[str] = None
node_agent_uri: Opt[str] = None
node_agent_blob_store_uri: Opt[str] = None
node_manager_uri: Opt[str] = None
api_key: Opt[str] = attrib(default=None, metadata={CASING: "APIKey"})
polling_interval: int = 5
heartbeat_interval: int = 30
parameters: Opt[Dict[str, Parameter]] = None
def __init__(
self,
*,
organization_id: Opt[str] = None,
instance_id: Opt[str] = None,
service_uri: Opt[str] = None,
node_agent_uri: Opt[str] = None,
node_agent_blob_store_uri: Opt[str] = None,
node_manager_uri: Opt[str] = None,
api_key: Opt[str] = attrib(default=None, metadata={CASING: "APIKey"}),
polling_interval: int = 5,
heartbeat_interval: int = 30,
parameters: Opt[Dict[str, Parameter]] = None,
) -> None:
"""
Parameters:
organization_id: The organization Id that this Job Handler is dedicated to, if any.
instance_id: The instance Id that this Job Handler is dedicated to, if any.
service_uri: The Uri of the service the Job Handler must use.
node_agent_uri: The Uri of the Node Agent used to create crawler instances.
node_agent_blob_store_uri: The Uri of the BlobStore set on the Node Agent to download packages.
node_manager_uri: The Uri of the Node Manager used to update packages.
api_key: The API Key used to authenticate to the service.
polling_interval: Interval, in seconds, at which the handler polls for a pending job to execute.
heartbeat_interval: Interval, in seconds, at which the handler reports status of a job being executed.
parameters: Additional parameters
"""
class IJobHandler(CoveoInterface):
@api("GET/config")
def get_config(self) -> JobHandlerConfig:
...
@api("PUT/config")
def set_config(self, *, config: JobHandlerConfig) -> None:
...
```
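For illustration, the configuration and heartbeat structures above can be built directly, since the attrs decorators generate keyword-only constructors. The sketch below assumes the modules are importable as the `extensionRunner.cdf` package; all concrete values (organization, URIs, metric names) are hypothetical.

```python
# A minimal sketch; values are hypothetical placeholders.
from extensionRunner.cdf.job_service import (
    JobHandlerConfig,
    JobHandlerInformation,
    JobHeartbeat,
    JobStatus,
)

config = JobHandlerConfig(
    organization_id="my-org",             # hypothetical organization
    service_uri="https://jobs.example",   # hypothetical endpoint
    polling_interval=10,                  # poll for pending jobs every 10 s
    heartbeat_interval=60,                # report job status every 60 s
)

heartbeat = JobHeartbeat(
    job_handler_information=JobHandlerInformation(name="handler-1", host_name="worker-01"),
    status=JobStatus.Running,
    metrics={"documents_processed": "42"},
)

print(config.polling_interval, heartbeat.status)
```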
#### File: extensionRunner/cdf/logger.py
```python
from attr import attrs
from datetime import datetime
from enum import auto
from typing import Dict, Optional as Opt
from .root import CoveoInterface, ExceptionBase, JidEnumFlag, JidType
class SeverityType(JidEnumFlag):
"""Defines log severities.
Attributes:
Notification: Remark: The 'Notification' severity cannot be filtered out.
"""
Debug: int = auto()
Detail: int = auto()
Normal: int = auto()
Important: int = auto()
Warning: int = auto()
Error: int = auto()
Fatal: int = auto()
Notification: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class LogEntry(JidType, hint="Coveo.LogEntry"):
"""A structure that represents a log entry.
Attributes:
severity: The severity.
comment: The main comment/description/message.
fields: A collection of fields and values that provide additional information.
date: The creation date time
duration: The duration
"""
severity: SeverityType = SeverityType.Normal
comment: Opt[str] = None
fields: Opt[Dict[str, str]] = None
date: Opt[datetime] = None
duration: Opt[float] = None
def __init__(
self,
*,
severity: SeverityType = SeverityType.Normal,
comment: Opt[str] = None,
fields: Opt[Dict[str, str]] = None,
date: Opt[datetime] = None,
duration: Opt[float] = None,
) -> None:
"""
Parameters:
severity: The severity.
comment: The main comment/description/message.
fields: A collection of fields and values that provide additional information.
date: The creation date time
duration: The duration
"""
class ILog(CoveoInterface):
"""The logger API exposes methods to add, update and close log entries."""
@attrs(kw_only=True, auto_attribs=True)
class LogException(ExceptionBase, hint="Coveo.LogException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidLogEntryException(LogException, hint="Coveo.InvalidLogEntryException"):
def __init__(self) -> None:
...
```
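A LogEntry is filled in the same way; an ILog implementation would consume entries shaped like the one below. This is only a construction sketch with hypothetical field values, assuming the package is importable.

```python
# A minimal sketch; comment, fields and duration are hypothetical.
from datetime import datetime
from extensionRunner.cdf.logger import LogEntry, SeverityType

entry = LogEntry(
    severity=SeverityType.Warning,
    comment="Extension took longer than expected",
    fields={"extension_id": "my-extension", "document": "http://example.com/a"},
    date=datetime.now(),
    duration=2.5,  # seconds
)
print(entry.severity, entry.comment)
```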
#### File: extensionRunner/cdf/metrics_service.py
```python
from attr import attrs
from typing import Optional as Opt
from .tracking import MetricEntry
@attrs(kw_only=True, auto_attribs=True)
class QueryMetric(MetricEntry, hint="Coveo.MetricsService.QueryMetric"):
"""A structure that represents a query metric."""
mirror: Opt[str] = None
user: Opt[str] = None
query: Opt[str] = None
nb_results: Opt[int] = None
duration: Opt[float] = None
def __init__(
self,
*,
mirror: Opt[str] = None,
user: Opt[str] = None,
query: Opt[str] = None,
nb_results: Opt[int] = None,
duration: Opt[float] = None,
) -> None:
...
```
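A quick construction sketch for this QueryMetric variant; it inherits further fields from MetricEntry in `.tracking`, which are assumed here to also default to None. The values are hypothetical.

```python
# A minimal sketch; assumes MetricEntry's inherited fields are optional as well.
from extensionRunner.cdf.metrics_service import QueryMetric

metric = QueryMetric(
    mirror="mirror-01",
    user="alice@example.com",
    query="@uri",
    nb_results=10,
    duration=0.25,  # seconds
)
print(metric.nb_results, metric.duration)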
#### File: extensionRunner/cdf/native_field_service.py
```python
from attr import attrs
from typing import List
from .root import CoveoInterface, ExceptionBase, api
from .indexer_config import Field
@attrs(kw_only=True, auto_attribs=True)
class ServiceException(ExceptionBase, hint="Coveo.NativeFieldService.ServiceException"):
def __init__(self) -> None:
...
class INativeFieldService(CoveoInterface):
"""Interface used to retrieve the fields of an organization"""
@api("GET/internal/organizations/{organization_id}/native/fields", organization_id="organizationId")
def get_fields(self, *, organization_id: str) -> List[Field]:
"""Get all the fields for an organization.
Parameters:
organization_id: The id of the organization.
"""
```
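Since INativeFieldService declares a single method, a duck-typed stand-in is enough for local testing. The sketch below only mirrors the `get_fields` call shape; it deliberately avoids the CoveoInterface machinery and uses plain strings in place of real `Field` objects, so every name in it is hypothetical.

```python
# A minimal, self-contained sketch: a fake field service with the same call shape.
from typing import Dict, List

class FakeNativeFieldService:
    """Returns canned field lists per organization (strings stand in for Field objects)."""

    def __init__(self, fields_by_org: Dict[str, List[str]]) -> None:
        self._fields_by_org = fields_by_org

    def get_fields(self, *, organization_id: str) -> List[str]:
        return self._fields_by_org.get(organization_id, [])

service = FakeNativeFieldService({"my-org": ["@title", "@uri"]})
print(service.get_fields(organization_id="my-org"))
```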
#### File: extensionRunner/cdf/prometheus.py
```python
from .root import CoveoInterface, api
class IPrometheus(CoveoInterface):
"""The Prometheus interface"""
@api("GET/metrics")
def generate_metrics(self) -> str:
"""Returns the metrics"""
```
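The IPrometheus contract is a single string-returning endpoint. The sketch below shows what a compliant payload could look like in the Prometheus text exposition format; the class, metric name and value are hypothetical and not tied to the CoveoInterface machinery.

```python
# A minimal sketch of the generate_metrics() call shape.
class StaticPrometheus:
    def generate_metrics(self) -> str:
        # Prometheus text exposition format: HELP/TYPE comments followed by samples.
        return "\n".join([
            "# HELP jobs_running Number of jobs currently running.",
            "# TYPE jobs_running gauge",
            "jobs_running 3",
        ])

print(StaticPrometheus().generate_metrics())
```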
#### File: extensionRunner/cdf/queue_rabbit_admin_service.py
```python
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import Any, Dict, List, Optional as Opt
from .root import CASING, CoveoInterface, JidEnumFlag, JidType, api
@attrs(kw_only=True, auto_attribs=True)
class ExchangeType(JidType, hint="Coveo.QueueRabbitAdmin.ExchangeType"):
"""A structure that represents an exchange type.
Attributes:
name: The exchange type's name. Used as Id.
description: The description of the exchange.
enabled: Indicates if the exchange type is enabled.
"""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
description: Opt[str] = attrib(default=None, metadata={CASING: "description"})
enabled: Opt[bool] = attrib(default=None, metadata={CASING: "enabled"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
description: Opt[str] = attrib(default=None, metadata={CASING: "description"}),
enabled: Opt[bool] = attrib(default=None, metadata={CASING: "enabled"}),
) -> None:
"""
Parameters:
name: The exchange type's name. Used as Id.
description: The description of the exchange.
enabled: Indicates if the exchange type is enabled.
"""
class NodeType(JidEnumFlag):
"""Defines the various types of node."""
ram: int = auto()
disc: int = auto()
class DestinationType(JidEnumFlag):
"""Defines the destinations available to a binding."""
queue: int = auto()
exchange: int = auto()
class StatisticsLevel(JidEnumFlag):
"""Defines the granularity of statistics events.
Attributes:
none: Do not emit statistics events.
coarse: Emit per-queue / per-channel / per-connection statistics events.
fine: Also emit per-message statistics events.
"""
none: int = auto()
coarse: int = auto()
fine: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class MessagesDetails(JidType, hint="Coveo.QueueRabbitAdmin.MessagesDetails"):
"""A structure that contains a snapshot of the current message activity.
Attributes:
rate: Indicates the number of messages since the last 'Interval' passed.
interval: The interval used to calculate the 'Rate'.
last_event: The time elapsed since the last message.
"""
rate: Opt[float] = attrib(default=None, metadata={CASING: "rate"})
interval: Opt[int] = attrib(default=None, metadata={CASING: "interval"})
last_event: Opt[int] = attrib(default=None, metadata={CASING: "last_event"})
def __init__(
self,
*,
rate: Opt[float] = attrib(default=None, metadata={CASING: "rate"}),
interval: Opt[int] = attrib(default=None, metadata={CASING: "interval"}),
last_event: Opt[int] = attrib(default=None, metadata={CASING: "last_event"}),
) -> None:
"""
Parameters:
rate: Indicates the number of messages since the last 'Interval' passed.
interval: The interval used to calculate the 'Rate'.
last_event: The time elapsed since the last message.
"""
@attrs(kw_only=True, auto_attribs=True)
class MessageStats(JidType, hint="Coveo.QueueRabbitAdmin.MessageStats"):
"""A structure that contains statistics about message activity."""
ack: Opt[int] = attrib(default=None, metadata={CASING: "ack"})
ack_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "ack_details"})
confirm: Opt[int] = attrib(default=None, metadata={CASING: "confirm"})
confirm_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "confirm_details"})
deliver: Opt[int] = attrib(default=None, metadata={CASING: "deliver"})
deliver_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "deliver_details"})
deliver_get: Opt[int] = attrib(default=None, metadata={CASING: "deliver_get"})
deliver_get_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "deliver_get_details"})
deliver_no_ack: Opt[int] = attrib(default=None, metadata={CASING: "deliver_no_ack"})
deliver_no_ack_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "deliver_no_ack_details"})
get: Opt[int] = None
get_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "get_details"})
get_no_ack: Opt[int] = attrib(default=None, metadata={CASING: "get_no_ack"})
get_no_ack_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "get_no_ack_details"})
publish: Opt[int] = attrib(default=None, metadata={CASING: "publish"})
publish_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_details"})
publish_in: Opt[int] = attrib(default=None, metadata={CASING: "publish_in"})
publish_in_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_in_details"})
publish_out: Opt[int] = attrib(default=None, metadata={CASING: "publish_out"})
publish_out_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_out_details"})
redeliver: Opt[int] = attrib(default=None, metadata={CASING: "redeliver"})
redeliver_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "redeliverDetails"})
return_unroutable: Opt[int] = attrib(default=None, metadata={CASING: "return_unroutable"})
return_unroutable_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "return_unroutable_details"}
)
def __init__(
self,
*,
ack: Opt[int] = attrib(default=None, metadata={CASING: "ack"}),
ack_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "ack_details"}),
confirm: Opt[int] = attrib(default=None, metadata={CASING: "confirm"}),
confirm_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "confirm_details"}),
deliver: Opt[int] = attrib(default=None, metadata={CASING: "deliver"}),
deliver_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "deliver_details"}),
deliver_get: Opt[int] = attrib(default=None, metadata={CASING: "deliver_get"}),
deliver_get_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "deliver_get_details"}),
deliver_no_ack: Opt[int] = attrib(default=None, metadata={CASING: "deliver_no_ack"}),
deliver_no_ack_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "deliver_no_ack_details"}
),
get: Opt[int] = None,
get_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "get_details"}),
get_no_ack: Opt[int] = attrib(default=None, metadata={CASING: "get_no_ack"}),
get_no_ack_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "get_no_ack_details"}),
publish: Opt[int] = attrib(default=None, metadata={CASING: "publish"}),
publish_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_details"}),
publish_in: Opt[int] = attrib(default=None, metadata={CASING: "publish_in"}),
publish_in_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_in_details"}),
publish_out: Opt[int] = attrib(default=None, metadata={CASING: "publish_out"}),
publish_out_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "publish_out_details"}),
redeliver: Opt[int] = attrib(default=None, metadata={CASING: "redeliver"}),
redeliver_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "redeliverDetails"}),
return_unroutable: Opt[int] = attrib(default=None, metadata={CASING: "return_unroutable"}),
return_unroutable_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "return_unroutable_details"}
),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Listener(JidType, hint="Coveo.QueueRabbitAdmin.Listener"):
"""A structure that contains information about a listener.
Attributes:
node: The name of the node being listened.
protocol: The name of the protocol used.
ip_address: The IP address of the listener.
port: The port of the listener.
"""
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
protocol: Opt[str] = attrib(default=None, metadata={CASING: "protocol"})
ip_address: Opt[str] = attrib(default=None, metadata={CASING: "ip_address"})
port: Opt[int] = attrib(default=None, metadata={CASING: "port"})
def __init__(
self,
*,
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
protocol: Opt[str] = attrib(default=None, metadata={CASING: "protocol"}),
ip_address: Opt[str] = attrib(default=None, metadata={CASING: "ip_address"}),
port: Opt[int] = attrib(default=None, metadata={CASING: "port"}),
) -> None:
"""
Parameters:
node: The name of the node being listened.
protocol: The name of the protocol used.
ip_address: The IP address of the listener.
port: The port of the listener.
"""
@attrs(kw_only=True, auto_attribs=True)
class Context(JidType, hint="Coveo.QueueRabbitAdmin.Context"):
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
description: Opt[str] = attrib(default=None, metadata={CASING: "description"})
path: Opt[str] = attrib(default=None, metadata={CASING: "path"})
port: Opt[int] = attrib(default=None, metadata={CASING: "port"})
ignore_in_use: Opt[bool] = attrib(default=None, metadata={CASING: "ignore_in_use"})
def __init__(
self,
*,
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
description: Opt[str] = attrib(default=None, metadata={CASING: "description"}),
path: Opt[str] = attrib(default=None, metadata={CASING: "path"}),
port: Opt[int] = attrib(default=None, metadata={CASING: "port"}),
ignore_in_use: Opt[bool] = attrib(default=None, metadata={CASING: "ignore_in_use"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class AuthMechanism(JidType, hint="Coveo.QueueRabbitAdmin.AuthMechanism"):
"""A structure that contains information about an authentication mechanism.
Attributes:
name: The name of the authentication mechanism to use. Built-in mechanisms are PLAIN, AMQPLAIN and RABBIT-CR-DEMO. EXTERNAL is also available and defines a mechanism that is implemented by a plugin. See http://www.rabbitmq.com/authentication.html
"""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
description: Opt[str] = attrib(default=None, metadata={CASING: "description"})
enabled: Opt[bool] = attrib(default=None, metadata={CASING: "enabled"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
description: Opt[str] = attrib(default=None, metadata={CASING: "description"}),
enabled: Opt[bool] = attrib(default=None, metadata={CASING: "enabled"}),
) -> None:
"""
Parameters:
name: The name of the authentication mechanism to use. Built-in mechanisms are PLAIN, AMQPLAIN and RABBIT-CR-DEMO. EXTERNAL is also available and defines a mechanism that is implemented by a plugin. See http://www.rabbitmq.com/authentication.html
"""
@attrs(kw_only=True, auto_attribs=True)
class Application(JidType, hint="Coveo.QueueRabbitAdmin.Application"):
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
description: Opt[str] = attrib(default=None, metadata={CASING: "description"})
version: Opt[str] = attrib(default=None, metadata={CASING: "version"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
description: Opt[str] = attrib(default=None, metadata={CASING: "description"}),
version: Opt[str] = attrib(default=None, metadata={CASING: "version"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class QueueTotals(JidType, hint="Coveo.QueueRabbitAdmin.QueueTotals"):
"""A structure that contains statistics about the state of queued messages."""
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"})
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_details"})
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"})
messages_ready_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_ready_details"})
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"})
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
)
def __init__(
self,
*,
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"}),
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_details"}),
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"}),
messages_ready_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_ready_details"}
),
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"}),
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ObjectTotals(JidType, hint="Coveo.QueueRabbitAdmin.ObjectTotals"):
"""A structure that contains various high-level totals."""
consumers: Opt[int] = attrib(default=None, metadata={CASING: "consumers"})
queues: Opt[int] = attrib(default=None, metadata={CASING: "queues"})
exchanges: Opt[int] = attrib(default=None, metadata={CASING: "exchanges"})
connections: Opt[int] = attrib(default=None, metadata={CASING: "connections"})
channels: Opt[int] = attrib(default=None, metadata={CASING: "channels"})
def __init__(
self,
*,
consumers: Opt[int] = attrib(default=None, metadata={CASING: "consumers"}),
queues: Opt[int] = attrib(default=None, metadata={CASING: "queues"}),
exchanges: Opt[int] = attrib(default=None, metadata={CASING: "exchanges"}),
connections: Opt[int] = attrib(default=None, metadata={CASING: "connections"}),
channels: Opt[int] = attrib(default=None, metadata={CASING: "channels"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Overview(JidType, hint="Coveo.QueueRabbitAdmin.Overview"):
"""A structure that represents a top-down overview of a server, which includes information about its configuration and state as well as statistics about message activity.
Attributes:
management_version: The version of the management plugin.
statistics_level: The granularity of statistics.
message_stats: Contains statistics about server-wide message activity. Totals are cumulative, much like what can be seen in RabbitMQ's HTML admin.
queue_totals: Contains a server-wide snapshot of the state of queued messages.
object_totals: Contains the current total of various RabbitMQ objects.
node: This node's name.
statistics_db_node: The name of the statistics database node.
"""
management_version: Opt[str] = attrib(default=None, metadata={CASING: "management_version"})
statistics_level: Opt[StatisticsLevel] = attrib(default=None, metadata={CASING: "statistics_level"})
exchange_types: Opt[List[ExchangeType]] = attrib(default=None, metadata={CASING: "exchange_types"})
rabbitmq_version: Opt[str] = attrib(default=None, metadata={CASING: "rabbitmq_version"})
erlang_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_version"})
erlang_full_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_full_version"})
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
queue_totals: Opt[QueueTotals] = attrib(default=None, metadata={CASING: "queue_totals"})
object_totals: Opt[ObjectTotals] = attrib(default=None, metadata={CASING: "object_totals"})
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
statistics_db_node: Opt[str] = attrib(default=None, metadata={CASING: "statistics_db_node"})
listeners: Opt[List[Listener]] = attrib(default=None, metadata={CASING: "listeners"})
contexts: Opt[List[Context]] = attrib(default=None, metadata={CASING: "contexts"})
def __init__(
self,
*,
management_version: Opt[str] = attrib(default=None, metadata={CASING: "management_version"}),
statistics_level: Opt[StatisticsLevel] = attrib(default=None, metadata={CASING: "statistics_level"}),
exchange_types: Opt[List[ExchangeType]] = attrib(default=None, metadata={CASING: "exchange_types"}),
rabbitmq_version: Opt[str] = attrib(default=None, metadata={CASING: "rabbitmq_version"}),
erlang_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_version"}),
erlang_full_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_full_version"}),
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"}),
queue_totals: Opt[QueueTotals] = attrib(default=None, metadata={CASING: "queue_totals"}),
object_totals: Opt[ObjectTotals] = attrib(default=None, metadata={CASING: "object_totals"}),
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
statistics_db_node: Opt[str] = attrib(default=None, metadata={CASING: "statistics_db_node"}),
listeners: Opt[List[Listener]] = attrib(default=None, metadata={CASING: "listeners"}),
contexts: Opt[List[Context]] = attrib(default=None, metadata={CASING: "contexts"}),
) -> None:
"""
Parameters:
management_version: The version of the management plugin.
statistics_level: The granularity of statistics.
message_stats: Contains statistics about server-wide message activity. Totals are cumulative, much like what can be seen in RabbitMQ's HTML admin.
queue_totals: Contains a server-wide snapshot of the state of queued messages.
object_totals: Contains the current total of various RabbitMQ objects.
node: This node's name.
statistics_db_node: The name of the statistics database node.
"""
@attrs(kw_only=True, auto_attribs=True)
class Partition(JidType, hint="Coveo.QueueRabbitAdmin.Partition"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Node(JidType, hint="Coveo.QueueRabbitAdmin.Node"):
"""A structure that contains information about a node.
Attributes:
name: The name of the node. Used as Id.
"""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
type_: Opt[NodeType] = attrib(default=None, metadata={CASING: "type"})
running: Opt[bool] = attrib(default=None, metadata={CASING: "running"})
os_pid: Opt[int] = attrib(default=None, metadata={CASING: "os_pid"})
mem_ets: Opt[int] = attrib(default=None, metadata={CASING: "mem_ets"})
mem_binary: Opt[int] = attrib(default=None, metadata={CASING: "mem_binary"})
mem_proc: Opt[int] = attrib(default=None, metadata={CASING: "mem_proc"})
mem_proc_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_procUsed"})
mem_atom: Opt[int] = attrib(default=None, metadata={CASING: "mem_atom"})
mem_atom_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_atomUsed"})
mem_code: Opt[int] = attrib(default=None, metadata={CASING: "mem_code"})
fd_used: Opt[str] = attrib(default=None, metadata={CASING: "fd_used"})
fd_total: Opt[int] = attrib(default=None, metadata={CASING: "fd_total"})
sockets_used: Opt[int] = attrib(default=None, metadata={CASING: "sockets_used"})
sockets_total: Opt[int] = attrib(default=None, metadata={CASING: "sockets_total"})
mem_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_used"})
mem_limit: Opt[int] = attrib(default=None, metadata={CASING: "mem_limit"})
mem_alarm: Opt[bool] = attrib(default=None, metadata={CASING: "mem_alarm"})
disk_free_limit: Opt[int] = attrib(default=None, metadata={CASING: "disk_free_limit"})
disk_free: Opt[int] = attrib(default=None, metadata={CASING: "disk_free"})
disk_free_alarm: Opt[bool] = attrib(default=None, metadata={CASING: "disk_free_alarm"})
proc_used: Opt[int] = attrib(default=None, metadata={CASING: "proc_used"})
proc_total: Opt[int] = attrib(default=None, metadata={CASING: "proc_total"})
statistics_level: Opt[StatisticsLevel] = attrib(default=None, metadata={CASING: "statistics_level"})
erlang_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_version"})
uptime: Opt[int] = attrib(default=None, metadata={CASING: "uptime"})
run_queue: Opt[int] = attrib(default=None, metadata={CASING: "run_queue"})
processors: Opt[int] = attrib(default=None, metadata={CASING: "processors"})
partitions: Opt[List[Partition]] = attrib(default=None, metadata={CASING: "partitions"})
exchange_types: Opt[List[ExchangeType]] = attrib(default=None, metadata={CASING: "exchange_types"})
auth_mechanisms: Opt[List[AuthMechanism]] = attrib(default=None, metadata={CASING: "auth_mechanisms"})
applications: Opt[List[Application]] = attrib(default=None, metadata={CASING: "applications"})
contexts: Opt[List[Context]] = attrib(default=None, metadata={CASING: "contexts"})
external_stats_not_running: Opt[bool] = attrib(default=None, metadata={CASING: "external_stats_not_running"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
type_: Opt[NodeType] = attrib(default=None, metadata={CASING: "type"}),
running: Opt[bool] = attrib(default=None, metadata={CASING: "running"}),
os_pid: Opt[int] = attrib(default=None, metadata={CASING: "os_pid"}),
mem_ets: Opt[int] = attrib(default=None, metadata={CASING: "mem_ets"}),
mem_binary: Opt[int] = attrib(default=None, metadata={CASING: "mem_binary"}),
mem_proc: Opt[int] = attrib(default=None, metadata={CASING: "mem_proc"}),
mem_proc_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_procUsed"}),
mem_atom: Opt[int] = attrib(default=None, metadata={CASING: "mem_atom"}),
mem_atom_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_atomUsed"}),
mem_code: Opt[int] = attrib(default=None, metadata={CASING: "mem_code"}),
fd_used: Opt[str] = attrib(default=None, metadata={CASING: "fd_used"}),
fd_total: Opt[int] = attrib(default=None, metadata={CASING: "fd_total"}),
sockets_used: Opt[int] = attrib(default=None, metadata={CASING: "sockets_used"}),
sockets_total: Opt[int] = attrib(default=None, metadata={CASING: "sockets_total"}),
mem_used: Opt[int] = attrib(default=None, metadata={CASING: "mem_used"}),
mem_limit: Opt[int] = attrib(default=None, metadata={CASING: "mem_limit"}),
mem_alarm: Opt[bool] = attrib(default=None, metadata={CASING: "mem_alarm"}),
disk_free_limit: Opt[int] = attrib(default=None, metadata={CASING: "disk_free_limit"}),
disk_free: Opt[int] = attrib(default=None, metadata={CASING: "disk_free"}),
disk_free_alarm: Opt[bool] = attrib(default=None, metadata={CASING: "disk_free_alarm"}),
proc_used: Opt[int] = attrib(default=None, metadata={CASING: "proc_used"}),
proc_total: Opt[int] = attrib(default=None, metadata={CASING: "proc_total"}),
statistics_level: Opt[StatisticsLevel] = attrib(default=None, metadata={CASING: "statistics_level"}),
erlang_version: Opt[str] = attrib(default=None, metadata={CASING: "erlang_version"}),
uptime: Opt[int] = attrib(default=None, metadata={CASING: "uptime"}),
run_queue: Opt[int] = attrib(default=None, metadata={CASING: "run_queue"}),
processors: Opt[int] = attrib(default=None, metadata={CASING: "processors"}),
partitions: Opt[List[Partition]] = attrib(default=None, metadata={CASING: "partitions"}),
exchange_types: Opt[List[ExchangeType]] = attrib(default=None, metadata={CASING: "exchange_types"}),
auth_mechanisms: Opt[List[AuthMechanism]] = attrib(default=None, metadata={CASING: "auth_mechanisms"}),
applications: Opt[List[Application]] = attrib(default=None, metadata={CASING: "applications"}),
contexts: Opt[List[Context]] = attrib(default=None, metadata={CASING: "contexts"}),
external_stats_not_running: Opt[bool] = attrib(default=None, metadata={CASING: "external_stats_not_running"}),
) -> None:
"""
Parameters:
name: The name of the node. Used as Id.
"""
@attrs(kw_only=True, auto_attribs=True)
class Permission(JidType, hint="Coveo.QueueRabbitAdmin.Permission"):
"""A structure that represents a user permission. See http://www.rabbitmq.com/access-control.html for detailed information.
Attributes:
vhost: The name of the virtual host.
user: The name of the user.
configure: A regex used to identify queues and exchanges that can be configured by the user.
write: A regex used to identify queues and exchanges that the user is allowed to inject messages into.
read: A regex used to identify queues and exchanges that can be read (i.e.: message get) by the user.
"""
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"})
user: Opt[str] = attrib(default=None, metadata={CASING: "user"})
configure: Opt[str] = attrib(default=None, metadata={CASING: "configure"})
write: Opt[str] = attrib(default=None, metadata={CASING: "write"})
read: Opt[str] = attrib(default=None, metadata={CASING: "read"})
def __init__(
self,
*,
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"}),
user: Opt[str] = attrib(default=None, metadata={CASING: "user"}),
configure: Opt[str] = attrib(default=None, metadata={CASING: "configure"}),
write: Opt[str] = attrib(default=None, metadata={CASING: "write"}),
read: Opt[str] = attrib(default=None, metadata={CASING: "read"}),
) -> None:
"""
Parameters:
vhost: The name of the virtual host.
user: The name of the user.
configure: A regex used to identify queues and exchanges that can be configured by the user.
write: A regex used to identify queues and exchanges that the user is allowed to inject messages into.
read: A regex used to identify queues and exchanges that can be read (i.e.: message get) by the user.
"""
@attrs(kw_only=True, auto_attribs=True)
class Capabilities(JidType, hint="Coveo.QueueRabbitAdmin.Capabilities"):
"""A structure that declares the capabilities of a client."""
publisher_confirms: Opt[bool] = attrib(default=None, metadata={CASING: "publisher_confirms"})
exchange_exchange_bindings: Opt[bool] = attrib(default=None, metadata={CASING: "exchange_exchange_bindings"})
consumer_cancel_notify: Opt[bool] = attrib(default=None, metadata={CASING: "consumer_cancel_notify"})
basic_nack: Opt[bool] = attrib(default=None, metadata={CASING: "basic_nack"})
def __init__(
self,
*,
publisher_confirms: Opt[bool] = attrib(default=None, metadata={CASING: "publisher_confirms"}),
exchange_exchange_bindings: Opt[bool] = attrib(default=None, metadata={CASING: "exchange_exchange_bindings"}),
consumer_cancel_notify: Opt[bool] = attrib(default=None, metadata={CASING: "consumer_cancel_notify"}),
basic_nack: Opt[bool] = attrib(default=None, metadata={CASING: "basic_nack"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ClientProperties(JidType, hint="Coveo.QueueRabbitAdmin.ClientProperties"):
"""A structure that contains identification information about a client."""
platform: Opt[str] = attrib(default=None, metadata={CASING: "platform"})
product: Opt[str] = attrib(default=None, metadata={CASING: "product"})
capabilities: Opt[Capabilities] = attrib(default=None, metadata={CASING: "capabilities"})
copyright_: Opt[str] = attrib(default=None, metadata={CASING: "copyright"})
information: Opt[str] = attrib(default=None, metadata={CASING: "information"})
version: Opt[str] = attrib(default=None, metadata={CASING: "version"})
def __init__(
self,
*,
platform: Opt[str] = attrib(default=None, metadata={CASING: "platform"}),
product: Opt[str] = attrib(default=None, metadata={CASING: "product"}),
capabilities: Opt[Capabilities] = attrib(default=None, metadata={CASING: "capabilities"}),
copyright_: Opt[str] = attrib(default=None, metadata={CASING: "copyright"}),
information: Opt[str] = attrib(default=None, metadata={CASING: "information"}),
version: Opt[str] = attrib(default=None, metadata={CASING: "version"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Connection(JidType, hint="Coveo.QueueRabbitAdmin.Connection"):
"""A structure that contains information and statistics about an open connection.
Attributes:
name: The name of the connection. Used as Id.
"""
recv_oct: Opt[int] = attrib(default=None, metadata={CASING: "recv_oct"})
recv_oct_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "recv_oct_details"})
recv_cnt: Opt[int] = attrib(default=None, metadata={CASING: "recv_cnt"})
send_oct: Opt[int] = attrib(default=None, metadata={CASING: "send_oct"})
send_oct_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "send_oct_details"})
send_cnt: Opt[int] = attrib(default=None, metadata={CASING: "send_cnt"})
send_pend: Opt[int] = attrib(default=None, metadata={CASING: "send_pend"})
state: Opt[str] = attrib(default=None, metadata={CASING: "state"})
last_blocked_by: Opt[str] = attrib(default=None, metadata={CASING: "last_blocked_by"})
last_blocked_age: Opt[str] = attrib(default=None, metadata={CASING: "last_blocked_age"})
channels: Opt[int] = attrib(default=None, metadata={CASING: "channels"})
type_: Opt[str] = attrib(default=None, metadata={CASING: "type"})
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
host: Opt[str] = attrib(default=None, metadata={CASING: "host"})
port: Opt[int] = attrib(default=None, metadata={CASING: "port"})
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"})
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"})
ssl: Opt[bool] = attrib(default=None, metadata={CASING: "ssl"})
peer_cert_subject: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_subject"})
peer_cert_issuer: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_issuer"})
peer_cert_validity: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_validity"})
auth_mechanism: Opt[str] = attrib(default=None, metadata={CASING: "auth_mechanism"})
ssl_protocol: Opt[str] = attrib(default=None, metadata={CASING: "ssl_protocol"})
ssl_key_exchange: Opt[str] = attrib(default=None, metadata={CASING: "ssl_key_exchange"})
ssl_cipher: Opt[str] = attrib(default=None, metadata={CASING: "ssl_cipher"})
ssl_hash: Opt[str] = attrib(default=None, metadata={CASING: "ssl_hash"})
protocol: Opt[str] = attrib(default=None, metadata={CASING: "protocol"})
user: Opt[str] = attrib(default=None, metadata={CASING: "user"})
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"})
timeout: Opt[int] = attrib(default=None, metadata={CASING: "timeout"})
frame_max: Opt[int] = attrib(default=None, metadata={CASING: "frame_max"})
client_properties: Opt[ClientProperties] = attrib(default=None, metadata={CASING: "client_properties"})
def __init__(
self,
*,
recv_oct: Opt[int] = attrib(default=None, metadata={CASING: "recv_oct"}),
recv_oct_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "recv_oct_details"}),
recv_cnt: Opt[int] = attrib(default=None, metadata={CASING: "recv_cnt"}),
send_oct: Opt[int] = attrib(default=None, metadata={CASING: "send_oct"}),
send_oct_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "send_oct_details"}),
send_cnt: Opt[int] = attrib(default=None, metadata={CASING: "send_cnt"}),
send_pend: Opt[int] = attrib(default=None, metadata={CASING: "send_pend"}),
state: Opt[str] = attrib(default=None, metadata={CASING: "state"}),
last_blocked_by: Opt[str] = attrib(default=None, metadata={CASING: "last_blocked_by"}),
last_blocked_age: Opt[str] = attrib(default=None, metadata={CASING: "last_blocked_age"}),
channels: Opt[int] = attrib(default=None, metadata={CASING: "channels"}),
type_: Opt[str] = attrib(default=None, metadata={CASING: "type"}),
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
host: Opt[str] = attrib(default=None, metadata={CASING: "host"}),
port: Opt[int] = attrib(default=None, metadata={CASING: "port"}),
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"}),
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"}),
ssl: Opt[bool] = attrib(default=None, metadata={CASING: "ssl"}),
peer_cert_subject: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_subject"}),
peer_cert_issuer: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_issuer"}),
peer_cert_validity: Opt[str] = attrib(default=None, metadata={CASING: "peer_cert_validity"}),
auth_mechanism: Opt[str] = attrib(default=None, metadata={CASING: "auth_mechanism"}),
ssl_protocol: Opt[str] = attrib(default=None, metadata={CASING: "ssl_protocol"}),
ssl_key_exchange: Opt[str] = attrib(default=None, metadata={CASING: "ssl_key_exchange"}),
ssl_cipher: Opt[str] = attrib(default=None, metadata={CASING: "ssl_cipher"}),
ssl_hash: Opt[str] = attrib(default=None, metadata={CASING: "ssl_hash"}),
protocol: Opt[str] = attrib(default=None, metadata={CASING: "protocol"}),
user: Opt[str] = attrib(default=None, metadata={CASING: "user"}),
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"}),
timeout: Opt[int] = attrib(default=None, metadata={CASING: "timeout"}),
frame_max: Opt[int] = attrib(default=None, metadata={CASING: "frame_max"}),
client_properties: Opt[ClientProperties] = attrib(default=None, metadata={CASING: "client_properties"}),
) -> None:
"""
Parameters:
name: The name of the connection. Used as Id.
"""
@attrs(kw_only=True, auto_attribs=True)
class ConnectionDetails(JidType, hint="Coveo.QueueRabbitAdmin.ConnectionDetails"):
"""A structure that contains the name and address of a connection."""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"})
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"}),
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Channel(JidType, hint="Coveo.QueueRabbitAdmin.Channel"):
"""A structure that contains information about a channel."""
connection_details: Opt[ConnectionDetails] = attrib(default=None, metadata={CASING: "connection_details"})
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
idle_since: Opt[datetime] = attrib(default=None, metadata={CASING: "idle_since"})
transactional: Opt[bool] = attrib(default=None, metadata={CASING: "transactional"})
confirm: Opt[bool] = attrib(default=None, metadata={CASING: "confirm"})
consumer_count: Opt[int] = attrib(default=None, metadata={CASING: "consumer_count"})
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"})
messages_unconfirmed: Opt[int] = attrib(default=None, metadata={CASING: "messages_unconfirmed"})
messages_uncommitted: Opt[int] = attrib(default=None, metadata={CASING: "messages_uncommitted"})
acks_uncommitted: Opt[int] = attrib(default=None, metadata={CASING: "acks_uncommitted"})
prefetch_count: Opt[int] = attrib(default=None, metadata={CASING: "prefetch_count"})
client_flow_blocked: Opt[bool] = attrib(default=None, metadata={CASING: "client_flow_blocked"})
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
number: Opt[int] = attrib(default=None, metadata={CASING: "number"})
user: Opt[str] = attrib(default=None, metadata={CASING: "user"})
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"})
def __init__(
self,
*,
connection_details: Opt[ConnectionDetails] = attrib(default=None, metadata={CASING: "connection_details"}),
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"}),
idle_since: Opt[datetime] = attrib(default=None, metadata={CASING: "idle_since"}),
transactional: Opt[bool] = attrib(default=None, metadata={CASING: "transactional"}),
confirm: Opt[bool] = attrib(default=None, metadata={CASING: "confirm"}),
consumer_count: Opt[int] = attrib(default=None, metadata={CASING: "consumer_count"}),
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"}),
messages_unconfirmed: Opt[int] = attrib(default=None, metadata={CASING: "messages_unconfirmed"}),
messages_uncommitted: Opt[int] = attrib(default=None, metadata={CASING: "messages_uncommitted"}),
acks_uncommitted: Opt[int] = attrib(default=None, metadata={CASING: "acks_uncommitted"}),
prefetch_count: Opt[int] = attrib(default=None, metadata={CASING: "prefetch_count"}),
client_flow_blocked: Opt[bool] = attrib(default=None, metadata={CASING: "client_flow_blocked"}),
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
number: Opt[int] = attrib(default=None, metadata={CASING: "number"}),
user: Opt[str] = attrib(default=None, metadata={CASING: "user"}),
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ExchangeDef(JidType, hint="Coveo.QueueRabbitAdmin.ExchangeDef"):
"""A structure that represents an exchange definition, which can be used to create an exchange.
Attributes:
name: The name of the exchange. Used as Id.
auto_delete: Indicates if the exchange will be deleted automatically once all queues have finished using it.
"""
vhost: str = attrib(default="/", metadata={CASING: "vhost"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
type_: Opt[str] = attrib(default=None, metadata={CASING: "type"})
durable: bool = attrib(default=True, metadata={CASING: "durable"})
auto_delete: Opt[bool] = attrib(default=None, metadata={CASING: "auto_delete"})
internal: Opt[bool] = None
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"})
def __init__(
self,
*,
vhost: str = attrib(default="/", metadata={CASING: "vhost"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
type_: Opt[str] = attrib(default=None, metadata={CASING: "type"}),
durable: bool = attrib(default=True, metadata={CASING: "durable"}),
auto_delete: Opt[bool] = attrib(default=None, metadata={CASING: "auto_delete"}),
internal: Opt[bool] = None,
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"}),
) -> None:
"""
Parameters:
name: The name of the exchange. Used as Id.
auto_delete: Indicates if the exchange will be deleted automatically once all queues have finished using it.
"""
@attrs(kw_only=True, auto_attribs=True)
class Exchange(ExchangeDef, hint="Coveo.QueueRabbitAdmin.Exchange"):
"""A structure that represents an exchange."""
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
def __init__(
self, *, message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class BindingDef(JidType, hint="Coveo.QueueRabbitAdmin.BindingDef"):
"""A structure that represents a binding definition, which can be used to create a binding.
Attributes:
source: The name of the source exchange.
destination: The name of the destination queue or exchange.
"""
vhost: str = attrib(default="/", metadata={CASING: "vhost"})
source: Opt[str] = attrib(default=None, metadata={CASING: "source"})
destination: Opt[str] = attrib(default=None, metadata={CASING: "destination"})
destination_type: Opt[DestinationType] = attrib(default=None, metadata={CASING: "destination_type"})
routing_key: Opt[str] = attrib(default=None, metadata={CASING: "routing_key"})
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"})
def __init__(
self,
*,
vhost: str = attrib(default="/", metadata={CASING: "vhost"}),
source: Opt[str] = attrib(default=None, metadata={CASING: "source"}),
destination: Opt[str] = attrib(default=None, metadata={CASING: "destination"}),
destination_type: Opt[DestinationType] = attrib(default=None, metadata={CASING: "destination_type"}),
routing_key: Opt[str] = attrib(default=None, metadata={CASING: "routing_key"}),
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"}),
) -> None:
"""
Parameters:
source: The name of the source exchange.
destination: The name of the destination queue or exchange.
"""
@attrs(kw_only=True, auto_attribs=True)
class Binding(BindingDef, hint="Coveo.QueueRabbitAdmin.Binding"):
"""A structure that represents a binding."""
properties_key: Opt[str] = attrib(default=None, metadata={CASING: "properties_key"})
def __init__(self, *, properties_key: Opt[str] = attrib(default=None, metadata={CASING: "properties_key"})) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class BackingQueueStatus(JidType, hint="Coveo.QueueRabbitAdmin.BackingQueueStatus"):
"""A structure that contains information and statistics about a backing queue."""
q1: Opt[int] = attrib(default=None, metadata={CASING: "q1"})
q2: Opt[int] = attrib(default=None, metadata={CASING: "q2"})
delta: Opt[List[Any]] = attrib(default=None, metadata={CASING: "delta"})
q3: Opt[int] = attrib(default=None, metadata={CASING: "q3"})
q4: Opt[int] = attrib(default=None, metadata={CASING: "q4"})
len_: Opt[int] = attrib(default=None, metadata={CASING: "len"})
pending_acks: Opt[int] = attrib(default=None, metadata={CASING: "pending_acks"})
target_ram_count: Opt[str] = attrib(default=None, metadata={CASING: "target_ram_count"})
ram_msg_count: Opt[int] = attrib(default=None, metadata={CASING: "ram_msg_count"})
ram_ack_count: Opt[int] = attrib(default=None, metadata={CASING: "ram_ack_count"})
next_seq_id: Opt[int] = attrib(default=None, metadata={CASING: "next_seq_id"})
persistent_count: Opt[int] = attrib(default=None, metadata={CASING: "persistent_count"})
avg_ingress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ingress_rate"})
avg_egress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_egress_rate"})
avg_ack_ingress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ack_ingress_rate"})
avg_ack_egress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ack_egress_rate"})
mirror_seen: Opt[int] = attrib(default=None, metadata={CASING: "mirror_seen"})
mirror_senders: Opt[int] = attrib(default=None, metadata={CASING: "mirror_senders"})
def __init__(
self,
*,
q1: Opt[int] = attrib(default=None, metadata={CASING: "q1"}),
q2: Opt[int] = attrib(default=None, metadata={CASING: "q2"}),
delta: Opt[List[Any]] = attrib(default=None, metadata={CASING: "delta"}),
q3: Opt[int] = attrib(default=None, metadata={CASING: "q3"}),
q4: Opt[int] = attrib(default=None, metadata={CASING: "q4"}),
len_: Opt[int] = attrib(default=None, metadata={CASING: "len"}),
pending_acks: Opt[int] = attrib(default=None, metadata={CASING: "pending_acks"}),
target_ram_count: Opt[str] = attrib(default=None, metadata={CASING: "target_ram_count"}),
ram_msg_count: Opt[int] = attrib(default=None, metadata={CASING: "ram_msg_count"}),
ram_ack_count: Opt[int] = attrib(default=None, metadata={CASING: "ram_ack_count"}),
next_seq_id: Opt[int] = attrib(default=None, metadata={CASING: "next_seq_id"}),
persistent_count: Opt[int] = attrib(default=None, metadata={CASING: "persistent_count"}),
avg_ingress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ingress_rate"}),
avg_egress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_egress_rate"}),
avg_ack_ingress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ack_ingress_rate"}),
avg_ack_egress_rate: Opt[float] = attrib(default=None, metadata={CASING: "avg_ack_egress_rate"}),
mirror_seen: Opt[int] = attrib(default=None, metadata={CASING: "mirror_seen"}),
mirror_senders: Opt[int] = attrib(default=None, metadata={CASING: "mirror_senders"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ChannelDetails(JidType, hint="Coveo.QueueRabbitAdmin.ChannelDetails"):
"""A structure that contains information about a channel.
Attributes:
name: The name of the channel. Used as Id.
connection_name: The name of the connection used by this channel.
"""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
number: Opt[int] = attrib(default=None, metadata={CASING: "number"})
connection_name: Opt[str] = attrib(default=None, metadata={CASING: "connection_name"})
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"})
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
number: Opt[int] = attrib(default=None, metadata={CASING: "number"}),
connection_name: Opt[str] = attrib(default=None, metadata={CASING: "connection_name"}),
peer_host: Opt[str] = attrib(default=None, metadata={CASING: "peer_host"}),
peer_port: Opt[int] = attrib(default=None, metadata={CASING: "peer_port"}),
) -> None:
"""
Parameters:
name: The name of the channel. Used as Id.
connection_name: The name of the connection used by this channel.
"""
@attrs(kw_only=True, auto_attribs=True)
class NameVhostDetails(JidType, hint="Coveo.QueueRabbitAdmin.NameVhostDetails"):
"""A structure that identifies a named resource on a vhost.
Attributes:
name: The named resource.
vhost: The virtual host where the resource is located.
"""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
vhost: Opt[str] = attrib(default=None, metadata={CASING: "vhost"}),
) -> None:
"""
Parameters:
name: The named resource.
vhost: The virtual host where the resource is located.
"""
@attrs(kw_only=True, auto_attribs=True)
class ConsumerDetails(JidType, hint="Coveo.QueueRabbitAdmin.ConsumerDetails"):
"""A structure that contains details about a consumer."""
channel_details: Opt[ChannelDetails] = attrib(default=None, metadata={CASING: "channel_details"})
queue_details: Opt[NameVhostDetails] = attrib(default=None, metadata={CASING: "queue_details"})
consumer_tag: Opt[str] = attrib(default=None, metadata={CASING: "consumer_tag"})
exclusive: Opt[bool] = attrib(default=None, metadata={CASING: "exclusive"})
ack_required: Opt[bool] = attrib(default=None, metadata={CASING: "ack_required"})
def __init__(
self,
*,
channel_details: Opt[ChannelDetails] = attrib(default=None, metadata={CASING: "channel_details"}),
queue_details: Opt[NameVhostDetails] = attrib(default=None, metadata={CASING: "queue_details"}),
consumer_tag: Opt[str] = attrib(default=None, metadata={CASING: "consumer_tag"}),
exclusive: Opt[bool] = attrib(default=None, metadata={CASING: "exclusive"}),
ack_required: Opt[bool] = attrib(default=None, metadata={CASING: "ack_required"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class IncomingDetails(JidType, hint="Coveo.QueueRabbitAdmin.IncomingDetails"):
"""A structure that contains information and statistics about incoming messages."""
stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "stats"})
exchange: Opt[NameVhostDetails] = attrib(default=None, metadata={CASING: "exchange"})
def __init__(
self,
*,
stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "stats"}),
exchange: Opt[NameVhostDetails] = attrib(default=None, metadata={CASING: "exchange"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class DeliveriesDetails(JidType, hint="Coveo.QueueRabbitAdmin.DeliveriesDetails"):
"""A structure that contains information and statistics about delivered messages."""
stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "stats"})
channel_details: Opt[ChannelDetails] = attrib(default=None, metadata={CASING: "channel_details"})
def __init__(
self,
*,
stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "stats"}),
channel_details: Opt[ChannelDetails] = attrib(default=None, metadata={CASING: "channel_details"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class QueueDef(JidType, hint="Coveo.QueueRabbitAdmin.QueueDef"):
"""A structure that represents a queue definition, which can be used to create a queue.
Attributes:
name: The name of the queue. Used as Id.
durable: Indicates if the queue is durable. Durable queues are not lost when RabbitMQ is shut down. This setting has no impact on individual message durability, which is set on a per-message basis. Default is true.
auto_delete: Indicates if the queue is to be deleted automatically once the last consumer unsubscribes.
"""
vhost: str = attrib(default="/", metadata={CASING: "vhost"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
durable: bool = attrib(default=True, metadata={CASING: "durable"})
auto_delete: Opt[bool] = attrib(default=None, metadata={CASING: "auto_delete"})
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"})
node: Opt[str] = attrib(default=None, metadata={CASING: "node"})
def __init__(
self,
*,
vhost: str = attrib(default="/", metadata={CASING: "vhost"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
durable: bool = attrib(default=True, metadata={CASING: "durable"}),
auto_delete: Opt[bool] = attrib(default=None, metadata={CASING: "auto_delete"}),
arguments: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "arguments"}),
node: Opt[str] = attrib(default=None, metadata={CASING: "node"}),
) -> None:
"""
Parameters:
name: The name of the queue. Used as Id.
durable: Indicates if the queue is durable. Durable queues are not lost when RabbitMQ is shut down. This setting has no impact on individual message durability, which is set on a per-message basis. Default is true.
auto_delete: Indicates if the queue is to be deleted automatically once the last consumer unsubscribes.
"""
@attrs(kw_only=True, auto_attribs=True)
class Queue(QueueDef, hint="Coveo.QueueRabbitAdmin.Queue"):
"""A structure that contains information and statistics about a queue."""
memory: Opt[int] = attrib(default=None, metadata={CASING: "memory"})
owner_pid_details: Opt[ConnectionDetails] = attrib(default=None, metadata={CASING: "owner_pid_details"})
idle_since: Opt[datetime] = attrib(default=None, metadata={CASING: "idle_since"})
policy: Opt[str] = attrib(default=None, metadata={CASING: "policy"})
exclusive_consumer_tag: Opt[str] = attrib(default=None, metadata={CASING: "exclusive_consumer_tag"})
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"})
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_details"})
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"})
messages_ready_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_ready_details"})
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"})
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
)
consumers: Opt[int] = attrib(default=None, metadata={CASING: "consumers"})
active_consumers: Opt[int] = attrib(default=None, metadata={CASING: "active_consumers"})
slave_nodes: Opt[List[str]] = attrib(default=None, metadata={CASING: "slave_nodes"})
synchronised_slave_nodes: Opt[List[str]] = attrib(default=None, metadata={CASING: "synchronised_slave_nodes"})
backing_queue_status: Opt[BackingQueueStatus] = attrib(default=None, metadata={CASING: "backing_queue_status"})
incoming: Opt[List[IncomingDetails]] = attrib(default=None, metadata={CASING: "incoming"})
deliveries: Opt[List[DeliveriesDetails]] = attrib(default=None, metadata={CASING: "deliveries"})
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
consumer_details: Opt[List[ConsumerDetails]] = attrib(default=None, metadata={CASING: "consumer_details"})
status: Opt[str] = attrib(default=None, metadata={CASING: "status"})
sync_messages: Opt[int] = attrib(default=None, metadata={CASING: "sync_messages"})
effective_policy_definition: Opt[Dict[str, Any]] = attrib(
default=None, metadata={CASING: "effective_policy_definition"}
)
def __init__(
self,
*,
memory: Opt[int] = attrib(default=None, metadata={CASING: "memory"}),
owner_pid_details: Opt[ConnectionDetails] = attrib(default=None, metadata={CASING: "owner_pid_details"}),
idle_since: Opt[datetime] = attrib(default=None, metadata={CASING: "idle_since"}),
policy: Opt[str] = attrib(default=None, metadata={CASING: "policy"}),
exclusive_consumer_tag: Opt[str] = attrib(default=None, metadata={CASING: "exclusive_consumer_tag"}),
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"}),
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_details"}),
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"}),
messages_ready_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_ready_details"}
),
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"}),
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
),
consumers: Opt[int] = attrib(default=None, metadata={CASING: "consumers"}),
active_consumers: Opt[int] = attrib(default=None, metadata={CASING: "active_consumers"}),
slave_nodes: Opt[List[str]] = attrib(default=None, metadata={CASING: "slave_nodes"}),
synchronised_slave_nodes: Opt[List[str]] = attrib(default=None, metadata={CASING: "synchronised_slave_nodes"}),
backing_queue_status: Opt[BackingQueueStatus] = attrib(default=None, metadata={CASING: "backing_queue_status"}),
incoming: Opt[List[IncomingDetails]] = attrib(default=None, metadata={CASING: "incoming"}),
deliveries: Opt[List[DeliveriesDetails]] = attrib(default=None, metadata={CASING: "deliveries"}),
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"}),
consumer_details: Opt[List[ConsumerDetails]] = attrib(default=None, metadata={CASING: "consumer_details"}),
status: Opt[str] = attrib(default=None, metadata={CASING: "status"}),
sync_messages: Opt[int] = attrib(default=None, metadata={CASING: "sync_messages"}),
effective_policy_definition: Opt[Dict[str, Any]] = attrib(
default=None, metadata={CASING: "effective_policy_definition"}
),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class VhostDef(JidType, hint="Coveo.QueueRabbitAdmin.VhostDef"):
"""A structure that represents a virtual host definition, which can be used to create a virtual host."""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
def __init__(self, *, name: Opt[str] = attrib(default=None, metadata={CASING: "name"})) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class Vhost(VhostDef, hint="Coveo.QueueRabbitAdmin.Vhost"):
"""A structure that contains information and statistics about a virtual host.
Attributes:
tracing: Enables tracing, for debugging purposes. See http://www.rabbitmq.com/firehose.html
"""
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"})
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"})
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messagesDetails"})
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"})
messages_ready_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messages_ready_details"})
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"})
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
)
tracing: Opt[bool] = attrib(default=None, metadata={CASING: "tracing"})
def __init__(
self,
*,
message_stats: Opt[MessageStats] = attrib(default=None, metadata={CASING: "message_stats"}),
messages: Opt[int] = attrib(default=None, metadata={CASING: "messages"}),
messages_details: Opt[MessagesDetails] = attrib(default=None, metadata={CASING: "messagesDetails"}),
messages_ready: Opt[int] = attrib(default=None, metadata={CASING: "messages_ready"}),
messages_ready_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_ready_details"}
),
messages_unacknowledged: Opt[int] = attrib(default=None, metadata={CASING: "messages_unacknowledged"}),
messages_unacknowledged_details: Opt[MessagesDetails] = attrib(
default=None, metadata={CASING: "messages_unacknowledged_details"}
),
tracing: Opt[bool] = attrib(default=None, metadata={CASING: "tracing"}),
) -> None:
"""
Parameters:
tracing: Enables tracing, for debugging purposes. See http://www.rabbitmq.com/firehose.html
"""
@attrs(kw_only=True, auto_attribs=True)
class UserDef(JidType, hint="Coveo.QueueRabbitAdmin.UserDef"):
"""A structure that represents a user definition, which can be used to create a user."""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
password: Opt[str] = attrib(default=None, metadata={CASING: "password"})
password_hash: Opt[str] = attrib(default=None, metadata={CASING: "password_hash"})
tags: Opt[str] = attrib(default=None, metadata={CASING: "tags"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
password: Opt[str] = attrib(default=None, metadata={CASING: "password"}),
password_hash: Opt[str] = attrib(default=None, metadata={CASING: "password_hash"}),
tags: Opt[str] = attrib(default=None, metadata={CASING: "tags"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class User(UserDef, hint="Coveo.QueueRabbitAdmin.User"):
"""A structure that represents a user."""
auth_backend: Opt[str] = attrib(default=None, metadata={CASING: "auth_backend"})
def __init__(self, *, auth_backend: Opt[str] = attrib(default=None, metadata={CASING: "auth_backend"})) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class PolicyDef(JidType, hint="Coveo.QueueRabbitAdmin.PolicyDef"):
"""A structure that represents a policy.
Attributes:
name: The name of the policy. Used as Id.
"""
vhost: str = attrib(default="/", metadata={CASING: "vhost"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
pattern: Opt[str] = attrib(default=None, metadata={CASING: "pattern"})
definition: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "definition"})
priority: Opt[int] = attrib(default=None, metadata={CASING: "priority"})
def __init__(
self,
*,
vhost: str = attrib(default="/", metadata={CASING: "vhost"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
pattern: Opt[str] = attrib(default=None, metadata={CASING: "pattern"}),
definition: Opt[Dict[str, Any]] = attrib(default=None, metadata={CASING: "definition"}),
priority: Opt[int] = attrib(default=None, metadata={CASING: "priority"}),
) -> None:
"""
Parameters:
name: The name of the policy. Used as Id.
"""
@attrs(kw_only=True, auto_attribs=True)
class ParameterDef(JidType, hint="Coveo.QueueRabbitAdmin.ParameterDef"):
"""A structure that represents a parameter definition.
Attributes:
component: The name of the component to which this parameter applies.
name: The name of the parameter. Used as Id.
"""
vhost: str = attrib(default="/", metadata={CASING: "vhost"})
component: Opt[str] = attrib(default=None, metadata={CASING: "component"})
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
value: Opt[str] = attrib(default=None, metadata={CASING: "value"})
def __init__(
self,
*,
vhost: str = attrib(default="/", metadata={CASING: "vhost"}),
component: Opt[str] = attrib(default=None, metadata={CASING: "component"}),
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
value: Opt[str] = attrib(default=None, metadata={CASING: "value"}),
) -> None:
"""
Parameters:
component: The name of the component to which this parameter applies.
name: The name of the parameter. Used as Id.
"""
@attrs(kw_only=True, auto_attribs=True)
class Definitions(JidType, hint="Coveo.QueueRabbitAdmin.Definitions"):
"""A structure that contains various resource definitions."""
rabbit_version: Opt[str] = attrib(default=None, metadata={CASING: "rabbit_version"})
users: Opt[List[UserDef]] = attrib(default=None, metadata={CASING: "users"})
vhosts: Opt[List[VhostDef]] = attrib(default=None, metadata={CASING: "vhosts"})
permissions: Opt[List[Permission]] = attrib(default=None, metadata={CASING: "permissions"})
queues: Opt[List[QueueDef]] = attrib(default=None, metadata={CASING: "queues"})
exchanges: Opt[List[ExchangeDef]] = attrib(default=None, metadata={CASING: "exchanges"})
bindings: Opt[List[BindingDef]] = attrib(default=None, metadata={CASING: "bindings"})
policies: Opt[List[PolicyDef]] = attrib(default=None, metadata={CASING: "policies"})
parameters: Opt[List[ParameterDef]] = attrib(default=None, metadata={CASING: "parameters"})
def __init__(
self,
*,
rabbit_version: Opt[str] = attrib(default=None, metadata={CASING: "rabbit_version"}),
users: Opt[List[UserDef]] = attrib(default=None, metadata={CASING: "users"}),
vhosts: Opt[List[VhostDef]] = attrib(default=None, metadata={CASING: "vhosts"}),
permissions: Opt[List[Permission]] = attrib(default=None, metadata={CASING: "permissions"}),
queues: Opt[List[QueueDef]] = attrib(default=None, metadata={CASING: "queues"}),
exchanges: Opt[List[ExchangeDef]] = attrib(default=None, metadata={CASING: "exchanges"}),
bindings: Opt[List[BindingDef]] = attrib(default=None, metadata={CASING: "bindings"}),
policies: Opt[List[PolicyDef]] = attrib(default=None, metadata={CASING: "policies"}),
parameters: Opt[List[ParameterDef]] = attrib(default=None, metadata={CASING: "parameters"}),
) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class NodeDef(JidType, hint="Coveo.QueueRabbitAdmin.NodeDef"):
"""A structure that represents a node definition."""
name: Opt[str] = attrib(default=None, metadata={CASING: "name"})
machine: Opt[str] = attrib(default=None, metadata={CASING: "machine"})
type_: NodeType = attrib(default=NodeType.disc, metadata={CASING: "type"})
port: Opt[int] = attrib(default=None, metadata={CASING: "port"})
base_path: Opt[str] = attrib(default=None, metadata={CASING: "base_path"})
rabbit_server_path: Opt[str] = attrib(default=None, metadata={CASING: "rabbit_server_path"})
erlang_home: Opt[str] = attrib(default=None, metadata={CASING: "erlang_home"})
def __init__(
self,
*,
name: Opt[str] = attrib(default=None, metadata={CASING: "name"}),
machine: Opt[str] = attrib(default=None, metadata={CASING: "machine"}),
type_: NodeType = attrib(default=NodeType.disc, metadata={CASING: "type"}),
port: Opt[int] = attrib(default=None, metadata={CASING: "port"}),
base_path: Opt[str] = attrib(default=None, metadata={CASING: "base_path"}),
rabbit_server_path: Opt[str] = attrib(default=None, metadata={CASING: "rabbit_server_path"}),
erlang_home: Opt[str] = attrib(default=None, metadata={CASING: "erlang_home"}),
) -> None:
...
class IQueueAdmin(CoveoInterface):
"""The QueueAdmin API exposes methods used to simplify the interaction between a client application and a RabbitMQ server. Detailed documentation can be found on RabbitMQ's website, and a quick reference is also available by accessing http://localhost:15672/api"""
@api("GET/overview")
def get_overview(self) -> Overview:
"""Returns the overview of a server, which includes various information about its configuration and state, as well as statistics about message activity."""
@api("GET/nodes")
def get_nodes(self) -> List[Node]:
"""Returns all nodes."""
@api("GET/nodes/{name}", name="name")
def get_node(self, *, name: str) -> Node:
"""Returns a specific node.
Parameters:
name: The name of a node.
"""
@api("GET/extensions")
def get_extensions(self) -> List[Dict[str, str]]:
"""Returns the extensions to the management plugin."""
@api("GET/definitions")
def get_definitions(self) -> Definitions:
"""Returns the definitions of the objects on a server - exchanges, queues, bindings, users, virtual hosts and permissions. (i.e.: everything apart from messages.)"""
@api("POST/definitions", definitions="definitions")
def merge_definitions(self, *, definitions: Definitions) -> None:
"""Merges a new/updated set of definitions to a server. Existing definitions are left untouched unless they're redefined in the new/updated set.
Parameters:
definitions: A set of new or updated definitions.
"""
@api("GET/connections")
def get_connections(self) -> List[Connection]:
"""Returns all open connections."""
@api("GET/connections/{name}", name="name")
def get_connection(self, *, name: str) -> Connection:
"""Returns an open connection.
Parameters:
name: The name of an open connection.
"""
@api("DELETE/connections/{name}", name="name")
def delete_connection(self, *, name: str) -> None:
"""Closes a connection.
Parameters:
name: The name of an open connection.
"""
@api("GET/channels")
def get_channels(self) -> List[Channel]:
"""Returns all open channels."""
@api("GET/channels/{name}", name="name")
def get_channel(self, *, name: str) -> Channel:
"""Returns a specific channel.
Parameters:
name: The name of a channel.
"""
@api("GET/exchanges")
def get_exchanges(self) -> List[Exchange]:
"""Returns all exchanges."""
@api("GET/exchanges/{vhost}", vhost="vhost")
def get_exchanges_for_vhost(self, *, vhost: str) -> List[Exchange]:
"""Returns all exchanges from a virtual host.
Parameters:
vhost: The name of a virtual host.
"""
@api("GET/exchanges/{vhost}/{name},out:404}", vhost="vhost", name="name")
def get_exchange(self, *, vhost: str, name: str) -> Exchange:
"""Returns an exchange.
Parameters:
vhost: The name of a virtual host.
name: The name of an exchange.
"""
@api("PUT/exchanges/{vhost}/{name},in:*,out:204}")
def add_exchange(self, *, exchange_def: ExchangeDef) -> None:
"""Adds a new exchange."""
@api("DELETE/exchanges/{vhost}/{name},out:404>false,out:204>tru}", vhost="vhost", name="name")
def delete_exchange(self, *, vhost: str, name: str) -> bool:
"""Deletes an exchange.
Parameters:
vhost: The name of a virtual host.
name: The name of an exchange.
"""
@api("GET/exchanges/{vhost}/{name}/bindings/source", vhost="vhost", name="name")
def get_exchange_bindings_when_source(self, *, vhost: str, name: str) -> List[Binding]:
"""Returns all bindings that use a specific exchange as the source.
Parameters:
vhost: The name of a virtual host.
name: The name of an exchange.
"""
@api("GET/exchanges/{vhost}/{name}/bindings/destination", vhost="vhost", name="name")
def get_exchange_bindings_when_destination(self, *, vhost: str, name: str) -> List[Binding]:
"""Returns all bindings that use a specific exchange as the destination.
Parameters:
vhost: The name of a virtual host.
name: The name of an exchange.
"""
@api("GET/queues")
def get_queues(self) -> List[Queue]:
"""Returns all queues."""
@api("GET/queues?columns={Columns}")
def get_queues_ex(self, *, columns: str) -> List[Queue]:
"""Returns all queues.
Parameters:
columns: The list of comma-separated columns to get.
"""
@api("GET/queues/{vhost}", vhost="vhost")
def get_queues_for_vhost(self, *, vhost: str) -> List[Queue]:
"""Returns all queues from a virtual host.
Parameters:
vhost: The name of a virtual host.
"""
@api("GET/queues/{vhost}/{name},out:404}", vhost="vhost", name="name")
def get_queue(self, *, vhost: str, name: str) -> Queue:
"""Returns a specific queue.
Parameters:
vhost: The name of a virtual host.
name: The name of a queue.
"""
@api("PUT/queues/{vhost}/{name},in:*,out:204}")
def add_queue(self, *, queue_def: QueueDef) -> None:
"""Adds a new queue."""
@api("DELETE/queues/{vhost}/{name},out:404>false,out:204>tru}", vhost="vhost", name="name")
def delete_queue(self, *, vhost: str, name: str) -> bool:
"""Deletes a queue.
Parameters:
vhost: The name of a virtual host.
name: The name of a queue.
"""
@api("GET/queues/{vhost}/{name}/bindings", vhost="vhost", name="name")
def get_queue_bindings(self, *, vhost: str, name: str) -> List[Binding]:
"""Returns all bindings on a queue.
Parameters:
vhost: The name of a virtual host.
name: The name of a queue.
"""
@api("DELETE/queues/{vhost}/{name}/contents", vhost="vhost", name="name")
def delete_queue_contents(self, *, vhost: str, name: str) -> None:
"""Purges all messages within a queue.
Parameters:
vhost: The name of a virtual host.
name: The name of a queue.
"""
@api("GET/bindings")
def get_bindings(self) -> List[Binding]:
"""Returns all bindings."""
@api("GET/bindings/{vhost}", vhost="vhost")
def get_bindings_for_vhost(self, *, vhost: str) -> List[Binding]:
"""Returns all bindings from a virtual host.
Parameters:
vhost: The name of a virtual host.
"""
@api("GET/bindings/{vhost}/e/{exchange}/q/{queue}", vhost="vhost", exchange="exchange", queue="queue")
def get_bindings_for_exchange_and_queue(self, *, vhost: str, exchange: str, queue: str) -> List[Binding]:
"""Returns all bindings between an exchange and a queue. Remark: an exchange and a queue can be bound together multiple times.
Parameters:
vhost: The name of a virtual host.
exchange: The name of an exchange.
queue: The name of a queue.
"""
@api("GET/bindings/{vhost}/e/{exchange}/q/{queue}/~,out:404->", vhost="vhost", exchange="exchange", queue="queue")
def get_binding_for_exchange_and_queue(self, *, vhost: str, exchange: str, queue: str) -> Binding:
"""Returns the binding between an exchange and a queue with specific properties. Remark: an exchange and a queue can be bound together multiple times.
Parameters:
vhost: The name of a virtual host.
exchange: The name of an exchange.
queue: The name of a queue.
"""
@api("POST/bindings/{vhost}/e/{source}/q/{destination},in:}")
def add_binding(self, *, binding_def: BindingDef) -> None:
"""Adds a new binding."""
@api("DELETE/bindings/{vhost}/e/{exchange}/q/{queue}/~", vhost="vhost", exchange="exchange", queue="queue")
def delete_binding(self, *, vhost: str, exchange: str, queue: str) -> None:
"""Deletes a binding.
Parameters:
vhost: The name of a virtual host.
exchange: The name of an exchange.
queue: The name of a queue.
"""
@api("GET/vhosts")
def get_vhosts(self) -> List[Vhost]:
"""Returns all virtual hosts."""
@api("GET/vhosts/{name},out:404}", name="name")
def get_vhost(self, *, name: str) -> Vhost:
"""Returns a specific virtual host.
Parameters:
name: The name of a virtual host.
"""
@api("DELETE/vhosts/{name}", name="name")
def delete_vhost(self, *, name: str) -> None:
"""Deletes a virtual host.
Parameters:
name: The name of a virtual host.
"""
@api("GET/vhosts/{name}/permissions", name="name")
def get_vhost_permissions(self, *, name: str) -> List[Permission]:
"""Returns all permissions from a virtual host.
Parameters:
name: The name of a virtual host.
"""
@api("GET/users")
def get_users(self) -> List[User]:
"""Returns all users."""
@api("GET/users/{name}", name="name")
def get_user(self, *, name: str) -> User:
"""Returns a specific user.
Parameters:
name: The name of a user.
"""
@api("PUT/users/{name},in:}")
def add_user(self, *, user_def: UserDef) -> None:
"""Adds a new user."""
@api("DELETE/users/{name}", name="name")
def delete_user(self, *, name: str) -> None:
"""Deletes a user.
Parameters:
name: The name of a user.
"""
@api("GET/users/{name}/permissions", name="name")
def get_user_permissions(self, *, name: str) -> List[Permission]:
"""Returns all permissions for a specific user.
Parameters:
name: The name of a user.
"""
@api("GET/whoami")
def get_who_am_i(self) -> User:
"""Returns the currently authenticated user."""
@api("GET/permissions")
def get_permissions(self) -> List[Permission]:
"""Returns all permissions."""
@api("GET/permissions/{vhost}/{user}", vhost="vhost", user="user")
def get_vhost_user_permission(self, *, vhost: str, user: str) -> Permission:
"""Returns the permission of a user from a virtual host.
Parameters:
vhost: The name of a virtual host.
user: The name of a user.
"""
@api("PUT/permissions/{vhost}/{user}", vhost="vhost", user="user")
def add_vhost_user_permission(self, *, vhost: str, user: str, permission: Permission) -> None:
"""Adds a new permission.
Parameters:
vhost: The name of the virtual host.
user: The name of the user.
"""
@api("DELETE/permissions/{vhost}/{user}", vhost="vhost", user="user")
def delete_vhost_user_permission(self, *, vhost: str, user: str) -> None:
"""Deletes a permission.
Parameters:
vhost: The name of a virtual host.
user: The name of a user.
"""
@api("GET/policies")
def get_policies(self) -> List[PolicyDef]:
"""Returns all policies."""
@api("GET/policies/{vhost}", vhost="vhost")
def get_policies_for_vhost(self, *, vhost: str) -> List[PolicyDef]:
"""Returns all policies from a virtual host.
Parameters:
vhost: The name of a virtual host.
"""
@api("GET/policies/{vhost}/{name}", vhost="vhost", name="name")
def get_policy(self, *, vhost: str, name: str) -> PolicyDef:
"""Returns a specific policy.
Parameters:
vhost: The name of a virtual host.
name: The name of a policy.
"""
@api("PUT/policies/{vhost}/{name},in:}")
def add_policy(self, *, policy: PolicyDef) -> None:
"""Adds a new policy."""
@api("DELETE/policies/{vhost}/{name}", vhost="vhost", name="name")
def delete_policy(self, *, vhost: str, name: str) -> None:
"""Deletes a policy.
Parameters:
vhost: The name of a virtual host.
name: The name of a policy.
"""
@api("GET/parameters")
def get_parameters(self) -> List[ParameterDef]:
"""Returns the parameters of all components."""
@api("GET/parameters/{component}", component="component")
def get_parameters_for_component(self, *, component: str) -> List[ParameterDef]:
"""Returns the parameters of a component.
Parameters:
component: The name of a component.
"""
@api("GET/parameters/{component}/{vhost}", component="component", vhost="vhost")
def get_parameters_for_component_and_vhost(self, *, component: str, vhost: str) -> List[ParameterDef]:
"""Returns the parameters of a component from a virtual host.
Parameters:
component: The name of a component.
vhost: The name of a virtual host.
"""
@api("GET/parameters/{component}/{vhost}/{name}", component="component", vhost="vhost", name="name")
def get_parameter(self, *, component: str, vhost: str, name: str) -> ParameterDef:
"""Returns a component parameter.
Parameters:
component: The name of a component.
vhost: The name of a virtual host.
name: The name of a parameter.
"""
@api("PUT/parameters/{component}/{vhost}/{name},in:}")
def add_parameter(self, *, parameter: ParameterDef) -> None:
"""Adds a new parameter."""
@api("DELETE/parameters/{component}/{vhost}/{name}", component="component", vhost="vhost", name="name")
def delete_parameter(self, *, component: str, vhost: str, name: str) -> None:
"""Deletes a parameter.
Parameters:
component: The name of a component.
vhost: The name of a virtual host.
name: The name of a parameter.
"""
@attrs(kw_only=True, auto_attribs=True)
class ClusterDef(JidType, hint="Coveo.QueueRabbitAdmin.ClusterDef"):
"""A structure that represents a cluster definition."""
nodes: Opt[List[NodeDef]] = None
queues: Opt[List[QueueDef]] = None
cookie: Opt[str] = None
def __init__(
self, *, nodes: Opt[List[NodeDef]] = None, queues: Opt[List[QueueDef]] = None, cookie: Opt[str] = None
) -> None:
...
class IClusterAdmin(CoveoInterface):
"""The QueueAdmin API exposes methods used to simplify setup of a RabbitMQ cluster"""
```
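The `@api` routes above mirror the RabbitMQ management plugin's HTTP API (the quick reference mentioned at http://localhost:15672/api). As a hedged illustration only, the sketch below shows roughly the raw call that `add_queue` maps to, using `requests`; the host, credentials, and queue name are assumptions for illustration and are not taken from this code.
```python
import requests

# PUT /api/queues/{vhost}/{name} creates (or updates) a queue; the default
# vhost "/" must be URL-encoded as %2F in the path.
response = requests.put(
    "http://localhost:15672/api/queues/%2F/demo-queue",
    auth=("guest", "guest"),  # assumed default credentials
    json={"durable": True, "auto_delete": False, "arguments": {}},
)
response.raise_for_status()  # the management API answers 201/204 on success
```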
#### File: cdf/root/enum.py
```python
from enum import Flag
class JidEnumFlag(Flag):
"""Base class for generated flag classes. repr|str may be used for serialization."""
def __serialized(self) -> str:
serialized = []
for flag in self.__class__:
bit = flag & self
if bit:
serialized.append(bit.name)
return '+'.join(serialized).replace('None_', 'None')
def __repr__(self) -> str:
return self.__serialized()
def __str__(self) -> str:
return self.__serialized()
```
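A minimal usage sketch of the serialization behaviour above, using a hypothetical flag enum (the enum itself is not part of the generated code; the `cdf.root` re-export is assumed from the imports used elsewhere in this package): set members are joined with `+`, and the `None_` workaround name is rendered back as `None`.
```python
from enum import auto
from cdf.root import JidEnumFlag  # assumed re-export, as used by other generated modules

class DemoFlag(JidEnumFlag):  # hypothetical enum for illustration only
    None_ = auto()
    Read = auto()
    Write = auto()

print(str(DemoFlag.Read | DemoFlag.Write))  # Read+Write
print(repr(DemoFlag.None_))                 # None
```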
#### File: cdf/root/jid_type.py
```python
from abc import ABCMeta, abstractmethod
from typing import ClassVar, Dict, Type, Optional, Any, TypeVar, Tuple, Iterator, Mapping, Callable
import attr
from inflection import camelize
from corepyutils.itertools import filter_keys
from corepyutils.annotations import find_annotations
CASING = 'CASING'
T = TypeVar('T', bound='JidType')
class _JidTypeCache:
def __init__(self, cls: Type['JidType']):
# find_annotations() will find the class variables/etc as well.
# clean it by removing anything that attr doesn't know.
self.fields: Dict[str, Type] = dict(filter_keys(source=find_annotations(cls, globals()),
keys={field.name for field in attr.fields(cls)}))
# create snake_case -> CamelCase lookup.
self.from_snake: Dict[str, str] = {field.name: field.metadata.get(CASING, camelize(field.name))
for field in attr.fields(cls)}
class JidTypeNotFoundException(Exception):
...
class JidTypeInterface(metaclass=ABCMeta):
@abstractmethod
def as_dict(self) -> Dict[str, Any]:
"""Since dataclasses.asdict() isn't customizable, we need to provide our own."""
@classmethod
@abstractmethod
def get_jid_type(cls, jid_type_hint: str) -> Type['JidTypeInterface']:
"""Returns the correct jid type. (jid_type_hint = _type)"""
@attr.s
class JidType(JidTypeInterface):
"""Base class for generated data classes."""
__namespace: ClassVar[Dict[str, Type['JidType']]] = {}
__type: ClassVar[str]
__cache_storage: ClassVar[Optional[_JidTypeCache]]
__deserializer: ClassVar[Optional[Callable[..., Any]]] = None
# noinspection PyMethodOverriding
def __init_subclass__(cls, hint: str, **kwargs: Any) -> None:
"""Register the current class into the namespace."""
assert not kwargs
cls.__type = hint # each subclass gets its own __type.
cls.__cache_storage = None
cls.__register_subclass(hint, cls)
def __setattr__(self, key: str, value: Any) -> None:
"""JIT-deserializer"""
try:
super().__setattr__(key, self.__deserialize(self.__cache.fields[key], value))
except KeyError:
if hasattr(self, key):
raise # the key should work; not supposed to happen.
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute {key}") from None
@property
def __cache(self) -> _JidTypeCache:
return self.__get_cache()
def as_dict(self) -> Dict[str, Any]:
"""Since dataclasses.asdict() isn't customizable, we need to provide our own."""
to_translate = ((field_name, getattr(self, field_name)) for field_name in self.__cache.fields)
return dict(self.__translate(to_translate, lookup=self.__cache.from_snake), _type=self.__type)
@classmethod
def get_jid_type(cls, jid_type_hint: str) -> Type['JidType']:
"""Returns the correct jid type. (jid_type_hint = _type)"""
try:
return cls.__namespace[jid_type_hint]
except KeyError:
raise JidTypeNotFoundException(jid_type_hint)
def __deserialize(self, type_: Type, value: Any) -> Any:
"""Break the circular import cycle. :shame: """
cls = self.__class__
if cls.__deserializer is None:
from .deserializer import deserialize
cls.__deserializer = deserialize
assert cls.__deserializer is not None
return cls.__deserializer(value, hint=type_) # using self.__deserializer() here will break things
@classmethod
def __register_subclass(cls, hint: str, klass: Type) -> None:
"""Register a class into the namespace."""
assert hint not in cls.__namespace
cls.__namespace[hint] = klass
@classmethod
def __get_cache(cls) -> _JidTypeCache:
if cls.__cache_storage is None:
cls.__cache_storage = _JidTypeCache(cls)
assert cls.__cache_storage is not None
return cls.__cache_storage
@staticmethod
def __translate(kvps: Iterator[Tuple[str, Any]], lookup: Mapping[str, str]) -> Iterator[Tuple[str, Any]]:
"""Iterate kvps but substitute the key for the one in lookup. Strips out _type."""
yield from ((lookup.get(key, key), value) for key, value in kvps if key != '_type')
@attr.s(auto_attribs=True, kw_only=True)
class ExceptionBase(JidType, Exception, hint='ExceptionBase'):
"""Wraps exceptions thrown by CDF; hint is fake."""
what: Optional[str] = None
name: Optional[str] = None
inner: Optional[Exception] = None # only exists for the duration of __init__
# noinspection PyUnusedLocal
def __init__(self, *, what: str = None, name: str = None, inner: Exception = None): # type: ignore
...
def __attrs_post_init__(self) -> None:
self.what = self.what or (str(self.inner) if self.inner else 'No Exception message was available.')
super(Exception, self).__init__(self.what)
if self.inner:
super(Exception, self).__setattr__('__cause__', self.inner)
self.inner = None # hide the callstack from JidType.as_dict()
@classmethod
def from_exception(cls, exception: Exception) -> 'ExceptionBase':
"""return an instance of CDFException out of any exception"""
exception_message = str(exception).strip('"').strip("'")
return cls(what=exception_message, name=exception.__class__.__name__, inner=exception)
def __str__(self) -> str:
return self.what or super().__str__()
```
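A small sketch of how the casing machinery behaves for a hypothetical subclass (the class and the import are assumptions; the accompanying `deserializer` module is expected to pass plain ints and strings through): fields without `CASING` metadata are camelized on serialization, an explicit `CASING` value wins, and the registered hint is emitted as `_type`.
```python
import attr
from cdf.root import CASING, JidType  # assumed re-exports, as used by the generated modules

@attr.s(kw_only=True, auto_attribs=True)
class DemoStats(JidType, hint="Example.DemoStats"):  # hypothetical type for illustration only
    message_count: int = 0
    vhost_name: str = attr.ib(default="/", metadata={CASING: "vhost"})

print(DemoStats(message_count=3).as_dict())
# {'MessageCount': 3, 'vhost': '/', '_type': 'Example.DemoStats'}  (key order may vary)
```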
#### File: extensionRunner/cdf/script_store.py
```python
from attr import attrs
from typing import Optional as Opt
from .root import CoveoInterface, ExceptionBase, JidType, api
@attrs(kw_only=True, auto_attribs=True)
class ScriptStoreException(ExceptionBase, hint="Coveo.ScriptStoreException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class ScriptNotFoundException(ScriptStoreException, hint="Coveo.ScriptNotFoundException"):
def __init__(self) -> None:
...
@attrs(kw_only=True, auto_attribs=True)
class InvalidVersionException(ScriptStoreException, hint="Coveo.InvalidVersionException"):
def __init__(self) -> None:
...
class IScriptStore(CoveoInterface):
"""The script store API exposes methods to interact with this script store."""
@api("GET/scripts/{id}/{version}", id_="Id")
def get(self, *, id_: str, version: str) -> str:
"""Returns a script from the store.
Parameters:
id_: The script id.
version: The script version.
"""
@api("GET/enabled/{id}", id_="Id")
def is_enabled(self, *, id_: str) -> bool:
"""Returns whether a script is enabled.
Parameters:
id_: The script id.
"""
@api("GET/last_version_id/{id}", id_="Id")
def get_last_version_id(self, *, id_: str) -> str:
"""Returns the id of the last version of a script.
Parameters:
id_: The script id.
"""
@attrs(kw_only=True, auto_attribs=True)
class ScriptPackage(JidType, hint="Coveo.ScriptPackage"):
"""
Attributes:
name: The package name
location: The package location
version: The package version
"""
name: Opt[str] = None
location: Opt[str] = None
version: Opt[str] = None
def __init__(self, *, name: Opt[str] = None, location: Opt[str] = None, version: Opt[str] = None) -> None:
"""
Parameters:
name: The package name
location: The package location
version: The package version
"""
```
#### File: extensionRunner/cdf/tagging_consumer.py
```python
from attr import attrib, attrs
from enum import auto
from typing import List, Optional as Opt
from .root import CASING, CoveoInterface, JidEnumFlag, JidType, api
from .search_service import NameValuePair, QueryParamsForTagging
from .security_provider import SID
class OpCode(JidEnumFlag):
"""List of the codes for tagging operations
Attributes:
InvalidTagOpCode: Invalid tag operation.
Tag: Add a tag on a given set of documents.
UnTag: Remove a tag on a given set of documents.
ClearTag: Remove the given tag name-value pairs from all documents in the index.
ClearAllTagValues: Clear the given tags from all documents in the index.
ClearDocumentTags: Clear the given tags from the targeted document.
ClearDocumentsTags: Clear the given tags from the documents targeted by a query.
"""
InvalidTagOpCode: int = auto()
Tag: int = auto()
UnTag: int = auto()
ClearTag: int = auto()
ClearAllTagValues: int = auto()
ClearDocumentTags: int = auto()
ClearDocumentsTags: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class TaggingCommand(JidType, hint="Coveo.TaggingCommand"):
"""The definition of tagging command.
Attributes:
op_code: Whether to tag, untag, or clear the tag.
query_params: Info about the query used to perform tagging by query.
target_document: Document (to tag/untag). (Used by single-document tagging, not used by tagging by query.)
name_value_pairs: List of (tag field name, tag value name) pairs for batch tagging (needed for tagging by query).
robot: Whether tagging is performed by a robot. We commit less often for robots.
super_user_mode: Whether tagging is performed in super user mode.
sid: The SIDDeclarator for the user performing the tagging request; it will be used to construct the query info in all mirrors.
sid_for_internal_sid: Will be used to construct the query info in all mirrors.
full_user_name: Full user name. Set only when tagging with document keys (when m_QueryInfo is null).
"""
op_code: Opt[OpCode] = None
query_params: Opt[QueryParamsForTagging] = None
target_document: Opt[str] = None
name_value_pairs: Opt[List[NameValuePair]] = None
robot: Opt[bool] = None
super_user_mode: Opt[bool] = None
sid: Opt[SID] = attrib(default=None, metadata={CASING: "SID"})
sid_for_internal_sid: Opt[List[SID]] = attrib(default=None, metadata={CASING: "SIDForInternalSID"})
full_user_name: Opt[str] = None
def __init__(
self,
*,
op_code: Opt[OpCode] = None,
query_params: Opt[QueryParamsForTagging] = None,
target_document: Opt[str] = None,
name_value_pairs: Opt[List[NameValuePair]] = None,
robot: Opt[bool] = None,
super_user_mode: Opt[bool] = None,
sid: Opt[SID] = attrib(default=None, metadata={CASING: "SID"}),
sid_for_internal_sid: Opt[List[SID]] = attrib(default=None, metadata={CASING: "SIDForInternalSID"}),
full_user_name: Opt[str] = None,
) -> None:
"""
Parameters:
op_code: Whether to tag, untag, or clear the tag.
query_params: Info about the query used to perform tagging by query.
target_document: Document (to tag/untag). (Used by single-document tagging, not used by tagging by query.)
name_value_pairs: List of (tag field name, tag value name) pairs for batch tagging (needed for tagging by query).
robot: Whether tagging is performed by a robot. We commit less often for robots.
super_user_mode: Whether tagging is performed in super user mode.
sid: The SIDDeclarator for the user performing the tagging request; it will be used to construct the query info in all mirrors.
sid_for_internal_sid: Will be used to construct the query info in all mirrors.
full_user_name: Full user name. Set only when tagging with document keys (when m_QueryInfo is null).
"""
class ITaggingConsumer(CoveoInterface):
@api("POST/tag")
def update_tagging(self, *, tagging_command: TaggingCommand) -> None:
...
```
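A minimal construction sketch for the command above (the import path is assumed from the file layout; `name_value_pairs` is left out because `NameValuePair` lives in `.search_service`, which is not shown here):
```python
from cdf.tagging_consumer import OpCode, TaggingCommand  # assumed import path

# Tag a single document; name_value_pairs is only required when tagging by query.
command = TaggingCommand(
    op_code=OpCode.Tag,
    target_document="https://example.com/doc1",  # illustrative document key
    robot=False,
    super_user_mode=False,
)
print(str(command.op_code))        # Tag
print(command.as_dict()["_type"])  # Coveo.TaggingCommand
```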
#### File: extensionRunner/corepyutils/casing.py
```python
import functools
import re
from typing import Match, Iterable, Dict, Callable, Any, TypeVar, List
import inflection
from .annotations import find_annotations
T = TypeVar('T')
# noinspection PyDefaultArgument
def snake_case(string: str, bad_casing: Iterable[str] = ()) -> str:
"""return the snake cased version of a string. bad casings may be specified: if the bad casing is found, the word
is replaced with Titlecase:
Without bad_casing: SomeTimeOut_s -> some_time_out_s
With ['TimeOut'] as bad_casing: SomeTimeOut_s -> some_timeout_s
"""
# find groups of uppercase letters like: Some(URIs), (CDF)Node, (DPMs), (IDs)
# alter the groups as such: Some(Uris), (Cdf)Node, (Dpms), (Ids)
# this will remove most ambiguities for inflection.underscore() to react correctly
def _replace_caps_clusters(match: Match) -> str:
sub: str = match.group()
if len(sub) <= 3: # DBs / DPM / ID / Id...
return sub.title()
boundary = -2 if sub[-1].isupper() else -1
return sub[:boundary].title() + sub[boundary:]
prepared = re.sub(
pattern=re.compile(r'([A-Z]{2,}[a-z]?(?=$|[^a-z]))'),
repl=_replace_caps_clusters,
string=string)
# check if we can find any of the words and fix their casing
for word in bad_casing:
if word in prepared:
prepared = prepared.replace(word, word.title())
result = inflection.underscore(prepared)
assert isinstance(result, str) # mypy
def _remove_digits_underscore(match: Match) -> str:
sub: str = match.group()
assert sub[-1] == '_'
return sub[:-1]
# inflection will add an underscore after numbers. we don't want that.
result = re.sub(pattern=r'\d+_', repl=_remove_digits_underscore, string=result)
return result
class _FlexcaseDecorator:
"""Allow passing kwargs to a method without consideration for casing or underscores."""
__slots__ = 'strip_extra', 'allowed_extras'
def __init__(self, *, strip_extra: bool = True, allowed_extras: List[str] = None) -> None:
self.strip_extra = strip_extra
self.allowed_extras = allowed_extras
def __call__(self, fn: Callable[..., T]) -> Callable[..., T]:
_aliases: Dict[str, str] = self.create_lookup(fn, self.allowed_extras)
@functools.wraps(fn)
def _wrapper(*args: Any, **kw: Any) -> Any:
__tracebackhide__ = True
return fn(*args, **self.unflex(_aliases, kw))
return _wrapper
def unflex(self, lookup: Dict[str, str], kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Return a copy of kwargs with the correct case."""
clean = {}
for key in kwargs:
lookup_key = self._lookup_key(key)
if lookup_key not in lookup:
if self.strip_extra:
continue
clean[key] = kwargs[key] # don't touch this one, let it explode later
else:
clean[lookup[lookup_key]] = kwargs[key]
return clean
@classmethod
def create_lookup(cls, fn: Callable, extras: List[str] = None) -> Dict[str, str]:
"""Create a simple lookup of stripped underscore + lowercased -> Original bases on the function's annotation.
Additional kwargs may be allowed to go through by using `extras`
"""
return {cls._lookup_key(annotation): annotation
for annotation in list(find_annotations(fn)) + (extras or [])
if annotation != 'return'}
@staticmethod
def _lookup_key(key: str) -> str:
"""Return a normalized lookup key."""
return key.replace('_', '').lower()
def flexcase(fn: Callable[..., T], *, strip_extra: bool = True, allowed_extras: List[str] = None) -> Callable[..., T]:
"""Return fn wrapped in flexcase magic.
Can be used as decorator over methods and functions: @flexcase
Can be used as a one-time delegate: result = flexcase(obj.method)(**dirty_casings)
"""
return _FlexcaseDecorator(strip_extra=strip_extra, allowed_extras=allowed_extras)(fn)
def unflex(fn: Callable, dirty_kwargs: Dict[str, Any], strip_extra: bool = True) -> Dict[str, Any]:
"""Opposite of flexcase; return a clean version of dirty_kwargs with correct case and extra kwargs stripped out."""
flex: _FlexcaseDecorator = _FlexcaseDecorator(strip_extra=strip_extra)
return flex.unflex(flex.create_lookup(fn), dirty_kwargs)
```
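A short usage sketch of the helpers above; `configure` is a made-up function and the import assumes the `corepyutils` package is on the path.
```python
from corepyutils.casing import flexcase, snake_case, unflex

print(snake_case("SomeTimeOut_s"))               # some_time_out_s
print(snake_case("SomeTimeOut_s", ["TimeOut"]))  # some_timeout_s

def configure(*, some_time_out_s: int, retries: int = 3) -> dict:
    return {"some_time_out_s": some_time_out_s, "retries": retries}

# flexcase ignores casing/underscores in kwargs and strips unknown keys by default.
print(flexcase(configure)(SomeTimeOutS=30, Bogus=1))     # {'some_time_out_s': 30, 'retries': 3}
# unflex only normalizes the kwargs without calling the function.
print(unflex(configure, {"Retries": 5, "Extra": None}))  # {'retries': 5}
```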
#### File: platform/extensionRunner/extension_runner_server.py
```python
import pathlib
import datetime
import marshal
import os
import base64
import sys
import site
import traceback
import subprocess
import uuid
import logging
import re
import shlex
import gc
from typing import Dict, List
from extension_runner import *
from cdf.document_definition import LocalBlobEntry, CompressionType, PermissionLevel, PermissionSet, Permission, MetaDataValue, DataStreamValue
from cdf.document_processor_script import ScriptExecutionResult, LogEntry, PythonScriptException, PythonPackageException, ScriptPackage, DocumentProcessorScriptParameters, RejectedFromScriptException, ScriptSkippedException
from cdf.logger import LogEntry, SeverityType
import json as native_json
try:
import rapidjson as _json
except ImportError:
import json as _json
LOG_SEVERITY = {
'fatal': 'Fatal',
'error': 'Error',
'important': 'Important',
'normal': 'Normal',
'debug': 'Debug',
'notification': 'Notification',
'warning': 'Warning',
'detail': 'Detail'
}
IDENTITY_TYPES = {
'user': 'User',
'group': 'Group',
'virtual_group': 'VirtualGroup',
'virtualgroup': 'VirtualGroup',
'unknown': 'Unknown'
}
class ExtensionRunner(object):
"""
Will delegate execution of a python script to user code
"""
def __init__(self, debug=False):
super().__init__()
self._debug = debug
self._package_re = re.compile(r'([\w-]+)(?:[=~<>]+)?(.*)')
self._last_document_state = None
# hook the logging to the node log file
self._log = logging.getLogger('system')
self._log.setLevel(logging.INFO)
def init_blade(self, parameters: Dict[str, str]):
"""
Called to pass init parameters
"""
if 'DataPath' in parameters:
DocumentApi.working_path = parameters['DataPath']
def compile(self, script_id: str, code: str) -> bytes:
"""
will compile the code
"""
if self._debug:
# Don't compile the script in debug since we'll save it to disk later in order to trace it
return base64.b64encode(b'').decode()
else:
return base64.b64encode(marshal.dumps(self._compile(code, '${FILENAME}'))).decode()
def prepare_packages(self, packages: List[str], working_path: str, merge: bool) -> List[ScriptPackage]:
"""
will prepare the packages
"""
try:
installed_into = []
if merge:
# install the packages into the same folder
installed_into.append(self._pip_install(packages, working_path))
else:
# install each package in its own folder
for p in packages:
installed_into.append(self._pip_install([p], working_path))
return installed_into
except Exception as e:
raise PythonPackageException(what=str(e))
def _pip_install(self, packages: List[str], working_path: str) -> ScriptPackage:
"""
will install one or more packages into a folder
"""
PIP_INSTALL_COMMAND_LINE = 'pip install {packages} -b "{build}" --prefix "{install}" --cache-dir "{cache}" --ignore-installed --compile'
PIP_FREEZE_COMMAND_LINE = 'pip freeze'
# create our working sub-folders
build_folder = os.path.join(working_path, str(uuid.uuid4()))
install_folder = os.path.join(working_path, str(uuid.uuid4()))
temp_folder = os.path.join(working_path, str(uuid.uuid4()))
cache_path = os.path.join(working_path, str(uuid.uuid4()))
os.makedirs(build_folder)
os.makedirs(install_folder)
os.makedirs(temp_folder)
os.makedirs(cache_path)
# some packages will access the temp path while installing
env = os.environ
env['TEMP'] = temp_folder
env['TMP'] = temp_folder
# spawn the installation
pip_install = subprocess.Popen(shlex.split(PIP_INSTALL_COMMAND_LINE.format(packages=' '.join(packages), build=build_folder, install=install_folder, cache=cache_path)),
shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = pip_install.communicate()
if pip_install.returncode != 0:
raise PythonPackageException(what=err.decode())
# figure out where the package has been installed
site_package_path = ''
try:
site_package_path = next(d[0] for d in os.walk(install_folder) if d[0].endswith('site-packages'))
except StopIteration:
pass
# figure out the version of the packages we've just installed with pip freeze
env['PYTHONPATH'] = site_package_path
pip_freeze = subprocess.Popen(shlex.split(PIP_FREEZE_COMMAND_LINE),
shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
out, err = pip_freeze.communicate()
if pip_freeze.returncode != 0:
raise PythonPackageException(what=err.decode())
all_installed_packages = {self._get_package_name(p): self._get_package_version(p) for p in out.decode().split()}
# package name will contain its version as well
# packages that are merged together will end up with a merged name
package = ScriptPackage()
package.location = site_package_path
package.name = ','.join(['{}=={}'.format(*z) for z in zip([self._get_package_name(p) for p in packages], [all_installed_packages[self._get_package_name(p)] for p in packages])])
return package
def get_last_log_entries(self) -> List[LogEntry]:
"""
will get the log entries for the last script execution
"""
if self._last_document_state:
return self._last_document_state.get_log_entries()
else:
return []
def execute(self, parameters: DocumentProcessorScriptParameters, id_: str, meta_data: List[MetaDataValue], meta_data_file: str, permissions: List[PermissionLevel], data_streams: Dict[str, DataStreamValue], package_paths: List[str]):
"""
will execute the extension
"""
# compile or use the already compiled code
delete_temp_file = False
compiled_code = None
filename = parameters.script_id
if parameters.compiled_code:
compiled_code = marshal.loads(base64.b64decode(parameters.compiled_code))
elif parameters.code:
if self._debug:
import tempfile
delete_temp_file = True
filename = tempfile.mktemp(suffix='.py')
with open(filename, 'wt', encoding='utf-8') as tmp_file:
tmp_file.write(parameters.code)
compiled_code = self._compile(parameters.code, filename if self._debug else '${FILENAME}')
elif pathlib.Path(filename).exists():
with open(filename, encoding='utf8') as source_file:
compiled_code = self._compile(source_file.read(), filename if self._debug else '${FILENAME}')
if not compiled_code:
raise PythonScriptException(what='Either compiled_code or code need to be populated in parameters')
system_log_entries = []
# load the meta from disk if needed
if meta_data_file:
try:
with open(os.path.join(DocumentApi.working_path, meta_data_file), 'r') as mf:
meta_data = [ApiV1.MetaDataValue(m.get('Origin', ''), m.get('Values', {})) for m in _json.load(mf)]
except Exception as e:
system_log_entries.append(LogEntry(comment=u'rapidjson: {}'.format(e),
date=int(datetime.datetime.now().timestamp()),
severity=SeverityType.Error))
# rapidjson cannot load the file, will retry with the native json
with open(os.path.join(DocumentApi.working_path, meta_data_file), 'r') as mf:
meta_data = [ApiV1.MetaDataValue(m.get('Origin', ''), m.get('Values', {})) for m in native_json.load(mf)]
else:
meta_data = [ApiV1.MetaDataValue(mm.origin, mm.values) for mm in meta_data]
document_state = _DocumentState(id_,
meta_data,
permissions,
data_streams,
parameters.name)
self._last_document_state = document_state
document_api = DocumentApi(document_state)
# inject package folders in path & site
old_path = list(sys.path)
for p in package_paths:
# addsitedir will add the folder to the end of sys.path
# but we need them at the beginning of the list, though not at position 0 since that's the script name
sys.path.insert(1, p)
site.addsitedir(p)
script_globals = {'__name__': '__main__',
'sys': sys,
'script': document_api.legacy,
'document_api': document_api,
'document': document_api.v1,
'log': document_api.v1.log,
'parameters': parameters.values or {}}
try:
exec(compiled_code, script_globals)
result = document_state.result
if result.rejected:
# someone did "except Exception" silencing the RejectedException, re-throw it
raise RejectedException(document_state.reject_reason)
return ScriptExecutionResult(meta_data=result.meta_data,
permissions=result.permissions,
data_streams=result.streams,
log_entries=result.log_entries,
system_log_entries=system_log_entries)
except RejectedException as e:
raise RejectedFromScriptException(what=str(e))
except SkippedException as e:
raise ScriptSkippedException(what=str(e))
except BaseException:
raise PythonScriptException(what=self._get_traceback(filename))
finally:
if delete_temp_file:
try:
os.remove(filename)
except OSError:
pass
sys.path = list(old_path)
# clean up the data_streams
document_state.close_streams()
# close DataStreams that were created but not added
for s in script_globals.values():
if type(s) is ApiV1.DataStream and not s.closed:
s.close()
# clean up local objects that were passed to the extension in order to avoid leaks since those objects are somehow still referenced from the exec() step
to_delete = [k for k in script_globals.keys()]
for d in to_delete:
del script_globals[d]
gc.collect()
def _compile(self, code, filename):
try:
return compile(code, filename, 'exec')
except Exception:
raise PythonScriptException(what=self._get_traceback(filename))
def _get_traceback(self, filename):
body_frames = self._replace_filename(filename, traceback.format_list(traceback.extract_tb(sys.exc_info()[2])[1:]))
exception_frames = traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])
# Make sure we deal with SyntaxError correctly, it adds the filename in the first frame
if len(exception_frames) > 1:
exception_frames = self._replace_filename(filename, exception_frames[:1]) + exception_frames[1:]
frames = ['Traceback (most recent call last):\n'] + \
body_frames + \
exception_frames
return ''.join(frames)
def _replace_filename(self, filename, frame_list):
filename = filename or '<string>'
return [frame.replace('${FILENAME}', filename) for frame in frame_list]
def _get_package_name(self, package):
"""
will get a package name without the version
:param package: package name with optional version
:return: the package name
"""
# according to https://packaging.python.org/tutorials/installing-packages/#id17
# 'SomeProject' 'SomeProject==1.4' 'SomeProject>=1,<2' 'SomeProject~=1.4.2'
return self._package_re.search(package).group(1)
def _get_package_version(self, package):
"""
will get a package version
:param package: package name with mandatory version
:return: the package version
"""
return self._package_re.search(package).group(2)
class _DocumentState(ApiV1.DocumentState):
def __init__(self, document_id, meta_data, permissions, streams, origin):
"""
convert the streams from the wrapped C++ to more manageable ones
"""
current_streams = []
for name, streams in streams.items():
for s in streams:
current_streams.append(ApiV1.ReadOnlyDataStream(name,
s.origin,
s.value.file_name,
base64.b64decode(s.value.inline_blob)))
super(_DocumentState, self).__init__(document_id,
meta_data,
[ApiV1.PermissionLevel(l.name, [ApiV1.PermissionSet(s.name,
s.allow_anonymous,
[ApiV1.Permission(a.identity, a.identity_type.name, a.security_provider, {k: v for k, v in a.additional_info.items()} if a.additional_info else {}) for a in s.allowed_permissions] if s.allowed_permissions else [],
[ApiV1.Permission(d.identity, d.identity_type.name, d.security_provider, {k: v for k, v in d.additional_info.items()} if d.additional_info else {}) for d in s.denied_permissions] if s.denied_permissions else []) for s in l.permission_sets]) for l in permissions],
current_streams,
origin)
@property
def result(self):
"""
get the result of the script execution
:return: ApiV1.Result
"""
return ApiV1.Result(self.meta_data_to_add,
self._get_final_permissions(),
self._get_streams_to_add(),
self.get_log_entries(),
self.reject_document,
self.reject_reason)
def _get_streams_to_add(self):
to_add = {}
for stream in self.streams_to_add:
if stream.filename:
# blob in a file
to_add[stream.name] = LocalBlobEntry(file_name=stream.filename,
compression=CompressionType['Uncompressed'])
else:
# inline blob
to_add[stream.name] = LocalBlobEntry(inline_blob=base64.b64encode(stream.inline),
compression=CompressionType['Uncompressed'])
return to_add
def get_log_entries(self):
return [LogEntry(comment=log_entry[0],
date=int(datetime.datetime.now().timestamp()),
duration=0,
fields={},
severity=LOG_SEVERITY.get(log_entry[1].lower(), 'Normal')) for log_entry in self.log_entries]
def _get_final_permissions(self):
def convert_permission(permission):
return Permission(
identity=permission.identity,
identity_type=IDENTITY_TYPES.get(permission.identity_type.lower(), 'Unknown'),
security_provider=permission.security_provider,
additional_info={k: v for k, v in permission.additional_info.items()}
)
# Convert back to Permissions
final_permissions = []
for level in self.final_permissions:
# level
final_level = PermissionLevel(
name=level.name,
permission_sets=[]
)
for permission_set in level.permission_sets:
# set
final_set = PermissionSet(
name=permission_set.name,
allow_anonymous=permission_set.allow_anonymous,
allowed_permissions=[convert_permission(a) for a in permission_set.allowed_permissions],
denied_permissions=[convert_permission(d) for d in permission_set.denied_permissions]
)
final_level.permission_sets.append(final_set)
final_permissions.append(final_level)
return final_permissions
```
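The runner above is normally driven by the hosting blade; the following is only a rough sketch of invoking it directly, assuming the `extension_runner` package and the `cdf` modules it imports are available on the path, and using a no-op extension body so that none of the injected script globals are exercised.
```python
from extension_runner_server import ExtensionRunner
from cdf.document_processor_script import DocumentProcessorScriptParameters

runner = ExtensionRunner(debug=True)
params = DocumentProcessorScriptParameters(
    script_id="demo.py",  # illustrative id
    name="demo-extension",
    code="pass",          # real extensions use the injected 'document', 'log', 'parameters' globals
    values={},
)

# Arguments: parameters, id_, meta_data, meta_data_file, permissions, data_streams, package_paths
result = runner.execute(params, "document://1", [], "", [], {}, [])
print(result.log_entries)  # expected to be empty for a no-op script
```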
#### File: platform/extensionRunner/extension_runner_standalone.py
```python
import json
import argparse
import base64
import zlib
from urllib.parse import urlparse
import requests
import pathlib
from collections import defaultdict
from typing import List
from extension_runner_server import ExtensionRunner
from cdf.root.serializer import JidEncoder
from cdf.root.deserializer import deserialize
from cdf.document_processor_script import DocumentProcessorScriptParameters
from cdf.document_definition import MetaDataValue, LocalBlobEntry, DataStreamValue
def get_script_parameters(session, env, org_id, script_file, source_id):
""" will get the parameters for a script on a given source"""
try:
organization_id, extension_id = pathlib.Path(script_file).stem.split('-')
except Exception:
extension_id = pathlib.Path(script_file).stem
organization_id = org_id
extension_id = '{}-{}'.format(organization_id, extension_id[:extension_id.find('_v3')] if '_v3' in extension_id else extension_id)
parameters = DocumentProcessorScriptParameters(script_id=script_file, name=extension_id)
# fetch the parameters for this extension
resp = session.get(f'https://platform{env}.cloud.coveo.com/rest/organizations/{organization_id}/sources/{source_id}/raw')
if resp.ok:
def get_params(pipeline):
for p in pipeline:
if p['extensionId'] == extension_id:
return p['parameters']
json_resp = resp.json()
if 'MAPPING_EXTENSION' in script_file:
parameters.values = {'mappings': json.dumps(json_resp['mappings'])}
else:
parameters.values = get_params(json_resp['preConversionExtensions']) or get_params(json_resp['postConversionExtensions'])
return parameters
def get_document(session, env, organization_id, query, load_from_field_name, load_from_stream_name, query_pipeline, meta_from_file):
""" will load the extension and document"""
origin_to_consider = {'crawler', 'converter', 'mapping'}
def figure_out_meta(meta):
""" will figure out the format of the meta """
try:
return deserialize(meta.json(), List[MetaDataValue])
except Exception:
# simple key/value (may include origin)
meta_by_origin = defaultdict(defaultdict)
for k, v in meta.items():
origin = 'crawler'
value = v
if ':' in k:
name, origin = k.split(':')
if origin not in origin_to_consider:
continue
else:
name = k
meta_by_origin[origin][name] = v
return list(reversed([MetaDataValue(origin=k, values=v) for k, v in meta_by_origin.items()]))
# load the document from a query result
resp = session.post(f'https://platform{env}.cloud.coveo.com/rest/search/v2?organizationId={organization_id}&pipeline={query_pipeline}', data=json.dumps({'q': query}))
# fill meta from the query result or from the given field or stream
if resp.ok:
meta_data = None
content = resp.json()['results']
if content:
unique_id = content[0]["uniqueId"]
result_meta = resp.json()['results'][0]['raw']
# get the source id according to its name
resp = session.get(f'https://platform{env}.cloud.coveo.com/rest/organizations/{organization_id}/sources/')
if resp.ok:
source_id = next(s['id'] for s in resp.json() if s['name'] == content[0]['raw']['source'])
else:
raise Exception(f'cannot get source id: {resp.reason}')
if load_from_stream_name:
resp = session.get(f'https://platform{env}.cloud.coveo.com/rest/search/v2/datastream?uniqueId={unique_id}&organizationId={organization_id}&dataStream={load_from_stream_name}')
if resp.ok:
try:
meta_data = figure_out_meta(resp.json())
except Exception:
pass
if not meta_data:
if load_from_field_name and load_from_field_name in result_meta:
try:
meta_data = figure_out_meta(json.loads(result_meta[load_from_field_name]))
except Exception:
pass
if not meta_data:
meta_data = [MetaDataValue(origin='Crawler',
values={})]
for k, v in result_meta.items():
if not k.startswith('sys'):
meta_data[0].values[k] = v if type(v) is list else [v]
else:
raise Exception('document not found in index')
else:
raise Exception(resp.reason)
# replace with the meta we've loaded from the file
if meta_from_file:
def erase_meta_from_origin(origin_):
for m in meta_data:
if m.origin == origin_:
meta_data.remove(m)
break
for m in meta_from_file:
origin = m['Origin']
if origin.lower() in origin_to_consider:
erase_meta_from_origin(origin)
meta_data.append(MetaDataValue(origin=origin, values=m['Values']))
return unique_id, source_id, meta_data
def get_data_streams(session, env, organization_id, unique_id, document_uri, script_file, streams_from_file):
""" load the streams needed by the script """
data_streams = {}
with open(script_file, 'rt') as s:
script_code_lower = s.read().lower()
if 'body_text' in script_code_lower:
if 'Body_Text' in streams_from_file and 'InlineBlob' in streams_from_file['Body_Text'][0]['Value']:
data_streams['Body_Text'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(zlib.decompress(base64.b64decode(streams_from_file['Body_Text'][0]['Value']['InlineBlob'])))))]
else:
resp = session.get(f'https://platform{env}.cloud.coveo.com/rest/search/v2/text?uniqueId={unique_id}&organizationId={organization_id}')
if resp.ok:
data_streams['Body_Text'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(resp.json()['content'].encode('utf-16le'))))]
if 'body_html' in script_code_lower:
if 'Body_HTML' in streams_from_file and 'InlineBlob' in streams_from_file['Body_HTML'][0]['Value']:
data_streams['Body_HTML'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(zlib.decompress(base64.b64decode(streams_from_file['Body_HTML'][0]['Value']['InlineBlob'])))))]
else:
resp = session.get(f'https://platform{env}.cloud.coveo.com/rest/search/v2/html?uniqueId={unique_id}&organizationId={organization_id}')
if resp.ok:
data_streams['Body_HTML'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(resp.content.decode().encode())))]
if 'documentdata' in script_code_lower:
if 'DocumentData' in streams_from_file and 'InlineBlob' in streams_from_file['DocumentData'][0]['Value']:
data_streams['DocumentData'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(zlib.decompress(base64.b64decode(streams_from_file['DocumentData'][0]['Value']['InlineBlob'])))))]
elif urlparse(document_uri).scheme == 'https':
resp = requests.get(document_uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'})
if resp.ok:
                data_streams['DocumentData'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(resp.content)))]
return data_streams
def run_extension(document, extension_filename, parameters):
""" will run an extension on a document """
data_streams = {}
streams_from_file = {}
document = document['Document']
streams_from_file = document.get('DataStreams', {})
meta_from_file = document.get('MetaData', {})
# setup the meta data
meta_data = [MetaDataValue(origin='Crawler',
values={})]
for k, v in meta_from_file[0]['Value'].items():
meta_data[0].values[k] = v if type(v) is list else [v]
if 'DocumentData' in streams_from_file and 'InlineBlob' in streams_from_file['DocumentData'][0]['Value']:
data_streams['DocumentData'] = [DataStreamValue(origin='Crawler',
value=LocalBlobEntry(compression='Uncompressed',
inline_blob=base64.b64encode(zlib.decompress(base64.b64decode(streams_from_file['DocumentData'][0]['Value']['InlineBlob'])))))]
# get the document URI from the meta Id
document_uri = ''
if 'Id' in document:
document_uri = document['Id']
# setup the params
script_parameters = DocumentProcessorScriptParameters(script_id=extension_filename, name=pathlib.Path(extension_filename).stem)
if parameters:
with open(parameters[1]) as param_file:
script_parameters.values = {parameters[0]: param_file.read()}
# run the extension
extension_runner = ExtensionRunner(debug=True)
return extension_runner.execute(script_parameters,
document_uri,
meta_data,
'',
[],
data_streams,
[])
if __name__ == '__main__':
# setup the valid args
parser = argparse.ArgumentParser(description='Standalone extension runner')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-uri', help='uri of the document to get from index')
group.add_argument('-urihash', help='the uri hash of the document to get the extensions for')
parser.add_argument('-script', required=True, nargs='+', help='filename of the script to apply to the document')
parser.add_argument('-token', required=True, help='the authorization token to connect to the platform')
parser.add_argument('-field', required=False, help='get the meta from that field, ex: allmetadatavalues')
parser.add_argument('-stream', required=False, help='get the meta from that stream, ex: allmetadatavalues')
parser.add_argument('-env', required=False, default='', help='dev, qa or empty for prod')
parser.add_argument('-pipeline', dest='pipeline', required=False, default='default', help='the query pipeline')
parser.add_argument('-json_file', required=False, default='', help='will load the document meta from a json file')
parser.add_argument('-orgid', required=False, help='the org id')
args = parser.parse_args()
    scripts = args.script if isinstance(args.script, list) else [args.script]
organization_id = args.orgid or pathlib.Path(scripts[0]).stem.split('-')[0]
# load the document from the optional json file
meta_from_file = []
streams_from_file = {}
if args.json_file:
with open(args.json_file) as json_file:
json_doc = json.load(json_file)
if 'Document' in json_doc:
meta_from_file = json_doc['Document'].get('MetaData', [])
streams_from_file = json_doc['Document'].get('DataStreams', {})
session = requests.session()
session.headers.update({'content-type': 'application/json', 'Authorization': f'Bearer {args.token}'})
query = f'@uri=={args.uri}' if args.uri else f'@urihash=={args.urihash}'
unique_id, source_id, meta_data = get_document(session, args.env, organization_id, query, args.field, args.stream, args.pipeline, meta_from_file)
document_uri = unique_id[unique_id.find('$') + 1:]
extension_runner = ExtensionRunner(debug=True)
for script in scripts:
        data_streams = get_data_streams(session, args.env, organization_id, unique_id, document_uri, script, streams_from_file)
script_parameters = get_script_parameters(session, args.env, organization_id, script, source_id)
try:
result = extension_runner.execute(script_parameters,
document_uri,
meta_data,
'',
[],
data_streams,
[])
print(f'Success {script_parameters.name}: {json.dumps(result, cls=JidEncoder, indent=4)}')
# update the document with new meta and streams
meta_data.append(MetaDataValue(origin=script_parameters.name,
values=result.meta_data))
if result.data_streams:
pass
except Exception as e:
print(f'Exception {script_parameters.name}: {json.dumps(e, cls=JidEncoder, indent=4,check_circular=False)}')
print(f'Logs: {json.dumps(extension_runner.get_last_log_entries(), cls=JidEncoder, indent=4)}')
#
# In order to have type completion in PyCharm, add those two lines to your script
#
# document = document
# """:type: extension_runner_27.ApiV1"""
#
```
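The `figure_out_meta` helper above also accepts a flat key/value mapping where a key may carry an origin suffix (`name:origin`). A minimal stdlib-only sketch of that grouping rule, with the platform's `MetaDataValue` class replaced by a plain dict purely for illustration:
```python
from collections import defaultdict

ORIGINS = {'crawler', 'converter', 'mapping'}

def group_meta_by_origin(flat_meta):
    """Group {'title': 'x', 'author:mapping': 'y'} style keys by origin."""
    by_origin = defaultdict(dict)
    for key, value in flat_meta.items():
        name, origin = key, 'crawler'
        if ':' in key:
            name, origin = key.split(':', 1)
            if origin not in ORIGINS:
                continue  # same filtering rule as figure_out_meta
        by_origin[origin][name] = value
    # most recent origin first, mirroring the reversed() call above
    return list(reversed([{'origin': o, 'values': v} for o, v in by_origin.items()]))

print(group_meta_by_origin({'title': 'Doc', 'author:mapping': 'Jane', 'skip:unknown': 1}))
```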
#### File: platform/extensions/set_locale.py
```python
import re
langcode_to_language = {'en': 'English',
'fr': 'French',
'es': 'Spanish',
'de': 'German',
'it': 'Italian',
'ja': 'Japanese',
'zh': 'Chinese',
'ru': 'Russian',
'ko': 'Korean',
'pt': 'Portuguese'}
def extract_locale_info(uri):
locale_info_regex = r"(language|lang)?[=:]?([a-zA-Z]{2})_([a-zA-Z]{2})?"
locale_info = {}
log('input param >>> {}'.format(uri))
    if uri:
        m = re.search(locale_info_regex, uri)
        if m:
lang_code = (m.group(2) or '').lower()
country_code = (m.group(3) or '').lower()
locale_info = {
"locale.culture_code": '{}_{}'.format(lang_code, country_code),
"locale.lang_code": '{}'.format(lang_code),
"locale.country_code": '{}'.format(country_code),
"locale.language": langcode_to_language.get(lang_code, '')
}
log('locale_info >>> {}'.format(locale_info))
return locale_info
def get_safe_meta_data(meta_data_name):
safe_meta = ''
meta_data_value = document.get_meta_data_value(meta_data_name)
if len(meta_data_value) > 0:
safe_meta = meta_data_value[-1]
return safe_meta
try:
locale_info = extract_locale_info(get_safe_meta_data('coveo_tags') or get_safe_meta_data('kav_language') or document.uri)
document.add_meta_data(locale_info)
except Exception as e:
log(str(e))
```
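The regex in `extract_locale_info` pulls the language and country codes out of strings such as `?lang=en_US`. A quick standalone check of that pattern (the URI below is made up for illustration):
```python
import re

LOCALE_RE = r"(language|lang)?[=:]?([a-zA-Z]{2})_([a-zA-Z]{2})?"

m = re.search(LOCALE_RE, "https://example.com/kb/article?lang=en_US")
if m:
    lang = (m.group(2) or '').lower()
    country = (m.group(3) or '').lower()
    print(lang, country, '{}_{}'.format(lang, country))  # en us en_us
```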
|
{
"source": "jfamaey/academic-website",
"score": 3
}
|
#### File: academic-website/publication_parser/parse.py
```python
import sys
import codecs
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
keys = set()
def parse_conference(text):
d = text.find('.')
authors = parse_authors(text[:d].strip())
text = text[d+1:]
d = text.find('.')
title = text[:d].strip()
text = text[d+1:]
d = text.find('.')
sub_fields = text[:d].split(", ")
conference = sub_fields[0].strip()
year = sub_fields[-1].strip()
doi = ""
if text[d+1:].find('doi') != -1:
doi = text[d+1:-1].split(':')[1].strip()
key = authors[0][1].replace(" ", "") + year
key_index = ord('a')
while key + chr(key_index) in keys:
key_index += 1
key += chr(key_index)
keys.add(key)
bibtex = "@inproceedings{Cnf-" + key + ",\n"
bibtex += "\tauthor = {"
for i in range(len(authors)):
bibtex += authors[i][1] + ", " + authors[i][0]
if i < len(authors) - 1:
bibtex += " and "
bibtex += "},\n"
bibtex += "\tbooktitle = {" + conference + "},\n"
bibtex += "\ttitle = {" + title + "},\n"
if not doi == '':
bibtex += "\tdoi = {" + doi + "},\n"
bibtex += "\tyear = {" + year + "}\n"
bibtex += "}\n"
return bibtex
def parse_journal(text):
    parts = text.split('.', 3)
authors = parse_authors(parts[0].strip())
title = parts[1].strip()
doi = parts[3].split(':')[1].split(' ')[0].strip()
parts = parts[2].split(', ')
journal = parts[0].strip()
year = parts[2].strip()
vnp = parse_volume(parts[1].strip())
key = authors[0][1].replace(" ", "") + year
key_index = ord('a')
while key + chr(key_index) in keys:
key_index += 1
key += chr(key_index)
keys.add(key)
bibtex = "@article{Jnl-" + key + ",\n"
bibtex += "\tauthor = {"
for i in range(len(authors)):
bibtex += authors[i][1] + ", " + authors[i][0]
if i < len(authors) - 1:
bibtex += " and "
bibtex += "},\n"
bibtex += "\tjournal = {" + journal + "},\n"
bibtex += "\ttitle = {" + title + "},\n"
if not vnp[0] == '':
bibtex += "\tvolume = {" + vnp[0] + "},\n"
if not vnp[1] == '':
bibtex += "\tnumber = {" + vnp[1] + "},\n"
if not vnp[2] == '':
bibtex += "\tpages = {" + vnp[2] + "},\n"
bibtex += "\tyear = {" + year + "},\n"
bibtex += "\tdoi = {" + doi + "}\n"
bibtex += "}\n"
return bibtex
def parse_authors(str):
authors = str.split(', ')
list = []
for author in authors:
i = author.find(' ')
list.append((author[:i], author[i+1:]))
return list
def parse_volume(str):
volume = ''
number = ''
pages = ''
pstart = str.find(':')
nstart = str.find('(')
if pstart != -1:
pages = str[pstart + 1:]
str = str[:pstart]
if nstart != -1:
number = str[nstart + 1 : -1]
str = str[:nstart]
if (str.isdecimal()):
volume = str
return (volume, number, pages)
input = open('journal_papers.txt', encoding="utf8")
for line in input:
print(parse_journal(line))
print()
input.close()
input = open('conference_papers.txt', encoding="utf8")
keys.clear()
for line in input:
print(parse_conference(line.rstrip('\n')))
print()
```
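`parse_journal` carves each line with `split('.', 3)` and then `split(', ')`, so it assumes journal entries shaped like the sample below (authors, title, then "Journal, volume(number):pages, year", then a doi field). The line itself is made up for illustration:
```python
# A made-up line in the format parse_journal expects:
sample = ("J Doe, A Smith. A study of things. "
          "Journal of Examples, 12(3):45-67, 2020. doi:10.1000/xyz123")

authors_part, title, rest, doi_part = sample.split('.', 3)
journal, volume_field, year = [p.strip() for p in rest.split(', ')]
print(authors_part.strip())                          # J Doe, A Smith
print(title.strip())                                 # A study of things
print(journal, volume_field, year)                   # Journal of Examples 12(3):45-67 2020
print(doi_part.split(':')[1].split(' ')[0].strip())  # 10.1000/xyz123
```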
|
{
"source": "jfangwpi/Interactive_planning_and_sensing",
"score": 3
}
|
#### File: test/python/test_GPMap.py
```python
import numpy as np
import math
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import Matern
def DistCalculation(pt1,pt2):
dist_sq = (pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2
dist = math.sqrt(dist_sq)
return dist
# Build the "continuous" map
row_coordinate = np.linspace(0,5,26)
col_coordinate = np.linspace(0,5,26)
# Generate training data
sensor1_center = [1.5,2.5]
sensor2_center = [3.5,1.5]
Rs = 1
pts1_sensing_range_ = []
for row in row_coordinate:
for col in col_coordinate:
dist = DistCalculation(sensor1_center,[row,col])
if dist <= Rs:
pts1_sensing_range_.append([row,col])
pts2_sensing_range_ = []
for row in row_coordinate:
for col in col_coordinate:
dist = DistCalculation(sensor2_center,[row,col])
if dist <= Rs:
pts2_sensing_range_.append([row,col])
training_X = []
training_Y = []
for pts in pts1_sensing_range_:
training_X.append(pts)
if 0 < pts[0] < 2 and 3 < pts[1] < 4:
training_Y.append(1)
elif 2 < pts[0] < 3 and 0 < pts[1] < 1:
training_Y.append(1)
elif 4 < pts[0] < 5 and 2 < pts[1] < 3:
training_Y.append(1)
else:
training_Y.append(0)
for pts in pts2_sensing_range_:
training_X.append(pts)
if 0 < pts[0] < 2 and 3 < pts[1] < 4:
training_Y.append(1)
elif 2 < pts[0] < 3 and 0 < pts[1] < 1:
training_Y.append(1)
elif 4 < pts[0] < 5 and 2 < pts[1] < 3:
training_Y.append(1)
else:
training_Y.append(0)
# for idx, pt in enumerate(training_X):
# print("vertex ({},{}), the occupancy is {}".format(pt[0],pt[1],training_Y[idx]))
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1))
gp_opt.fit(training_X,training_Y)
print("The trained hyperparameter are {}".format((gp_opt.kernel_.theta)))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
# # Plot posteriors
# plt.figure(0)
# plt.scatter(X[:train_size][0], X[:train_size][1],y[:train_size], c='k', label="Train data",
# edgecolors=(0, 0, 0))
# plt.scatter(X[train_size:][0], X[train_size:][1],y[train_size:], c='g', label="Test data",
# edgecolors=(0, 0, 0))
# X1_ = np.linspace(1, 5, 5)
# X2_ = np.linspace(1, 5, 5)
# # print(X1_)
#
# X_ = np.asanyarray([[row,col] for row in X1_ for col in X2_])
#
#
#
# fig3 = plt.figure()
# ax = fig3.add_subplot(111,projection="3d")
#
# # print(gp_fix.predict_proba(X_[:]))
# ax.scatter(X_[:,0],X_[:,1], gp_fix.predict_proba(X_[:])[:,1], c='r', marker='o')
# ax.scatter(X_[:,0],X_[:,1], gp_opt.predict_proba(X_[:])[:,1], c='b', marker='o')
# ax.set_xlabel("X1")
# ax.set_ylabel("X2")
# ax.set_zlabel("Z")
#
#
#
# print(gp_fix.predict_proba(np.reshape(X_[0,:], (-1,2))))
# fig1 = plt.figure()
# ZZ = np.empty([5,5])
# for row in range(5):
# for col in range(5):
# K = [X1_[row],X2_[col]]
# ZZ[row,col] = gp_fix.predict_proba(np.reshape(K,(-1,2)))[:,1]
#
#
# XX ,YY = np.mgrid[0:5:6j,0:5:6j]
#
# CMAP = plt.get_cmap('jet')
# plt.pcolormesh(XX,YY,ZZ,cmap=CMAP)
# cb = plt.colorbar(shrink = 1.0)
# # cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=10)
XX = np.arange(0.5,5.5,1.0)
YY = np.arange(0.5,5.5,1.0)
print(YY)
fig = plt.figure()
ZZ = np.empty([5,5])
for idx1, row in enumerate(XX):
for idx2,col in enumerate(YY):
K = [row,col]
p_occ = gp_opt.predict_proba(np.reshape(K,(-1,2)))[:,1]
occ_ = gp_opt.predict(np.reshape(K,(-1,2)))
ZZ[idx1,idx2] = p_occ
plt.text(row,col,"{}".format(round(p_occ[0],3)), color='black',fontsize=10)
if occ_ == 1:
plt.text(row + 0.25, col+0.25, "OCC", color='black', fontsize=10)
else:
plt.text(row + 0.25, col + 0.25, "FREE", color='black', fontsize=10)
# print("ZZ ({}, {}) represents the occupancy about ({}, {})".format(idx1,idx2,row,col))
# print("P(occ) is {}".format(ZZ[idx1,idx2]))
# fig2 = plt.figure()
# ZZ = np.empty([26,26])
# for row in range(26):
# for col in range(26):
# K = [row_coordinate[row],col_coordinate[col]]
# ZZ[row,col] = gp_opt.predict_proba(np.reshape(K,(-1,2)))[:,1]
#
XX ,YY = np.mgrid[0:5:6j,0:5:6j]
print(ZZ)
CMAP = plt.get_cmap('jet')
plt.pcolormesh(XX,YY,ZZ,cmap=CMAP)
cb = plt.colorbar(shrink = 1.0)
# Define the comparison
XX_c = np.arange(0.5,5.5,1.0)
YY_c = np.arange(0.5,5.5,1.0)
fig_c = plt.figure()
ZZ_c = np.empty([5,5])
ZZ_c = [[0.5,0.5,0.2,0.5,0.5],[0.5,0.2,0,0.8,0.5],[0.5,0.2,0.2,0.5,0.5],[0.2,0,0.2,0.5,0.5],[0.5,0.2,0.5,0.5,0.5]]
for idx1 in np.arange(5):
for idx2 in np.arange(5):
# print(XX[idx1],YY[idx2])
plt.text(XX_c[idx1],YY_c[idx2],"{}".format(round(ZZ_c[idx1][idx2],3)), color='black',fontsize=10)
CMAP = plt.get_cmap('jet')
plt.pcolormesh(XX,YY,ZZ_c,cmap=CMAP)
cb = plt.colorbar(shrink = 1.0)
# cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=30)
# plt.plot(X1_, X2_, gp_fix.predict_proba(X_[:][:]), 'r',
# label="Initial kernel: %s" % gp_fix.kernel_)
# plt.plot(X1_, X2_, gp_opt.predict_proba(X_[:][:]), 'b',
# label="Optimized kernel: %s" % gp_opt.kernel_)
# plt.xlabel("Feature")
# plt.ylabel("Class 1 probability")
# plt.xlim(0, 5)
# plt.ylim(-0.25, 1.5)
# plt.legend(loc="best")
#
# # # Plot LML landscape
# # plt.figure(1)
# # theta0 = np.logspace(0, 8, 30)
# # theta1 = np.logspace(-1, 1, 29)
# # Theta0, Theta1 = np.meshgrid(theta0, theta1)
# # LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
# # for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
# # LML = np.array(LML).T
# # plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
# # 'ko', zorder=10)
# # plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
# # 'ko', zorder=10)
# # plt.pcolor(Theta0, Theta1, LML)
# # plt.xscale("log")
# # plt.yscale("log")
# # plt.colorbar()
# # plt.xlabel("Magnitude")
# # plt.ylabel("Length-scale")
# # plt.title("Log-marginal-likelihood")
plt.show()
```
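The occupancy estimate above is just a binary `GaussianProcessClassifier` over 2-D cell coordinates. A minimal self-contained version of the same idea on toy data:
```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

# toy training set: points near (1, 1) are occupied (1), the rest free (0)
X = np.array([[1.0, 1.0], [1.2, 0.9], [3.0, 3.0], [4.0, 1.0], [0.5, 4.0]])
y = np.array([1, 1, 0, 0, 0])

gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0)).fit(X, y)

query = np.array([[1.1, 1.1], [3.5, 3.5]])
print(gpc.predict(query))        # hard 0/1 occupancy labels
print(gpc.predict_proba(query))  # columns: P(free), P(occupied) per query cell
```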
#### File: python/distribution_data/path_dist.py
```python
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import distribution_data.sample
class path_dist(object):
__slots__ = ["num_sample", "cost_dist"]
__typenames__ = ["int32_t", "distribution_data.sample"]
__dimensions__ = [None, ["num_sample"]]
def __init__(self):
self.num_sample = 0
self.cost_dist = []
def encode(self):
buf = BytesIO()
buf.write(path_dist._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">i", self.num_sample))
for i0 in range(self.num_sample):
assert self.cost_dist[i0]._get_packed_fingerprint() == distribution_data.sample._get_packed_fingerprint()
self.cost_dist[i0]._encode_one(buf)
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != path_dist._get_packed_fingerprint():
raise ValueError("Decode error")
return path_dist._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = path_dist()
self.num_sample = struct.unpack(">i", buf.read(4))[0]
self.cost_dist = []
for i0 in range(self.num_sample):
self.cost_dist.append(distribution_data.sample._decode_one(buf))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if path_dist in parents: return 0
newparents = parents + [path_dist]
tmphash = (0x29238fd62ba89e85+ distribution_data.sample._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if path_dist._packed_fingerprint is None:
path_dist._packed_fingerprint = struct.pack(">Q", path_dist._get_hash_recursive([]))
return path_dist._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
```
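The generated encode/decode pair frames every message as an 8-byte fingerprint followed by big-endian packed fields. A tiny stdlib-only sketch of that framing (the fingerprint value here is a placeholder; the real one comes from `_get_hash_recursive`, which also folds in the hash of `distribution_data.sample`):
```python
import struct

FINGERPRINT = struct.pack(">Q", 0x29238fd62ba89e85)  # placeholder, not the real fingerprint

# encode: fingerprint, then num_sample as a big-endian int32 (here: no samples)
frame = FINGERPRINT + struct.pack(">i", 0)

# decode: check the fingerprint, then unpack the fields in the same order
assert frame[:8] == FINGERPRINT
num_sample = struct.unpack(">i", frame[8:12])[0]
print(num_sample)  # 0
```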
#### File: src/python_vis/graph_vis_sensors.py
```python
import lcm
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from matplotlib.colors import ListedColormap
import numpy as np
import csv
from graph_data import vertex_data
from graph_data import map_data
from graph_data import path_data
from graph_data import paths_data
from graph_data import entropy_trend_data
from graph_data import range_checked_data
from graph_data import entropy_path_trend_data
from graph_data import entropy_paths_trend_data
from graph_data import sensors_data
from graph_data import local_hspts_data
fontsize_dis = 50
style_dis = "split"
num_row = 30
num_col = 30
# num_row = 10
# num_col = 10
# num_row = 5
# num_col = 5
# num_row = 20
# num_col = 20
# agents = {1:0,2:29,3:870,4:899}
# agents = {1:0,2:99}
# agents = {1:0}
# agents = {1:0,2:380,3:399}
agents = {1:0,2:29,3:870,4:899}
# tasks = {3:125, 4:199, 5:17, 6:398, 7:510, 8:409, 9:476, 10:701, 11:684, 12:284}
# tasks = {1:67,2:76,3:139,4:180,5:215,6:309}
# tasks = {1:50,2:95,3:7}
# tasks = {1:24}
tasks = {3:880,4:457,5:194,6:108,7:145,8:356,9:290,10:505,11:565,12:865}
class Vertex(object):
def __init__(self, index, pos_x, pos_y, p, ig, occu):
self.idx_ = index
self.pos_ = [pos_x, pos_y]
self.p_ = p
self.ig_ = ig
self.occupancy_ = occu
class Sample(object):
def __init__(self, sample, p):
self.sample_ = sample
self.p_ = p
class map_vis(object):
def __init__(self):
self.num_row_ = num_row
        self.num_col_ = num_col
self.path_collection_ = {}
self.num_fig = 0
self.entropy_paths_ = {}
self.sensors_ = []
self.hspts_ = []
self.tasks_ = {}
self.agents_ = {}
self.graph_init()
def graph_reset(self):
self.vertex_ = {}
self.num_agents = 0
self.agents_ = {}
self.path_collection_ = {}
self.entropy_paths_ = {}
self.sensors_ = []
self.hspts_ = []
self.tasks_ = {}
def graph_init(self):
self.graph_reset()
for idx in range(self.num_col_ * self.num_row_):
row = self.num_row_ - idx//self.num_row_ - 1
col = idx % self.num_col_
p = 0.5
ig = 0
occupancy = "UNKNOWN"
new_vertex = Vertex(idx, row, col, p, ig, occupancy)
self.vertex_[idx] = new_vertex
for ag in agents.keys():
self.agents_[ag] = agents[ag]
for tsk in tasks.keys():
self.tasks_[tsk] = tasks[tsk]
def map_data_handler(self, channel, data):
self.graph_init()
msg = map_data.decode(data)
for cell in msg.vertex_:
idx = cell.idx_
self.vertex_[idx].ig_ = cell.ig_
self.vertex_[idx].p_ = cell.p_
if self.vertex_[idx].p_ == 0.0:
self.vertex_[idx].occupancy_ = "FREE"
elif self.vertex_[idx].p_ == 1.0:
self.vertex_[idx].occupancy_ = "OCCUPIED"
def hspts_data_handler(self, channel, data):
msg = local_hspts_data.decode(data)
for pos in msg.hspts_:
self.hspts_.append(pos)
def sensors_data_handler(self, channel, data):
msg = sensors_data.decode(data)
for pos in msg.sensor_pos_:
self.sensors_.append(pos)
self.visualization()
def paths_data_handler(self, channel, data):
msg = paths_data.decode(data)
for path_idx in range(msg.num_path_):
path_ = []
if msg.path_collection_[path_idx].cell_ == []:
continue
else:
for cell in msg.path_collection_[path_idx].cell_:
path_.append(cell)
self.path_collection_[path_idx] = path_
# self.visualization()
def entropy_trend_data_handler(self, channel, data):
msg = entropy_trend_data.decode(data)
self.entropy_trend_ = []
with open("Entropy_trend.cvs", mode="w") as csv_file:
fieldnames = ["iter_t", "entropy"]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for c, entropy in enumerate(msg.entropy_, 1):
self.entropy_trend_.append(entropy)
writer.writerow({"iter_t": c, "entropy": entropy})
def range_trend_data_handler(self, channel, data):
msg = range_checked_data.decode(data)
self.range_checked_ = []
with open("Range_trend.cvs", mode="w") as csv_file:
fieldnames = ["iter_t", "range"]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for c, r in enumerate(msg.range_):
self.range_checked_.append(r)
writer.writerow({"iter_t": c, "range": r})
def entropy_trend_paths_data_handler(self, channel, data):
msg = entropy_paths_trend_data.decode(data)
self.entropy_paths_ = {}
for r in msg.entropy_paths_:
self.entropy_paths_[r.agent_idx_] = [en for en in r.entropy_path_]
self.trend_vis()
with open("Entropy_trend_paths.cvs", mode="w") as csv_file:
fieldnames = ["iter_t", "entropy1", "entropy2", "entropy3"]
# fieldnames = ["iter_t", "entropy1"]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for c in range(len(self.entropy_paths_[0])):
# writer.writerow({"iter_t": c, "entropy1": self.entropy_paths_[0][c]})
writer.writerow({"iter_t": c, "entropy1": self.entropy_paths_[0][c], "entropy2":self.entropy_paths_[1][c], "entropy3":self.entropy_paths_[2][c]})
def trend_vis(self):
if self.range_checked_:
plt.figure(num=None, figsize= (50,25), dpi=80, facecolor='w',edgecolor='k')
iterations = [i+1 for i in range(len(self.entropy_trend_))]
plt.subplot(1,2,1)
plt.plot(iterations, self.entropy_trend_, 'b-', linewidth=8)
plt.yticks(fontsize = 30)
plt.xticks(fontsize = 30)
plt.xlabel("Num of iteration", fontsize=30)
plt.ylabel("Entropy of map", fontsize=30)
plt.ylim(bottom=0)
plt.xlim(left=1,right=iterations[-1])
plt.subplot(1,2,2)
iterations1 = [j+1 for j in range(len(self.range_checked_))]
plt.plot(iterations1, self.range_checked_, 'r-', linewidth=8)
plt.yticks(fontsize = 30)
plt.xticks(fontsize = 30)
plt.xlabel("Num of iteration", fontsize=30)
plt.ylabel("Focused Range", fontsize=30)
plt.ylim(bottom=0)
plt.xlim(left=1,right=iterations1[-1])
else:
plt.figure(num=None, figsize= (25,25), dpi=80, facecolor='w',edgecolor='k')
iterations = [i+1 for i in range(len(self.entropy_trend_))]
plt.plot(iterations, self.entropy_trend_, 'b-', linewidth=8)
plt.yticks(fontsize = 30)
plt.xticks(fontsize = 30)
plt.xlabel("Num of iteration", fontsize=30)
plt.ylabel("Entropy of map", fontsize=30)
plt.ylim(bottom=0)
plt.xlim(left=1,right=iterations[-1])
plt.savefig("Graph_data_trend.png")
if self.entropy_paths_:
plt.figure(num=None, figsize=(25,25),dpi=80,facecolor='w',edgecolor='k')
colors_ = ['yellow', 'blue', 'red', 'green']
for r, path in enumerate(self.entropy_paths_.values()):
iterations2 = [i+1 for i in range(len(path))]
label_ = "Agent " + str(r)
plt.plot(iterations2, path, colors_[r], linewidth=8, label=label_)
plt.legend(fontsize = 30)
plt.yticks(fontsize = 30)
plt.xticks(fontsize = 30)
plt.xlabel("Num of iteration", fontsize=30)
plt.ylabel("Entropy of path", fontsize=30)
plt.ylim(bottom=0)
plt.xlim(left=1,right=iterations[-1])
plt.savefig("Entropy_trend_paths.png")
plt.close('all')
def vis(self):
# Draw grid map
x_min = 0
x_max = self.num_col_
y_min = 0
y_max = self.num_row_
plt.grid(color='black', linestyle='-', linewidth=1)
plt.xlim(x_min,x_max)
plt.ylim(y_min,y_max)
plt.xticks(np.arange(x_min, x_max+1, 10))
plt.yticks(np.arange(y_min, y_max+1, 10))
# Draw each cell
for cell in self.vertex_.values():
# plt.text(cell.pos_[1] + 0.25, cell.pos_[0] + 0.25,"{}".format(round(cell.p_,3)), color='black', fontsize=fontsize_dis)
if (cell.occupancy_ == "UNKNOWN"):
x_region = np.arange(cell.pos_[1], cell.pos_[1] + 2, 1)
y_region = cell.pos_[0]
plt.fill_between(x_region, y_region, y_region +1, facecolor='gray', interpolate=True)
if (cell.occupancy_ == "INTERESTED"):
x_region = np.arange(cell.pos_[1], cell.pos_[1] + 2, 1)
y_region = cell.pos_[0]
plt.fill_between(x_region, y_region, y_region +1, facecolor='yellow', interpolate=True)
if (cell.occupancy_ == "OCCUPIED"):
x_region = np.arange(cell.pos_[1], cell.pos_[1] + 2, 1)
y_region = cell.pos_[0]
plt.fill_between(x_region, y_region, y_region +1, facecolor='black', interpolate=True)
# Draw agent position
for agent_idx in self.agents_.keys():
row = (self.num_row_ - self.agents_[agent_idx] // self.num_row_ - 1)
col = self.agents_[agent_idx] % self.num_row_
plt.text(col + 0.05, row + 0.12,"V {}".format(agent_idx), color='yellow', fontsize=fontsize_dis, fontweight='bold')
x_region = np.arange(col, col + 2, 1)
y_region = row
plt.fill_between(x_region, y_region, y_region +1, facecolor='red', interpolate=True)
for tsk in self.tasks_.keys():
row = (self.num_row_ - self.tasks_[tsk] // self.num_row_ - 1)
col = self.tasks_[tsk] % self.num_row_
plt.text(col + 0.55, row + 0.1,"P{}".format(tsk+1), color='black', fontsize=fontsize_dis, fontweight='bold')
x_region = np.arange(col, col + 2, 1)
y_region = row
plt.fill_between(x_region, y_region, y_region +1, facecolor='yellow', interpolate=True)
# Draw path
if self.path_collection_:
for idx in range(len(self.path_collection_)):
path = self.path_collection_[idx]
if path != []:
# Mark Start cell
if idx == 4:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]+0.65, 'S1', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 5:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]+0.65, 'S2', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 6:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]+0.65, 'S3', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 7:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]+0.65, 'S4', color='blue', fontsize=fontsize_dis, fontweight='bold')
# Mark End Cell
if idx <= 3:
end_id = path[-1]
end_vertex = self.vertex_[end_id]
plt.text(end_vertex.pos_[1] + 0.1, end_vertex.pos_[0] + 0.1, 'F', color='green', fontsize=fontsize_dis, fontweight='bold')
# Draw the path
for v_idx in range(len(path) - 1):
v1_id = path[v_idx]
v1 = self.vertex_[v1_id]
v2_id = path[v_idx+1]
v2 = self.vertex_[v2_id]
# plt.text(v1.pos_[1] + 0.25, v1.pos_[0] + 0.25,"{}".format(round(v1.p_,3)), color='black', fontsize=30)
y = np.linspace(v1.pos_[0] + 0.5, v2.pos_[0] + 0.5, 100)
x = np.linspace(v1.pos_[1] + 0.5, v2.pos_[1] + 0.5, 100)
if idx <= 3:
plt.plot(x,y,'y-', linewidth=15)
else:
plt.plot(x+0.1,y+0.1,'b--', linewidth=15)
else:
continue
def map_vis(self):
# Draw grid map
x_min = 0
x_max = self.num_col_
y_min = 0
y_max = self.num_row_
plt.grid(color='black', linestyle='-', linewidth=1)
plt.xlim(x_min,x_max)
plt.ylim(y_min,y_max)
plt.xticks(np.arange(x_min, x_max+1, 10))
plt.yticks(np.arange(y_min, y_max+1, 10))
# Draw each cell
# for cell in self.vertex_.values():
# plt.text(cell.pos_[1] + 0.1, cell.pos_[0] + 0.1,"{}".format(round(cell.p_,3)), color='black', fontweight = 'bold', fontsize=fontsize_dis)
# Draw the heat map
self.vertex_[0].p_ = 1.0
ZZ = np.empty([self.num_col_, self.num_row_])
for cell in self.vertex_.values():
ZZ[cell.pos_[1],cell.pos_[0]] = cell.p_
self.vertex_[0].p_ = 0.0
# The number here is the num of col and row + 1
XX, YY = np.mgrid[0:self.num_col_:31j, 0:self.num_row_:31j]
CMAP = plt.get_cmap('binary')
plt.pcolormesh(XX,YY,ZZ,cmap=CMAP)
cb = plt.colorbar(shrink = 1.0)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=50)
# Draw agent position
for agent_idx in self.agents_.keys():
row = (self.num_row_ - self.agents_[agent_idx] // self.num_row_ - 1)
col = self.agents_[agent_idx] % self.num_row_
self.vertex_[self.agents_[agent_idx]].p_ = 1.0
plt.text(col - 0.1, row + 0.65,"V {}".format(agent_idx), color='yellow', fontsize=fontsize_dis, fontweight='bold')
x_region = np.arange(col, col + 2, 1)
y_region = row
plt.fill_between(x_region, y_region, y_region +1, facecolor='red', interpolate=True)
for tsk in self.tasks_.keys():
row = (self.num_row_ - self.tasks_[tsk] // self.num_row_ - 1)
col = self.tasks_[tsk] % self.num_row_
plt.text(col + 0.55, row + 0.1,"P{}".format(tsk), color='black', fontsize=fontsize_dis, fontweight='bold')
x_region = np.arange(col, col + 2, 1)
y_region = row
plt.fill_between(x_region, y_region, y_region +1, facecolor='yellow', interpolate=True)
# Draw path
if self.path_collection_:
for idx in range(len(self.path_collection_)):
path = self.path_collection_[idx]
if path != []:
# Mark Start cell
if idx == 4:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]-0.2, 'S1', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 5:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]-0.2, 'S2', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 6:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]-0.2, 'S3', color='blue', fontsize=fontsize_dis, fontweight='bold')
if idx == 7:
start_id = path[0]
start_vertex = self.vertex_[start_id]
plt.text(start_vertex.pos_[1]+0.40, start_vertex.pos_[0]-0.2, 'S4', color='blue', fontsize=fontsize_dis, fontweight='bold')
# Mark End Cell
if idx <= 3:
end_id = path[-1]
end_vertex = self.vertex_[end_id]
plt.text(end_vertex.pos_[1] - 0.1, end_vertex.pos_[0] + 0.25, 'F', color='green', fontsize=fontsize_dis, fontweight='bold')
# Draw the path
for v_idx in range(len(path) - 1):
v1_id = path[v_idx]
v1 = self.vertex_[v1_id]
v2_id = path[v_idx+1]
v2 = self.vertex_[v2_id]
# plt.text(v1.pos_[1] + 0.25, v1.pos_[0] + 0.25,"{}".format(round(v1.p_,3)), color='black', fontsize=30)
y = np.linspace(v1.pos_[0] + 0.5, v2.pos_[0] + 0.5, 100)
x = np.linspace(v1.pos_[1] + 0.5, v2.pos_[1] + 0.5, 100)
if idx <= 3:
plt.plot(x,y,'y-', linewidth=15)
else:
plt.plot(x+0.1,y+0.1,'b--', linewidth=15)
# else:
# continue
def ig_vis(self):
# Draw grid map
x_min = 0
x_max = self.num_col_
y_min = 0
y_max = self.num_row_
plt.grid(color='black', linestyle='-', linewidth=1)
plt.xlim(x_min,x_max)
plt.ylim(y_min,y_max)
plt.xticks(np.arange(x_min, x_max+1, 10))
plt.yticks(np.arange(y_min, y_max+1, 10))
# Draw each cell
# for cell in self.vertex_.values():
# # if cell.idx_ in self.hspts_:
# if cell.ig_ < 0:
# cell.ig_ = 0.0
# plt.text(cell.pos_[1] + 0.1, cell.pos_[0] + 0.1,"{}".format(round(cell.ig_,3)), color='black', fontweight = 'bold', fontsize=fontsize_dis)
# Draw the heat map
ZZ = np.empty([self.num_col_, self.num_row_])
for cell in self.vertex_.values():
ZZ[cell.pos_[1],cell.pos_[0]] = cell.ig_
# The number here is the num of col and row + 1
XX, YY = np.mgrid[0:self.num_col_:31j, 0:self.num_row_:31j]
cmap = pl.cm.jet
CMAP = cmap(np.arange(cmap.N))
# CMAP = plt.get_cmap('coolwarm')
CMAP[:,-1] = np.linspace(0,1,cmap.N)
CMAP = ListedColormap(CMAP)
plt.pcolormesh(XX,YY,ZZ,cmap=CMAP)
cb = plt.colorbar(shrink = 1.0)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=50)
def visualization(self):
self.num_fig = self.num_fig + 1
if style_dis == "combined":
plt.figure(num=None, figsize=(55, 25), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(1,2,1)
self.vis()
plt.xticks(fontsize = fontsize_dis)
plt.yticks(fontsize = fontsize_dis)
plt.subplot(1,2,2)
self.ig_vis()
plt.xticks(fontsize = fontsize_dis)
plt.yticks(fontsize = fontsize_dis)
plt.suptitle("Occupancy grid map at iteration t = {}".format(self.num_fig/7 + 1), fontsize = fontsize_dis)
for sensor in self.sensors_:
cv = self.vertex_[sensor]
row = cv.pos_[1] + 0.5
col = cv.pos_[0] + 0.5
plt.plot(row,col,"*b",markersize=40)
plt.savefig("image{}.png".format(self.num_fig))
print("{} figures have been saved".format(self.num_fig))
plt.close('all')
elif style_dis == "split":
plt.figure(num=None, figsize=(30, 25), dpi=80, facecolor='w', edgecolor='k')
self.map_vis()
plt.xticks(fontsize = fontsize_dis)
plt.yticks(fontsize = fontsize_dis)
# plt.title("Occupancy grid map at iteration t = {}".format(self.num_fig), fontsize = fontsize_dis)
plt.savefig("gridmapimage{}.png".format(self.num_fig))
plt.figure(num=None, figsize=(30, 25), dpi=80, facecolor='w', edgecolor='k')
self.ig_vis()
plt.xticks(fontsize = fontsize_dis)
plt.yticks(fontsize = fontsize_dis)
for sensor in self.sensors_:
cv = self.vertex_[sensor]
row = cv.pos_[1] + 0.5
col = cv.pos_[0] + 0.5
plt.plot(row,col,"*b",markersize=50)
# plt.title("Information gain at iteration t = {}".format(self.num_fig), fontsize = fontsize_dis)
plt.savefig("igimage{}.png".format(self.num_fig))
print("{} figures have been saved".format(self.num_fig))
plt.close('all')
def main():
lc = lcm.LCM()
map = map_vis()
Entropy_Trend_data = "EntropyTrendData"
Range_Trend_data = "RangeTrendData"
Entropy_Trend_Paths_data = "EntropyTrendPathsData"
subscription = lc.subscribe(Entropy_Trend_data, map.entropy_trend_data_handler)
subscription = lc.subscribe(Range_Trend_data, map.range_trend_data_handler)
subscription = lc.subscribe(Entropy_Trend_Paths_data, map.entropy_trend_paths_data_handler)
for i in range(100000):
Graph_data = str(i) + "GraphData"
Path_Collection_data = str(i) + "PathCollectionData"
Sensors_data = str(i) + "SensorsData"
Hspots_data = str(i) + "HsptsData"
subscription = lc.subscribe(Graph_data, map.map_data_handler)
subscription = lc.subscribe(Path_Collection_data, map.paths_data_handler)
subscription = lc.subscribe(Hspots_data, map.hspts_data_handler)
subscription = lc.subscribe(Sensors_data, map.sensors_data_handler)
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
lc.unsubscribe(subscription)
if __name__ == '__main__':
main()
```
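`map_vis` and `ig_vis` both draw the grid as a `pcolormesh` over a mesh with one more point per axis than there are cells (hence the `31j` above for a 30x30 map). A stripped-down sketch of that pattern with random cell probabilities standing in for the occupancy values:
```python
import numpy as np
import matplotlib.pyplot as plt

num_row = num_col = 30
ZZ = np.random.rand(num_col, num_row)  # per-cell occupancy probability (placeholder data)
# mesh needs one more point than cells in each direction
XX, YY = np.mgrid[0:num_col:(num_col + 1) * 1j, 0:num_row:(num_row + 1) * 1j]

plt.pcolormesh(XX, YY, ZZ, cmap='binary')
plt.colorbar(shrink=1.0)
plt.savefig("gridmap_sketch.png")
```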
|
{
"source": "jfardello/dyn53",
"score": 2
}
|
#### File: dyn53/tests/__init__.py
```python
import unittest
from . import test_cli, test_client
def suite():
test_suite = unittest.TestSuite()
test_suite.addTests(unittest.makeSuite(test_cli.TestCli))
test_suite.addTests(unittest.makeSuite(test_client.TestClient))
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
```
|
{
"source": "jfardello/Sphinxtrap",
"score": 2
}
|
#### File: sphinxtrap/ext/rawtoc.py
```python
import sys
try:
    from html.parser import HTMLParser, HTMLParseError
except ImportError:
    try:
        from HTMLParser import HTMLParser, HTMLParseError  # Python 2
    except ImportError:
        # HTMLParseError was removed in Python 3.5; define a compatible fallback
        from html.parser import HTMLParser
        class HTMLParseError(Exception):
            pass
import logging
import re
from sphinx import addnodes
from docutils.nodes import emphasis, reference, Text
from docutils.parsers.rst.roles import set_classes
class Link(dict):
'''Minimal nested link representation.'''
def __init__(self, *args, **kwds):
default = {'href': None, 'text': None, 'current': False,
'depth': None, 'childs': []}
default.update(kwds)
self.update(*args, **default)
class ParseLinks(HTMLParser):
'''Parses sphinx's toctree html output into a list of nested Link
objects, to simplify the bootstrap stuff inside Jinja templates.'''
def __init__(self):
self.logger = logging.getLogger("%s.%s" %
(__name__, self.__class__.__name__))
HTMLParser.__init__(self)
self.depth = 0
self.tag_stack = []
self.links = []
self.current = False
self._tag = None
self._tmp = None
self._dataq = ""
def handle_starttag(self, tag, attrs):
if tag == 'ul':
self.logger.debug("adding %s to tag_stack" % tag)
self.tag_stack.append(tag)
self.depth += 1
if tag == 'li':
self.logger.debug("adding %s to tag_stack" % tag)
self.tag_stack.append(tag)
for name, value in attrs:
if name == 'class' and 'toctree-l' in value:
classes = value.split()
self.depth = int(classes[0].replace('toctree-l', ''))
if 'current' in classes:
self.current = True
else:
self.current = False
if tag == 'a':
self.logger.debug("adding %s to tag_stack" % tag)
self.tag_stack.append(tag)
self._tag = tag
for name, value in attrs:
if name == 'href':
href = value
self._tmp = (Link(href=href, depth=self.depth,
current=self.current))
def handle_data(self, data):
if self._tag == 'a':
data = " ".join(data.split())
self.logger.debug("handle 'a tag' data: %s" % data)
if self._tmp['text'] is None:
seen = False
self._tmp['text'] = data
else:
seen = True
self._tmp['text'] += " " + data
if not seen:
if self._tmp['depth'] > 1:
links = self.links
loop = 0
for pos in range(self._tmp['depth']):
if loop == 0:
links = links[-1:]
else:
links = links[-1:][0]['childs']
loop += 1
links.append(self._tmp)
else:
self.links.append(self._tmp)
def handle_endtag(self, tag):
try:
if tag in ["li", "a"] and tag == self.tag_stack[-1:][0]:
self.logger.debug("poping %s from stack" % tag)
self.tag_stack.pop()
self._tag = None
if tag == 'ul' and tag == self.tag_stack[-1:][0]:
self.tag_stack.pop()
self.depth -= 1
except IndexError:
raise HTMLParseError('Unbalanced html tags.')
def html_page_context(app, pagename, templatename, context, doctree):
""" Handler for the html-page-context signal, adds a raw_toctree function
to the context."""
def raw_toctree(collapse=False):
return build_raw_toctree(app.builder, pagename, prune=False,
collapse=collapse)
def raw_localtoc(collapse=False):
try:
self_toc = app.builder.env.get_toc_for(pagename, app.builder)
toc = app.builder.render_partial(self_toc)['fragment']
pl = ParseLinks()
pl.feed(toc)
index = pl.links[0]
childs = list(index['childs'])
index['childs'] = []
childs.insert(0, index)
return childs
except (IndexError, KeyError):
return []
context['raw_toctree'] = raw_toctree
context['raw_localtoc'] = raw_localtoc
def build_raw_toctree(builder, docname, prune, collapse):
""" Returns a list of nested Link objects representing the toctree."""
env = builder.env
doctree = env.get_doctree(env.config.master_doc)
toctrees = []
for toctreenode in doctree.traverse(addnodes.toctree):
toctree = env.resolve_toctree(docname, builder, toctreenode,
prune=prune, collapse=collapse,
includehidden=True)
toctrees.append(toctree)
if not toctrees:
return None
retv = toctrees[0]
for toctree in toctrees[1:]:
if toctree:
retv.extend(toctree.children)
pl = ParseLinks()
pl.feed(builder.render_partial(retv)['fragment'])
return pl.links
def icon_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
options.update({'classes': ["icon-" + x for x in text.split(",")]})
options['classes'].append('icon-holder')
set_classes(options)
node = emphasis(**options)
return [node], []
def btn_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
m = re.match(r'(.*)\<(.*)\>(,.*){0,}', text)
if m:
name, ref, classes = m.groups()
        if ref == '':
msg = inliner.reporter.error('The ref portion of the btn role'
' cannot be none')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref_cls = []
em_cls = []
if classes:
cls = classes.strip(',').split(',')
em_cls = [x for x in cls if x.startswith("icon-")]
ref_cls = [x for x in cls if not x.startswith("icon-")]
if 'btn' not in ref_cls:
ref_cls.append('btn')
options.update({'classes': ref_cls})
set_classes(options)
node = reference(rawtext, name, refuri=ref, **options)
if len(em_cls) > 0:
em_cls.append('icon-holder')
em_opts = {'classes': em_cls}
set_classes(em_opts)
node.insert(0, emphasis(**em_opts))
node.insert(1, Text(" "))
return [node], []
def setup(app): # pragma: no cover
app.info('Adding the icon role')
app.add_role('icon', icon_role)
app.info('Adding the btn role')
app.add_role('btn', btn_role)
app.info('Adding the raw_toctree Jinja function.')
app.connect('html-page-context', html_page_context)
return
```
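The `btn` role expects its text in the form `Label <target>,class1,class2`, as exercised in the tests below. A quick standalone check of the regex used in `btn_role` (the URL is a made-up example):
```python
import re

# role text looks like: "Label <target>,class1,class2"  (classes are optional)
BTN_RE = r'(.*)\<(.*)\>(,.*){0,}'

m = re.match(BTN_RE, "Download <https://example.org/get>,btn-success,icon-globe")
name, ref, classes = m.groups()
print(name.strip())                   # Download
print(ref)                            # https://example.org/get
print(classes.strip(',').split(','))  # ['btn-success', 'icon-globe']
```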
#### File: Sphinxtrap/tests/mocking.py
```python
import sys, os
__pyver = sys.version_info[0] * 10 + sys.version_info[1]
if __pyver <= 26:
import unittest2 as unittest
else:
import unittest
if __pyver < 33:
import mock
else:
from unittest import mock
from docutils.nodes import emphasis, reference
from sphinxtrap.ext.rawtoc import Link, ParseLinks, icon_role, btn_role
from sphinxtrap.ext.rawtoc import build_raw_toctree, html_page_context
def resolve_doctree(*args, **kwargs):
'''Emulates sphinx/docutils render_partial'''
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, "test.html")) as fp:
html = fp.read()
return html
#Mocking sphinx's stuff
doctree = mock.Mock()
doctree.traverse.side_effect = lambda x: ['index']
builder = mock.Mock()
builder.env.get_doctree.side_effect = lambda x: doctree
builder.env.resolve_doctree.side_effect = resolve_doctree
builder.render_partial.side_effect = lambda x: {'fragment':resolve_doctree()}
inliner = mock.Mock()
inliner.problematic= mock.Mock()
inliner.reporter= mock.Mock()
inliner.reporter.error = mock.Mock()
class TestSphinxtrap(unittest.TestCase):
def test_node(self):
lk = Link(text="gabbagabbahey", href="https://foo.com")
lk2 = Link(text="gabbagabbahey", href="https://foo.com", depth=1)
lk["childs"].append(lk2)
self.assertEqual(lk['childs'][0].get("current"), False)
self.assertEqual(lk['href'], "https://foo.com")
def test_parser(self):
pl = ParseLinks()
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, "test.html")) as fp:
pl.feed(fp.read())
self.assertEqual(pl.links[7]['childs'][0]['href'],
"#compression-algorithms")
def test_build_raw_toctree(self):
links = build_raw_toctree(builder, "test", False, False)
self.assertEqual(links[7]['childs'][0]['href'],
"#compression-algorithms")
def test_icon_role(self):
icon = icon_role("foo", ":icon:`foo`", "foo", 100,
inliner, options={'classes':[]}, content=[])
self.assertTrue(isinstance(icon[0][0], emphasis ))
def test_btn_role(self):
l_str = ":btn:`Foo <http://foo.org/>,btn-success,icon-globe`"
btn = btn_role("foo", l_str, l_str, 100, inliner,
options={'classes':[]})
self.assertTrue(isinstance(btn[0][0], reference))
def test_html_page_context(self):
global context
context = {}
app = mock.Mock()
app.builder = builder
html_page_context(app, 'index', 'foo', context, None)
local = context['raw_toctree']()
toc = context['raw_localtoc']()
self.assertFalse(toc[0]['current'])
self.assertFalse(local[0]['current'])
```
|
{
"source": "j-faria/ACTIN",
"score": 3
}
|
#### File: actin/actin_files/ac_config.py
```python
from __future__ import print_function
from __future__ import division
import os, sys
import codecs
def read_conf(config_file, calc_index):
"""
Reads data from config file and selects the lines needed for the
selected indices.
Parameters:
-----------
config_file : string
Name of the configuration file with path.
calc_index : list of strings
List of index ids to be calculated selected from the indices provided in the configuration file.
Returns:
--------
sel_lines : dictionary
Dictionary containing the identification of the indices selected and
the parameters of the spectral lines required for each index.
Each key entry is a list of parameters where the list indices form the
rows related to the same spectral line identified with the key 'ln_id'
which is related to the spectral index identified by 'ind_id'.
The returned keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
ind_id str : Index identification.
ind_var str : Variable assigned to a given line to be used in
the index equation. Ex: 'L1', 'L2', etc, for the core
lines, and 'R1', 'R2', etc, for reference lines.
ln_id str : Spectral line identification.
        ln_c       float : Constant to be multiplied by the flux of the line.
ln_ctr float : Wavelength of the line centre [angstroms].
ln_win float : Bandpass around the line centre to be used in
the flux integration [angstroms].
bandtype str : Type of bandpass used to integrate flux.
========== ========================================================
"""
print()
print("LOADING DATA FROM CONFIG FILE")
print("-----------------------------")
try:
f = codecs.open(config_file, 'r', encoding="utf-8")
except FileNotFoundError as fnf_err:
print("*** ERROR: Config file not found:")
print(fnf_err)
print("***", config_file)
sys.exit()
except TypeError as ty_err:
print("*** ERROR: Config file is not 'str'")
print("***", ty_err)
sys.exit()
# Ignores commented lines, reads header, then stops when dash is found
for line in f:
if line.startswith('#'): pass
elif line.startswith('-'): break
else: header = line
# Convert the lines in the config table into list of strings
columns = []
for line in f:
if not line.strip(): pass # ignore empty new lines
else:
            line = line.strip()  # remove white space at the start and end of the line
            column = line.replace("\t\t", "\t")  # convert double tabs to single tabs
            column = column.split()  # split the cleaned line into a list of strings
columns.append(column)
f.close()
# Associate each key (header) in table to a column
lines = {}
keys = header.split() # keys as provided in the table in config file
for k in range(len(keys)):
lines[keys[k]] = []
for i in range(len(columns)):
lines[keys[k]].append(columns[i][k])
# Converts numerical values in the list to floats and leaves the text strings
for k in range(len(lines)):
for i, x in enumerate(lines[keys[k]]):
try: lines[keys[k]][i] = float(x)
except ValueError: pass
# Get indices ids from the function arguments
sel_ind = calc_index
    # Check whether the selected indices have lines in the config table
for k in range(len(sel_ind)):
if sel_ind[k] not in lines['ind_id']:
sys.exit("*** ERROR: Index {} is not in the config file.".format(sel_ind[k]))
    # Initiate a dictionary with the table headers as keys and empty lists as values
sel_lines = {}
for i in range(len(keys)): sel_lines[keys[i]] = []
# Select the rows that belong to the selected index
rows = len(lines['ind_id'])
for k in range(rows):
if lines['ind_id'][k] in sel_ind:
print(lines['ln_id'][k])
for i in range(len(keys)):
sel_lines[keys[i]].append(lines[keys[i]][k])
else: pass
return sel_lines
```
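For reference, `read_conf` expects a whitespace-separated table: comment lines, a header row with the keys listed in the docstring, a dashed separator, then one row per spectral line. A minimal sketch of such a file (all ids and numbers below are made-up placeholders, not lines shipped with ACTIN):
```python
import tempfile
import textwrap

sample = textwrap.dedent("""\
    # anything before the dashed line is comments plus the header row
    ind_id ind_var ln_id ln_c ln_ctr ln_win bandtype
    ------------------------------------------------
    I_Demo L1 DemoLine 1.0 6000.00 1.5 sq
    I_Demo R1 DemoRef 1.0 6050.00 20.0 sq
    """)

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(sample)
    cfg_path = f.name

print(cfg_path)
# sel_lines = read_conf(cfg_path, ["I_Demo"])  # would return the two placeholder rows above
```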
#### File: ACTIN/actin/actin.py
```python
from __future__ import print_function
from __future__ import division
import sys, os
import glob
import time
import datetime
import numpy as np
import astropy.io.fits as pyfits
import argparse
import pkg_resources
import appdirs
# Directory of this file
path = os.path.dirname(os.path.realpath(__file__))
# Global variables
#import ac_settings as ac_set
# Global variables
from .actin_files import ac_settings as ac_set
from .actin_files import ac_config
from .actin_files import ac_read_data
from .actin_files import ac_get_win
from .actin_files import ac_calc_ind
from .actin_files import ac_save
from .actin_files import ac_plot_time as ac_plot
from .actin_files import ac_tools
from matplotlib import pylab as plt
# initiate global variables:
ac_set.init()
# Configuration file:
config_file = os.path.join(path, "config_lines.txt")
# Version file:
version_file = os.path.join(path, "VERSION")
# Print preamble:
version = ac_set.preamble(version_file)
def actin_file(file, calc_index=None, rv_in=None, config_file=config_file, save_output=False, ln_plts=False, obj_name=None, targ_list=None, del_out=False, frac=True):
"""
Runs ACTIN for one fits file.
Accepts files of types: 'S2D', 'S1D', 'e2ds', 's1d', 's1d_*_rv', 'ADP', and 'rdb'.
Recognizes fits files from HARPS, HARPS-N and ESPRESSO instruments.
"""
print()
print("--------------------")
print("EXECUTING ACTIN_FILE")
print("--------------------")
if type(file) is list: file = file[0]
# Check if file is from known instrument
tel, instr = ac_tools.get_instr(file)
if instr == False: pass
elif instr in ac_set.instr: pass
else:
msg="*** ERROR:\nUnrecognized instrument. ACTIN only accepts HARPS, HARPS-N or ESPRESSO. To read from a different instrument convert data to rdb file with the headers: 'obj', 'obs_date', 'bjd', 'wave', 'flux', 'error_pixel' (optional)."
sys.exit(msg)
# Checking if object in targ_list is the same as the object in fits file
if targ_list:
check = ac_tools.check_targ(file, targets=targ_list)
if check is True: pass
elif check is False: return
# Read config file and retrieve lines information
if calc_index:
sel_lines = ac_config.read_conf(config_file, calc_index)
# Read data from file
data = ac_read_data.read_data(file, rv_in=rv_in, obj_name=obj_name)
if not data:
return
# Check output file for duplicates
if save_output is not False and data['file_type'] != "rdb":
dupl = ac_save.check_duplicate(data['obj'], data['obs_date'], data['instr'], data['file_type'], save_output)
if dupl is True: return
if calc_index:
# Check selected lines for spectral range and orders
test = ac_calc_ind.check_lines(data['wave'], sel_lines)
if not test:
print("*** ACTION: Ignoring measurement.")
return
# Calculate flux in the required lines
sel_lines = ac_calc_ind.calc_flux_lines(data, sel_lines, ln_plts=ln_plts, frac=frac)
# Calculate chosen indices
index = ac_calc_ind.calc_ind(sel_lines)
if not calc_index:
index = None
sel_lines = None
# Write output to rdb file in "out_dir"/"obj"
if save_output is not False:
rdb_file = ac_save.save_data(data, index, out_dir=save_output)
else: rdb_file = None
info = {}
info['config_file'] = config_file
info['file_type'] = data['file_type']
info['version'] = version
info['source_path'] = os.path.split(file)[0]
info['tel'] = data['tel']
info['instr'] = data['instr']
info['obj'] = data['obj']
options = {}
options['frac'] = frac
output = {}
output['data'] = data
output['index'] = index
output['sel_lines'] = sel_lines
output['info'] = info
output['options'] = options
output['rdb_file'] = rdb_file
return output
def actin(files, calc_index=None, rv_in=None, config_file=None, save_output=False, ln_plts=False, obj_name=None, targ_list=None, del_out=False, frac=True, test=False, save_plots=False):
"""
Runs 'actin_file' function for one or multiple fits files, for one or multiple stars.
Accepts fits files from HARPS, HARPS-N, and ESPRESSO instruments.
Accepts files of types: 'S1D', 'S2D', 'e2ds', 's1d', 's1d_*_rv', 'ADP', and 'rdb'.
"""
print()
print("----------------")
print(" STARTING ACTIN ")
print("----------------")
start_time = time.time()
# Get config file from installation or input
if config_file is None:
cfg_file = get_config()
else:
cfg_file = config_file
print()
print("Using spectral lines from configuration file:")
print(cfg_file)
# test values can be 'S1D', 'S2D', 'e2ds', 's1d', 'adp', or 'rdb'
    # this needs to run before anything that uses 'files'
if test:
calc_index, files = ac_tools.test_actin(test, path, calc_index)
if not files:
raise Exception("*** ERROR: There are no files to read")
# Make lists to be iterated below
if isinstance(files, str): files = [files]
if rv_in is None:
rv_in = [rv_in]*len(files)
elif type(rv_in) is not list:
rv_in = [float(rv_in)] ### added float to use as module
else: pass
# Check if files exist
ac_tools.check_files(files)
# Remove output file
if del_out:
print()
print("Executing ac_tools.remove_output:")
print("Searching output files to delete...")
#ac_tools.remove_output(files, save_output, targ_list)
if obj_name:
for f in files:
_, instr = ac_tools.get_instr(f)
file_type = ac_tools.get_file_type(f)
if isinstance(obj_name, str):
star_name = obj_name
ac_tools.remove_output2(star_name, instr, file_type, save_output)
elif isinstance(obj_name, (list, np.ndarray)):
for star_name in obj_name:
ac_tools.remove_output2(star_name, instr, file_type, save_output)
elif not obj_name:
for f in files:
star_name = ac_tools.get_target(f)
_, instr = ac_tools.get_instr(f)
file_type = ac_tools.get_file_type(f)
ac_tools.remove_output2(star_name, instr, file_type, save_output)
# Option to make line plots directory the same as the data output dir
if ln_plts == 'same':
ln_plts = save_output
total_files = len(files)
# Organize files by path to star and file type
files_list = ac_tools.files_by_star_and_ftype(files)
n_files_t = 0
# star directories
for k in range(len(files_list)):
# file types
for i in range(len(files_list[k])):
n_files = 0
# files organized by dir and file type
for j in range(len(files_list[k][i])):
n_files += 1
n_files_t += 1
# Run actin file
output = actin_file(files_list[k][i][j],
calc_index,
rv_in=rv_in[j],
config_file=cfg_file,
save_output=save_output,
ln_plts=ln_plts,
obj_name=obj_name,
targ_list=targ_list,
del_out=del_out,
frac=frac)
# POST-PROCESSING:
if output:
# Dictionaries for each file type
sel_lines = output['sel_lines']
info = output['info']
options = output['options']
rdb_file = output['rdb_file']
# Save log and line info files
ac_save.save_log(info, options, n_files, out_dir=save_output)
ac_save.save_line_info(info, sel_lines, out_dir=save_output)
if save_plots:
# Save time-series plots
ac_plot.plt_time(info, out_dir=save_output, rmv_flgs=False, save_plt=True)
ac_plot.plt_time_mlty(info, out_dir=save_output, rmv_flgs=False, save_plt=True, hdrs=calc_index)
else: pass
if n_files_t != total_files:
print()
print("*** ERROR: Number of ACTIN calls different than number of files.")
print("n_files_t:", n_files_t)
print("total_files:", total_files)
elapsed_time = (time.time() - start_time)/60
# Summary:
print("\n---------------------------------")
print("Fractional pixels:\t{}".format(frac))
print("Files analysed:\t\t{}".format(total_files))
print("Save output:\t\t{}".format(save_output))
print("Elapsed time:\t\t{:.4f} min".format(elapsed_time))
return
def get_config():
"""
Check for existence of ACTIN folder and config file and creates them if not present. Returns the path to the config file.
"""
cfg_dir = appdirs.user_config_dir('ACTIN')
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
cfg_file = os.path.join(cfg_dir, 'config_lines.txt')
if not os.path.isfile(cfg_file):
create_user_config(cfg_file)
return cfg_file
def create_user_config(cfg_file):
"""
Create the user's config file
"""
from shutil import copyfile ###
src = pkg_resources.resource_stream(__name__, 'config_lines.txt')
copyfile(src.name, cfg_file)
def main():
"""
Main function, call actin function with arguments from terminal.
"""
# initiate the parser
parser = argparse.ArgumentParser()
# add short and long argument
parser.add_argument('--files', '-f', help='Read file(s)', nargs='+')
parser.add_argument('--calc_index', '-i', help="Index id to calculate as designated by 'ind_id' in config_index.txt.", nargs='+', default=None)
parser.add_argument('--rv_in', '-rv', help="RV value to calibrate wavelength. If False (default) try to read RV from CCF file.", nargs='+', default=None, type=float)
parser.add_argument('--config_file', '-cf', help='Path to config_file, or False (default) read config file from standard directory.', default=None)
parser.add_argument('--save_output', '-s', help='Path to output directory of data table, or False (default).', default=False)
parser.add_argument('--ln_plts', '-lp', help="Path to directory to save line plots. If 'same' saves line plots to same directory of data output. If 'show' only shows the plots. If 'False' (default) does not save or show line plots", default=False)
parser.add_argument('--obj_name', '-obj', help='Give target a name that overrides the one from the fits files.', default=None)
parser.add_argument('--targ_list', '-tl', help='Give a list of stars to select from fits files.', nargs='+', default=None)
parser.add_argument('--del_out', '-del', help='Delete output data file if True.', default=False, type=lambda x: (str(x).lower() == 'true'))
parser.add_argument('--test', '-t', help='Tests actin using the provided fits files in the "test_files" directory. Options are "e2ds", "s1d", and "adp"', default=False)
parser.add_argument('--frac', '-frc', help='Turns fractional pixel on (True, default) or off (False).', default=True, type=lambda x: (str(x).lower() == 'true'))
parser.add_argument('--save_plots', '-sp', help="If True saves time-series and multi-plots to same directory as 'save_output'.", default=False, type=lambda x: (str(x).lower() == 'true'))
# read arguments from the command lines
args = parser.parse_args()
actin(files = args.files,
calc_index = args.calc_index,
rv_in = args.rv_in,
config_file = args.config_file,
save_output = args.save_output,
ln_plts = args.ln_plts,
obj_name = args.obj_name,
targ_list = args.targ_list,
del_out = args.del_out,
test = args.test,
frac = args.frac,
save_plots = args.save_plots)
if __name__ == "__main__":
ac_set.preamble(version_file, verbose=True)
main()
```
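For quick reference, the `actin` function defined above can also be called directly from Python instead of through `main()`. The sketch below mirrors the keyword arguments that `main()` passes; the import path, file names and index ids are placeholders and must be adapted to the actual package layout and data.
```python
# Minimal usage sketch (import path, file names and index ids are placeholders).
from actin import actin  # assuming the package exposes the actin() function above

actin(files=['star_e2ds_A.fits'],      # placeholder FITS file(s)
      calc_index=['I_CaII', 'I_Ha'],   # index ids as defined in config_index.txt
      rv_in=None,                      # let ACTIN read the RV from the CCF file
      config_file=None,                # use the config file from get_config()
      save_output='actin_output',      # directory for the output tables
      ln_plts=False,
      obj_name=None,
      targ_list=None,
      del_out=False,
      frac=True,
      save_plots=False)
```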
|
{
"source": "j-faria/authors",
"score": 3
}
|
#### File: authors/authors/database.py
```python
import os
import sqlite3
here = os.path.dirname(os.path.abspath(__file__))
DBFILE = os.path.join(here, 'authors_institutes.db')
def _load_connection(conn, db):
creating_connection = conn is None
if creating_connection:
conn = sqlite3.connect(db)
return conn, creating_connection
def get_tables(db, conn=None):
conn, created_connection = _load_connection(conn, db)
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [record[0] for record in cursor.fetchall()[1:]]
if created_connection:
conn.close()
return tables
def get_columns(db, table, conn=None):
conn, created_connection = _load_connection(conn, db)
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM {table}")
columns = [record[0] for record in cursor.description]
if created_connection:
conn.close()
return columns
def create_database(db):
conn, created_connection = _load_connection(None, db)
sql1 = """
CREATE TABLE IF NOT EXISTS authors (
author_id INTEGER PRIMARY KEY,
author_name TEXT UNIQUE NOT NULL,
acknow TEXT
);"""
sql2 = """
CREATE TABLE IF NOT EXISTS institutes (
institute_id INTEGER PRIMARY KEY,
institute_address TEXT UNIQUE NOT NULL,
label TEXT
);
"""
sql3 = """
CREATE TABLE IF NOT EXISTS authors_institutes (
id INTEGER PRIMARY KEY,
author_name INTEGER REFERENCES authors(author_name),
institute_address INTEGER REFERENCES institutes(institute_address),
UNIQUE(author_name, institute_address)
);
"""
for sql in (sql1, sql2, sql3):
try:
conn.execute(sql)
except Exception as e:
raise e
if created_connection:
conn.close()
def has_required_tables(db):
tables = get_tables(db)
has_tables = (
'authors' in tables,
'institutes' in tables,
'authors_institutes' in tables,
)
return all(has_tables)
def check_database(db):
if not has_required_tables(db):
create_database(db)
def find_similar_institutes(db, threshold=0.7, interactive_replace=False):
from collections import defaultdict
from difflib import SequenceMatcher
from .authors import query_institute
conn, created_connection = _load_connection(None, db)
cursor = conn.cursor()
cursor.execute("SELECT * FROM institutes")
institutes = [row[1] for row in cursor.fetchall()]
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
similarities = defaultdict(list)
for i, institute in enumerate(institutes):
others = institutes[i + 1:]
# others = [x for j, x in enumerate(institutes) if j != i]
for other in others:
if similar(institute, other) > threshold:
similarities[institute].append(other)
if created_connection:
conn.close()
if interactive_replace:
for k, sim in similarities.items():
print(f'1 - {k}')
for i, s in enumerate(sim, start=2):
print(f'{i} - {s}')
print(' ', 'a) replace 1 with 2')
print(' ', 'b) replace 2 with 1')
print(' ', 'c) ignore')
option = input(' your option: ')
if option == 'a':
print(f'replacing\n {k}\nwith\n {s}')
elif option == 'b':
print(f'replacing\n {s}\nwith\n {k}')
else:
print('ignoring')
print()
else:
for k, sim in similarities.items():
print(k)
print('seems similar to')
for s in sim:
print(' ', s)
print()
return similarities
```
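A short usage sketch of the helpers above, run against a throwaway database file (paths and inserted values are made up; `find_similar_institutes` is left out since it also imports from the rest of the package):
```python
# Usage sketch for the helpers above, on a temporary database file.
import os
import sqlite3
import tempfile

db = os.path.join(tempfile.mkdtemp(), 'test_authors.db')
create_database(db)               # creates the three tables defined above
print(get_tables(db))             # note: the helper skips the first sqlite_master row
print(get_columns(db, 'authors')) # ['author_id', 'author_name', 'acknow']

conn = sqlite3.connect(db)
conn.execute("INSERT INTO authors (author_name, acknow) VALUES (?, ?)",
             ('A. Author', 'Acknowledgement text'))
conn.commit()
conn.close()
```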
#### File: authors/authors/latex_pdf_utils.py
```python
import os
import subprocess
_here_ = os.path.abspath(os.path.dirname(__file__))
def preview_AandA(text):
template = os.path.join(_here_, 'templates', 'aa', 'aa-template.tex')
output = os.path.join(_here_, 'templates', 'aa', 'aa.tex')
assert os.path.exists(template)
with open(template) as fin:
with open(output, 'w') as fout:
for line in fin.readlines():
if "!!authors-institutes!!" in line:
print(text, file=fout)
else:
print(line, end='', file=fout)
path = os.path.join(_here_, 'templates', 'aa')
subprocess.call('latexmk -f -pdf aa.tex'.split(), cwd=path)
pdf = os.path.join(_here_, 'templates', 'aa', 'aa.pdf')
os.system(f'evince {pdf} &')
```
|
{
"source": "j-faria/EPRVlnZ",
"score": 2
}
|
#### File: EPRVlnZ/scripts/check_priors.py
```python
import sys
import numpy as np
import matplotlib.pyplot as plt
from astroML.plotting import hist
def do_plot(data, name, column=1, save=None):
plt.figure()
bins = 100 #np.linspace(data.min(), data.max(), 100)
plt.hist(data, bins=bins, color='black', histtype='step', density=True)  # density replaces the removed 'normed' kwarg
# if log: plt.xscale('log')
# hist(data, bins='knuth', color='black', histtype='step', normed=True)
if save:
plt.savefig(save)
else:
plt.show()
def get_column(column):
return np.loadtxt('sample.txt', unpack=True, usecols=(column-1,))
if __name__ == '__main__':
column = int(sys.argv[1])
try:
log = sys.argv[2] == 'log'
except IndexError:
log = False
with open('sample.txt') as f:
# with open('posterior_sample.txt') as f:
firstline = f.readline()
firstline = firstline.strip().replace('#','')
names = firstline.split()
try:
print('Histogram of column %d: %s' % (column, names[column-1]))
except IndexError:
print('Histogram of column %d' % column)
data = get_column(column)
# data = np.loadtxt('sample.txt', unpack=True, usecols=(column-1,))
if log:
data = data[np.nonzero(data)[0]]
data = np.log(data)
# if log:
# bins = np.logspace(np.log(data.min()), np.log(data.max()), 100)
# else:
do_plot(data, column)
```
|
{
"source": "j-faria/george.fit",
"score": 2
}
|
#### File: george.fit/georgefit/mcmc.py
```python
import numpy as np
import emcee
def mcmc(gp, t, y, burn=200, sample=200, nwalkers=36):
def lnprob(p):
# Trivial uniform prior.
if np.any((-100 > p[1:]) + (p[1:] > 100)):
return -np.inf
# Update the kernel and compute the lnlikelihood.
gp.set_parameter_vector(p)
return gp.lnlikelihood(y, quiet=True)
gp.compute(t)
# Set up the sampler.
ndim = len(gp)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
p0 = gp.get_parameter_vector() \
+ 1e-4 * np.random.randn(nwalkers, ndim)
print("Running burn-in (%d steps)" % burn)
p0, _, _ = sampler.run_mcmc(p0, burn)
print("Running production chain (%d steps)" % sample)
sampler.run_mcmc(p0, sample)
return sampler
```
#### File: george.fit/georgefit/optimization.py
```python
import numpy as np
import scipy.optimize as op
def optimization(gp, t, y, **minimize_kwargs):
# Define the objective function (negative log-likelihood in this case).
def nll(p):
gp.set_parameter_vector(p)
ll = gp.log_likelihood(y, quiet=True)
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y, quiet=True)
# You need to compute the GP once before starting the optimization.
gp.compute(t)
# Print the initial ln-likelihood.
print('Initial log-likelihood:', gp.log_likelihood(y))
# Run the optimization routine.
p0 = gp.get_parameter_vector()
minimize_kwargs.setdefault('jac', grad_nll) # provide our own
minimize_kwargs.setdefault('method', "L-BFGS-B")
results = op.minimize(nll, p0, **minimize_kwargs)
# Update the kernel and print the final log-likelihood.
gp.set_parameter_vector(results.x)
print('Final log-likelihood:', gp.log_likelihood(y))
return results
```
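A hedged usage sketch tying the helpers in this repository together on synthetic data. It assumes the `george` package (with its `kernels` module) is installed; the kernel choice and the data are illustrative only.
```python
# Usage sketch (assumes the george package; kernel and data are illustrative).
import numpy as np
import george
from george import kernels

np.random.seed(42)
t = np.sort(np.random.uniform(0, 10, 50))
y = np.sin(t) + 0.1 * np.random.randn(t.size)

gp = george.GP(kernels.ExpSquaredKernel(1.0))
result = optimization(gp, t, y)   # maximum-likelihood fit using the function above
print(result.x)                   # optimized parameter vector
# sampler = mcmc(gp, t, y)        # posterior sampling with the helper in mcmc.py
```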
|
{
"source": "j-faria/IAporto-seminar-digest",
"score": 3
}
|
#### File: j-faria/IAporto-seminar-digest/create_email_html.py
```python
from parse_Gcalendar import *
from datetime import datetime, timedelta
import sys
# select one of the following:
IA_pole = 'IA' # unified digest
# IA_pole = 'Porto'
# IA_pole = 'Lisbon'
# download the updated ICS file from the Google calendar
update_calendarICS()
def get_week_dates(today=None):
if today is None:
today = datetime.now()
start = today - timedelta(days=today.weekday())
end = start + timedelta(days=4)
weekdates = start.strftime('%d/%b/%Y') + ' -- ' + end.strftime('%d/%b/%Y')
return start, end, weekdates
start, end, weekdates = get_week_dates()
fileid = 'WeekDigest.' + start.strftime('%d%b%Y') + '.' + end.strftime('%d%b%Y') + '.html'
if IA_pole == 'IA':
with open('email_template.html') as f:
email_template = f.read()
elif IA_pole == 'Porto':
with open('email_template_Porto.html') as f:
email_template = f.read()
elif IA_pole == 'Lisbon':
with open('email_template_Lisbon.html') as f:
email_template = f.read()
else:
print 'set IA_pole!'
sys.exit(1)
google_calendar_link = 'http://www.google.com/calendar/render?action=TEMPLATE'
google_calendar_link+= '&text=%s'
google_calendar_link+= '&dates=%s/%s'
google_calendar_link+= '&details=%s'
google_calendar_link+= '&location=%s'
google_calendar_link+= '&sf=true&output=xml'
seminar_content = ''
iteration = 0
while True:
datein = raw_input('Date of the seminar (DD-MM-YYYY) [empty to stop]: ')
if datein == '':
break
if iteration == 0:
date_of_week = datetime.strptime(datein, '%d-%m-%Y')
start, end, weekdates = get_week_dates(today=date_of_week)
fileid = 'WeekDigest.' + start.strftime('%d%b%Y') + '.' + end.strftime('%d%b%Y') + '.html'
print fileid
title, presenter, abstract, startime, dtstart, location, icslink = get_info_from_gcalendar(datein, IA_pole, type_of_event='S')
link = ''
seminar_content += '<h9><b>%s</b></h9> <br />\n' % title
seminar_content += '<h9>%s</h9> <br />\n' % presenter
seminar_content += '<p>%s</p>\n' % abstract
seminar_content += '<br />\n'
seminar_content += '<b><i>%s, %s</i></b>\n' % (location, startime)
seminar_content += '<br />\n'
datestart = time.strftime('%Y%m%dT%H%M00Z', dtstart)
end = datetime(*dtstart[:6])+timedelta(hours=1)
dateend = end.strftime('%Y%m%dT%H%M00Z')
link = google_calendar_link % (title, datestart, dateend, presenter, location)
# print link
seminar_content += 'Save this event to your calendar:\n'
# seminar_content += '<a href="%s">Outlook</a> -- \n' % icslink
# seminar_content += '<a href="%s">iCalendar</a> -- \n' % icslink
seminar_content += '<a href="%s">Google Calendar</a>\n' % link
seminar_content += '<hr />\n'
seminar_content += '<hr />\n'
seminar_content += '<br />\n'
print '\n'
iteration += 1
email_template = email_template.replace('{{seminars}}', seminar_content)
email_template = email_template.replace('{{weekdates}}', weekdates)
progclub_content = ''
PC = raw_input('Is there a programmers club this week (y/[n]) ')
if PC == 'y':
progclub = '\n<p class="lead">Also this week, there will be the Programmers Club:</p>\n'
datein = raw_input('Date of the programmers club (DD-MM-YYYY): ')
title, presenter, abstract, startime, dtstart, location, icslink = get_info_from_gcalendar(datein, IA_pole, type_of_event='PC')
link = ''
progclub_content += '<h9><b>%s</b></h9> <br />\n' % title
progclub_content += '<h9>%s</h9> <br />\n' % presenter
progclub_content += '<p>%s</p>\n' % abstract
progclub_content += '<br />\n'
progclub_content += '<b><i>%s, %s</i></b>\n' % (location, startime)
progclub_content += '<br />\n'
datestart = time.strftime('%Y%m%dT%H%M00Z', dtstart)
end = datetime(*dtstart[:6])+timedelta(hours=1)
dateend = end.strftime('%Y%m%dT%H%M00Z')
link = google_calendar_link % (title, datestart, dateend, presenter, location)
# print link
progclub_content += 'Save this event to your calendar:\n'
# progclub_content += '<a href="%s">Outlook</a> -- \n' % icslink
# progclub_content += '<a href="%s">iCalendar</a> -- \n' % icslink
progclub_content += '<a href="%s">Google Calendar</a>\n' % link
progclub_content += '<hr />\n'
progclub_content += '<hr />\n'
progclub_content += '<br />\n'
print '\n'
progclub_content = progclub + progclub_content
email_template = email_template.replace('{{programmersclub}}', progclub_content)
else:
email_template = email_template.replace('{{programmersclub}}', '\n')
extra_content = ''
extra = raw_input('Anything else going on? (y/[n]) ')
if extra == 'y':
extra_content += '<table>\n'
extra_content += '<td><br>\n'
extra_content += ' <p class="lead">In addition, the following events are also scheduled for this week.</p>\n'
extra_content += ' <dl>\n'
datein = raw_input('Date of the event (DD-MM-YYYY): ')
title, presenter, abstract, startime, dtstart, location, icslink = get_info_from_gcalendar(datein, IA_pole, type_of_event='other')
extra_event = ''
extra_event += ' <dt><h9><b>%s</b></h9> <br></dt>\n' % title
extra_event += ' <dd><h9>%s</h9> <br>\n' % presenter
extra_event += ' <b><i>%s, %s</i></b>\n' % (location, startime)
extra_event += ' <br />\n'
datestart = time.strftime('%Y%m%dT%H%M00Z', dtstart)
end = datetime(*dtstart[:6])+timedelta(hours=1)
dateend = end.strftime('%Y%m%dT%H%M00Z')
link = google_calendar_link % (title, datestart, dateend, presenter, location)
# print link
extra_event += 'Save this event to your calendar:\n'
# progclub_content += '<a href="%s">Outlook</a> -- \n' % icslink
# progclub_content += '<a href="%s">iCalendar</a> -- \n' % icslink
extra_event += '<a href="%s">Google Calendar</a>\n' % link
extra_event += ' </dd>\n'
extra_content += extra_event
extra_content += ' </dl>\n'
extra_content += ' <hr style="background-color:#d9d9d9; border:none; color:#d9d9d9; height:1px" bgcolor="#d9d9d9" height="1">\n'
extra_content += ' <hr style="background-color:#d9d9d9; border:none; color:#d9d9d9; height:1px" bgcolor="#d9d9d9" height="1">\n'
extra_content += '</td>\n'
extra_content += '</table>'
email_template = email_template.replace('{{extrathings}}', extra_content)
else:
email_template = email_template.replace('{{extrathings}}', '\n')
if (seminar_content=='' and progclub_content=='' and extra_content==''):
print
print 'It seems there is nothing happening...'
sys.exit(0)
# print repr(email_template)
email_template = unicode(email_template, 'utf8', 'replace')
# inline the CSS
from premailer import transform
email_template = transform(email_template)
print 'Successfully inlined CSS'
with open(fileid, 'w') as f:
f.write(email_template.encode('utf8'))
# f.write(email_template)
print 'Created the file %s' % fileid
```
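The Google Calendar link used throughout the script is a plain URL template filled with the `%` operator. A small self-contained illustration follows (the event details are made up and, as in the script, no URL-encoding is applied):
```python
# Stand-alone illustration of the calendar-link template used above.
link = 'http://www.google.com/calendar/render?action=TEMPLATE'
link += '&text=%s&dates=%s/%s&details=%s&location=%s&sf=true&output=xml'

title = 'Exoplanet seminar'     # made-up event details
datestart, dateend = '20180101T140000Z', '20180101T150000Z'
presenter = 'A. Speaker'
location = 'Auditorium'

print(link % (title, datestart, dateend, presenter, location))
```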
|
{
"source": "j-faria/iCCF",
"score": 2
}
|
#### File: iCCF/iCCF/meta_ESPRESSO.py
```python
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sys, os
import time as pytime
import subprocess
import multiprocessing
from itertools import product
from bisect import bisect_left, bisect_right
from glob import glob
from scipy.interpolate import interp1d
from .iCCF import Indicators
from .utils import doppler_shift_wave, get_ncores
import numba
def makeCCF(spec_wave, spec_flux, mask_wave=None, mask_contrast=None,
mask=None, mask_width=0.82, rvmin=None, rvmax=None, drv=None,
rvarray=None):
"""
Cross-correlate an observed spectrum with a mask template.
For each RV value in rvmin:rvmax:drv (or in rvarray), the wavelength axis of
the mask is Doppler shifted. The mask is then projected onto the spectrum
(using the provided `mask_width`) and the sum of the flux that goes through
the mask "holes" is calculated. This sum is weighted by the mask contrast
(which corresponds to line depths) to optimally extract the Doppler
information.
Parameters
----------
spec_wave : array
The wavelength of the observed spectrum.
spec_flux : array
The flux of the observed spectrum.
mask_wave : array, optional
The central wavelength of the mask.
mask_contrast : array, optional
The flux (contrast) of the mask.
mask : array (..., 3), optional
The mask as an array with lambda1, lambda2, depth.
mask_width : float, optional [default=0.82]
Width of the mask "holes", in velocity in km/s.
rvmin : float, optional
Minimum radial velocity for which to calculate the CCF [km/s].
rvmax : float, optional
Maximum radial velocity for which to calculate the CCF [km/s].
drv : float, optional
The radial-velocity step [km/s].
rvarray : array, optional
The radial velocities at which to calculate the CCF [km/s]. If this is
provided, `rvmin`, `rvmax` and `drv` are ignored.
Returns
-------
rv : array The radial-velocity where the CCF was calculated [km/s]. These
RVs refer to a shift of the mask -- positive values indicate that the
mask has been red-shifted and negative numbers indicate a blue-shift of
the mask.
ccf : array
The values of the cross-correlation function.
"""
if rvarray is None:
if rvmin is None or rvmax is None or drv is None:
raise ValueError("Provide `rvmin`, `rvmax`, and `drv`.")
# check order of rvmin and rvmax
if rvmax <= rvmin:
raise ValueError("`rvmin` should be smaller than `rvmax`.")
rvarray = np.arange(rvmin, rvmax + drv / 2, drv)
wave_resolution = spec_wave[1] - spec_wave[0]
if mask is None:
if mask_wave is None:
raise ValueError("Provide the mask wavelengths in `mask_wave`.")
if mask_contrast is None:
raise ValueError(
"Provide the mask wavelengths in `mask_contrast`.")
mask = np.c_[doppler_shift_wave(mask_wave, -mask_width / 2),
doppler_shift_wave(mask_wave, mask_width /
2), mask_contrast]
ccfarray = np.zeros_like(rvarray)
for i, RV in enumerate(rvarray):
nlines = 0
CCF = 0.0
mask_rv_shifted = np.copy(mask)
mask_rv_shifted[:, :2] = doppler_shift_wave(mask[:, :2], RV)
# region of intersection between the RV-shifted mask and the spectrum
region = (spec_wave[0] < mask_rv_shifted[:, 0]) & (
mask_rv_shifted[:, 1] < spec_wave[-1])
mask_rv_shifted = mask_rv_shifted[region]
# for every line in the mask
for mask_line_start, mask_line_end, mask_line_depth in mask_rv_shifted:
if mask_line_end + wave_resolution >= spec_wave[-1]:
break
# find the limiting indices in spec_wave, corresponding to the start
# and end wavelength of the mask
linePixelIni = bisect_left(spec_wave, mask_line_start)
linePixelEnd = bisect_right(spec_wave, mask_line_end)
# fraction of the spectrum inside the mask hole at the start
lineFractionIni = (
spec_wave[linePixelIni] - mask_line_start) / wave_resolution
# fraction of the spectrum inside the mask hole at the end
lineFractionEnd = 1 - abs(
mask_line_end - spec_wave[linePixelEnd]) / wave_resolution
CCF += mask_line_depth * np.sum(
spec_flux[linePixelIni:linePixelEnd])
CCF += mask_line_depth * lineFractionIni * spec_flux[linePixelIni -
1]
CCF += mask_line_depth * lineFractionEnd * spec_flux[linePixelEnd +
1]
nlines += 1
ccfarray[i] = CCF
return rvarray, ccfarray
@numba.njit
def espdr_compute_CCF_numba_fast(ll, dll, flux, error, blaze, quality, RV_table,
mask_wave, mask_contrast, berv, bervmax,
mask_width=0.5):
c = 299792.458
nx_s2d = flux.size
# ny_s2d = 1 #! since this function computes only one order
n_mask = mask_wave.size
nx_ccf = len(RV_table)
ccf_flux = np.zeros_like(RV_table)
ccf_error = np.zeros_like(RV_table)
ccf_quality = np.zeros_like(RV_table)
dll2 = dll / 2.0 # cpl_image_divide_scalar_create(dll,2.);
ll2 = ll - dll2 # cpl_image_subtract_create(ll,dll2);
#? this mimics the pipeline (note that cpl_image_get indexes starting at 1)
imin = 1; imax = nx_s2d
while(imin < nx_s2d and quality[imin-1] != 0): imin += 1
while(imax > 1 and quality[imax-1] != 0): imax -= 1
# my tests to speed things up
# imin = np.where(quality == 0)[0][0]
# imax = len(quality) - np.where(quality[::-1] == 0)[0][0] - 1
# print(imin, imax)
if imin >= imax:
return
#? note that cpl_image_get indexes starting at 1, hence the "-1"s
llmin = ll[imin + 1 - 1] / (1 + berv / c) * (1 + bervmax / c) / (1 + RV_table[0] / c)
llmax = ll[imax - 1 - 1] / (1 + berv / c) * (1 - bervmax / c) / (1 + RV_table[nx_ccf - 1] / c)
# print('blaze[0]:', blaze[0])
# print('flux[:10]:', flux[:10])
# print(ll[0])
# print(imin, imax)
# print(llmin, llmax)
imin = 0; imax = n_mask - 1
#? turns out cpl_table_get indexing starts at 0...
while (imin < n_mask and mask_wave[imin] < (llmin + 0.5 * mask_width / c * llmin)): imin += 1
while (imax >= 0 and mask_wave[imax] > (llmax - 0.5 * mask_width / c * llmax)): imax -= 1
# print(imin, imax)
# for (i = imin; i <= imax; i++)
for i in range(imin, imax + 1):
#? cpl_array_get also indexes starting at 0
llcenter = mask_wave[i] * (1. + RV_table[nx_ccf // 2] / c)
index_center = 1
while(ll[index_center-1] < llcenter): index_center += 1
# my attempt to speed it up
# index_center = np.where(ll < llcenter)[0][-1] +1
contrast = mask_contrast[i]
w = contrast * contrast
# print(i, w)
# print('llcenter:', llcenter)
# print('index_center:', index_center)
for j in range(0, nx_ccf):
llcenter = mask_wave[i] * (1. + RV_table[j] / c)
llstart = llcenter - 0.5 * mask_width / c * llcenter
llstop = llcenter + 0.5 * mask_width / c * llcenter
# print(llstart, llcenter, llstop)
index1 = 1
while(ll2[index1-1] < llstart): index1 += 1
# index1 = np.where(ll2 < llstart)[0][-1] +1
index2 = index1
while (ll2[index2-1] < llcenter): index2 += 1
# index2 = np.where(ll2 < llcenter)[0][-1] +1
index3 = index2
while (ll2[index3-1] < llstop): index3 += 1;
# index3 = np.where(ll2 < llstop)[0][-1] +1
# print(index1, index2, index3)
# sys.exit(0)
k = j
# if (i == imax and j == 0):
# print("index1=", index1)
# print("index2=", index2)
# print("index3=", index3)
for index in range(index1, index3):
ccf_flux[k] += w * flux[index-1] / blaze[index-1] * blaze[index_center-1]
ccf_flux[k] += w * flux[index1 - 1 - 1] * (ll2[index1-1] - llstart) / dll[index1 - 1 - 1] / blaze[index1 - 1 - 1] * blaze[index_center - 1]
ccf_flux[k] -= w * flux[index3 - 1 - 1] * (ll2[index3-1] - llstop) / dll[index3 - 1 - 1] / blaze[index3 - 1 - 1] * blaze[index_center - 1]
ccf_error[k] += w * w * error[index2 - 1 - 1] * error[index2 - 1 - 1]
ccf_quality[k] += quality[index2 - 1 - 1]
# my_error = cpl_image_power(*CCF_error_RE,0.5);
ccf_error = np.sqrt(ccf_error)
return ccf_flux, ccf_error, ccf_quality
def espdr_compute_CCF_fast(ll, dll, flux, error, blaze, quality, RV_table,
mask, berv, bervmax, mask_width=0.5):
c = 299792.458
nx_s2d = flux.size
# ny_s2d = 1 #! since this function computes only one order
n_mask = mask.size
nx_ccf = len(RV_table)
ccf_flux = np.zeros_like(RV_table)
ccf_error = np.zeros_like(RV_table)
ccf_quality = np.zeros_like(RV_table)
dll2 = dll / 2.0 # cpl_image_divide_scalar_create(dll,2.);
ll2 = ll - dll2 # cpl_image_subtract_create(ll,dll2);
#? this mimics the pipeline (note that cpl_image_get indexes starting at 1)
imin = 1; imax = nx_s2d
while(imin < nx_s2d and quality[imin-1] != 0): imin += 1
while(imax > 1 and quality[imax-1] != 0): imax -= 1
# my tests to speed things up
# imin = np.where(quality == 0)[0][0]
# imax = len(quality) - np.where(quality[::-1] == 0)[0][0] - 1
# print(imin, imax)
if imin >= imax:
return
#? note that cpl_image_get indexes starting at 1, hence the "-1"s
llmin = ll[imin + 1 - 1] / (1. + berv / c) * (1. + bervmax / c) / (1. + RV_table[0] / c)
llmax = ll[imax - 1 - 1] / (1. + berv / c) * (1. - bervmax / c) / (1. + RV_table[nx_ccf - 1] / c)
imin = 0; imax = n_mask - 1
#? turns out cpl_table_get indexing starts at 0...
while (imin < n_mask and mask['lambda'][imin] < (llmin + 0.5 * mask_width / c * llmin)): imin += 1
while (imax >= 0 and mask['lambda'][imax] > (llmax - 0.5 * mask_width / c * llmax)): imax -= 1
# print(imin, imax)
# for (i = imin; i <= imax; i++)
for i in range(imin, imax + 1):
#? cpl_array_get also indexes starting at 0
llcenter = mask['lambda'][i] * (1. + RV_table[nx_ccf // 2] / c)
# index_center = 1
# while(ll[index_center-1] < llcenter): index_center += 1
# my attempt to speed it up
index_center = np.where(ll < llcenter)[0][-1] +1
contrast = mask['contrast'][i]
w = contrast * contrast
# print(i, w)
for j in range(0, nx_ccf):
llcenter = mask['lambda'][i] * (1. + RV_table[j] / c)
llstart = llcenter - 0.5 * mask_width / c * llcenter
llstop = llcenter + 0.5 * mask_width / c * llcenter
# print(llstart, llcenter, llstop)
# index1 = 1
# while(ll2[index1-1] < llstart): index1 += 1
index1 = np.where(ll2 < llstart)[0][-1] +1
# index2 = index1
# while (ll2[index2-1] < llcenter): index2 += 1
index2 = np.where(ll2 < llcenter)[0][-1] +1
# index3 = index2
# while (ll2[index3-1] < llstop): index3 += 1;
index3 = np.where(ll2 < llstop)[0][-1] +1
# print(index1, index2, index3)
# sys.exit(0)
k = j
for index in range(index1, index3):
ccf_flux[k] += w * flux[index-1] / blaze[index-1] * blaze[index_center-1]
ccf_flux[k] += w * flux[index1 - 1 - 1] * (ll2[index1-1] - llstart) / dll[index1 - 1 - 1] / blaze[index1 - 1 - 1] * blaze[index_center - 1]
ccf_flux[k] -= w * flux[index3 - 1 - 1] * (ll2[index3-1] - llstop) / dll[index3 - 1 - 1] / blaze[index3 - 1 - 1] * blaze[index_center - 1]
ccf_error[k] += w * w * error[index2 - 1 - 1] * error[index2 - 1 - 1]
ccf_quality[k] += quality[index2 - 1 - 1]
# my_error = cpl_image_power(*CCF_error_RE,0.5);
ccf_error = np.sqrt(ccf_error)
return ccf_flux, ccf_error, ccf_quality
def find_dll(s2dfile):
hdu = fits.open(s2dfile)
dllfile = hdu[0].header['HIERARCH ESO PRO REC1 CAL7 NAME']
if os.path.exists(dllfile):
return dllfile
elif len(glob(dllfile + '*')) > 1:
return glob(dllfile + '*')[0]
else:
date = hdu[0].header['DATE-OBS']
def calculate_s2d_ccf(s2dfile, rvarray, order='all',
mask_file='ESPRESSO_G2.fits', mask=None, mask_width=0.5,
debug=False):
hdu = fits.open(s2dfile)
if order == 'all':
if debug:
print('can only debug one order at a time...')
return
orders = range(hdu[1].data.shape[0])
return_sum = True
else:
assert isinstance(order, int), 'order should be integer'
orders = (order, )
return_sum = False
BERV = hdu[0].header['HIERARCH ESO QC BERV']
BERVMAX = hdu[0].header['HIERARCH ESO QC BERVMAX']
dllfile = hdu[0].header['HIERARCH ESO PRO REC1 CAL7 NAME']
blazefile = hdu[0].header['HIERARCH ESO PRO REC1 CAL13 NAME']
print('need', dllfile)
print('need', blazefile)
dllfile = glob(dllfile + '*')[0]
# CCF mask
if mask is None:
mask = fits.open(mask_file)[1].data
else:
assert 'lambda' in mask, 'mask must contain the "lambda" key'
assert 'contrast' in mask, 'mask must contain the "contrast" key'
# get the flux correction stored in the S2D file
keyword = 'HIERARCH ESO QC ORDER%d FLUX CORR'
flux_corr = [hdu[0].header[keyword % (o + 1)] for o in range(170)]
ccfs, ccfes = [], []
for order in orders:
# WAVEDATA_AIR_BARY
ll = hdu[5].data[order, :]
# mean wavelength of each order
llc = np.mean(hdu[5].data, axis=1)
dll = fits.open(dllfile)[1].data[order, :]
# dll = doppler_shift_wave(dll, -BERV, f=1.+1.55e-8)
# fit a degree-7 polynomial (8 coefficients) to the flux correction
corr_model = np.polyval(np.polyfit(llc, flux_corr, 7), llc)
flux = hdu[1].data[order, :]
error = hdu[2].data[order, :]
quality = hdu[3].data[order, :]
blaze = fits.open(blazefile)[1].data[order, :]
y = flux * blaze / corr_model[order]
# y = np.loadtxt('flux_in_pipeline_order0.txt')
ye = error * blaze / corr_model[order]
if debug:
return ll, dll, y, ye, blaze, quality, rvarray, mask, BERV, BERVMAX
print('calculating ccf (order %d)...' % order)
ccf, ccfe, _ = espdr_compute_CCF_fast(ll, dll, y, ye, blaze, quality,
rvarray, mask, BERV, BERVMAX,
mask_width=mask_width)
ccfs.append(ccf)
ccfes.append(ccfe)
if return_sum:
ccf = np.concatenate([ccfs, np.array(ccfs).sum(axis=0, keepdims=True)])
ccfe = np.concatenate([ccfes, np.zeros(len(rvarray)).reshape(1, -1)])
# what to do with the errors?
return ccf, ccfe
else:
return np.array(ccfs), np.array(ccfes)
def find_file(file, ssh=None):
print('Looking for file:', file)
# first try here:
if os.path.exists(file) or os.path.exists(file + '.fits'):
print('\tfound it in current directory')
return glob(file + '*')[0]
similar = glob(file + '*.fits')
if len(similar) > 0:
file = similar[0]
print(f'\tfound a similar file in current directory ({file})')
return file
# try on the local machine
try:
found = subprocess.check_output(f'locate {file}'.split())
found = found.decode().split()
print('\tfound file:', found[-1])
return found[-1]
except subprocess.CalledProcessError:
if ssh is None:
raise FileNotFoundError(file) from None
# try on a server with SSH
if ssh is not None:
if '@' not in ssh:
raise ValueError('ssh should be in the form "user@host"')
# user, host = ssh.split('@')
locate_cmd = f'ssh {ssh} locate {file}'
try:
found = subprocess.check_output(locate_cmd.split())
found = found.decode().split()
print('\tfound file:', ssh + ':' + found[-1])
except subprocess.CalledProcessError:
raise FileNotFoundError(file) from None
full_path = found[-1]
scp_cmd = f'scp {ssh}:{full_path} .'
try:
subprocess.check_call(scp_cmd.split())
return os.path.split(full_path)[-1]
except subprocess.CalledProcessError:
raise RuntimeError(f'Could not scp {file} from {ssh}') from None
def _dowork(args, debug=False):
order, kwargs = args
data = kwargs['data']
dll = kwargs['dll'][order]
blaze = kwargs['blaze'][order]
corr_model = kwargs['corr_model']
rvarray = kwargs['rvarray']
mask = kwargs['mask']
mask_wave = mask['lambda'].astype(np.float64)
mask_contrast = mask['contrast'].astype(np.float64)
BERV = kwargs['BERV']
BERVMAX = kwargs['BERVMAX']
mask_width = kwargs['mask_width']
# WAVEDATA_AIR_BARY
ll = data[5][order, :]
flux = data[1][order, :]
error = data[2][order, :]
quality = data[3][order, :]
y = flux * blaze / corr_model[order]
ye = error * blaze #/ corr_model[order]
# ccf, ccfe, ccfq = espdr_compute_CCF_fast(ll, dll, y, ye, blaze, quality,
# rvarray, mask, BERV, BERVMAX,
# mask_width=mask_width)
ccf, ccfe, ccfq = espdr_compute_CCF_numba_fast(
ll, dll, y, ye, blaze, quality, rvarray, mask_wave, mask_contrast,
BERV, BERVMAX, mask_width=mask_width
)
return ccf, ccfe, ccfq
def calculate_s2d_ccf_parallel(s2dfile, rvarray, order='all',
mask_file='ESPRESSO_G2.fits', mask_width=0.5,
ncores=None, verbose=True, full_output=False,
ignore_blaze=True, ssh=None):
"""
Calculate the CCF between a 2D spectrum and a mask. This function can look up
the necessary files (locally or over SSH) and can perform the calculation in
parallel, depending on the value of `ncores`.
Arguments
---------
s2dfile : str
The name of the S2D file
rvarray : array
RV array where to calculate the CCF
order : str or int
Either 'all' to calculate the CCF for all orders, or the order
mask_file : str
The fits file containing the CCF mask (may be in the current directory)
mask_width : float
The width of the mask "lines" in km/s
ncores : int
Number of CPU cores to use for the calculation (default: all available)
verbose : bool, default True
Print messages and a progress bar during the calculation
full_output : bool, default False
Return all the quantities that went into the CCF calculation (some
extracted from the S2D file)
ignore_blaze : bool, default True
If True, the function completely ignores any blaze correction and takes
the flux values as is from the S2D file
ssh : str
SSH information in the form "user@host" to look for required
calibration files in a server. If the files are not found locally, the
function tries the `locate` and `scp` commands to find and copy the
file from the SSH host
"""
hdu = fits.open(s2dfile)
norders, order_len = hdu[1].data.shape
if ncores is None:
ncores = get_ncores()
print(f'Using {ncores} CPU cores for the calculation')
if order == 'all':
orders = range(hdu[1].data.shape[0])
return_sum = True
else:
assert isinstance(order, int), 'order should be integer'
orders = (order, )
return_sum = False
BERV = hdu[0].header['HIERARCH ESO QC BERV']
BERVMAX = hdu[0].header['HIERARCH ESO QC BERVMAX']
## find and read the blaze file
if ignore_blaze:
blaze = np.ones_like(hdu[1].data)
else:
blazefile = hdu[0].header['HIERARCH ESO PRO REC1 CAL12 NAME']
blazefile = find_file(blazefile, ssh)
blaze = fits.open(blazefile)[1].data
## dll used to be stored in a separate file (?), now it's in the S2D
# dllfile = hdu[0].header['HIERARCH ESO PRO REC1 CAL16 NAME']
# dllfile = find_file(dllfile, ssh)
# dll = fits.open(dllfile)[1].data.astype(np.float64)
dll = hdu[7].data
## CCF mask
mask_file = find_file(mask_file, ssh)
mask = fits.open(mask_file)[1].data
## get the flux correction stored in the S2D file
keyword = 'HIERARCH ESO QC ORDER%d FLUX CORR'
flux_corr = np.array(
[hdu[0].header[keyword % o] for o in range(1, norders + 1)]
)
## fit a polynomial and evaluate it at each order's wavelength
## orders with flux_corr = 1 are ignored in the polynomial fit
fit_nb = (flux_corr != 1.0).sum()
ignore = norders - fit_nb
# see espdr_science:espdr_correct_flux
poly_deg = round(8 * fit_nb / norders)
llc = hdu[5].data[:, order_len // 2]
coeff = np.polyfit(llc[ignore:], flux_corr[ignore:], poly_deg - 1)
# corr_model = np.ones_like(hdu[5].data, dtype=np.float32)
corr_model = np.polyval(coeff, hdu[5].data)
if verbose:
print('Performing flux correction', end=' ')
print(f'(discarding {ignore} orders; polynomial of degree {poly_deg})')
kwargs = {}
kwargs['data'] = [None] + [hdu[i].data for i in range(1, 6)]
kwargs['dll'] = dll
kwargs['blaze'] = blaze
kwargs['corr_model'] = corr_model
kwargs['rvarray'] = rvarray
kwargs['mask'] = mask
kwargs['BERV'] = BERV
kwargs['BERVMAX'] = BERVMAX
kwargs['mask_width'] = mask_width
# kwargs['verbose'] = verbose
start = pytime.time()
if verbose:
print(f'Calculating...', end=' ', flush=True)
pool = multiprocessing.Pool(ncores)
ccfs, ccfes, ccfqs = zip(*pool.map(_dowork, product(orders, [kwargs, ])))
pool.close()
end = pytime.time()
if verbose:
print(f'done in {end - start:.2f} seconds')
if return_sum:
# sum the CCFs over the orders
ccf = np.concatenate([ccfs, np.array(ccfs).sum(axis=0, keepdims=True)])
# quadratic sum of the errors
qsum = np.sqrt(np.sum(np.square(ccfes), axis=0))
ccfe = np.concatenate([ccfes, qsum.reshape(1, -1)])
# sum the qualities
ccfq = np.concatenate(
[ccfqs, np.array(ccfqs).sum(axis=0, keepdims=True)])
if full_output:
return ccf, ccfe, ccfq, kwargs
else:
return ccf, ccfe, ccfq
else:
if full_output:
return np.array(ccfs), np.array(ccfes), np.array(ccfqs), kwargs
else:
return np.array(ccfs), np.array(ccfes), np.array(ccfqs)
def calculate_ccf(s2dfile, mask, rvarray, **kwargs):
"""
A wrapper for `calculate_s2d_ccf_parallel` which also saves the resulting
CCF in a fits file. Mostly meant for the iccf-make-ccf script.
Parameters
----------
s2dfile : str
The name of the S2D file
mask : str
The identifier for the CCF mask to use. A file 'ESPRESSO_mask.fits'
should exist (not necessarily in the current directory)
rvarray : array
RV array where to calculate the CCF
**kwargs
Keyword arguments passed directly to `calculate_s2d_ccf_parallel`
"""
mask_file = f"ESPRESSO_{mask}.fits"
kwargs['mask_file'] = mask_file
ccf, ccfe, ccfq, kw = calculate_s2d_ccf_parallel(s2dfile, rvarray,
full_output=True,
**kwargs)
# in the pipeline, data are saved as floats
ccf = ccf.astype(np.float32)
ccfe = ccfe.astype(np.float32)
ccfq = ccfq.astype(np.int32)
# read original S2D file
s2dhdu = fits.open(s2dfile)
s2dfile = os.path.basename(s2dfile)
end = f'_CCF_{mask}_iCCF.fits'
try:
ccf_file = s2dfile[:s2dfile.index('_')] + end
except ValueError:
ccf_file = os.path.splitext(s2dfile)[0] + end
phdr = fits.Header()
phdr['HIERARCH ESO RV START'] = rvarray[0]
phdr['HIERARCH ESO RV STEP'] = np.ediff1d(rvarray)[0]
phdr['HIERARCH ESO QC BJD'] = s2dhdu[0].header['ESO QC BJD']
phdr['HIERARCH ESO QC BERV'] = kw['BERV']
phdr['HIERARCH ESO QC BERVMAX'] = kw['BERVMAX']
phdr['HIERARCH ESO QC CCF MASK'] = mask
phdr['INSTRUME'] = 'ESPRESSO'
phdr['HIERARCH ESO INS MODE'] = 'ESPRESSO'
phdr['HIERARCH ESO PRO SCIENCE'] = True
phdr['HIERARCH ESO PRO TECH'] = 'ECHELLE '
phdr['HIERARCH ESO PRO TYPE'] = 'REDUCED '
I = Indicators(rvarray, ccf[-1], ccfe[-1])
phdr['HIERARCH ESO QC CCF RV'] = I.RV
phdr['HIERARCH ESO QC CCF RV ERROR'] = I.RVerror
phdr['HIERARCH ESO QC CCF FWHM'] = I.FWHM
# phdr['HIERARCH ESO QC CCF FWHM ERROR'] = I.FWHMerror # TODO
phdr['HIERARCH ESO QC CCF CONTRAST'] = I.contrast
# phdr['HIERARCH ESO QC CCF CONTRAST ERROR'] = I.contrasterror # TODO
# 'ESO QC CCF FLUX ASYMMETRY' # TODO
phdu = fits.PrimaryHDU(header=phdr)
# science data, the actual CCF!
hdr1 = fits.Header()
hdr1['EXTNAME'] = 'SCIDATA'
hdu1 = fits.ImageHDU(ccf, header=hdr1)
# CCF errors
hdr2 = fits.Header()
hdr2['EXTNAME'] = 'ERRDATA'
hdu2 = fits.ImageHDU(ccfe, header=hdr2)
# quality flag
hdr3 = fits.Header()
hdr3['EXTNAME'] = 'QUALDATA'
hdu3 = fits.ImageHDU(ccfq, header=hdr3)
hdul = fits.HDUList([phdu, hdu1, hdu2, hdu3])
print('Output to:', ccf_file)
hdul.writeto(ccf_file, overwrite=True, checksum=True)
return ccf_file
```
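As a self-contained check of the pure-Python `makeCCF` above (not of the pipeline-style `espdr_*` functions), the sketch below builds a synthetic spectrum with one Gaussian absorption line and a one-line mask; all numbers are arbitrary, and the module's `doppler_shift_wave` helper is assumed to be importable.
```python
# Sketch: makeCCF on a synthetic one-line spectrum (all values are arbitrary).
import numpy as np

# flat continuum with a single Gaussian absorption line at 5500 A
wave = np.linspace(5490.0, 5510.0, 4000)
line_center, line_sigma = 5500.0, 0.05
flux = 1.0 - 0.6 * np.exp(-0.5 * ((wave - line_center) / line_sigma)**2)

# one-line mask centred on the same wavelength
mask_wave = np.array([line_center])
mask_contrast = np.array([0.6])

rv, ccf = makeCCF(wave, flux, mask_wave=mask_wave, mask_contrast=mask_contrast,
                  rvmin=-20.0, rvmax=20.0, drv=0.5)
print(rv[np.argmin(ccf)])  # the CCF minimum should sit near 0 km/s for an unshifted line
```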
|
{
"source": "j-faria/keplerian",
"score": 3
}
|
#### File: keplerian/tests/test_create_figure.py
```python
import pytest
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from keplerian import keplerian
@pytest.mark.mpl_image_compare
def test_simple_fig():
""" Simple figure """
t = np.linspace(0, 10, 1000)
P, K, e, w, T0 = 4., 1., 0., 0., 2.
vsys = 1.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(t, keplerian(t, P, K, e, w, T0, vsys), lw=2)
return fig
@pytest.mark.mpl_image_compare
def test_complicated_fig():
""" Not so simple figure """
t = np.linspace(0, 10, 1000)
fig, axes = plt.subplots(5, 4)
for ax in axes.flatten():
ax.axis('off')
P, K, T0 = 4., 1., 2
vsys = 0
for i, e in enumerate(np.arange(0, 1, 0.2)):
for j, w in enumerate(np.arange(0, 2*np.pi, np.pi/2)):
ax = axes[i,j]
kep = keplerian(t, P, K, e, w, T0, vsys)
ax.plot(t, kep, lw=2)
ax.axhline(y=0, lw=1, color='k')
ax.set(ylim=[-2, 2], title='e=%.1f, w=%.2f' % (e, w))
fig.tight_layout()
return fig
```
|
{
"source": "j-faria/kima-subtrees-test",
"score": 3
}
|
#### File: kima-subtrees-test/pykima/check_priors.py
```python
import sys
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
import argparse
def _parse_args():
desc = """
A small script to check correct sampling from the priors.
Remember to run kima with the maximum number of levels
set to 1 (and save interval = 1 to speed things up).
Then, sample.txt contains samples from the prior.
This script simply plots histograms of each column of that file.
"""
parser = argparse.ArgumentParser(description=desc,
prog='kima-checkpriors',
# usage='%(prog)s [no,1,2,...,7]'
)
parser.add_argument('column', nargs=1, type=int,
help='which column to use for histogram')
parser.add_argument('--log', action='store_true',
help='plot the logarithm of the samples')
parser.add_argument('--code', nargs=1, type=str,
help='code to generate "theoretical" samples '\
'to compare to the prior. \n'\
'Assign samples to an iterable called `samples`. '\
'Use numpy and scipy.stats as `np` and `st`, respectively. '\
'Number of prior samples in sample.txt is in variable `nsamples`. '\
'For example: samples=np.random.uniform(0,1,nsamples)')
args = parser.parse_args()
return args
def main():
args = _parse_args()
column = args.column[0]
log = args.log
with open('sample.txt') as f:
firstline = f.readline()
firstline = firstline.strip().replace('#', '')
names = firstline.split()
try:
name = names[column - 1]
print ('Histogram of column %d: %s' % (column, name))
except IndexError:
name = 'column %d' % column
print ('Histogram of column %d' % column)
data = np.loadtxt('sample.txt', usecols=(column - 1,))
data = data[np.nonzero(data)[0]]
nsamples = data.size
print (' number of samples: %d' % nsamples)
print (' max value: %f' % data.max())
print (' min value: %f' % data.min())
xlabel = name
if log:
data = np.log(data)
xlabel = 'log ' + name
fig, ax = plt.subplots(1, 1)
ax.set_xlabel(xlabel)
ax.hist(data, bins=100, color='k', histtype='step', align='mid',
range=[data.min() - 0.2 * data.ptp(),
data.max() + 0.2 * data.ptp()],
)
if args.code:
namespace = locals()
exec (args.code[0], globals(), namespace)
samples = namespace['samples']
ax.hist(samples, alpha=0.3, bins=100, align='mid',
range=[data.min() - 0.2 * data.ptp(),
data.max() + 0.2 * data.ptp()],
)
plt.show()
if __name__ == '__main__':
main()
```
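The `--code` option above simply `exec`s a snippet that must define `samples`. A minimal stand-alone sketch of the same comparison, without any kima output files, is shown below (all numbers are synthetic):
```python
# Stand-alone sketch of the prior check: compare "prior samples" (synthetic here)
# with samples from the intended distribution, as kima-checkpriors does via --code.
# Example CLI (hypothetical column number):
#   kima-checkpriors 2 --code "samples=st.lognorm(s=1.0).rvs(nsamples)"
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt

nsamples = 10000
data = np.random.lognormal(mean=0.0, sigma=1.0, size=nsamples)  # stand-in for a sample.txt column
samples = st.lognorm(s=1.0).rvs(nsamples)                       # "theoretical" samples

fig, ax = plt.subplots()
ax.hist(data, bins=100, color='k', histtype='step')
ax.hist(samples, bins=100, alpha=0.3)
plt.show()
```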
|
{
"source": "j-faria/LogUniform",
"score": 3
}
|
#### File: loguniform/tests/test_ModifiedLogUniform.py
```python
from unittest import TestCase
import loguniform
dist = loguniform.ModifiedLogUniform
class test_constructor(TestCase):
def test1(self):
with self.assertRaises(TypeError):
dist(a=1)
with self.assertRaises(TypeError):
dist(b=1000)
with self.assertRaises(TypeError):
dist(knee=10)
def test2(self):
with self.assertRaises(AssertionError):
dist(knee=10, b=1)
def test3(self):
with self.assertRaises(AssertionError):
dist(knee=0, b=1)
with self.assertRaises(AssertionError):
dist(knee=0, b=0)
def test4(self):
d = dist(knee=1, b=100)
self.assertEqual(d.knee, 1)
self.assertEqual(d.b, 100)
d = dist(knee=10.3, b=665.1)
self.assertEqual(d.knee, 10.3)
self.assertEqual(d.b, 665.1)
class test_methods(TestCase):
def test_pdf(self):
d = dist(knee=10, b=5000)
self.assertEqual(d.pdf(6000), 0.0)
self.assertNotEqual(d.pdf(d.a), 0.0)
self.assertGreater(d.pdf(d.a), 0.0)
self.assertGreater(d.pdf(d.knee), 0.0)
self.assertEqual(d.pdf(-1), 0.0)
self.assertNotEqual(d.pdf(d.b), 0.0)
self.assertGreater(d.pdf(d.b), 0.0)
```
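For context, a usage sketch of the distribution exercised by these tests, restricted to the attributes and methods the assertions above rely on:
```python
# Usage sketch based only on what the tests above exercise.
import loguniform

d = loguniform.ModifiedLogUniform(knee=1, b=1000)
print(d.a, d.knee, d.b)        # distribution attributes checked in the tests
print(d.pdf(0.5), d.pdf(10))   # positive densities inside the support
print(d.pdf(2000))             # 0.0 above the upper limit b
```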
|
{
"source": "j-faria/pyExoplanet.eu",
"score": 3
}
|
#### File: pyExoplanet.eu/pyexoplaneteu/config.py
```python
import os
def _create_data_dir():
""" Create empty directory where exoplanetEU.csv will be stored """
home = os.path.expanduser("~")
directory = os.path.join(home, '.pyexoplaneteu')
if not os.path.exists(directory):
os.makedirs(directory)
def _check_data_dir():
home = os.path.expanduser("~")
directory = os.path.join(home, '.pyexoplaneteu')
return os.path.exists(directory)
def get_data_dir():
""" Return directory where exoplanetEU.csv is stored """
if not _check_data_dir():
_create_data_dir()
home = os.path.expanduser("~")
return os.path.join(home, '.pyexoplaneteu')
```
#### File: pyExoplanet.eu/pyexoplaneteu/pyexoplaneteu.py
```python
import os
from urllib import request
import time
import math
import csv
import pprint
from collections import OrderedDict
from .utils import float_cols
# the link in the "Download CSV" button
download_link = 'http://exoplanet.eu/catalog/csv'
# to get the directory where SWEET-Cat data will be stored
from .config import get_data_dir
def download_data():
""" Download exoplanet.eu data and save it to `exoplanetEU.csv` """
with request.urlopen(download_link) as response:
data = response.read()
local_file = os.path.join(get_data_dir(), 'exoplanetEU.csv')
with open(local_file, 'wb') as f:
f.write(data)
print(f'Saved exoplanet.eu data to {local_file}')
def check_data_age():
""" How old is `exoplanetEU.csv`, in days """
local_file = os.path.join(get_data_dir(), 'exoplanetEU.csv')
age = time.time() - os.path.getmtime(local_file) # in sec
return age / (60*60*24) # in days
class DataDict(OrderedDict):
numpy_entries = False
__doc__ = "exoplanet.eu: a catalog of parameters for known exoplanets.\n" + \
"The catalog and more information can be found " \
"at http://exoplanet.eu\n" + \
"This dictionary has the catalog columns as its keys; " \
"see the `.columns()` method.\n" + \
"Entries are lists, see `to_numpy()` to convert them to numpy arrays."
def __init__(self, *args, **kwargs):
super(DataDict, self).__init__(self, *args, **kwargs)
def __getitem__(self, key):
# allows to do data['key_nonan'] to get data['key'] without NaNs
if key.endswith('_nonan'):
val = super().__getitem__(key.replace('_nonan',''))
try:
if self.numpy_entries:
from numpy import isnan
val = val[~isnan(val)]
else:
val = [v for v in val if not math.isnan(v)]
except TypeError:
# this column does not have floats
pass
else:
val = super().__getitem__(key)
return val
def __str__(self):
return 'exoplanet.eu data'
def __repr__(self):
return f'exoplanet.eu data: dictionary with {self.size} entries. '+\
'Use .columns() to get the column labels.'
def _repr_pretty_(self, p, cycle):
return p.text(self.__repr__())
def __len__(self):
return len(self.__getitem__('name'))
def columns(self):
""" List the available columns """
pprint.pprint(list(self.keys()), compact=True)
@property
def size(self):
return len(self.__getitem__('name'))
def to_numpy(self, inplace=True):
"""
Convert entries to numpy arrays. If `inplace` is True convert
the entries in place, else return a new dictionary.
"""
from numpy import asarray # this assumes numpy is installed
newself = self if inplace else DataDict()
for k, v in self.items():
newself[k] = asarray(v)
newself.numpy_entries = True
if not inplace:
return newself
def read_data():
def apply_float_to_column(data, key):
data[key] = [float(v) if v!='' else math.nan for v in data[key]]
# read the file
local_file = os.path.join(get_data_dir(), 'exoplanetEU.csv')
with open(local_file) as csvfile:
reader = csv.DictReader(csvfile)
lines = [row for row in reader]
# lines is a list of (ordered) dicts; transform it to a (ordered) dict of lists
data = OrderedDict({k: [dic[k] for dic in lines] for k in lines[0]})
# column labels were read automatically by the csv.DictReader
labels = list(data.keys())
# but the first label erroneously includes a "#"
labels[0] = 'name'
data['name'] = data.pop('# name')
nlab, nlin = len(labels), len(lines)
print(f'There are {nlab} columns with {nlin} entries each in `exoplanetEU.csv`')
data = DataDict(**data)
data.move_to_end('name', last=False) # put this key back at the beginning,
# just for clarity
# transform some columns to floats
for col in float_cols:
apply_float_to_column(data, col)
return data
def get_data():
local_file = os.path.join(get_data_dir(), 'exoplanetEU.csv')
if not os.path.exists(local_file):
print ('Downloading exoplanet.eu data')
download_data()
age = check_data_age()
if age > 5:
print ('Data in `exoplanetEU.csv` is older than 5 days, downloading.')
download_data()
else:
print ('Data in `exoplanetEU.csv` is recent.')
data = read_data()
return data
if __name__ == '__main__':
data = get_data()
```
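A short usage sketch of the module above, assuming the package re-exports `get_data`; the column name used for the `_nonan` access is illustrative and must exist in the downloaded catalog.
```python
# Usage sketch (downloads exoplanetEU.csv on first use; column name is illustrative).
from pyexoplaneteu import get_data

data = get_data()
data.columns()                # print the available column labels
print(data.size)              # number of catalog entries
masses = data['mass_nonan']   # a float column with its NaN entries filtered out
print(len(masses), 'entries with a measured mass')
```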
|
{
"source": "j-faria/pySWEETCat",
"score": 3
}
|
#### File: pySWEETCat/pysweetcat/config.py
```python
import os
def _create_data_dir():
""" Create empty directory where SWEET_cat.tsv will be stored """
home = os.path.expanduser("~")
directory = os.path.join(home, '.pysweetcat')
if not os.path.exists(directory):
os.makedirs(directory)
def _check_data_dir():
home = os.path.expanduser("~")
directory = os.path.join(home, '.pysweetcat')
return os.path.exists(directory)
def get_data_dir():
""" Return directory where SWEET_cat.tsv is stored """
if not _check_data_dir():
_create_data_dir()
home = os.path.expanduser("~")
return os.path.join(home, '.pysweetcat')
```
|
{
"source": "j-faria/urepr",
"score": 4
}
|
#### File: urepr/urepr/core.py
```python
import re
import math
from math import sqrt, log, isnan, isinf
try:
from math import isinfinite # !! Python 3.2+
except ImportError:
def isinfinite(x): return isinf(x) or isnan(x)
def first_digit(value):
"""
Return the first digit position of the given value, as an integer.
0 is the digit just before the decimal point. Digits to the right
of the decimal point have a negative position.
Return 0 for a null value.
"""
try:
return int(math.floor(math.log10(abs(value))))
except ValueError: # Case of value == 0
return 0
def PDG_precision(std_dev):
"""
Return the number of significant digits to be used for the given standard
deviation, according to the rounding rules of the Particle Data Group (2010)
(http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf).
Also returns the effective standard deviation to be used for display.
"""
exponent = first_digit(std_dev)
if exponent >= 0:
(exponent, factor) = (exponent-2, 1)
else:
(exponent, factor) = (exponent+1, 1000)
digits = int(std_dev/10.**exponent*factor) # int rounds towards zero
# Rules:
if digits <= 354:
return (2, std_dev)
elif digits <= 949:
return (1, std_dev)
else:
# the parentheses matter, for very small or very large std_dev
return (2, 10.**exponent*(1000/factor))
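# Worked examples (illustrative, following the rules above):
#   PDG_precision(0.0123) -> "digits" falls in the 100..354 range, so the result
#                            is (2, 0.0123): two significant digits, std_dev kept.
#   PDG_precision(0.098)  -> "digits" exceeds 949, so the result is (2, 0.1):
#                            the uncertainty is rounded up before display.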
def signif_dgt_to_limit(value, num_signif_d):
"""
Return the precision limit necessary to display value with num_signif_d
significant digits.
The precision limit is given as -1 for 1 digit after the decimal point, 0
for integer rounding, etc. It can be positive.
"""
fst_digit = first_digit(value)
limit_no_rounding = fst_digit-num_signif_d+1
rounded = round(value, -limit_no_rounding)
fst_digit_rounded = first_digit(rounded)
if fst_digit_rounded > fst_digit:
limit_no_rounding += 1
return limit_no_rounding
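# Worked example (illustrative): signif_dgt_to_limit(0.0999, 2)
#   first digit at position -2, naive limit -3; rounding 0.0999 to that limit
#   gives 0.1, whose first digit moved up one position, so the limit becomes -2.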
robust_format = format
EXP_LETTERS = {'f': 'e', 'F': 'E'}
def robust_align(orig_str, fill_char, align_option, width):
"""
Aligns the given string with the given fill character.
orig_str -- string to be aligned (str or unicode object).
fill_char -- if empty, space is used.
align_option -- as accepted by format().
wdith -- string that contains the width.
"""
return format(orig_str, fill_char+align_option+width)
# Maps some Unicode code points ("-", "+", and digits) to their
# superscript version:
TO_SUPERSCRIPT = {
0x2b: '⁺',
0x2d: '⁻',
0x30: '⁰',
0x31: '¹',
0x32: '²',
0x33: '³',
0x34: '⁴',
0x35: '⁵',
0x36: '⁶',
0x37: '⁷',
0x38: '⁸',
0x39: '⁹'
}
# Inverted TO_SUPERSCRIPT table, for use with unicode.translate():
FROM_SUPERSCRIPT = {ord(sup): normal for (normal, sup) in TO_SUPERSCRIPT.items()}
def to_superscript(value):
"""
Return a (Unicode) string with the given value as superscript characters.
The value is formatted with the %d %-operator format.
value -- integer.
"""
return ('%d' % value).translate(TO_SUPERSCRIPT)
def from_superscript(number_str):
"""
Converts a string with superscript digits and sign into an integer.
ValueError is raised if the conversion cannot be done.
number_str -- basestring object.
"""
return int(str(number_str).translate(FROM_SUPERSCRIPT))
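# Examples: to_superscript(-12) returns '⁻¹²' and from_superscript('⁻¹²') returns -12.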
# Function that transforms an exponent produced by format_num() into
# the corresponding string notation (for non-default modes):
EXP_PRINT = {
'pretty-print': lambda common_exp: '×10%s' % to_superscript(common_exp),
'latex': lambda common_exp: r' \times 10^{%d}' % common_exp
}
# Symbols used for grouping (typically between parentheses) in format_num():
GROUP_SYMBOLS = {
'pretty-print': ('(', ')'),
'latex': (r'\left(', r'\right)'),
'default': ('(', ')') # Basic text mode
}
def format_num(nom_val_main, error_main, common_exp,
fmt_parts, prec, main_pres_type, options):
r"""
Return a formatted number with uncertainty.
This is the same function as uncertainties.core.format_num; see much more
detailed documentation there.
"""
if 'P' in options:
print_type = 'pretty-print'
elif 'L' in options:
print_type = 'latex'
else:
print_type = 'default'
# Exponent part:
if common_exp is None:
exp_str = ''
elif print_type == 'default':
exp_str = EXP_LETTERS[main_pres_type]+'%+03d' % common_exp
else:
exp_str = EXP_PRINT[print_type](common_exp)
# Possible % sign:
percent_str = ''
if '%' in options:
if 'L' in options:
percent_str += ' \\'
percent_str += '%'
special_error = not error_main or isinfinite(error_main)
if special_error and fmt_parts['type'] in ('', 'g', 'G'):
fmt_suffix_n = (fmt_parts['prec'] or '')+fmt_parts['type']
else:
fmt_suffix_n = '.%d%s' % (prec, main_pres_type)
if 'S' in options: # Shorthand notation:
if error_main == 0:
uncert_str = '0'
elif isnan(error_main):
uncert_str = robust_format(error_main, main_pres_type)
if 'L' in options:
uncert_str = r'\mathrm{%s}' % uncert_str
elif isinf(error_main):
if 'L' in options:
uncert_str = r'\infty'
else:
uncert_str = robust_format(error_main, main_pres_type)
else: # Error with a meaningful first digit (not 0, and real number)
uncert = round(error_main, prec)
if first_digit(uncert) >= 0 and prec > 0:
uncert_str = '%.*f' % (prec, uncert)
else:
if uncert:
uncert_str = '%d' % round(uncert*10.**prec)
else:
uncert_str = '0.'
value_end = '(%s)%s%s' % (uncert_str, exp_str, percent_str)
any_exp_factored = True # Single exponent in the output
if fmt_parts['zero'] and fmt_parts['width']:
nom_val_width = max(int(fmt_parts['width']) - len(value_end), 0)
fmt_prefix_n = '%s%s%d%s' % (
fmt_parts['sign'], fmt_parts['zero'], nom_val_width,
fmt_parts['comma'])
else:
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n)
if 'L' in options:
if isnan(nom_val_main):
nom_val_str = r'\mathrm{%s}' % nom_val_str
elif isinf(nom_val_main):
nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '')
value_str = nom_val_str+value_end
if fmt_parts['width']: # An individual alignment is needed:
value_str = robust_align(
value_str, fmt_parts['fill'], fmt_parts['align'] or '>',
fmt_parts['width'])
else: # +/- notation:
any_exp_factored = not fmt_parts['width']
error_has_exp = not any_exp_factored and not special_error
nom_has_exp = not any_exp_factored and not isinfinite(nom_val_main)
if fmt_parts['width']: # Individual widths
if fmt_parts['zero']:
width = int(fmt_parts['width'])
remaining_width = max(width-len(exp_str), 0)
fmt_prefix_n = '%s%s%d%s' % (
fmt_parts['sign'], fmt_parts['zero'],
remaining_width if nom_has_exp else width,
fmt_parts['comma'])
fmt_prefix_e = '%s%d%s' % (
fmt_parts['zero'],
remaining_width if error_has_exp else width,
fmt_parts['comma'])
else:
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
fmt_prefix_e = fmt_parts['comma']
else: # Global width
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
fmt_prefix_e = fmt_parts['comma']
nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n)
if error_main:
if (isinfinite(nom_val_main)
and fmt_parts['type'] in ('', 'g', 'G')):
fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type']
else:
fmt_suffix_e = '.%d%s' % (prec, main_pres_type)
else:
fmt_suffix_e = '.0%s' % main_pres_type
error_str = robust_format(error_main, fmt_prefix_e+fmt_suffix_e)
if 'L' in options:
if isnan(nom_val_main):
nom_val_str = r'\mathrm{%s}' % nom_val_str
elif isinf(nom_val_main):
nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '')
if isnan(error_main):
error_str = r'\mathrm{%s}' % error_str
elif isinf(error_main):
error_str = r'\infty'
if nom_has_exp:
nom_val_str += exp_str
if error_has_exp:
error_str += exp_str
if fmt_parts['width']: # An individual alignment is needed:
effective_align = fmt_parts['align'] or '>'
nom_val_str = robust_align(
nom_val_str, fmt_parts['fill'], effective_align,
fmt_parts['width'])
error_str = robust_align(
error_str, fmt_parts['fill'], effective_align,
fmt_parts['width'])
if 'P' in options:
pm_symbol = '±'
elif 'L' in options:
pm_symbol = r' \pm '
else:
pm_symbol = '+/-'
(LEFT_GROUPING, RIGHT_GROUPING) = GROUP_SYMBOLS[print_type]
if any_exp_factored and common_exp is not None:
value_str = ''.join((
LEFT_GROUPING,
nom_val_str, pm_symbol, error_str,
RIGHT_GROUPING,
exp_str, percent_str))
else:
value_str = ''.join([nom_val_str, pm_symbol, error_str])
if percent_str:
value_str = ''.join((
LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str))
return value_str
def format_num_ul(nom_val_main, upper_main, lower_main, common_exp,
fmt_parts, prec, main_pres_type, options):
r"""
Return a formatted number with upper and lower uncertainties.
Null errors are displayed as the integer 0, with no decimal point.
The formatting can be partially customized globally. The EXP_PRINT maps
non-default modes ("latex", "pretty-print") to a function that transforms a
common exponent into a string (of the form "times 10 to the power
<exponent>", where "times" can be represented, e.g., as a centered dot
instead of the multiplication symbol). The GROUP_SYMBOLS mapping maps each
of these modes to the pair of strings used for grouping expressions
(typically parentheses, which can be for instance replaced by "\left(" and
"\right(" in LaTeX so as to create a non-breakable group).
nom_val_main, upper_main, lower_main -- nominal value and upper and lower
errors, before using common_exp (e.g., "1.23e2" would have a main value of
1.23; similarly, "12.3+0.01-0.02" would have a main value of 12.3).
common_exp -- common exponent to use. If None, no common exponent is used.
fmt_parts -- mapping that contains at least the following parts of the
    format specification: fill, align, sign, zero, width, comma, type; the values
are strings. These format specification parts are handled. The width is
applied to each value, or, if the shorthand notation is used, globally. If
the error is special (zero, NaN, inf), the parts are applied as much as
possible to the nominal value.
prec -- precision to use with the main_pres_type format type (see below).
main_pres_type -- format presentation type, either "f" or "F". This defines
how the mantissas, exponents and NaN/inf values are represented (in the same
way as for float). None, the empty string, or "%" are not accepted.
    options -- options (as an object that supports membership testing, like for
instance a string). "S" is for the short-hand notation 1.23(1). "P" is for
pretty-printing ("±" between the nominal value and the error, superscript
exponents, etc.). "L" is for a LaTeX output. Options can be combined. "%"
adds a final percent sign, and parentheses if the shorthand notation is not
used. The P option has priority over the L option (if both are given).
"""
# If a decimal point were always present in zero rounded errors
# that are not zero, the formatting would be difficult, in general
# (because the formatting options are very general): an example
    # is '{:04.0f}'.format(0.1), which gives "0000" and would have to
# give "000.". Another example is '{:<4.0f}'.format(0.1), which
# gives "0 " but should give "0. ". This is cumbersome to
# implement in the general case, because no format prints "0."
# for 0. Furthermore, using the .0f format already brings the same
# kind of difficulty: non-zero numbers can appear as the exact
# integer zero, after rounding. The problem is not larger, for
# numbers with an error.
#
# That said, it is good to indicate null errors explicitly when
# possible: printing 3.1±0 with the default format prints 3.1+/-0,
# which shows that the uncertainty is exactly zero.
# The suffix of the result is calculated first because it is
# useful for the width handling of the shorthand notation.
# Printing type for parts of the result (exponent, parentheses),
# taking into account the priority of the pretty-print mode over
# the LaTeX mode. This setting does not apply to everything: for
# example, NaN is formatted as \mathrm{nan} (or NAN) if the LaTeX
# mode is required.
if 'P' in options:
print_type = 'pretty-print'
elif 'L' in options:
print_type = 'latex'
else:
print_type = 'default'
# Exponent part:
if common_exp is None:
exp_str = ''
elif print_type == 'default':
# Case of e or E. The same convention as Python 2.7
# to 3.3 is used for the display of the exponent:
exp_str = EXP_LETTERS[main_pres_type]+'%+03d' % common_exp
else:
exp_str = EXP_PRINT[print_type](common_exp)
# Possible % sign:
percent_str = ''
if '%' in options:
if 'L' in options:
# % is a special character, in LaTeX: it must be escaped.
#
# Using '\\' in the code instead of r'\' so as not to
# confuse emacs's syntax highlighting:
percent_str += ' \\'
percent_str += '%'
####################
# Only true if the error should not have an exponent (has priority
# over common_exp):
special_error = not upper_main or isinfinite(upper_main)
special_error |= not lower_main or isinfinite(lower_main)
# Nicer representation of the main nominal part, with no trailing
# zeros, when the error does not have a defined number of
# significant digits:
if special_error and fmt_parts['type'] in ('', 'g', 'G'):
# The main part is between 1 and 10 because any possible
# exponent is taken care of by common_exp, so it is
# formatted without an exponent (otherwise, the exponent
# would have to be handled for the LaTeX option):
fmt_suffix_n = (fmt_parts['prec'] or '')+fmt_parts['type']
else:
fmt_suffix_n = '.%d%s' % (prec, main_pres_type)
# print "FMT_SUFFIX_N", fmt_suffix_n
####################
# Calculation of the mostly final numerical part value_str (no %
# sign, no global width applied).
# Error formatting:
if 'S' in options: # Shorthand notation:
raise NotImplementedError
# Calculation of the uncertainty part for upper, uncert_str:
if upper_main == 0:
# The error is exactly zero
uncert_str = '0'
elif isnan(upper_main):
uncert_str = robust_format(upper_main, main_pres_type)
if 'L' in options:
uncert_str = r'\mathrm{%s}' % uncert_str
elif isinf(upper_main):
if 'L' in options:
uncert_str = r'\infty'
else:
uncert_str = robust_format(upper_main, main_pres_type)
else: # Error with a meaningful first digit (not 0, and real number)
uncert = round(upper_main, prec)
# The representation uncert_str of the uncertainty (which will
# be put inside parentheses) is calculated:
# The uncertainty might straddle the decimal point: we
# keep it as it is, in this case (e.g. 1.2(3.4), as this
# makes the result easier to read); the shorthand
# notation then essentially coincides with the +/-
# notation:
if first_digit(uncert) >= 0 and prec > 0:
# This case includes a zero rounded error with digits
# after the decimal point:
uncert_str = '%.*f' % (prec, uncert)
else:
if uncert:
# The round is important because 566.99999999 can
# first be obtained when 567 is wanted (%d prints the
# integer part, not the rounded value):
uncert_str = '%d' % round(uncert*10.**prec)
else:
# The decimal point indicates a truncated float
# (this is easy to do, in this case, since
# fmt_prefix_e is ignored):
uncert_str = '0.'
# Calculation of the uncertainty part for lower, uncert_str:
if lower_main == 0:
# The error is exactly zero
uncert_str_lower = '0'
elif isnan(lower_main):
uncert_str_lower = robust_format(lower_main, main_pres_type)
if 'L' in options:
uncert_str_lower = r'\mathrm{%s}' % uncert_str_lower
elif isinf(lower_main):
if 'L' in options:
uncert_str_lower = r'\infty'
else:
uncert_str_lower = robust_format(lower_main, main_pres_type)
else: # Error with a meaningful first digit (not 0, and real number)
uncert = round(lower_main, prec)
# The representation uncert_str_lower of the uncertainty (which will
# be put inside parentheses) is calculated:
# The uncertainty might straddle the decimal point: we
# keep it as it is, in this case (e.g. 1.2(3.4), as this
# makes the result easier to read); the shorthand
# notation then essentially coincides with the +/-
# notation:
if first_digit(uncert) >= 0 and prec > 0:
# This case includes a zero rounded error with digits
# after the decimal point:
uncert_str_lower = '%.*f' % (prec, uncert)
else:
if uncert:
# The round is important because 566.99999999 can
# first be obtained when 567 is wanted (%d prints the
# integer part, not the rounded value):
uncert_str_lower = '%d' % round(uncert*10.**prec)
else:
# The decimal point indicates a truncated float
# (this is easy to do, in this case, since
# fmt_prefix_e is ignored):
uncert_str_lower = '0.'
# End of the final number representation (width and alignment
# not included). This string is important for the handling of
# the width:
value_end = '(%s)%s%s' % (uncert_str, exp_str, percent_str)
value_end_lower = '(%s)%s%s' % (uncert_str_lower, exp_str, percent_str)
any_exp_factored = True # Single exponent in the output
##########
# Nominal value formatting:
# Calculation of fmt_prefix_n (prefix for the format of the
# main part of the nominal value):
if fmt_parts['zero'] and fmt_parts['width']:
# Padding with zeros must be done on the nominal value alone:
# Remaining width (for the nominal value):
nom_val_width = max(int(fmt_parts['width']) - len(value_end), 0)
fmt_prefix_n = '%s%s%d%s' % (
fmt_parts['sign'], fmt_parts['zero'], nom_val_width,
fmt_parts['comma'])
else:
# Any 'zero' part should not do anything: it is not
# included
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
# print "FMT_PREFIX_N", fmt_prefix_n
# print "FMT_SUFFIX_N", fmt_suffix_n
nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n)
##########
        # Overriding of nom_val_str for LaTeX, possibly based on the
# existing value (for NaN vs nan):
if 'L' in options:
if isnan(nom_val_main):
nom_val_str = r'\mathrm{%s}' % nom_val_str
elif isinf(nom_val_main):
# !! It is wasteful, in this case, to replace
# nom_val_str: could this be avoided while avoiding to
# duplicate the formula for nom_val_str for the common
# case (robust_format(...))?
nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '')
value_str = nom_val_str + value_end
# Global width, if any:
if fmt_parts['width']: # An individual alignment is needed:
# Default alignment, for numbers: to the right (if no
# alignment is specified, a string is aligned to the
# left):
value_str = robust_align(
value_str, fmt_parts['fill'], fmt_parts['align'] or '>',
fmt_parts['width'])
else: # +/- notation:
# The common exponent is factored or not, depending on the
# width. This gives nice columns for the nominal values and
# the errors (no shift due to a varying exponent), when a need
# is given:
any_exp_factored = not fmt_parts['width']
# True when the error part has any exponent directly attached
# (case of an individual exponent for both the nominal value
# and the error, when the error is a non-0, real number).
# The goal is to avoid the strange notation nane-10, and to
# avoid the 0e10 notation for an exactly zero uncertainty,
# because .0e can give this for a non-zero error (the goal is
# to have a zero uncertainty be very explicit):
error_has_exp = not any_exp_factored and not special_error
# Like error_has_exp, but only for real number handling
# (there is no special meaning to a zero nominal value):
nom_has_exp = not any_exp_factored and not isinfinite(nom_val_main)
# Prefix for the parts:
if fmt_parts['width']: # Individual widths
# If zeros are needed, then the width is taken into
# account now (before the exponent is added):
if fmt_parts['zero']:
width = int(fmt_parts['width'])
# Remaining (minimum) width after including the
# exponent:
remaining_width = max(width-len(exp_str), 0)
fmt_prefix_n = '%s%s%d%s' % (
fmt_parts['sign'], fmt_parts['zero'],
remaining_width if nom_has_exp else width,
fmt_parts['comma'])
fmt_prefix_e = '%s%d%s' % (
fmt_parts['zero'],
remaining_width if error_has_exp else width,
fmt_parts['comma'])
else:
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
fmt_prefix_e = fmt_parts['comma']
else: # Global width
fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma']
fmt_prefix_e = fmt_parts['comma']
## print "ANY_EXP_FACTORED", any_exp_factored
## print "ERROR_HAS_EXP", error_has_exp
## print "NOM_HAS_EXP", nom_has_exp
####################
# Nominal value formatting:
# !! The following fails with Python < 2.6 when the format is
# not accepted by the % operator. This can happen when
# special_error is true, as the format used for the nominal
# value is essentially the format provided by the user, which
# may be empty:
# print "FMT_PREFIX_N", fmt_prefix_n
# print "FMT_SUFFIX_N", fmt_suffix_n
nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n)
# print "NOM_VAL_STR", nom_val_str
####################
# Error formatting:
# !! Note: .0f applied to a float has no decimal point, but
# this does not appear to be documented
# (http://docs.python.org/2/library/string.html#format-specification-mini-language). This
# feature is used anyway, because it allows a possible comma
# format parameter to be handled more conveniently than if the
# 'd' format was used.
#
# The following uses a special integer representation of a
# zero uncertainty:
if upper_main:
# The handling of NaN/inf in the nominal value identical to
# the handling of NaN/inf in the standard deviation:
if (isinfinite(nom_val_main)
# Only some formats have a nicer representation:
and fmt_parts['type'] in ('', 'g', 'G')):
# The error can be formatted independently:
fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type']
else:
fmt_suffix_e = '.%d%s' % (prec, main_pres_type)
else:
fmt_suffix_e = '.0%s' % main_pres_type
error_str_upper = robust_format(upper_main, fmt_prefix_e+fmt_suffix_e)
if lower_main:
# The handling of NaN/inf in the nominal value identical to
# the handling of NaN/inf in the standard deviation:
if (isinfinite(nom_val_main)
# Only some formats have a nicer representation:
and fmt_parts['type'] in ('', 'g', 'G')):
# The error can be formatted independently:
fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type']
else:
fmt_suffix_e = '.%d%s' % (prec, main_pres_type)
else:
fmt_suffix_e = '.0%s' % main_pres_type
error_str_lower = robust_format(lower_main, fmt_prefix_e+fmt_suffix_e)
##########
# Overriding of nom_val_str and error_str for LaTeX:
if 'L' in options:
if isnan(nom_val_main):
nom_val_str = r'\mathrm{%s}' % nom_val_str
elif isinf(nom_val_main):
nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '')
if isnan(upper_main):
error_str_upper = r'\mathrm{%s}' % error_str_upper
elif isinf(upper_main):
error_str_upper = r'\infty'
if isnan(lower_main):
error_str_lower = r'\mathrm{%s}' % error_str_lower
elif isinf(lower_main):
error_str_lower = r'\infty'
if nom_has_exp:
nom_val_str += exp_str
if error_has_exp:
error_str_upper += exp_str
error_str_lower += exp_str
####################
# Final alignment of each field, if needed:
if fmt_parts['width']: # An individual alignment is needed:
# Default alignment, for numbers: to the right (if no
# alignment is specified, a string is aligned to the
# left):
effective_align = fmt_parts['align'] or '>'
# robust_format() is used because it may handle alignment
# options, where the % operator does not:
nom_val_str = robust_align(
nom_val_str, fmt_parts['fill'], effective_align,
fmt_parts['width'])
error_str_upper = robust_align(
error_str_upper, fmt_parts['fill'], effective_align,
fmt_parts['width'])
error_str_lower = robust_align(
error_str_lower, fmt_parts['fill'], effective_align,
fmt_parts['width'])
####################
if 'P' in options:
# Unicode has priority over LaTeX, so that users with a
# Unicode-compatible LaTeX source can use ±:
(LEFT_BRACKET, RIGHT_BRACKET) = '', ''
pm_symbol = '±'
p_symbol = '+'
m_symbol = '-'
elif 'L' in options:
(LEFT_BRACKET, RIGHT_BRACKET) = '{', '}'
pm_symbol = r' \pm '
p_symbol = r' ^' + LEFT_BRACKET + '+'
m_symbol = r' _' + LEFT_BRACKET + '-'
else:
(LEFT_BRACKET, RIGHT_BRACKET) = '', ''
pm_symbol = '+/-'
p_symbol = '+'
m_symbol = '-'
####################
# Construction of the final value, value_str, possibly with
# grouping (typically inside parentheses):
(LEFT_GROUPING, RIGHT_GROUPING) = GROUP_SYMBOLS[print_type]
# The nominal value and the error might have to be explicitly
# grouped together with parentheses, so as to prevent an
# ambiguous notation. This is done in parallel with the
# percent sign handling because this sign may too need
# parentheses.
if any_exp_factored and common_exp is not None:
value_str = ''.join((
LEFT_GROUPING,
nom_val_str,
p_symbol, error_str_upper, RIGHT_BRACKET,
m_symbol, error_str_lower, RIGHT_BRACKET,
RIGHT_GROUPING,
exp_str, percent_str))
else:
value_str = ''.join([
nom_val_str,
p_symbol, error_str_upper, RIGHT_BRACKET,
m_symbol, error_str_lower, RIGHT_BRACKET])
if percent_str:
value_str = ''.join((
LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str))
return value_str
def uformat(nominal_value, std_dev, format_spec=''):
"""Formats a number with uncertainty.
    The format specification is the same as for format() for
floats, as defined for Python 2.6+ (restricted to what the %
operator accepts, if using an earlier version of Python),
except that the n presentation type is not supported. In
particular, the usual precision, alignment, sign flag,
etc. can be used. The behavior of the various presentation
types (e, f, g, none, etc.) is similar. Moreover, the format
is extended: the number of digits of the uncertainty can be
controlled, as is the way the uncertainty is indicated (with
+/- or with the short-hand notation 3.14(1), in LaTeX or with
a simple text string,...).
Beyond the use of options at the end of the format
specification, the main difference with floats is that a "u"
just before the presentation type (f, e, g, none, etc.)
activates the "uncertainty control" mode (e.g.: ".6u"). This
mode is also activated when not using any explicit precision
(e.g.: "g", "10f", "+010,e" format specifications). If the
uncertainty does not have a meaningful number of significant
digits (0 and NaN uncertainties), this mode is automatically
deactivated.
The nominal value and the uncertainty always use the same
precision. This implies trailing zeros, in general, even with
the g format type (contrary to the float case). However, when
the number of significant digits of the uncertainty is not
defined (zero or NaN uncertainty), it has no precision, so
there is no matching. In this case, the original format
specification is used for the nominal value (any "u" is
ignored).
Any precision (".p", where p is a number) is interpreted (if
meaningful), in the uncertainty control mode, as indicating
the number p of significant digits of the displayed
uncertainty. Example: .1uf will return a string with one
significant digit in the uncertainty (and no exponent).
If no precision is given, the rounding rules from the
Particle Data Group are used, if possible
(http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). For
example, the "f" format specification generally does not use
the default 6 digits after the decimal point, but applies the
PDG rules.
A common exponent is used if an exponent is needed for the
larger of the nominal value (in absolute value) and the
standard deviation, unless this would result in a zero
uncertainty being represented as 0e... or a NaN uncertainty as
NaNe.... Thanks to this common exponent, the quantity that
best describes the associated probability distribution has a
mantissa in the usual 1-10 range. The common exponent is
factored (as in "(1.2+/-0.1)e-5"). unless the format
specification contains an explicit width (" 1.2e-5+/- 0.1e-5")
(this allows numbers to be in a single column, when printing
numbers over many lines). Specifying a minimum width of 1 is a
way of forcing any common exponent to not be factored out.
The fill, align, zero and width parameters of the format
specification are applied individually to each of the nominal
value and standard deviation or, if the shorthand notation is
used, globally.
The sign parameter of the format specification is only applied
to the nominal value (since the standard deviation is
positive).
In the case of a non-LaTeX output, the returned string can
normally be parsed back with ufloat_fromstr(). This however
excludes cases where numbers use the "," thousands separator,
for example.
Options can be added, at the end of the format
specification. Multiple options can be specified.
When option "S" is present (like in .1uS), the short-hand
notation 1.234(5) is used; if the digits of the uncertainty
straddle the decimal point, it uses a fixed-point notation,
like in 12.3(4.5). When "P" is present, the pretty-printing
mode is activated: "±" separates the nominal value from the
standard deviation, exponents use superscript characters,
etc. When "L" is present, the output is formatted with LaTeX.
An uncertainty which is exactly zero is represented as the
integer 0 (i.e. with no decimal point).
The "%" format type forces the percent sign to be at the end
of the returned string (it is not attached to each of the
nominal value and the standard deviation).
Some details of the formatting can be customized as described
in format_num().
"""
# Convention on limits "between" digits: 0 = exactly at the
# decimal point, -1 = after the first decimal, 1 = before the
# units digit, etc.
# Convention on digits: 0 is units (10**0), 1 is tens, -1 is
# tenths, etc.
# This method does the format specification parsing, and
# calculates the various parts of the displayed value
# (mantissas, exponent, position of the last digit). The
# formatting itself is delegated to format_num().
########################################
# Format specification parsing:
match = re.match(r"""
(?P<fill>[^{}]??)(?P<align>[<>=^]?) # fill cannot be { or }
(?P<sign>[-+ ]?)
(?P<zero>0?)
(?P<width>\d*)
(?P<comma>,?)
(?:\.(?P<prec>\d+))?
(?P<uncert_prec>u?) # Precision for the uncertainty?
# The type can be omitted. Options must not go here:
(?P<type>[eEfFgG%]??) # n not supported
(?P<options>[LSP]*)$""",
format_spec,
re.VERBOSE)
# Does the format specification look correct?
if not match:
raise ValueError(
'Format specification %r cannot be used' % (format_spec))
# Effective format presentation type: f, e, g, etc., or None,
# like in
# https://docs.python.org/3.4/library/string.html#format-specification-mini-language. Contrary
# to what is written in the documentation, it is not true that
# None is "the same as 'g'": "{}".format() and "{:g}" do not
# give the same result, on 31415000000.0. None is thus kept as
# is instead of being replaced by "g".
pres_type = match.group('type') or None
# Shortcut:
fmt_prec = match.group('prec') # Can be None
########################################
# Since the '%' (percentage) format specification can change
# the value to be displayed, this value must first be
# calculated. Calculating the standard deviation is also an
# optimization: the standard deviation is generally
# calculated: it is calculated only once, here:
nom_val = nominal_value
std_dev = std_dev
# 'options' is the options that must be given to format_num():
options = set(match.group('options'))
########################################
# The '%' format is treated internally as a display option: it
# should not be applied individually to each part:
if pres_type == '%':
# Because '%' does 0.0055*100, the value
# 0.5499999999999999 is obtained, which rounds to 0.5. The
# original rounded value is 0.006. The same behavior is
# found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'.
# If a different behavior is needed, a solution to this
# problem would be to do the rounding before the
# multiplication.
std_dev *= 100
nom_val *= 100
pres_type = 'f'
options.add('%')
# At this point, pres_type is in eEfFgG or None (not %).
########################################
# Non-real values (nominal value or standard deviation) must
# be handled in a specific way:
real_values = [value for value in [abs(nom_val), std_dev]
if not isinfinite(value)]
# Calculation of digits_limit, which defines the precision of
# the nominal value and of the standard deviation (it can be
# None when it does not matter, like for NaN±NaN):
# Reference value for the calculation of a possible exponent,
# if needed:
if pres_type in (None, 'e', 'E', 'g', 'G'):
# Reference value for the exponent: the largest value
# defines what the exponent will be (another convention
# could have been chosen, like using the exponent of the
# nominal value, irrespective of the standard deviation):
try:
exp_ref_value = max(real_values)
except ValueError: # No non-NaN value: NaN±NaN…
# No meaningful common exponent can be obtained:
pass
## else:
## print "EXP_REF_VAL", exp_ref_value
# Should the precision be interpreted like for a float, or
# should the number of significant digits on the uncertainty
# be controlled?
if ((
# Default behavior: number of significant digits on the
# uncertainty controlled (if useful, i.e. only in
# situations where the nominal value and the standard
# error digits are truncated at the same place):
(not fmt_prec and len(real_values) == 2)
or match.group('uncert_prec')) # Explicit control
# The number of significant digits of the uncertainty must
# be meaningful, otherwise the position of the significant
# digits of the uncertainty does not have a clear
# meaning. This gives us the *effective* uncertainty
# control mode:
and std_dev
and not isinfinite(std_dev)):
# The number of significant digits on the uncertainty is
# controlled.
# The limit digits_limit on the digits of nom_val and std_dev
# to be displayed is calculated. If the exponent notation is
# used, this limit is generally different from the finally
# displayed limit (e.g. 314.15+/-0.01 has digits_limit=-2, but
# will be displayed with an exponent as (3.1415+/-0.0001)e+02,
# which corresponds to 4 decimals after the decimal point, not
# 2).
# Number of significant digits to use:
if fmt_prec:
num_signif_d = int(fmt_prec) # Can only be non-negative
if not num_signif_d:
raise ValueError("The number of significant digits"
" on the uncertainty should be positive")
else:
(num_signif_d, std_dev) = PDG_precision(std_dev)
digits_limit = signif_dgt_to_limit(std_dev, num_signif_d)
else:
# No control of the number of significant digits on the
# uncertainty.
## print "PRECISION NOT BASED ON UNCERTAINTY"
# The precision has the same meaning as for floats (it is
# not the uncertainty that defines the number of digits).
# The usual default precision is used (this is useful for
# 3.141592±NaN with an "f" format specification, for
# example):
#
# prec is the precision for the main parts of the final
# format (in the sense of float formatting):
#
# https://docs.python.org/3.4/library/string.html#format-specification-mini-language
if fmt_prec:
prec = int(fmt_prec)
elif pres_type is None:
prec = 12
else:
prec = 6
if pres_type in ('f', 'F'):
digits_limit = -prec
else: # Format type in None, eEgG
# We first calculate the number of significant digits
# to be displayed (if possible):
if pres_type in ('e', 'E'):
# The precision is the number of significant
# digits required - 1 (because there is a single
# digit before the decimal point, which is not
# included in the definition of the precision with
# the e/E format type):
num_signif_digits = prec+1
else: # Presentation type in None, g, G
# Effective format specification precision: the rule
# of
# http://docs.python.org/2.7/library/string.html#format-specification-mini-language
# is used:
# The final number of significant digits to be
# displayed is not necessarily obvious: trailing
# zeros are removed (with the gG presentation
# type), so num_signif_digits is the number of
# significant digits if trailing zeros were not
# removed. This quantity is relevant for the
# rounding implied by the exponent test of the g/G
# format:
# 0 is interpreted like 1 (as with floats with a
# gG presentation type):
num_signif_digits = prec or 1
# The number of significant digits is important for
# example for determining the exponent:
## print "NUM_SIGNIF_DIGITS", num_signif_digits
digits_limit = (
signif_dgt_to_limit(exp_ref_value, num_signif_digits)
if real_values
else None)
## print "DIGITS_LIMIT", digits_limit
#######################################
# Common exponent notation: should it be used? use_exp is set
# accordingly. If a common exponent should be used (use_exp is
# True), 'common_exp' is set to the exponent that should be
# used.
if pres_type in ('f', 'F'):
use_exp = False
elif pres_type in ('e', 'E'):
if not real_values:
use_exp = False
else:
use_exp = True
# !! This calculation might have been already done,
# for instance when using the .0e format:
# signif_dgt_to_limit() was called before, which
# prompted a similar calculation:
common_exp = first_digit(round(exp_ref_value, -digits_limit))
else: # None, g, G
# The rules from
# https://docs.python.org/3.4/library/string.html#format-specification-mini-language
# are applied.
# Python's native formatting (whose result could be parsed
# in order to determine whether a common exponent should
# be used) is not used because there is shared information
# between the nominal value and the standard error (same
# last digit, common exponent) and extracting this
# information from Python would entail parsing its
# formatted string, which is in principle inefficient
# (internally, Python performs calculations that yield a
# string, and the string would be parsed back into
# separate parts and numbers, which is in principle
# unnecessary).
# Should the scientific notation be used? The same rule as
# for floats is used ("-4 <= exponent of rounded value <
# p"), on the nominal value.
if not real_values:
use_exp = False
else:
# Common exponent *if* used:
common_exp = first_digit(round(exp_ref_value, -digits_limit))
# print "COMMON EXP TEST VALUE", common_exp
# print "LIMIT EXP", common_exp-digits_limit+1
# print "WITH digits_limit", digits_limit
# The number of significant digits of the reference value
# rounded at digits_limit is exponent-digits_limit+1:
if -4 <= common_exp < common_exp-digits_limit+1:
use_exp = False
else:
use_exp = True
########################################
# Calculation of signif_limit (position of the significant
# digits limit in the final fixed point representations; this
# is either a non-positive number, or None), of
# nom_val_mantissa ("mantissa" for the nominal value,
# i.e. value possibly corrected for a factorized exponent),
# and std_dev_mantissa (similarly for the standard
# deviation). common_exp is also set to None if no common
# exponent should be used.
if use_exp:
# Not 10.**(-common_exp), for limit values of common_exp:
factor = 10.**common_exp
nom_val_mantissa = nom_val/factor
std_dev_mantissa = std_dev/factor
# Limit for the last digit of the mantissas:
signif_limit = digits_limit - common_exp
else: # No common exponent
common_exp = None
nom_val_mantissa = nom_val
std_dev_mantissa = std_dev
signif_limit = digits_limit
## print "SIGNIF_LIMIT", signif_limit
########################################
# Format of the main (i.e. with no exponent) parts (the None
# presentation type is similar to the g format type):
main_pres_type = 'fF'[(pres_type or 'g').isupper()]
# The precision of the main parts must be adjusted so as
# to take into account the special role of the decimal
# point:
if signif_limit is not None: # If signif_limit is pertinent
# The decimal point location is always included in the
# printed digits (e.g., printing 3456 with only 2
# significant digits requires to print at least four
# digits, like in 3456 or 3500).
#
# The max() is important for example for
# 1234567.89123+/-12345.678 with the f format: in this
# case, signif_limit is +3 (2 significant digits necessary
# for the error, as per the PDG rules), but the (Python
# float formatting) precision to be used for the main
# parts is 0 (all digits must be shown).
#
# The 1 for the None pres_type represents "at least one
# digit past the decimal point" of Python
# (https://docs.python.org/3.4/library/string.html#format-specification-mini-language). This
# is only applied for null uncertainties.
prec = max(-signif_limit,
1 if pres_type is None and not std_dev
else 0)
## print "PREC", prec
########################################
# print (
# "FORMAT_NUM parameters: nom_val_mantissa={},"
# " std_dev_mantissa={}, common_exp={},"
# " match.groupdict()={}, prec={}, main_pres_type={},"
# " options={}".format(
# nom_val_mantissa, std_dev_mantissa, common_exp,
# match.groupdict(),
# prec,
# main_pres_type,
# options))
# Final formatting:
return format_num(nom_val_mantissa, std_dev_mantissa, common_exp,
match.groupdict(),
prec=prec,
main_pres_type=main_pres_type,
options=options)
def uformatul(nominal_value, upper, lower, format_spec=''):
"""Formats a number with upper and lower uncertainties. """
# Format specification parsing:
match = re.match(r"""
(?P<fill>[^{}]??)(?P<align>[<>=^]?) # fill cannot be { or }
(?P<sign>[-+ ]?)
(?P<zero>0?)
(?P<width>\d*)
(?P<comma>,?)
(?:\.(?P<prec>\d+))?
(?P<uncert_prec>u?) # Precision for the uncertainty?
# The type can be omitted. Options must not go here:
(?P<type>[eEfFgG%]??) # n not supported
(?P<options>[LSP]*)$""",
format_spec,
re.VERBOSE)
# Does the format specification look correct?
if not match:
raise ValueError(
'Format specification %r cannot be used' % (format_spec))
# Effective format presentation type: f, e, g, etc., or None,
pres_type = match.group('type') or None
# Shortcut:
fmt_prec = match.group('prec') # Can be None
nom_val = nominal_value
upper = upper
lower = lower
# 'options' is the options that must be given to format_num():
options = set(match.group('options'))
if pres_type == '%':
upper *= 100
lower *= 100
nom_val *= 100
pres_type = 'f'
options.add('%')
# Non-real values (nominal value or upper, lower) must be handled in a
# specific way:
real_values = [value for value in [abs(nom_val), upper, lower]
if not isinfinite(value)]
if pres_type in (None, 'e', 'E', 'g', 'G'):
try:
exp_ref_value = max(real_values)
except ValueError: # No non-NaN value: NaN±NaN…
pass
if ((
(not fmt_prec and len(real_values) == 3)
or match.group('uncert_prec')) # Explicit control
and upper
and lower
and not isinfinite(upper)
and not isinfinite(lower)):
# Number of significant digits to use:
if fmt_prec:
num_signif_d = int(fmt_prec) # Can only be non-negative
if not num_signif_d:
raise ValueError("The number of significant digits"
" on the uncertainty should be positive")
else:
(num_signif_d_u, upper) = PDG_precision(upper)
(num_signif_d_l, lower) = PDG_precision(lower)
num_signif_d = min(num_signif_d_u, num_signif_d_l)
digits_limit_u = signif_dgt_to_limit(upper, num_signif_d)
digits_limit_l = signif_dgt_to_limit(lower, num_signif_d)
digits_limit = min(digits_limit_u, digits_limit_l)
else:
if fmt_prec:
prec = int(fmt_prec)
elif pres_type is None:
prec = 12
else:
prec = 6
if pres_type in ('f', 'F'):
digits_limit = -prec
else: # Format type in None, eEgG
if pres_type in ('e', 'E'):
num_signif_digits = prec+1
else: # Presentation type in None, g, G
num_signif_digits = prec or 1
digits_limit = (
signif_dgt_to_limit(exp_ref_value, num_signif_digits)
if real_values
else None)
if pres_type in ('f', 'F'):
use_exp = False
elif pres_type in ('e', 'E'):
if not real_values:
use_exp = False
else:
use_exp = True
common_exp = first_digit(round(exp_ref_value, -digits_limit))
else: # None, g, G
if not real_values:
use_exp = False
else:
# Common exponent *if* used:
common_exp = first_digit(round(exp_ref_value, -digits_limit))
if -4 <= common_exp < common_exp-digits_limit+1:
use_exp = False
else:
use_exp = True
if use_exp:
factor = 10.**common_exp
nom_val_mantissa = nom_val/factor
upper_mantissa = upper/factor
lower_mantissa = lower/factor
signif_limit = digits_limit - common_exp
else: # No common exponent
common_exp = None
nom_val_mantissa = nom_val
upper_mantissa = upper
lower_mantissa = lower
signif_limit = digits_limit
main_pres_type = 'fF'[(pres_type or 'g').isupper()]
if signif_limit is not None: # If signif_limit is pertinent
        prec = max(-signif_limit,
                   1 if pres_type is None and (not upper or not lower)
                   else 0)
# Final formatting:
return format_num_ul(nom_val_mantissa, upper_mantissa, lower_mantissa, common_exp,
match.groupdict(),
prec=prec,
main_pres_type=main_pres_type,
options=options)
```
|
{
"source": "j-faria/vera",
"score": 2
}
|
#### File: vera/tests/test_vera.py
```python
def test_imports():
import vera
from vera import RV
def test_dace():
from vera.query_dace import get_observations
_ = get_observations('HD10180', verbose=False)
def test_read_rdb():
from vera import RV
from os.path import dirname, join
here = dirname(__file__)
s = RV(join(here, 'data_file.rdb'), star='dummy', sigmaclip=False)
print(s)
def test_DACE():
from vera import DACE
s = DACE.HD10180
print(s)
def test_KOBE(capsys):
from vera import KOBE
# not in target list
_ = KOBE.HD100
cap = capsys.readouterr()
assert cap.out == 'Cannot find "HD100" in KOBE target list.\n'
# no access to data
s = KOBE.KOBE_001
assert s is None
def test_plot():
import matplotlib.pyplot as plt
from vera import DACE
s = DACE.HD10180
s.plot()
plt.close('all')
```
#### File: vera/vera/DACE.py
```python
import os
from glob import glob
from pprint import pprint
from requests import RequestException
from math import ceil
import numpy as np
import matplotlib.pyplot as plt
from cached_property import cached_property
from .RV import RV
from .CCFs import iCCF, chromaticRV, HeaderCCF, chromatic_plot_main
from .spectra import Spectrum, Spectrum2D
from .query_dace import get_CCFs, get_spectra, get_E2DS, translate
from .utils import (yel, yelb, red as cl_red, blue as cl_blue, info)
here = os.path.dirname(os.path.abspath(__file__))
def escape(target):
if 'proxima' in target.lower():
target = 'Proxima'
if target[0].isdigit():
target = '_' + target
target = target.replace('-', '_').replace('+', '_').replace('.', '_')
return target
def deescape(target):
if target in translate:
target = translate[target]
else:
target = target.replace('K2_', 'K2-')
target = target.replace('TOI_', 'TOI-')
target = target.replace('BD_', 'BD+')
target = target.replace('KOBE_', 'KOBE-')
target = target.replace('_', '')
return target
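# For illustration, the escaping rules above behave as follows:
#   escape('TOI-123') -> 'TOI_123'     deescape('TOI_123') -> 'TOI-123'
#   escape('51Peg')   -> '_51Peg'      deescape('BD_123')  -> 'BD+123'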
class DACERV(RV):
"""A set of RV observations for a DACE target"""
def __init__(self, *args, **kwargs):
super(DACERV, self).__init__(*args, **kwargs)
self._ccfs_enabled = False
self._spec_enabled = False
def __repr__(self):
n1 = self.mask.sum()
n2 = self.mask.size
s = f'RV({self.star}; {n1} points'
if n1 != n2:
s += f' [{n2-n1}/{n2} masked]'
if self.chromatic:
s += ', chromatic RVs enabled'
if self.spectroscopic:
s += ', spectra enabled'
s += ')'
return s
@property
def info(self):
from .utils import print_system_info
print_system_info(self)
def bin(self, *args, **kwargs):
try:
            if self.chromatic and '_child' not in kwargs:
self.cRV.bin(self.night_indices)
del self.blue, self.mid, self.red
except AttributeError:
pass
super(DACERV, self).bin(*args, **kwargs)
def _download_spectra(self, s1d=True, s2d=True, **kwargs):
if self.verbose:
print('Will try to download ESPRESSO S1D and S2D from DACE')
if s1d:
if self.verbose:
print('- S1D:')
self.spec_dir = get_spectra(self.star, **kwargs)
if s2d:
if self.verbose:
print('- S2D:')
self.spec_dir = get_E2DS(self.star, **kwargs)
if self.spec_dir == 'stopped':
self._spec_enabled = False
return
self._spec_enabled = True
def enable_spectra(self, directory=None, s1d=True, s2d=True,
all_instruments=True, **kwargs):
"""
Enable spectroscopic calculations by trying to download all available
S1D and E2DS files from DACE.
Arguments
---------
directory: str (optional)
Directory where to store the downloaded files. By default this is
the star name followed by "_DACE_downloads"
s1d: bool (optional, default True)
Download S1D files
s2d: bool (optional, default True)
Download S2D files
all_instruments: bool (optional, default True)
            Whether to try to download spectra for all available instruments.
            If set to False, an `instrument` argument can be provided to
            select the instrument.
limit: int (optional)
Set a maximum number of files to download (per instrument)
"""
# maybe spectra is enabled but with fewer spectra
if self._spec_enabled:
if 'limit' in kwargs:
nn = len(self.spectra) / len(self.instruments)
if kwargs['limit'] > nn:
self._spec_enabled = False
if not self._spec_enabled:
kw = dict(directory=directory)
kw = {**kw, **kwargs}
if all_instruments:
kw['instrument'] = self.instruments
self._download_spectra(s1d=s1d, s2d=s2d, **kw)
if not self._spec_enabled: # download_spectra failed/stopped
print(cl_red | 'ERROR: spectra not enabled')
return
if s1d:
s1d_files = glob(os.path.join(self.spec_dir, '*_S1D_A.fits'))
if len(s1d_files) == 0:
s1d_files = glob(os.path.join(self.spec_dir, '*_s1d_A.fits'))
if len(s1d_files) < self.time.size:
c = f'({len(s1d_files)} / {self.time.size})'
print(yelb | 'Warning:', end=' ')
                print(yel | f'there are fewer S1D files than observations {c}')
if len(s1d_files) == 0:
return
load = True
try:
load = len(self.spectra) != len(s1d_files)
except AttributeError:
pass
if load:
print(cl_blue | 'Loading S1D spectra...', flush=True)
self.spectra = Spectrum.from_file(s1d_files)
print(cl_blue | 'median-dividing...', flush=True)
for s in self.spectra:
s.median_divide()
return
if s2d:
s2d_files = glob(os.path.join(self.spec_dir, '*_S2D_A.fits'))
if len(s2d_files) == 0:
s2d_files = glob(os.path.join(self.spec_dir, '*_e2ds_A.fits'))
if len(s2d_files) < self.time.size:
c = f'({len(s2d_files)} / {self.time.size})'
print(yelb | 'Warning:', end=' ')
                print(yel | f'there are fewer S2D files than observations {c}')
load = True
try:
load = len(self.spectra) != len(s2d_files)
except AttributeError:
pass
if load:
info('Loading S2D spectra...')
self.spectra = Spectrum2D.from_file(s2d_files)
# self.cRV = chromaticRV(indicators)
# self._round_chromatic_time()
# self.headers = HeaderCCF(self.cRV.I)
def _round_chromatic_time(self):
sig_digits = len(str(self.time[0]).split('.')[1])
self.cRV.time = self.cRV.time.round(sig_digits)
def _download_CCFs(self, **kwargs):
if self.verbose:
info('Will try to download CCFs from DACE')
self.ccf_dir = get_CCFs(self.star, **kwargs)
if self.ccf_dir in (None, 'stopped'):
self._ccfs_enabled = False
return
self._ccfs_enabled = True
def enable_chromatic(self, directory=None, all_instruments=True, **kwargs):
"""
Enable chromatic RV calculations by trying to download CCF files.
Arguments
---------
directory: str (optional)
Directory where to store the downloaded files. By default this is
the star name followed by "_DACE_downloads"
all_instruments: bool (optional, default True)
            Whether to try to download CCFs for all available instruments
"""
if chromaticRV is None:
raise NotImplementedError
if not self._ccfs_enabled:
kw = dict(directory=directory)
if all_instruments:
kw['instrument'] = self.instruments
kw = {**kw, **kwargs}
self._download_CCFs(**kw)
if not self._ccfs_enabled: # download_CCFs failed or stopped
print(cl_red | 'ERROR: chromatic not enabled')
return
ccf_files = glob(os.path.join(self.ccf_dir, '*CCF_SKYSUB*.fits'))
# ccf_files = []
if len(ccf_files) == 0:
ccf_files = []
for end in ('*CCF*', '*ccf*A.fits'):
ccf_files.extend(glob(os.path.join(self.ccf_dir, end)))
if len(ccf_files) < self.time.size:
counts = f'({len(ccf_files)} / {self.time.size})'
print(yelb | 'Warning:', end=' ')
            print(yel | f'there are fewer CCF files than observations {counts}')
if hasattr(self, 'cRV') and len(ccf_files) == self.cRV.n:
if self.verbose:
info('chromatic enabled!')
return
info('Loading CCFs...')
indicators = iCCF.Indicators.from_file(
ccf_files, guess_instrument=True, sort_bjd=True)
self.cRV = chromaticRV(indicators)
self._round_chromatic_time()
self.headers = HeaderCCF(self.cRV.I)
if self.verbose:
info('chromatic enabled!')
@property
def chromatic(self):
return self._ccfs_enabled
@property
def spectroscopic(self):
return self._spec_enabled
def _build_chromatic_RV(self, which):
# if self is binned nightly, there might be more CCFs than time.size
if self.is_binned:
time = self._unbinned['time']
svrad = self._unbinned['svrad']
mask = self._unbinned['mask']
obs = self._unbinned['obs']
else:
time = self.time
svrad = self.svrad
mask = self.mask
obs = self.obs
# this finds the mask for times that have a CCF
# (because sometimes there are less CCFs)
inboth = np.isin(time, self.cRV.time.round(6))
ones = np.ones_like(time[inboth])
#! since chromatic RVs are in km/s (measured directly from the CCF)
#! make sure the errors we supply here are also in km/s
if self.did_ms:
svrad = svrad * 1e-3
        # keep the full-range RV errors (already in km/s at this point) so
        # they can serve as a fallback when no chromatic errors are available
        svrad_full = svrad
        vrad = {
            'blue': self.cRV.blueRV,
            'mid': self.cRV.midRV,
            'red': self.cRV.redRV,
        }
        vrad = vrad[which]
        svrad = {
            'blue': self.cRV._blueRVerror,
            'mid': self.cRV._midRVerror,
            'red': self.cRV._redRVerror
        }
        svrad = svrad[which]
        if svrad is None:
            # the original code indexed None here; fall back to the full-range
            # errors at the times that have a CCF (assumed intent)
            svrad = svrad_full[inboth]
fwhm = {
'blue': self.cRV._blueFWHM,
'mid': self.cRV._midFWHM,
'red': self.cRV._redFWHM,
}
fwhm = fwhm[which]
        efwhm = {
            'blue': self.cRV._blueFWHMerror,
            'mid': self.cRV._midFWHMerror,
            'red': self.cRV._redFWHMerror
        }
        efwhm = efwhm[which]
        if efwhm is None:
            # the original code indexed None here; fall back to the full-range
            # FWHM errors at the times that have a CCF (assumed intent)
            efwhm = self.efwhm[inboth]
rv = RV.from_arrays(time[inboth], vrad, svrad, fwhm, efwhm,
verbose=self.verbose, star=self.star,
sigmaclip=False, adjust_means=self.did_adjustmeans,
ms=self.did_ms, tess=False)
rv.mask = mask[inboth]
rv.obs = obs[inboth]
rv.instruments = self.instruments
rv.pipelines = self.pipelines
# now bin the chromatic RVs if self is also binned
if self.is_binned:
rv.bin(_child=True)
return rv
@cached_property
def blue(self):
assert self.chromatic, \
'chromatic RVs not enabled, run .enable_chromatic()'
return self._build_chromatic_RV('blue')
@cached_property
def mid(self):
assert self.chromatic, \
'chromatic RVs not enabled, run .enable_chromatic()'
return self._build_chromatic_RV('mid')
@cached_property
def red(self):
assert self.chromatic, \
'chromatic RVs not enabled, run .enable_chromatic()'
return self._build_chromatic_RV('red')
def save_chromatic_rdb(self, filename):
header = 'jdb\tvrad\tsvrad\tvblue\tsvblue\tvmid\tsvmid\tvred\tsvred\tfwhm\tsfwhm'
header += '\n' + '\t'.join(['-'*len(s) for s in header.split('\t')])
fmt = ['%7.5f'] + 10*['%5.3f']
data = np.c_[self.time, self.vrad, self.svrad,
self.blue.vrad, self.blue.svrad,
self.mid.vrad, self.mid.svrad,
self.red.vrad, self.red.svrad,
self.fwhm, self.efwhm]
np.savetxt(filename, data, fmt=fmt, comments='', header=header,
delimiter='\t')
def chromatic_plot(self):
assert self.chromatic, \
'chromatic RVs not enabled, run .enable_chromatic()'
fig, axs = chromatic_plot_main(self)
fig.set_figwidth(9)
fig.set_figheight(10)
ploti = np.arange(0, 8, 2) # indices of RV plots
peri = np.arange(1, 8, 2) # indices of periodogram plots
axs[0].get_shared_x_axes().join(*axs[ploti])
axs[ploti[1]].get_shared_y_axes().join(*axs[ploti[1:]])
axs[1].get_shared_x_axes().join(*axs[peri])
if self.tess is not None and self.tess.period is not None:
for ax in axs[peri]:
y1, y2 = ax.get_ylim()
h = 0.1 * abs(y2 - y1)
ax.vlines(self.tess.period, ymin=y2 - h, ymax=y2, color='m',
alpha=0.6, label='planets')
axs[0].set_title(r'full $\lambda$ range', loc='left', fontsize=8)
names = ('blue', 'mid', 'red')
for i, (ax, name) in enumerate(zip(axs[ploti[1:]], names)):
ax.set_title(name + rf' $\lambda$={self.cRV.bands[i]} nm',
loc='left', fontsize=8)
for ax in axs[ploti]:
ax.set_ylabel('RV [m/s]')
for ax in axs[peri]:
ax.set_ylabel('Power')
def roundup(a, digits=0):
n = 10**-digits
return round(ceil(a / n) * n, digits)
# maxy = roundup(max([ax.get_ylim()[1] for ax in axs[ploti]]), 1)
# miny = roundup(min([ax.get_ylim()[0] for ax in axs[ploti]]), 1)
# for ax in axs[ploti]:
# ax.set_ylim(miny, maxy)
maxy = roundup(max([ax.get_ylim()[1] for ax in axs[peri]]), 1)
for ax in axs[peri]:
ax.set_ylim(0, maxy)
# if self.prot and not np.isnan(self.prot):
# # legend = legend & True
# y1, y2 = ax.get_ylim()
# h = 0.05 * abs(y2 - y1)
# ax.vlines(self.prot, ymin=y2-2*h, ymax=y2-h, color='r',
# alpha=0.6, lw=2)
# ax.plot(self.prot, y2-h, 'x', color='r', label=r'P$_{\rm rot}$')
# fig.tight_layout()
plt.show()
return fig, axs
def kima(self, directory=None, GUI=True, ESPRESSO_fiber_offset=True):
# from pykima import kimaGUI, make_template
star = ''.join(self.star.split())
d = star + '_kima_analysis' if directory is None else directory
self._kima_directory = d
if not os.path.exists(d):
print(yelb | 'Created', d)
os.mkdir(d)
# save data to directory
self.save(prefix=d, save_indicators=True)
if GUI:
cmd = f'kima-gui {d}'
os.system(cmd)
else:
create_dir = not os.path.exists(os.path.join(d, 'kima_setup.cpp'))
self._kima(directory=d, create_dir=create_dir, edit=False)
def save_figures(self):
folder = f'{self.star}_figures'
if not os.path.exists(folder):
os.mkdir(folder)
# plot_both
fig, axs = self.plot_both(show_today=True, right_ticks=False)
print(os.path.join(folder, 'plot_both.png'))
fig.savefig(os.path.join(folder, 'plot_both.png'), dpi=200)
# gls_both
fig, axs = self.gls_both(HZ=True)
axs[1].set_title('FWHM', loc='left')
print(os.path.join(folder, 'gls_both.png'))
fig.savefig(os.path.join(folder, 'gls_both.png'), dpi=200)
plt.close('all')
class DACE():
"""
This class holds information about DACE targets.
To access the RVs of a given target T use `DACE.T`
All symbols not allowed by Python (-, +, or .) have been replaced with an
underscore _. For target names starting with a number, start with an _.
Example: 'TOI-123' -> DACE.TOI_123, '51Peg' -> DACE._51Peg
.target : RV
Instance of the `RV` class for a given target
"""
# kwargs for RV with their default values
ESPRESSO_only = False # only load ESPRESSO RVs
local_first = False # try to read local files before querying DACE
verbose = True # what it says
bin_nightly = True # bin the observations nightly
sigmaclip = True # sigma-clip RVs, FWHM, and other observations
    maxerror = 10  # max. RV error allowed; mask points with larger errors
adjust_means = True
keep_pipeline_versions = False # keep both ESPRESSO pipeline versions
remove_ESPRESSO_commissioning = True # remove RVs before commissioning
download_TESS = False # try to download TESS data
ESPRESSO_fiber_offset = False
# remove_secular_acceleration = True # subtract secular acceleration from RVs
remove_secular_acceleration = ('HARPS', )
def __init__(self):
self._print_errors = True
self._attributes = set()
self._ignore_attrs = list(self.__dict__.keys())
@property
def _EO(self):
return self.ESPRESSO_only
@property
def settings(self):
msg = "Use .set(setting=True/False) to change each setting.\n"
msg += "For example DACE.set(bin_nightly=False, verbose=True)\n"
print(msg)
pprint(self._kwargs)
@property
def _kwargs(self):
k = {
'verbose': self.verbose,
'ESPRESSO_only': self.ESPRESSO_only,
'bin': self.bin_nightly,
'sigmaclip': self.sigmaclip,
'maxerror': self.maxerror,
'adjust_means': self.adjust_means,
'keep_pipeline_versions': self.keep_pipeline_versions,
'remove_secular_acceleration': self.remove_secular_acceleration,
'tess': self.download_TESS,
}
return k
def set(self, verbose=None, ESPRESSO_only=None, bin_nightly=None,
adjust_means=None, local_first=None, sigmaclip=None, maxerror=None,
download_TESS=None, keep_pipeline_versions=None,
remove_secular_acceleration=None,
reload_all=True):
def _not_none_and_different(val, name):
return val is not None and val != getattr(self, name)
change = False
if _not_none_and_different(verbose, 'verbose'):
self.verbose = verbose
change = True
if _not_none_and_different(ESPRESSO_only, 'ESPRESSO_only'):
self.ESPRESSO_only = ESPRESSO_only
change = True
if _not_none_and_different(bin_nightly, 'bin_nightly'):
self.bin_nightly = bin_nightly
change = True
if _not_none_and_different(adjust_means, 'adjust_means'):
self.adjust_means = adjust_means
change = True
if _not_none_and_different(local_first, 'local_first'):
self.local_first = local_first
change = True
if _not_none_and_different(sigmaclip, 'sigmaclip'):
self.sigmaclip = sigmaclip
change = True
if _not_none_and_different(maxerror, 'maxerror'):
self.maxerror = maxerror
change = True
if _not_none_and_different(keep_pipeline_versions,
'keep_pipeline_versions'):
self.keep_pipeline_versions = keep_pipeline_versions
change = True
if _not_none_and_different(download_TESS, 'download_TESS'):
self.download_TESS = download_TESS
change = True
if _not_none_and_different(remove_secular_acceleration,
'remove_secular_acceleration'):
self.remove_secular_acceleration = remove_secular_acceleration
change = True
if change and reload_all:
self._print_errors = False
self.reload()
self._print_errors = True
def reload(self, star=None):
if star is None:
stars = self._attributes
else:
stars = [star]
for star in stars:
escaped_star = escape(star)
if self.verbose:
if escaped_star != star:
print(f'reloading {star} ({escaped_star})')
else:
print(f'reloading {star}')
try:
delattr(self, escaped_star)
except AttributeError:
pass
getattr(self, escaped_star)
def _delete_all(self):
removed = []
for star in self._attributes:
delattr(self, star)
removed.append(star)
for star in removed:
self._attributes.remove(star)
def _from_local(self, star):
try:
return DACERV.from_local(star, **self._kwargs)
except ValueError:
if self._print_errors:
print(cl_red | f'ERROR: {star} no local data?')
return
def _from_DACE(self, star):
try:
return DACERV.from_DACE(star, **self._kwargs)
except (KeyError, RequestException):
if self._print_errors:
print(cl_red | f'ERROR: {star} no data found in DACE?')
return
def __getattr__(self, attr):
ignore = attr in (
'__wrapped__',
# '_ipython_canary_method_should_not_exist_',
'_repr_mimebundle_',
'getdoc',
'__call__',
'items')
ignore = ignore or attr.startswith('_repr')
ignore = ignore or attr.startswith('_ipython')
ignore = ignore or attr in self._ignore_attrs
if ignore:
return
star = deescape(attr)
if self.local_first:
t = self._from_local(star)
if t is None:
t = self._from_DACE(star)
else:
t = self._from_DACE(star)
if t is None:
return
setattr(self, attr, t)
self._attributes.add(star)
return t
```
#### File: vera/vera/_periodograms.py
```python
from functools import partial, partialmethod
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
try:
from gatspy import periodic
except ImportError:
raise ImportError('Please, pip install gatspy')
from astropy.timeseries import LombScargle
from .utils import info, red
from .stat_tools import wrms, false_alarm_level_gatspy
def frequency_grid(self,
plow=None,
phigh=None,
samples_per_peak=5,
nmax=50000):
if plow is None:
plow = 0.5
if phigh is None:
phigh = 2 * self.time.ptp()
#! HACK
return np.linspace(1 / phigh, 1 / plow, 20000)
day2sec = 86400
sec2day = 1 / day2sec
## chosing the maximum frequency
self.extract_from_DACE_data('texp')
texp = self.texp[self.mask].mean()
windowing_limit = 0.5 / (texp * sec2day)
# print('windowing_limit:', windowing_limit, 'day^-1')
# observation times are recorded to D decimal places
time_precision = len(str(self.time[0]).split('.')[1])
time_precision_limit = 0.5 * 10**time_precision
# print('precision_limit:', time_precision_limit, 'day^-1')
fmax = min(windowing_limit, time_precision_limit)
## choosing the minimum frequency (easy)
timespan = self.time[self.mask].ptp()
fmin = 1 / timespan
Neval = int(samples_per_peak * texp * fmax)
# print('Neval:', Neval)
Neval = min(Neval, nmax)
freq = np.linspace(fmin, fmax, Neval)
return freq
def window_function(self, plot=True, frequency=True, norm=None, **kwargs):
""" Calculate the window function of the sampling times. """
if self.time[self.mask].size < 3:
print(red | 'Cannot calculate periodogram! Too few points?')
return
if norm is None:
norm = 'standard'
m = self.mask
t, e = self.time[m], self.svrad[m]
ls = LombScargle(t, np.ones_like(t), e, fit_mean=False, center_data=False,
normalization=norm)
minf = kwargs.pop('minf', None)
minf = kwargs.pop('minimum_frequency', minf)
maxf = kwargs.pop('maxf', None)
maxf = kwargs.pop('maximum_frequency', maxf)
freqW, powerW = ls.autopower(minimum_frequency=minf,
maximum_frequency=maxf,
method='slow',
**kwargs)
fig, (ax, ax1) = plt.subplots(1,
2,
constrained_layout=True,
figsize=(6, 3),
gridspec_kw=dict(width_ratios=[2, 1]))
if frequency:
# dm1_2_uHz = 1e6 / 86400
# ax.plot(freqW * dm1_2_uHz, powerW)
ax.plot(freqW, powerW)
ax.vlines([1, 2, 4], 0, 1, color='r', alpha=0.4, zorder=-1)
ax.vlines([1/365.25], 0, 1, color='g', alpha=0.4, zorder=-1)
ax.vlines([1/self.mtime.ptp()], 0, 1, color='m', alpha=0.4, zorder=-1)
else:
ax.semilogx(1 / freqW, powerW)
ax.vlines([1, 0.5, 0.25], 0, 1, color='r', alpha=0.4, zorder=-1)
ax.vlines([365.25], 0, 1, color='g', alpha=0.4, zorder=-1)
ax.vlines([self.mtime.ptp()], 0, 1, color='m', alpha=0.4, zorder=-1)
from matplotlib.backend_bases import MouseButton
point, = ax.plot(0, 0, 'ro')
circle, = ax1.plot(np.cos(2 * np.pi * 1 * self.mtime),
np.sin(2 * np.pi * 1 * self.mtime), 'x')
ax1.set(xlim=(-1.2, 1.2), ylim=(-1.2, 1.2))
def on_move(event):
# get the x and y pixel coords
x, y = event.x, event.y
# if event.inaxes:
# ax = event.inaxes # the axes instance
# print('data coords %f %f' % (event.xdata, event.ydata), end='\t')
# print(ls.power(event.xdata))
def on_click(event):
if event.inaxes is ax and event.button is MouseButton.LEFT:
print(event.xdata)
point.set_data(event.xdata, ls.power(event.xdata))
circle.set_data(np.cos(2 * np.pi * event.xdata * self.mtime),
np.sin(2 * np.pi * event.xdata * self.mtime))
fig.canvas.draw()
# print('disconnecting callback')
# plt.disconnect(binding_id)
binding_id = plt.connect('motion_notify_event', on_move)
plt.connect('button_press_event', on_click)
return fig, ax
def gls_indicator(self,
value,
error,
label,
recompute=False,
plot=True,
ax=None,
FAP=True,
adjust_offsets=True,
frequency=False,
bootstrap=True,
HZ=False,
gatspy=False,
legend=True,
obs=None,
oversampling=20,
plot_data_with_offsets=False,
color=None,
line_kwargs={},
**kwargs):
"""
Calculate the Lomb-Scargle periodogram of any attribute. This function can
automatically adjust offsets (for different instruments and between ESPRESSO
fibers, for example) while calculating the periodogram, but this is slower.
Turn this off by setting `adjust_offsets` to False.
"""
if self.time[self.mask].size < 3:
print(red | 'Cannot calculate periodogram! Too few points?')
return
same = self._periodogram_calculated_which == value
plow, phigh = kwargs.get('plow', None), kwargs.get('phigh', None)
freq = self.frequency_grid(plow, phigh)
period = 1 / freq
try:
value = getattr(self, value)
except AttributeError:
exec('out = ' + value, globals(), locals())
value = locals()['out']
error = getattr(self, error)
try:
# didn't adjust offsets before but now want to do it
if adjust_offsets and not self.GLS['gatspy']:
recompute = True
# adjusted offsets before but now don't want to do it
if not adjust_offsets and self.GLS['gatspy']:
recompute = True
except AttributeError:
pass
can_adjust_offsets = self.instruments.size > 1 or self.has_before_and_after_fibers
if not can_adjust_offsets:
adjust_offsets = False
if (not self.periodogram_calculated) or recompute or (not same):
if value.size == self.mask.size:
# use non-masked points
m = self.mask
# and not those which are nan
m &= ~np.isnan(value)
else:
info('different dimensions, skipping application of mask')
m = np.full_like(value, True, dtype=bool)
can_adjust_offsets = self.instruments.size > 1 or self.has_before_and_after_fibers
if adjust_offsets and can_adjust_offsets:
if self.verbose:
info(f'Adjusting {label} offsets within periodogram')
gatspy = True
model = periodic.LombScargleMultiband(Nterms_base=1, Nterms_band=0)
if obs is None:
obs = self.obs
model.fit(self.time[m], value[m], error[m], filts=obs[m])
# period, power = model.periodogram_auto(oversampling=30)
power = model.periodogram(period)
else:
if gatspy:
# if self.time.size < 50:
model = periodic.LombScargle(fit_period=False)
# else:
# model = periodic.LombScargleFast()
model.fit(self.time[m], value[m], error[m])
# period, power = model.periodogram_auto(oversampling=30)
power = model.periodogram(period)
else:
model = LombScargle(self.time[m], value[m], error[m])
power = model.power(1 / period)
# save it
self.GLS = {}
self.GLS['model'] = model
self.GLS['period'] = period
self.GLS['power'] = power
self.periodogram_calculated = True
            self._periodogram_calculated_which = value_name
self.GLS['gatspy'] = gatspy
if gatspy:
fal = partial(false_alarm_level_gatspy, self)
self.GLS['model'].false_alarm_level = fal
if not self.GLS['gatspy']:
adjust_offsets = False
# plot_data_with_offsets = False
if self.verbose and adjust_offsets:
info('Adjusted means:')
ln = self._longest_name
offsets = self.GLS['model'].ymean_by_filt_
instruments = self.instruments.copy().astype('U16')
# if self.has_before_and_after_fibers:
# # print('(note ESPRESSO offset is between before and after fiber change)')
# i = np.where(instruments == 'ESPRESSO')[0][0]
# instruments[i] = 'ESPRESSO-post'
# instruments = np.insert(instruments, i, 'ESPRESSO-pre')
# ln += 6
s = [
f' {i:{ln}s}: {off:7.4f} {self.units}'
for i, off in zip(instruments, offsets)
]
print('\n'.join(s))
if not plot:
return
if ax is None:
fig, ax = plt.subplots(1, 1, constrained_layout=True)
else:
ax = ax
if kwargs.get('show_title', True):
ax.set_title(label, loc='left')
kw = dict(color=color, **line_kwargs)
if frequency:
factor = 1 #/ 86400
ax.plot(factor / self.GLS['period'], self.GLS['power'], **kw)
else:
ax.semilogx(self.GLS['period'], self.GLS['power'], **kw)
if FAP and self.time[self.mask].size > 5:
if bootstrap:
if self.verbose:
info('calculating FAP with bootstrap...')
k = dict(method='bootstrap')
fap01 = self.GLS['model'].false_alarm_level(0.1, **k)
fap001 = self.GLS['model'].false_alarm_level(0.01, **k)
else:
fap01 = self.GLS['model'].false_alarm_level(0.1)
fap001 = self.GLS['model'].false_alarm_level(0.01)
fap_period = kwargs.get('fap_period', 0.98 * ax.get_xlim()[1])
for fap, fapstr in zip((fap01, fap001), ('10%', '1%')):
ax.axhline(fap, color='k', alpha=0.3)
ax.text(fap_period, fap, fapstr, ha='right', va='bottom',
fontsize=8, alpha=0.4)
show_planets = kwargs.get('show_planets', True)
if show_planets and self.known_planets.P is not None:
# legend = legend & True
y1, y2 = ax.get_ylim()
h = 0.1 * abs(y2 - y1)
P = 1 / self.known_planets.P if frequency else self.known_planets.P
ax.vlines(P,
ymin=y2 - h,
ymax=y2,
color='m',
alpha=0.6,
label='planets')
show_prot = kwargs.get('show_prot', True)
if show_prot and self.prot:
if isinstance(self.prot, tuple): # assume it's (prot, error)
y1, y2 = ax.get_ylim()
h = 0.05 * abs(y2 - y1)
kw = dict(fmt='o', color='r', alpha=0.6, lw=2, ms=2)
prot = 1 / self.prot[0] if frequency else self.prot[0]
ax.errorbar(x=prot, y=y2, **kw, label=r'P$_{\rm rot}$')
# ax.vlines(self.prot[0], ymin=y2 - h, ymax=y2, **kw)
# ax.plot(self.prot[0], y2, 'x', **kw, label=r'P$_{\rm rot}$')
elif not np.isnan(self.prot):
y1, y2 = ax.get_ylim()
h = 0.05 * abs(y2 - y1)
kw = dict(color='r', alpha=0.6, lw=2)
prot = 1 / self.prot if frequency else self.prot
ax.vlines(prot, ymin=y2 - h, ymax=y2, **kw)
ax.plot(prot, y2, 'x', **kw, label=r'P$_{\rm rot}$')
if HZ and self.HZ is not None:
ax.axvspan(*self.HZ, color='g', alpha=0.2, zorder=-1, label='HZ')
xlabel = 'Frequency [days$^{-1}$]' if frequency else 'Period [days]'
ax.set(
xlabel=xlabel,
ylabel='Normalised Power',
ylim=(0, None),
xlim=(1e-10, 1) if frequency else (1, None)
)
labels = [line.get_label() for line in ax.lines]
# print(labels)
# labels = not all([l.startswith('_') for l in labels])
if legend and labels:
ax.legend(ncol=10,
bbox_to_anchor=(1, 1.12),
fontsize='small',
handletextpad=0.3)
add_period_axis = kwargs.get('add_period_axis', True)
if frequency and add_period_axis:
f2P = lambda f: 1 / (f + 1e-10)
P2f = lambda P: 1 / (P + 1e-10)
ax2 = ax.secondary_xaxis("top", functions=(f2P, P2f))
ax2.minorticks_off()
ax2.set_xticks([1, 1 / 0.5, 1 / 0.2, 1 / 0.1, 1 / 0.05])
# ax.set_xticklabels(['0', '0.2'])
ax.set_xlim(0, 1)
ax2.set_xlabel('Period [days]')
return ax
gls_fwhm = partialmethod(gls_indicator, 'fwhm', 'efwhm', 'FWHM')
gls_contrast = partialmethod(gls_indicator, 'contrast', 'econtrast', 'CCF contrast')
gls_bis = partialmethod(gls_indicator, 'bispan', 'bispan_err', 'BIS')
gls_rhk = partialmethod(gls_indicator, 'rhk', 'erhk', r"log R'$_{\rm HK}$")
gls_caindex = partialmethod(gls_indicator, 'caindex', 'caindex_err', 'Ca')
gls_naindex = partialmethod(gls_indicator, 'naindex', 'naindex_err', 'Na')
gls_haindex = partialmethod(gls_indicator, 'haindex', 'haindex_err', r'H$\alpha$')
def gls_offset(self, HZ=False, nsamples=50, _sign=True):
# if
# if not self.has_before_and_after_fibers:
# print(red | 'ERROR:', 'No points before and after offset')
# return
self.gls(adjust_offsets=True, plot=False, recompute=True, plow=1.1)
fig = plt.figure(figsize=(16, 5))#, constrained_layout=True)
gs = gridspec.GridSpec(2, 2, width_ratios=[1, 1.5], figure=fig)#, height_ratios=[4, 1])
ax1 = fig.add_subplot(gs[:, 0])
# ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[:, 1:])
ax3.set(xlabel='Period [days]', ylabel='Power')
self.plot(ax=ax1, legend=False, right_ticks=False)
# self.plot(ax=ax2, legend=False, right_ticks=False)
m = self.GLS['model']
for filt, ym in zip(np.unique(m.filts), m.ymean_by_filt_):
mask = m.filts == filt
ax1.hlines(ym, m.t[mask].min(), m.t[mask].max(), ls='--')
freq = 1 / self.GLS['period']
N = freq.size
best_offset = np.ediff1d(m.ymean_by_filt_)[0]
# periodograms for range of offsets
offsets = np.linspace(0, best_offset, nsamples)
power = np.empty((nsamples, N))
mask = m.filts != 1
for i, of in enumerate(offsets):
yn = m.y.copy()
yn[mask] -= of
power[i] = LombScargle(m.t, yn, m.dy).power(freq)
colors = plt.cm.GnBu(np.linspace(0, 0.8, nsamples))
for i in range(nsamples):
ax3.semilogx(
1 / freq,
power[i],
color=colors[i],
) # alpha=0.2)
cmap = mpl.cm.GnBu
norm = mpl.colors.Normalize(vmin=0, vmax=0.8)
cax = fig.add_axes([0.85, 0.85, 0.1, 0.05])
cb = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cax,
orientation='horizontal',
label='offset [m/s]')
cb.set_ticks([0, 0.8])
cb.set_ticklabels(['0', f'{best_offset:.2f}'])
# plt.colorbar()
# handles = [
# Line2D([0, 1], [0, 1], color=colors[0], lw=1, label=r'offset = 0'),
# # Line2D([0], [0], marker='o', color='k', lw=0,
# # label=r'$RV_{\rm RMS}$'),
# ]
# ax3.legend(handles=handles, fontsize=10, handletextpad=0, borderpad=0.2)
if self.known_planets.P is not None:
for p in self.known_planets.P:
ax3.axvline(p, 0.97, 1, color='m', alpha=0.8, label='planets')
if self.prot and not np.isnan(self.prot):
y1, y2 = ax3.get_ylim()
h = 0.05 * abs(y2 - y1)
# ax3.vlines(self.prot, ymin=y2 - h, ymax=y2, color='r', alpha=0.6,
# lw=2)
# ax3.plot(self.prot, y2, 'x', color='r', label=r'P$_{\rm rot}$')
            ax3.axvline(self.prot, 0.97, 1, color='r', alpha=1, lw=3,
                        label=r'P$_{\rm rot}$')
if HZ and self.HZ is not None:
ax3.axvspan(*self.HZ, color='g', alpha=0.2, zorder=-1, label='HZ')
ax3.axhline(m.false_alarm_level(0.01), ls='--', color='k', alpha=0.2)
ax3.text(1, 1.01 * m.false_alarm_level(0.01), 'FAP 1%')
ax1.set_title(f'{self.star}, {self.NN} observations')
ax3.set_title(f'Adjusted RV offset: {best_offset:.4f} m/s')
fig.tight_layout()
if _sign:
fig.text(1e-3,
0.005,
'CC-BY © <NAME>',
fontsize=8,
color='gray',
ha='left',
va='bottom',
alpha=0.5)
return fig
def plot_gls_quantity(t, y, e, mask, obs, hl=None, setax1={}, setax2={}):
from gatspy import periodic
periods = np.logspace(np.log10(1), np.log10(2 * t.ptp()), 1000)
model = periodic.LombScargleMultiband(Nterms_base=1, Nterms_band=0)
model.fit(t[mask], y[mask], e[mask], filts=obs[mask])
power = model.periodogram(periods)
model.false_alarm_level = lambda x: np.zeros_like(x)
# return model, periods, power
fig, axs = plt.subplots(2, 1, constrained_layout=True)
axs[0].errorbar(t[mask], y[mask], e[mask], fmt='o', ms=2)
axs[0].plot(t[mask], model.ymean_, ls='--')
axs[1].semilogx(periods, power)
if hl is not None:
for p in hl:
axs[1].axvline(p, ls='--', color='k', alpha=0.2, zorder=-1)
setax1.setdefault('ylabel', 'RV [m/s]')
setax1.setdefault('xlabel', 'Time [BJD]')
axs[0].set(**setax1)
setax2.setdefault('ylabel', 'Power')
setax2.setdefault('xlabel', 'Period [days]')
axs[1].set(**setax2)
def gls_paper(self):
with plt.style.context('fast'):
figsize = (6, 10)
leg = ('RV', 'FWHM', r"$\log R'_{HK}$", 'BIS', 'Na', r'H$\alpha$')
fun = (self.gls, self.gls_fwhm, self.gls_rhk, self.gls_bis,
self.gls_naindex, self.gls_haindex)
fig, axs = plt.subplots(len(fun), 1, figsize=figsize,
constrained_layout=True)
kw = dict(frequency=False,
show_planets=False,
show_prot=False,
show_title=False,
bootstrap=False,
add_period_axis=False,
fap_period=140)
for i, (ax, f) in enumerate(zip(axs, fun)):
f(ax=ax, **kw)
ax.legend().remove()
ax.set_title(leg[i], loc='right')
ax.set_xlim(0.8, 100)
kwline = dict(ls='--', alpha=0.2, lw=2, zorder=-1)
ax.axvline(11.19, color='r', **kwline)
ax.axvline(5.12, color='r', **kwline)
ax.axvline(85.3, color='g', **kwline)
# fun[i](ax=ax5, **kw)
# for ax in axs:
# ax.set_xticklabels(['', '1', '5', '10', '50'])
for ax in axs[:-1]:
ax.set_xlabel('')
# for ax in axs[:, 0]:
# ax.set_xlim(1 / 10, 1 / 12.5)
# for ax in axs[:, 1]:
# ax.set_xlim(1 / 4, 1 / 6)
# self.gls_fwhm(ax=axs[1], **kw)
# self.gls_rhk(ax=axs[2], **kw)
# self.gls_bis(ax=axs[3], **kw)
# for ax in axs[:-1]:
# ax.set_xlabel('')
return fig
```
#### File: vera/vera/visibility.py
```python
from __future__ import print_function
import sys
import numpy as np
from scipy.optimize import bisect
import datetime as dt
from dateutil import tz
import pickle
from random import choice
from PyAstronomy import pyasl
from astropy.coordinates import SkyCoord
from astropy.coordinates import name_resolve
from astropy.time import Time
from astropy import units
import ephem
import argparse
import calendar
try:
from tqdm import tqdm
except ImportError:
def tqdm(x): return x
import io
import matplotlib.pyplot as plt
import matplotlib
replace_figure = True
try:
from PySide.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt4.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
except ImportError:
replace_figure = False
def add_clipboard_to_figures():
# replace the original plt.figure() function with one that supports
# clipboard-copying
oldfig = plt.figure
def newfig(*args, **kwargs):
fig = oldfig(*args, **kwargs)
def clipboard_handler(event):
if event.key == 'ctrl+c':
# store the image in a buffer using savefig(), this has the
# advantage of applying all the default savefig parameters
# such as background color; those would be ignored if you simply
# grab the canvas using Qt
buf = io.BytesIO()
fig.savefig(buf)
QApplication.clipboard().setImage(
QImage.fromData(buf.getvalue()))
buf.close()
print('Ctrl+C pressed: image is now in the clipboard')
fig.canvas.mpl_connect('key_press_event', clipboard_handler)
return fig
plt.figure = newfig
if replace_figure:
add_clipboard_to_figures()
def _parser():
parser = argparse.ArgumentParser(
description='Plot altitudes of objects against time for a specific night')
parser.add_argument('targets', help='e.g. HD20010 or HD20010,HD41248',
nargs='+')
parser.add_argument(
'-d', '--date', default='today',
help='Date in format YYYY-MM-DD (or YYYY if starobs). '
'Default is today (this year if starobs).')
parser.add_argument(
'-P', '--period', default=None, type=str, nargs=1,
help='Specify ESO period (October-March / April-September)')
parser.add_argument(
'-s', '--site', default='esolasilla',
help='Observatory. Default is ESO La Silla. '
'Common codes are esoparanal, lapalma, keck, lco, Palomar, etc')
parser.add_argument(
'-l', '--loc', default=None,
help='Give the location of the observatory.'
'Comma-separated altitude, latitude, longitude, timezone')
parser.add_argument('-c', default=False, action='store_true',
help='Just print "target RA DEC" (to use in STARALT)')
parser.add_argument(
'-m', '--mode', choices=['staralt', 'starobs'], default='staralt',
help='staralt: plot altitude against time for a particular night; '
'starobs: plot how altitude changes over a year')
parser.add_argument('--nomoon', default=False, action='store_true',
help="Don't plot moon altitude")
parser.add_argument('--sh', default=None, type=float, nargs=1, dest='A',
help='Include plot of sunless hours above airmass A')
parser.add_argument('--hover', default=False, action='store_true',
help='Color lines when mouse over')
parser.add_argument(
'-o', '--save', default=None, type=str, nargs=1,
help='Save figure in output file (provide file extension)')
parser.add_argument('--remove-watermark', default=False,
action='store_true',
help='Remove "Created with..." watermark text')
parser.add_argument('--toi', default=False, action='store_true',
help='Targets are TOIs')
return parser.parse_args()
def decdeg2dms(dd):
""" Convert decimal degrees to deg,min,sec """
is_positive = dd >= 0
dd = abs(dd)
minutes, seconds = divmod(dd * 3600, 60)
degrees, minutes = divmod(minutes, 60)
degrees = degrees if is_positive else -degrees
return (degrees, minutes, seconds)
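# Worked example (illustrative): decdeg2dms(-29.2584) -> approximately
# (-29.0, 15.0, 30.24), i.e. -29 deg 15 min 30.24 sec.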
class CacheSkyCoord(SkyCoord):
@classmethod
def from_name(cls, name, frame='icrs'):
try:
cached = pickle.load(open('CachedSkyCoords.pickle', 'rb'))
except FileNotFoundError:
cached = {}
if name in cached:
return cached[name]
else:
original = super(CacheSkyCoord, cls).from_name(name, frame)
# keep the cached dict manageable
n = len(cached)
if n > 100:
# remove a random cached target
cached.pop(choice(list(cached.keys())))
cached.update({name: original})
pickle.dump(cached, open('CachedSkyCoords.pickle', 'wb'))
return original
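# Hedged usage note: CacheSkyCoord.from_name('HD189733') resolves the target via
# CDS on the first call and serves repeated queries from the local
# CachedSkyCoords.pickle file, which is capped at ~100 entries above.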
ESO_periods = {
104: [(2019, 10, 1), (2020, 3, 31)],
103: [(2019, 4, 1), (2019, 9, 30)],
102: [(2018, 10, 1), (2019, 3, 31)],
101: [(2018, 4, 1), (2018, 9, 30)],
100: [(2017, 10, 1), (2018, 3, 31)],
99: [(2017, 4, 1), (2017, 9, 30)],
98: [(2016, 10, 1), (2017, 3, 31)],
97: [(2016, 4, 1), (2016, 9, 30)],
96: [(2015, 10, 1), (2016, 3, 31)],
95: [(2015, 4, 1), (2015, 9, 30)],
94: [(2014, 10, 1), (2015, 3, 31)],
93: [(2014, 4, 1), (2014, 9, 30)],
92: [(2013, 10, 1), (2014, 3, 31)],
}
def get_ESO_period(period):
""" Return the JD of start and end of ESO period """
assert isinstance(period, str) or isinstance(period, int)
P = int(period)
def getjd(y, m, d): return pyasl.jdcnv(dt.datetime(y, m, d))
jd_start, jd_end = [getjd(*d) for d in ESO_periods[P]]
return jd_start, jd_end
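# Example (from the ESO_periods table above): get_ESO_period(104) returns the
# Julian dates of 2019-10-01 and 2020-03-31. Note that a string like 'P104'
# would fail the int() conversion, which is why the command-line code below
# strips a leading 'P' first.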
def hrs_up(up, down, eve, morn):
"""
If an object comes up past a given point at `up`, and goes down at `down`,
and evening and morning are at `eve` and `morn`, computes how long object
is up *and* it's dark.
"""
# if any input is a float, assume it's JD
if isinstance(up, float):
up = pyasl.daycnv(up, mode='dt')
if isinstance(down, float):
down = pyasl.daycnv(down, mode='dt')
if isinstance(eve, float):
eve = pyasl.daycnv(eve, mode='dt')
if isinstance(morn, float):
morn = pyasl.daycnv(morn, mode='dt')
SID_RATE = 1.0027379093
if up < eve:
if down >= morn:
return (morn - eve).total_seconds() / 3600 # up all night
elif down >= eve:
# careful here ... circumpolar objects can come back *up* a second time
# before morning. up and down are the ones immediately preceding
# and following the upper culmination nearest the center of the night,
# so "up" can be on the previous night rather than the one we want. */
up2 = up + dt.timedelta(days=1.0 / SID_RATE)
if (up2 > morn): # the usual case ... doesn't rise again
return (down - eve).total_seconds() / 3600
else:
return ((down - eve) + (morn - up2)).total_seconds() / 3600
else:
return 0.
elif down > morn:
if up > morn:
return 0.
else:
# again, a circumpolar object can be up at evening twilight and come
# 'round again in the morning ...
down0 = down - dt.timedelta(days=1.0 / SID_RATE)
if down0 < eve:
return (morn - up).total_seconds() / 3600
else:
return ((down0 - eve) + (morn - up)).total_seconds() / 3600
else:
return (down - up).total_seconds() / 3600
# up & down the same night ... might happen a second time in pathological
# cases, but this will be extremely rare except at very high latitudes.
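# Worked example (illustrative): with evening twilight at 20h, morning twilight
# at 06h the next day, and a target that comes up at 22h and goes down at 04h,
# hrs_up returns (down - up) = 6.0 hours of dark time on target.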
SUN = ephem.Sun()
def get_next_sunset(jd, obs, mode='jd'):
datetime_jd = pyasl.daycnv(jd, mode='dt')
s = ephem.Observer()
s.date = datetime_jd
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
next_sunset = ephem.julian_date(s.next_setting(SUN))
if mode == 'jd':
return next_sunset
elif mode == 'dt':
return pyasl.daycnv(next_sunset, mode='dt')
def get_next_sunrise(jd, obs, mode='jd'):
datetime_jd = pyasl.daycnv(jd, mode='dt')
s = ephem.Observer()
s.date = datetime_jd
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
next_sunrise = ephem.julian_date(s.next_rising(SUN))
if mode == 'jd':
return next_sunrise
elif mode == 'dt':
return pyasl.daycnv(next_sunrise, mode='dt')
def get_next_pass_at_altitude(jd, altitude, target, obs, limit=0.25):
""" Next time after jd that target passes at altitude, seen from obs """
def alt(jd, target):
ra = np.full_like(jd, target.ra.value)
dec = np.full_like(jd, target.dec.value)
lon, lat, alt = map(
obs.__getitem__, ('longitude', 'latitude', 'altitude'))
hor = pyasl.eq2hor(jd, ra, dec, lon=lon, lat=lat, alt=alt)
return -altitude + hor[0]
# if target is *already* above altitude at jd, return jd
if alt(jd, target) > 0:
return jd
try:
return bisect(alt, jd, jd + limit, args=(target, ))
except ValueError:
try:
return bisect(alt, jd, jd + 2*limit, args=(target, ))
except ValueError:
return -99
def get_previous_pass_at_altitude(jd, altitude, target, obs, limit=0.25):
"""
Previous time, before jd, that target passes at altitude, seen from obs
"""
def alt(jd, target):
ra = np.full_like(jd, target.ra.value)
dec = np.full_like(jd, target.dec.value)
lon, lat, alt = map(obs.__getitem__,
('longitude', 'latitude', 'altitude'))
hor = pyasl.eq2hor(jd, ra, dec, lon=lon, lat=lat, alt=alt)
return -altitude + hor[0]
# if target is *still* above altitude at jd, return jd
if alt(jd, target) > 0:
return jd
try:
return bisect(alt, jd, jd - limit, args=(target, ))
except ValueError:
try:
return bisect(alt, jd, jd - 2*limit, args=(target, ))
except ValueError:
return -99
def hrs_above_altitude(jd, altitude, target, obs):
# evening
eve = get_next_sunset(jd, obs)
# star goes up (above altitude)
up = get_next_pass_at_altitude(eve, altitude, target, obs)
# print(eve, up)
if up == -99:
return 0.
# morning
morn = get_next_sunrise(jd, obs)
    if morn < eve:  # we got the sunrise of the same day, take the next one
morn = get_next_sunrise(jd+1, obs)
# star goes down
down = get_previous_pass_at_altitude(morn, altitude, target, obs)
# print(morn, down)
if down == -99:
return 0.
return hrs_up(up, down, eve, morn)
def get_visibility_curve(year, target, observatory, period=None):
try:
target = {'name': target, 'coord': SkyCoord.from_name(target)}
except name_resolve.NameResolveError:
        print('Could not find target: {0!s}'.format(target))
        # bail out early if the name cannot be resolved
        return None, None
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
# set the observatory
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
if period is not None:
jd_start, jd_end = get_ESO_period(period)
else:
jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))
jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))
jdbinsize = 1 # every day
each_day = np.arange(jd_start, jd_end, jdbinsize)
jds = []
## calculate the mid-dark times
sun = ephem.Sun()
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer()
s.date = date_formatted
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
jds.append(ephem.julian_date(s.next_antitransit(sun)))
jds = np.array(jds)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# plt.plot( jdsub, altaz[0], '-', color='k')
return jds, altaz[0]
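# Hedged usage sketch (observatory codes are those accepted by pyasl.observatory,
# e.g. the 'esolasilla' default used by the command-line interface below):
#   jds, alt = get_visibility_curve(2020, 'HD189733', 'esolasilla')
#   plt.plot(jds - jds[0], alt)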
def StarObsPlot(year=None, targets=None, observatory=None, period=None,
hover=False, sunless_hours=None, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
year: int
The year for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
period: string, optional
ESO period for which to calculate the visibility. Overrides `year`.
hover: boolean, optional
If True, color visibility lines when mouse over.
sunless_hours: float, optional
If not None, plot sunless hours above this airmass
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
# set the observatory
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
    # watermark
if not remove_watermark:
fig.text(0.99, 0.99,
'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray', ha='right', va='top', alpha=0.5)
# plotting sunless hours?
shmode = False
if sunless_hours is not None:
shmode = True
# limit in airmass (assumed plane-parallel atm)
shairmass = sunless_hours
        # corresponding limit in altitude
def f(alt): return pyasl.airmassPP(alt) - shairmass
shalt = 90 - bisect(f, 0, 89)
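        # shalt is the altitude (in degrees) at which the plane-parallel
        # airmass equals the requested limit: bisect finds the zenith angle
        # between 0 and 89 degrees where airmassPP matches shairmass.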
if shmode:
fig.subplots_adjust(hspace=0.35)
ax = host_subplot(211)
axsh = host_subplot(212)
plt.text(0.5, 0.47,
"- sunless hours above airmass {:.1f} - \n".format(shairmass),
transform=fig.transFigure, ha='center', va='bottom',
fontsize=12)
plt.text(0.5, 0.465,
"the thick line above the curves represents the total sunless hours "
"for each day of the year",
transform=fig.transFigure, ha='center', va='bottom', fontsize=10)
else:
ax = host_subplot(111)
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
if period is not None:
jd_start, jd_end = get_ESO_period(period)
else:
jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))
jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))
jdbinsize = 1 # every day
each_day = np.arange(jd_start, jd_end, jdbinsize)
jds = []
## calculate the mid-dark times
sun = ephem.Sun()
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer()
s.date = date_formatted
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
jds.append(ephem.julian_date(s.next_antitransit(sun)))
jds = np.array(jds)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
ax.plot(jdsub, altaz[0], '-', color='k')
# label for each target
plabel = "[{0:2d}] {1!s}".format(n + 1, target['name'])
# number of target at the top of the curve
ind_label = np.argmax(altaz[0])
# or at the bottom if the top is too close to the corners
# if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:
# ind_label = np.argmin(altaz[0])
ax.text(jdsub[ind_label], altaz[0][ind_label], str(n+1), color="b", fontsize=14,
fontproperties=font1, va="bottom", ha="center")
if n + 1 == 29:
# too many?
ax.text(1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes,
fontsize=10, fontproperties=font0, color="r")
else:
ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes,
fontsize=12, fontproperties=font0, color="b")
if shmode:
sunless_hours = []
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer()
s.date = date_formatted
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
# hours from sunrise to sunset
td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \
- pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')
sunless_hours.append(24 - td.total_seconds() / 3600)
days = each_day - np.floor(each_day[0])
axsh.plot(days, sunless_hours, '-', color='k', lw=2)
axsh.set(
ylim=(0, 15), yticks=range(1, 15), ylabel='Useful hours',
yticklabels=[r'${}^{{\rm h}}$'.format(n) for n in range(1, 15)])
ax.text(1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes,
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
if period is None:
months = range(1, 13)
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 366])
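        # place each month label at the centre of that month: cumulative length
        # of the preceding months plus half of the current month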
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 366])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
axsh.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
if int(period) % 2 == 0:
# even ESO period, Oct -> Mar
months = [10, 11, 12, 1, 2, 3]
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 181])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 181])
axsh.set_xticks(
np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
axsh.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
# odd ESO period, Apr -> Sep
months = range(4, 10)
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 182])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 182])
axsh.set_xticks(
np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
axsh.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if axrange[1] - axrange[0] <= 1.0:
jdhours = np.arange(0, 3, 1.0 / 24.)
utchours = (np.arange(0, 72, dtype=int) + 12) % 24
else:
jdhours = np.arange(0, 3, 1.0 / 12.)
utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(np.cumsum(ndays))
ax2.set_xlabel("Day")
# plane-parallel airmass
airmass_ang = np.arange(10, 81, 5)
geo_airmass = pyasl.airmass.airmassPP(airmass_ang)[::-1]
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat) # , rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=6, labelsize=8)
plt.text(1.02, -0.04, "Plane-parallel", transform=ax.transAxes, ha='left',
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 30))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(
map(
lambda ang: pyasl.airmass.airmassSpherical(
90. - ang, obs['altitude']),
airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in range(len(airmass2)):
airmassformat.append(" {0:2.2f}".format(airmass2[t]))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=8, labelsize=8)
plt.text(1.05, -0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top',
fontsize=10, rotation=90)
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size):
ytickformat.append(str(int(yticks[t])) + r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=11 if shmode else 16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = np.array(ax.get_yticks(minor=True))
ymind = np.where(yticksminor % 15. != 0.)[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size):
m_ytickformat.append(str(int(yticksminor[t])) + r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
if period is not None:
plt.text(
0.5, 0.95,
"Visibility over P{0!s}\n - altitudes at mid-dark time -".format(
period), transform=fig.transFigure, ha='center', va='bottom',
fontsize=12)
else:
plt.text(
0.5, 0.95,
"Visibility over {0!s}\n - altitudes at mid-dark time -".format(
year), transform=fig.transFigure, ha='center', va='bottom',
fontsize=12)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4f} m".format(
obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',
va='center', fontsize=10)
plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',
va='center', fontsize=10)
# interactive!
if hover:
main_axis = fig.axes[0]
all_lines = set(main_axis.get_lines())
def on_plot_hover(event):
for line in main_axis.get_lines():
if line.contains(event)[0]:
line.set_color('red') # make this line red
# and all others black
all_other_lines = all_lines - set([line])
for other_line in all_other_lines:
other_line.set_color('black')
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', on_plot_hover)
return fig
def StarObsAxis(ax, year=None, targets=None, observatory=None, period=None,
hover=False, sunless_hours=None, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
year: int
The year for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
period: string, optional
ESO period for which to calculate the visibility. Overrides `year`.
hover: boolean, optional
If True, color visibility lines when mouse over.
sunless_hours: float, optional
If not None, plot sunless hours above this airmass
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
# rcParams['xtick.major.pad'] = 12
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
# set the observatory
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
# fig = plt.figure(figsize=(15, 10))
# fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
    # watermark
# if not remove_watermark:
# fig.text(0.99, 0.99,
# 'Created with\ngithub.com/iastro-pt/ObservationTools',
# fontsize=10, color='gray', ha='right', va='top', alpha=0.5)
# plotting sunless hours?
shmode = False
if sunless_hours is not None:
shmode = True
# limit in airmass (assumed plane-parallel atm)
shairmass = sunless_hours
        # corresponding limit in altitude
def f(alt): return pyasl.airmassPP(alt) - shairmass
shalt = 90 - bisect(f, 0, 89)
if shmode:
fig.subplots_adjust(hspace=0.35)
ax = host_subplot(211)
axsh = host_subplot(212)
plt.text(0.5, 0.47,
"- sunless hours above airmass {:.1f} - \n".format(shairmass),
transform=fig.transFigure, ha='center', va='bottom',
fontsize=12)
plt.text(0.5, 0.465,
"the thick line above the curves represents the total sunless hours "
"for each day of the year",
transform=fig.transFigure, ha='center', va='bottom', fontsize=10)
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
if period is not None:
jd_start, jd_end = get_ESO_period(period)
else:
jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))
jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))
jdbinsize = 1 # every day
each_day = np.arange(jd_start, jd_end, jdbinsize)
jds = []
## calculate the mid-dark times
sun = ephem.Sun()
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer()
s.date = date_formatted
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
jds.append(ephem.julian_date(s.next_antitransit(sun)))
jds = np.array(jds)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
ax.plot(jdsub, altaz[0], '-', color='k', lw=0.8)
ax.plot(jdsub[altaz[0] > 30], altaz[0]
[altaz[0] > 30], '-', color='g', lw=2)
# label for each target
# plabel = "[{0:2d}] {1!s}".format(n + 1, target['name'])
# # number of target at the top of the curve
# ind_label = np.argmax(altaz[0])
# # or at the bottom if the top is too close to the corners
# # if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:
# # ind_label = np.argmin(altaz[0])
# ax.text( jdsub[ind_label], altaz[0][ind_label], str(n+1), color="b", fontsize=14, \
# fontproperties=font1, va="bottom", ha="center")
# if n + 1 == 29:
# # too many?
# ax.text(1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes, \
# fontsize=10, fontproperties=font0, color="r")
# else:
# ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
# fontsize=12, fontproperties=font0, color="b")
if shmode:
sunless_hours = []
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer()
s.date = date_formatted
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
# hours from sunrise to sunset
td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \
- pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')
sunless_hours.append(24 - td.total_seconds() / 3600)
days = each_day - np.floor(each_day[0])
axsh.plot(days, sunless_hours, '-', color='k', lw=2)
axsh.set(
ylim=(0, 15), yticks=range(1, 15), ylabel='Useful hours',
yticklabels=[r'${}^{{\rm h}}$'.format(n) for n in range(1, 15)])
# ax.text(1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes, \
# fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
if period is None:
months = range(1, 13)
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 366])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=8)
# if shmode:
# axsh.set_xlim([0, 366])
# axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
# axsh.set_xticklabels(
# map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
if int(period) % 2 == 0:
# even ESO period, Oct -> Mar
months = [10, 11, 12, 1, 2, 3]
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 181])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 181])
axsh.set_xticks(
np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
axsh.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
# odd ESO period, Apr -> Sep
months = range(4, 10)
ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 182])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
ax.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 182])
axsh.set_xticks(
np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])
axsh.set_xticklabels(
map(calendar.month_abbr.__getitem__, months), fontsize=10)
if axrange[1] - axrange[0] <= 1.0:
jdhours = np.arange(0, 3, 1.0 / 24.)
utchours = (np.arange(0, 72, dtype=int) + 12) % 24
else:
jdhours = np.arange(0, 3, 1.0 / 12.)
utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24
ax.vlines(np.cumsum(ndays)[:-1], 0, 90, color='k', alpha=0.2)
ax.hlines([30], 0, 366, lw=0.8)
ax.vlines(dt.datetime.now().timetuple().tm_yday, 30, 90, color='b')
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twinx()
# Set upper x ticks
# ax2.xaxis.tick_top()
# ax2.set_xticks(np.cumsum(ndays))
# ax2.set_xlabel("Day")
# print(ax.get_xlim())
# plane-parallel airmass
airmass_ang = np.arange(0, 81, 5)
geo_airmass = pyasl.airmass.airmassPP(airmass_ang)[::-1]
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat) # , rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=5)
ax2.tick_params(axis="y", pad=6, labelsize=8)
ax2.set_ylim(-9, 80)
# plt.text(1.02,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
# va='top', fontsize=10, rotation=90)
# ax22 = ax.twinx()
# ax22.set_xticklabels([])
# ax22.set_frame_on(True)
# ax22.patch.set_visible(False)
# ax22.yaxis.set_ticks_position('right')
# ax22.yaxis.set_label_position('right')
# ax22.spines['right'].set_position(('outward', 30))
# ax22.spines['right'].set_color('k')
# ax22.spines['right'].set_visible(True)
# airmass2 = list(
# map(
# lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']),
# airmass_ang))
# ax22.set_yticks(airmass_ang)
# airmassformat = []
# for t in range(len(airmass2)):
# airmassformat.append(" {0:2.2f}".format(airmass2[t]))
# ax22.set_yticklabels(airmassformat, rotation=90)
# ax22.tick_params(axis="y", pad=8, labelsize=8)
# plt.text(1.05,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
# fontsize=10, rotation=90)
ax.set_ylim([0, 90])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size):
ytickformat.append(str(int(yticks[t])) + r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=10)
ax.set_ylabel("Altitude", fontsize=10)
yticksminor = ax.get_yticks(minor=True)
# ymind = np.where(yticksminor % 15. != 0.)[0]
# yticksminor = yticksminor[ymind]
# ax.set_yticks(yticksminor, minor=True)
# m_ytickformat = []
# for t in range(yticksminor.size):
# m_ytickformat.append(str(int(yticksminor[t])) + r"$^\circ$")
# ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 90])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
# if period is not None:
# plt.text(
# 0.5, 0.95,
# "Visibility over P{0!s}\n - altitudes at mid-dark time -".format(
# period), transform=fig.transFigure, ha='center', va='bottom',
# fontsize=12)
# else:
# plt.text(
# 0.5, 0.95,
# "Visibility over {0!s}\n - altitudes at mid-dark time -".format(
# year), transform=fig.transFigure, ha='center', va='bottom',
# fontsize=12)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:.0f} m".format(
obs['longitude'], obs['latitude'], obs['altitude'])
ax.set_title(obsco, loc='left', fontsize=6)
ax.set_title('Altitudes at mid-dark time', loc='right', fontsize=8)
# plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',
# va='center', fontsize=10)
# plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',
# va='center', fontsize=10)
# interactive!
if hover:
main_axis = fig.axes[0]
all_lines = set(main_axis.get_lines())
def on_plot_hover(event):
for line in main_axis.get_lines():
if line.contains(event)[0]:
line.set_color('red') # make this line red
# and all others black
all_other_lines = all_lines - set([line])
for other_line in all_other_lines:
other_line.set_color('black')
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', on_plot_hover)
# return fig
def VisibilityPlot(date=None, targets=None, observatory=None, plotLegend=True,
showMoon=True, showMoonDist=True, print2file=False,
remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
date: datetime
The date for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
        The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
plotLegend: boolean, optional
If True (default), show a legend.
    showMoon: boolean, optional
        If True (default), the Moon's altitude is also plotted.
    showMoonDist: boolean, optional
        If True (default), the Moon distance will be shown.
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
# observer = ephem.Observer()
# observer.pressure = 0
# observer.horizon = '-0:34'
# observer.lat, observer.lon = obs['latitude'], obs['longitude']
# observer.date = date
# print(observer.date)
# print(observer.previous_rising(ephem.Sun()))
# print(observer.next_setting(ephem.Sun()))
# print(observer.previous_rising(ephem.Moon()))
# print(observer.next_setting(ephem.Moon()))
# observer.horizon = '-6'
# noon = observer.next_transit(ephem.Sun())
# print(noon)
# print(observer.previous_rising(ephem.Sun(), start=noon, use_center=True))
# print()
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
# watermak
if not remove_watermark:
fig.text(0.99, 0.99,
'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray', ha='right', va='top', alpha=0.5)
ax = host_subplot(111)
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
# JD array
jdbinsize = 1.0 / 24. / 20.
# jds = np.arange(allData[n]["Obs jd"][0], allData[n]["Obs jd"][2], jdbinsize)
jd = pyasl.jdcnv(date)
jd_start = pyasl.jdcnv(date) - 0.5
jd_end = pyasl.jdcnv(date) + 0.5
jds = np.arange(jd_start, jd_end, jdbinsize)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones(jds.size)*target_ra, np.ones(jds.size)*target_dec,
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Get alt/az of Sun
sun_position = pyasl.sunpos(jd)
sun_ra, sun_dec = sun_position[1], sun_position[2]
sunpos_altaz = pyasl.eq2hor(jds, np.ones(jds.size)*sun_ra, np.ones(jds.size)*sun_dec,
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Define plot label
plabel = "[{0:2d}] {1!s}".format(n + 1, target['name'])
# Find periods of: day, twilight, and night
day = np.where(sunpos_altaz[0] >= 0.)[0]
twi = np.where(
np.logical_and(sunpos_altaz[0] > -18., sunpos_altaz[0] < 0.))[0]
night = np.where(sunpos_altaz[0] <= -18.)[0]
if (len(day) == 0) and (len(twi) == 0) and (len(night) == 0):
            print()
            print("VisibilityPlot - no points to draw")
            print()
if showMoon:
# plot the moon
mpos = pyasl.moonpos(jds)
# mpha = pyasl.moonphase(jds)
mpos_altaz = pyasl.eq2hor(jds, mpos[0], mpos[1],
lon=obs['longitude'],
lat=obs['latitude'], alt=obs['altitude'])
ax.plot(jdsub, mpos_altaz[0], color='k', alpha=0.3, ls='--',
label='Moon')
# moonind = np.where( mpos_altaz[0] > 0. )[0]
if showMoonDist:
mdist = pyasl.getAngDist(mpos[0], mpos[1], np.ones(jds.size)*target_ra,
np.ones(jds.size)*target_dec)
bindist = int((2.0 / 24.) / jdbinsize)
firstbin = np.random.randint(0, bindist)
for mp in range(0, int(len(jds) / bindist)):
bind = firstbin + mp * bindist
if altaz[0][bind] - 1. < 5.:
continue
ax.text(jdsub[bind], altaz[0][bind]-1., str(int(mdist[bind]))+r"$^\circ$", ha="center", va="top",
fontsize=8, stretch='ultra-condensed', fontproperties=font0, alpha=1.)
if len(twi) > 1:
# There are points in twilight
linebreak = np.where(
(jdsub[twi][1:] - jdsub[twi][:-1]) > 2.0 * jdbinsize)[0]
if len(linebreak) > 0:
plotrjd = np.insert(jdsub[twi], linebreak + 1, np.nan)
plotdat = np.insert(altaz[0][twi], linebreak + 1, np.nan)
ax.plot(plotrjd, plotdat, "-", color='#BEBEBE', linewidth=1.5)
else:
ax.plot(jdsub[twi], altaz[0][twi], "-", color='#BEBEBE',
linewidth=1.5)
ax.plot(jdsub[night], altaz[0][night], '.k', label=plabel)
ax.plot(jdsub[day], altaz[0][day], '.', color='#FDB813')
altmax = np.argmax(altaz[0])
ax.text(jdsub[altmax], altaz[0][altmax], str(n+1), color="b", fontsize=14,
fontproperties=font1, va="bottom", ha="center")
if n + 1 == 29:
ax.text(1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes,
fontsize=10, fontproperties=font0, color="r")
else:
ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes,
fontsize=12, fontproperties=font0, color="b")
ax.text(1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes,
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
ax.set_xlabel("UT [hours]")
if axrange[1] - axrange[0] <= 1.0:
jdhours = np.arange(0, 3, 1.0 / 24.)
utchours = (np.arange(0, 72, dtype=int) + 12) % 24
else:
jdhours = np.arange(0, 3, 1.0 / 12.)
utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24
ax.set_xticks(jdhours)
ax.set_xlim(axrange)
ax.set_xticklabels(utchours, fontsize=18)
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(jdhours)
ax2.set_xticklabels(utchours, fontsize=18)
ax2.set_xlabel("UT [hours]")
# Horizon angle for airmass
airmass_ang = np.arange(5., 90., 5.)
geo_airmass = pyasl.airmass.airmassPP(90. - airmass_ang)
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat, rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.015, -0.04, "Plane-parallel", transform=ax.transAxes, ha='left',
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 25))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(
map(
lambda ang: pyasl.airmass.airmassSpherical(
90. - ang, obs['altitude']),
airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in airmass2:
airmassformat.append("{0:2.2f}".format(t))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.045, -0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top',
fontsize=10, rotation=90)
ax3 = ax.twiny()
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
ax3.xaxis.set_ticks_position('bottom')
ax3.xaxis.set_label_position('bottom')
ax3.spines['bottom'].set_position(('outward', 50))
ax3.spines['bottom'].set_color('k')
ax3.spines['bottom'].set_visible(True)
ltime, ldiff = pyasl.localtime.localTime(
utchours, np.repeat(obs['longitude'], len(utchours)))
jdltime = jdhours - ldiff / 24.
ax3.set_xticks(jdltime)
ax3.set_xticklabels(utchours)
ax3.set_xlim([axrange[0], axrange[1]])
ax3.set_xlabel("Local time [hours]")
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size):
ytickformat.append(str(int(yticks[t])) + r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = ax.get_yticks(minor=True)
ymind = np.where(yticksminor % 15. != 0.)[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size):
m_ytickformat.append(str(int(yticksminor[t])) + r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
plt.text(0.5, 0.95, "Visibility on {0!s}".format(date.date()),
transform=fig.transFigure, ha='center', va='bottom', fontsize=20)
if plotLegend:
line1 = matplotlib.lines.Line2D((0, 0), (1, 1), color='#FDB813',
linestyle="-", linewidth=2)
line2 = matplotlib.lines.Line2D((0, 0), (1, 1), color='#BEBEBE',
linestyle="-", linewidth=2)
line3 = matplotlib.lines.Line2D((0, 0), (1, 1), color='k',
linestyle="-", linewidth=2)
line4 = matplotlib.lines.Line2D((0, 0), (1, 1), color='k', alpha=0.2,
linestyle="--", linewidth=2)
if showMoon:
lgd2 = plt.legend((line1, line2, line3, line4),
("day", "twilight", "night", "Moon"),
bbox_to_anchor=(0.88, 0.18), loc='best',
borderaxespad=0, prop={'size': 12}, fancybox=True)
else:
lgd2 = plt.legend((line1, line2, line3),
("day", "twilight", "night"),
bbox_to_anchor=(0.88, 0.18), loc='best',
borderaxespad=0, prop={'size': 12}, fancybox=True)
lgd2.get_frame().set_alpha(.9)
obsco = r"Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4.2f} m"
obsco = obsco.format(obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',
va='center', fontsize=10)
plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',
va='center', fontsize=10)
return fig
if __name__ == '__main__':
args = _parser()
target_names = args.targets[0].split(',')
## Get coordinates for all the targets
targets = []
# flush keyword was not backported to Python < 3.3
if sys.version_info[:2] < (3, 3):
print('Sending queries to CDS...', end=' ')
sys.stdout.flush()
else:
print('Sending queries to CDS...', end=' ', flush=True)
for target_name in tqdm(target_names):
if args.toi: # check the table
# data = np.genfromtxt('TOI-info.csv', delimiter=',', names=True)
# data = np.loadtxt('TOI-info.csv', delimiter=',', usecols=(1, 16,17), skiprows=1, dtype={'names': ('TOI', 'RA', 'Dec'), 'formats': (np.float, '|S15', '|S15')},)
data = np.loadtxt('TOI-info.csv', delimiter=',', usecols=(1, 15, 16),
skiprows=1, dtype={'names': ('TOI', 'RA', 'Dec'), 'formats': 3*[float]})
ind = np.where(data['TOI'].astype(int) == int(target_name))[0]
if ind.size == 0:
print('Could not find target: {0!s}'.format(target_name))
continue
ind = ind[0]
coord = SkyCoord(data[ind]['RA'], data[ind]['Dec'], unit=units.deg)
targets.append({
'name': target_name,
'coord': CacheSkyCoord(coord)
})
else:
try:
targets.append({
'name': target_name,
'coord': CacheSkyCoord.from_name(target_name)
})
except name_resolve.NameResolveError as e:
print('Could not find target: {0!s}'.format(target_name))
## Just print coordinates in STARALT format and exit
if args.c:
print('Coordinates for {0!s}\n'.format(args.targets[0]))
for target in targets:
## name hh mm ss ±dd mm ss
out = '{0!s}'.format(target['name'])
ra = target['coord'].ra.hms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(
int(ra.h), int(ra.m), ra.s)
dec = target['coord'].dec.dms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(
int(dec.d), int(dec.m), dec.s)
print(out)
sys.exit(0)
## Actually calculate the visibility curves
print('Calculating visibility for {0!s}'.format(args.targets[0]))
P = args.period
if args.period is not None:
if args.mode != 'starobs':
print('Specifying ESO period is only possible in "starobs" mode')
sys.exit(1)
P = args.period[0]
P = P.replace('P', '') # if user gave --period P100, for example
if args.date == 'today':
if args.mode == 'staralt':
# now() gives the current *time* which we don't want
today = dt.datetime.now()
date = dt.datetime(today.year, today.month, today.day,
tzinfo=tz.tzutc())
elif args.mode == 'starobs':
date = dt.datetime.now().year
else:
if args.mode == 'staralt':
if "-" not in args.date:
raise ValueError(
"Date needs to be provided as YYYY-MM-DD for staralt mode."
)
ymd = [int(i) for i in args.date.split('-')]
date = dt.datetime(*ymd)
elif args.mode == 'starobs':
if "-" in args.date:
date = int(args.date.split('-')[0])
else:
date = int(args.date)
## Find observatory
if args.loc is None:
available_sites = pyasl.listObservatories(show=False)
        if args.site.lower() in ('paranal', 'vlt', 'ut1', 'ut2', 'ut3', 'ut4'):
args.site = 'esoparanal'
if args.site.lower() not in available_sites.keys():
print('"{0!s}" is not a valid observatory code. '
'Try one of the following:\n'.format(args.site)
)
maxCodeLen = max(map(len, available_sites.keys()))
print(("{0:" + str(maxCodeLen) + "s} ").format("Code") +
"Observatory name")
print("-" * (21 + maxCodeLen))
for k in sorted(available_sites.keys(), key=lambda s: s.lower()):
print(("{0:" + str(maxCodeLen) + "s} --- ").format(k) +
available_sites[k]["name"])
sys.exit(1)
site = args.site
else:
loc = list(map(float, args.loc.split(',')))
site = {
'altitude': loc[0],
'latitude': loc[1],
'longitude': loc[2],
'tz': loc[3],
'name': 'unknown'
}
if args.mode == 'staralt':
fig = VisibilityPlot(date=date, targets=targets, observatory=site,
remove_watermark=args.remove_watermark,
showMoon=not args.nomoon)
elif args.mode == 'starobs':
if args.A is not None:
am = args.A[0]
else:
am = None
fig = StarObsPlot(year=date, targets=targets, observatory=site,
period=P, hover=args.hover, sunless_hours=am,
remove_watermark=args.remove_watermark)
if args.save is not None:
print('Saving the figure to {}'.format(args.save[0]))
fig.savefig(args.save[0])
else:
plt.show()
```
|
{
"source": "j-faria/wobble",
"score": 3
}
|
#### File: figures/HD189733/viz.py
```python
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from ipywidgets import Layout
import starry
from ylm_rot import get_ylm_coeffs
import matplotlib.pyplot as pl
import numpy as np
vslider = \
widgets.FloatSlider(
value=0.1,
min=0.1,
max=10.0,
step=0.01,
description=r'$v_\mathrm{eq}$ [km / s]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
oslider = \
widgets.FloatSlider(
value=0,
min=-90,
max=90.0,
step=0.1,
description=r'$\lambda$ [deg]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
layout=Layout(width='40%')
)
islider = \
widgets.FloatSlider(
value=90,
min=1,
max=179.0,
step=0.1,
description=r'$i$ [deg]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
layout=Layout(width='40%')
)
aslider = \
widgets.FloatSlider(
value=0,
min=0,
max=1.0,
step=0.01,
description=r'$\alpha$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
u1slider = \
widgets.FloatSlider(
value=0,
min=0.0,
max=2.0,
step=0.01,
description=r'$u_1$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
u2slider = \
widgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description=r'$u_2$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
yslider = \
widgets.FloatSlider(
value=0,
min=-1.0,
max=1.0,
step=0.01,
description=r'$b$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
rslider = \
widgets.FloatSlider(
value=0.1,
min=0.01,
max=0.5,
step=0.001,
description=r'$r / R_\star$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout=Layout(width='40%')
)
# Load RV data for HD 189733 from Bedell, corrected for the baseline
xo_189, rv_189 = np.loadtxt("HD189733_sample.txt", unpack=True)
# Create the global starry maps
map_Iv_plus_I = starry.Map(5)
map_I = starry.Map(2)
def visualize_func(veq=1, inc=90, obl=0, alpha=0, u1=0, u2=0, yo=0, ro=0.1):
"""Interactive visualization of the RM effect."""
# Map resolution for plotting
res = 300
# Set the map coefficients
map_Iv_plus_I[:3, :] = get_ylm_coeffs(inc=inc, obl=obl, alpha=alpha, veq=veq * 1.e3)
map_Iv_plus_I[0, 0] = 1
map_Iv_plus_I[1] = u1
map_Iv_plus_I[2] = u2
map_I[0, 0] = 1
map_I[1] = u1
map_I[2] = u2
# Check if LD is physical
if (u1 + u2) > 1 or (u1 + 2 * u2) < 0 or u1 < 0:
u1slider.style.handle_color = "#FF0000"
u2slider.style.handle_color = "#FF0000"
else:
u1slider.style.handle_color = "#FFFFFF"
u2slider.style.handle_color = "#FFFFFF"
# Plot the brightness-weighted velocity field
x, y = np.meshgrid(np.linspace(-1, 1, res), np.linspace(-1, 1, res))
img = np.array([map_Iv_plus_I(x=x[j], y=y[j]) -
map_I(x=x[j], y=y[j]) for j in range(res)]) * (np.pi / 1.e3)
fig = pl.figure(figsize=(15, 8))
axim = pl.axes((0, 0.05, 0.3, 0.8))
axcb = pl.axes((0, 0.85, 0.3, 0.03))
axrm = pl.axes((0.4, 0.20, 0.6, 0.5))
im = axim.imshow(img, cmap='RdBu_r', origin='lower',
vmin=-veq, vmax=veq, extent=(-1,1,-1,1))
cb = pl.colorbar(im, orientation='horizontal', cax=axcb)
cb.ax.set_xlabel("Radial velocity [km / s]")
axim.contour(img, origin='lower', levels=np.linspace(-veq, veq, 20),
colors=['k' for i in range(20)], alpha=0.25,
extent=(-1,1,-1,1))
axim.axis('off')
axim.set_aspect(1)
axim.axhline(yo, color='k', alpha=0.5)
axim.axhline(yo + 0.5 * ro, color='k', ls='--', alpha=0.5)
axim.axhline(yo - 0.5 * ro, color='k', ls='--', alpha=0.5)
# Compute the RM effect amplitude
xo = np.linspace(-1 - 2 * ro, 1 + 2 * ro, 1000)
Iv_plus_I = map_Iv_plus_I.flux(xo=xo, yo=yo, ro=ro)
I = map_I.flux(xo=xo, yo=yo, ro=ro)
RM = (Iv_plus_I - I) / I
# Plot it
axrm.plot(xo, RM)
axrm.set_xlabel(r"Occultor x position [$R_\star$]", fontsize=16)
axrm.set_ylabel("Radial velocity [m /s]", fontsize=16)
axrm.set_title("The Rossiter-McLaughlin effect", fontsize=20)
axrm.plot(xo_189, rv_189, '.')
def visualize():
return interact(visualize_func, veq=vslider, inc=islider,
obl=oslider, alpha=aslider, u1=u1slider,
u2=u2slider, yo=yslider, ro=rslider)
```
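A minimal usage sketch for the widget code above, assuming it is importable as `viz` and that its data dependencies (`ylm_rot.py`, `HD189733_sample.txt`, and the starry package) are available; the slider values passed to `visualize_func` are arbitrary examples, not values from the original figure:
```python
# Hypothetical notebook usage of the interactive RM visualization defined above.
import viz  # assumes figures/HD189733/viz.py is importable as `viz`

# Build the ipywidgets UI; moving any slider re-runs visualize_func().
viz.visualize()

# The plotting function can also be called directly with fixed parameters,
# e.g. an aligned, equator-on star transited by a 0.1 R_star planet.
viz.visualize_func(veq=3.0, inc=90, obl=0, alpha=0, u1=0.4, u2=0.2, yo=0.0, ro=0.1)
```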
#### File: wobble/scripts/make_data.py
```python
import numpy as np
from scipy.io.idl import readsav
from scipy.interpolate import interp1d
from harps_hacks import read_harps
import h5py
import math
from astropy.io import fits
import shutil
import glob
import os
def dimensions(instrument):
if instrument == 'HARPS':
M = 4096 # pixels per order
R = 72 # orders
else:
print("instrument not recognized. valid options are: HARPS")
return
return M, R
def read_data_from_fits(filelist, e2ds=False):
# input : a list of CCF filenames
N = len(filelist) # number of epochs
M, R = dimensions('HARPS')
data = [np.zeros((N,M)) for r in range(R)]
ivars = [np.zeros((N,M)) for r in range(R)]
xs = [np.zeros((N,M)) for r in range(R)]
empty = np.array([], dtype=int)
pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)
for n,f in enumerate(filelist):
sp = fits.open(f)
if not e2ds:
try:
pipeline_rvs[n] = sp[0].header['HIERARCH ESO DRS CCF RVC'] * 1.e3 # m/s
pipeline_sigmas[n] = sp[0].header['HIERARCH ESO DRS CCF NOISE'] * 1.e3 # m/s
drifts[n] = sp[0].header['HIERARCH ESO DRS DRIFT SPE RV']
except KeyError:
print("WARNING: {0} does not appear to be a stellar CCF file. Skipping this one.".format(f))
empty = np.append(empty, n)
continue
dates[n] = sp[0].header['HIERARCH ESO DRS BJD']
bervs[n] = sp[0].header['HIERARCH ESO DRS BERV'] * 1.e3 # m/s
airms[n] = sp[0].header['HIERARCH ESO TEL AIRM START']
spec_file = str.replace(f, 'ccf_G2', 'e2ds')
spec_file = str.replace(spec_file, 'ccf_M2', 'e2ds')
spec_file = str.replace(spec_file, 'ccf_K5', 'e2ds')
try:
wave, spec = read_harps.read_spec_2d(spec_file)
        except Exception:  # skip epochs whose e2ds spectrum cannot be read
empty = np.append(empty, n)
continue
snrs = read_harps.read_snr(f) # HACK
# save stuff
for r in range(R):
data[r][n,:] = spec[r,:]
ivars[r][n,:] = snrs[r]**2/spec[r,:]/np.nanmean(spec[r,:]) # scaling hack
xs[r][n,:] = wave[r,:]
# delete data without wavelength solutions:
for r in range(R):
data[r] = np.delete(data[r], empty, axis=0)
ivars[r] = np.delete(ivars[r], empty, axis=0)
xs[r] = np.delete(xs[r], empty, axis=0)
pipeline_rvs = np.delete(pipeline_rvs, empty)
pipeline_sigmas = np.delete(pipeline_sigmas, empty)
dates = np.delete(dates, empty)
bervs = np.delete(bervs, empty)
airms = np.delete(airms, empty)
drifts = np.delete(drifts, empty)
# re-introduce BERVs to HARPS results:
pipeline_rvs -= bervs
pipeline_rvs -= np.mean(pipeline_rvs)
return data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts
def savfile_to_filelist(savfile, destination_dir='../data/'):
# copies CCF + E2DS files to destination_dir and returns a list of the CCFs
s = readsav(savfile)
filelist = []
files = [f.decode('utf8') for f in s.files]
for f in files:
shutil.copy2(f, destination_dir)
spec_file = str.replace(f, 'ccf_G2', 'e2ds')
shutil.copy2(spec_file, destination_dir)
basename = f[str.rfind(f,'/')+1:]
filelist = np.append(filelist, destination_dir+basename)
return filelist
def missing_wavelength_files(filelist):
missing_files = []
for f in filelist:
path = f[0:str.rfind(f,'/')+1]
sp = fits.open(f)
header = sp[0].header
wave_file = header['HIERARCH ESO DRS CAL TH FILE']
if os.path.isfile(path+wave_file):
continue
else:
missing_files = np.append(missing_files, wave_file)
return np.unique(missing_files)
def write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, filenames, hdffile):
h = h5py.File(hdffile, 'w')
dset = h.create_dataset('data', data=data)
dset = h.create_dataset('ivars', data=ivars)
dset = h.create_dataset('xs', data=xs)
dset = h.create_dataset('pipeline_rvs', data=pipeline_rvs)
dset = h.create_dataset('pipeline_sigmas', data=pipeline_sigmas)
dset = h.create_dataset('dates', data=dates)
dset = h.create_dataset('bervs', data=bervs)
dset = h.create_dataset('airms', data=airms)
dset = h.create_dataset('drifts', data=drifts)
filenames = [a.encode('utf8') for a in filenames] # h5py workaround
dset = h.create_dataset('filelist', data=filenames)
h.close()
if __name__ == "__main__":
if False: #51 Peg
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/51peg/HARPS*ccf_G2_A.fits')
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/51peg_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: #Barnard's Star
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/barnards/HARPS*ccf_M2_A.fits')
if False: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files for Barnard\'s Star'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/barnards_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: # HD189733
ccf_filelist = glob.glob('/Users/mbedell/python/wobble/data/HD189733/HARPS*ccf_*_A.fits')
if False: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '../data/HD189733_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
if False: # telluric standard
e2ds_filelist = glob.glob('/Users/mbedell/python/wobble/data/telluric/HARPS*e2ds_A.fits')
if True: # check for missing wavelength files
missing_files = missing_wavelength_files(e2ds_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(e2ds_filelist, e2ds=True)
hdffile = '../data/telluric_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, e2ds_filelist, hdffile)
if True: # beta hyi
ccf_filelist = glob.glob('/mnt/ceph/users/mbedell/wobble/betahyi/HARPS*ccf_*_A.fits')
if True: # check for missing wavelength files
missing_files = missing_wavelength_files(ccf_filelist)
np.savetxt('missing_files.txt', missing_files, fmt='%s')
print('{0} missing wavelength files'.format(len(missing_files)))
data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = read_data_from_fits(ccf_filelist)
hdffile = '/mnt/ceph/users/mbedell/wobble/betahyi_e2ds.hdf5'
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, ccf_filelist, hdffile)
```
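A hedged sketch of how the helpers in this script could be chained for a new target; the glob pattern and output filename are placeholders rather than paths from the original `__main__` block:
```python
# Hypothetical driver built from the functions above (paths are placeholders).
import glob
from make_data import missing_wavelength_files, read_data_from_fits, write_data

ccf_filelist = glob.glob('/path/to/data/HARPS*ccf_G2_A.fits')

# Optionally flag wavelength-calibration files referenced in the headers but
# missing on disk, as the __main__ block does for each target.
missing = missing_wavelength_files(ccf_filelist)
if len(missing) > 0:
    print('{0} missing wavelength files'.format(len(missing)))

# Read the spectra plus pipeline metadata and bundle everything into HDF5.
(data, ivars, xs, pipeline_rvs, pipeline_sigmas,
 dates, bervs, airms, drifts) = read_data_from_fits(ccf_filelist)
write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs,
           airms, drifts, ccf_filelist, 'target_e2ds.hdf5')
```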
#### File: wobble/scripts/regularization.py
```python
import numpy as np
import matplotlib.pyplot as plt
import wobble
import tensorflow as tf
from tqdm import tqdm
import h5py
import os
__all__ = ["improve_order_regularization", "improve_parameter", "test_regularization_value", "plot_pars_from_file"]
def get_name_from_tensor(tensor):
# hacky method to get rid of characters TF adds to the variable names
# NOTE - does not handle '_2' type additions!
# also won't work if you put colons in your variable names but why would you do that?
return str.split(tensor.name, ':')[0]
def improve_order_regularization(r, o, star_filename, tellurics_filename,
training_data, training_results,
validation_data, validation_results,
verbose=True, plot=False, basename='',
K_star=0, K_t=0, L1=True, L2=True,
tellurics_template_fixed=False):
"""
Use a validation scheme to determine the best regularization parameters for
all model components in a given order r.
Update files at star_filename, tellurics_filename with the best parameters.
"""
training_model = wobble.Model(training_data, training_results, r)
training_model.add_star('star', variable_bases=K_star)
if tellurics_template_fixed: # hackity hack hack
results_51peg = wobble.Results(filename='/Users/mbedell/python/wobble/results/results_51peg_Kstar0_Kt0.hdf5')
template_xs = np.copy(results_51peg.tellurics_template_xs[o])
template_ys = np.copy(results_51peg.tellurics_template_ys[o])
training_model.add_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=template_xs,
template_ys=template_ys)
else:
training_model.add_telluric('tellurics', rvs_fixed=True, variable_bases=K_t)
training_model.setup()
training_model.optimize(niter=0, verbose=verbose, rv_uncertainties=False)
if plot:
n = 0 # epoch to plot
title = 'Initialization'
filename = '{0}_init'.format(basename)
plot_fit(r, n, training_data, training_results, title=title, basename=filename)
validation_model = wobble.Model(validation_data, validation_results, r)
validation_model.add_star('star', variable_bases=K_star,
template_xs=training_results.star_template_xs[r]) # ensure templates are same size
if tellurics_template_fixed: # hackity hack hack
validation_model.add_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=training_results.tellurics_template_xs[r],
template_ys=training_results.tellurics_template_ys[r])
else:
validation_model.add_telluric('tellurics', rvs_fixed=True, variable_bases=K_t,
template_xs=training_results.tellurics_template_xs[r])
validation_model.setup()
# the order in which these are defined will determine the order in which they are optimized:
tensors_to_tune = [training_model.components[1].L2_template_tensor, training_model.components[0].L2_template_tensor,
training_model.components[1].L1_template_tensor, training_model.components[0].L1_template_tensor]
tensor_names = ['L2_template', 'L2_template', 'L1_template',
                    'L1_template']  # this is only needed because TF appends garbage to the end of the tensor name
tensor_components = ['tellurics', 'star', 'tellurics', 'star'] # ^ same
if K_star > 0:
tensors_to_tune = np.append(tensors_to_tune, [training_model.components[0].L2_basis_vectors_tensor,
training_model.components[0].L1_basis_vectors_tensor])
tensor_names = np.append(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = np.append(tensor_components, ['star', 'star'])
if K_t > 0:
tensors_to_tune = np.append(tensors_to_tune, [training_model.components[1].L2_basis_vectors_tensor,
training_model.components[1].L1_basis_vectors_tensor])
tensor_names = np.append(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = np.append(tensor_components, ['tellurics', 'tellurics'])
regularization_dict = {}
#o_init = max(0, o-1) # initialize from previous order, or if o=0 use defaults
o_init = o # always initialize from starting guess (TODO: decide which init is better)
for i,tensor in enumerate(tensors_to_tune):
if tensor_components[i] == 'star':
filename = star_filename
elif tensor_components[i] == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r') as f:
regularization_dict[tensor] = np.copy(f[tensor_names[i]][o_init])
i = 0 # track order in which parameters are improved
for component,(tensor,name) in zip(tensor_components, zip(tensors_to_tune, tensor_names)):
if (name[0:2] == "L1" and L1) or (name[0:2] == "L2" and L2):
i += 1
regularization_dict[tensor] = improve_parameter(tensor, training_model, validation_model,
regularization_dict, validation_data, validation_results,
verbose=verbose,
plot=plot, basename=basename+'_par{0}'.format(i))
if component == 'star':
filename = star_filename
elif component == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r+') as f:
f[name][o] = np.copy(regularization_dict[tensor])
if plot:
test_regularization_value(tensor, regularization_dict[tensor],
training_model, validation_model, regularization_dict,
validation_data, validation_results, plot=False, verbose=False) # hack to update results
title = 'Final'
filename = '{0}_final'.format(basename)
plot_fit(r, n, validation_data, validation_results, title=title, basename=filename)
fig = plt.figure()
ax = fig.add_subplot(111)
val_rvs = validation_results.star_rvs[r] + validation_results.bervs
train_rvs = training_results.star_rvs[r] + training_results.bervs
ax.plot(validation_results.dates, val_rvs - np.mean(val_rvs), 'r.')
ax.plot(training_results.dates, train_rvs - np.mean(train_rvs), 'k.', alpha=0.5)
ax.set_ylabel('RV (m/s)')
ax.set_xlabel('JD')
fig.tight_layout()
plt.savefig(basename+'_final_rvs.png')
plt.close(fig)
def improve_parameter(par, training_model, validation_model, regularization_dict,
validation_data, validation_results,
plot=False, verbose=True, basename=''):
"""
Perform a grid search to set the value of regularization parameter `par`.
Requires training data and validation data to evaluate goodness-of-fit for each parameter value.
Returns optimal parameter value.
"""
current_value = np.copy(regularization_dict[par])
if current_value == 0: # can't be scaled
return 0
name = str.split(par.name, ':')[0] # chop off TF's ID #
grid = np.logspace(-1.0, 1.0, num=3) * current_value
nll_grid = np.zeros_like(grid)
for i,val in enumerate(grid):
nll_grid[i] = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
# ensure that the minimum isn't on a grid edge:
best_ind = np.argmin(nll_grid)
while (best_ind == 0 and val >= 1.e-2): # prevent runaway minimization
val = grid[0]/10.
new_nll = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
grid = np.append(val, grid)
nll_grid = np.append(new_nll, nll_grid)
best_ind = np.argmin(nll_grid)
while best_ind == len(grid) - 1:
val = grid[-1]*10.
new_nll = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
grid = np.append(grid, val)
nll_grid = np.append(nll_grid, new_nll)
best_ind = np.argmin(nll_grid)
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(grid, nll_grid, color='r')
ax.plot(grid, nll_grid, c='r', ls='dashed', lw=1)
ax.axvline(grid[best_ind], c='k', alpha=0.7, ls='dashed', lw=2)
ax.set_ylim([nll_grid[best_ind]-10., nll_grid[best_ind]+100.])
ax.set_xlim([grid[0]*0.5, grid[-1]*2.])
ax.set_xscale('log')
ax.set_xlabel('{0} values'.format(name))
ax.set_ylabel('NLL')
fig.tight_layout()
plt.savefig('{0}_nll.png'.format(basename))
plt.close(fig)
if verbose:
print("{0} optimized to {1:.0e}".format(name, grid[best_ind]))
return grid[best_ind]
def test_regularization_value(par, val, training_model, validation_model, regularization_dict,
validation_data, validation_results,
plot=False, verbose=True, basename='',
training_niter=200, validation_niter=1000):
'''
Try setting regularization parameter `par` to value `val`; return goodness metric `nll`.
'''
r = training_model.r
regularization_dict[par] = val
name = get_name_from_tensor(par) # chop off TF's ID #
session = wobble.utils.get_session()
session.run(tf.global_variables_initializer()) # reset both models
training_model.optimize(niter=training_niter, feed_dict=regularization_dict, verbose=verbose, rv_uncertainties=False)
validation_dict = {**regularization_dict}
for c in validation_model.components:
validation_dict[getattr(c, 'template_xs')] = getattr(training_model.results,
c.name+'_template_xs')[r]
validation_dict[getattr(c, 'template_ys')] = getattr(training_model.results,
c.name+'_template_ys')[r]
if c.K > 0:
validation_dict[getattr(c, 'basis_vectors')] = getattr(training_model.results,
c.name+'_basis_vectors')[r]
session = wobble.utils.get_session()
if verbose:
iterator = tqdm(range(validation_niter))
else:
iterator = range(validation_niter)
for i in iterator:
for c in validation_model.components:
if not c.rvs_fixed:
session.run(c.opt_rvs, feed_dict=validation_dict) # HACK
if c.K > 0:
session.run(c.opt_basis_weights, feed_dict=validation_dict)
for c in validation_model.components:
validation_model.results.update(c, feed_dict=validation_dict)
zero_regularization_dict = {**regularization_dict} # for final chi-sq eval
for key in zero_regularization_dict:
zero_regularization_dict[key] = 0.0
for c in validation_model.components:
zero_regularization_dict[getattr(c, 'template_xs')] = getattr(training_model.results,
c.name+'_template_xs')[r]
zero_regularization_dict[getattr(c, 'template_ys')] = getattr(training_model.results,
c.name+'_template_ys')[r]
if not c.rvs_fixed:
zero_regularization_dict[getattr(c, 'rvs')] = getattr(validation_model.results,
c.name+'_rvs')[r]
if c.K > 0:
zero_regularization_dict[getattr(c, 'basis_vectors')] = getattr(training_model.results,
c.name+'_basis_vectors')[r]
zero_regularization_dict[getattr(c, 'basis_weights')] = getattr(validation_model.results,
c.name+'_basis_weights')[r]
if plot:
n = 0 # epoch to plot
title = '{0}: value {1:.0e}'.format(name, val)
filename = '{0}_val{1:.0e}'.format(basename, val)
plot_fit(r, n, validation_data, validation_results, title=title, basename=filename)
nll = session.run(validation_model.nll, feed_dict=zero_regularization_dict)
if verbose:
print('{0}, value {1:.0e}: nll {2:.4e}'.format(name, val, nll))
return nll
def plot_fit(r, n, data, results, title='', basename=''):
"""Plots full-order and zoomed-in versions of fits & residuals"""
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw = {'height_ratios':[4, 1]}, figsize=(12,5))
xs = np.exp(data.xs[r][n])
ax.scatter(xs, np.exp(data.ys[r][n]), marker=".", alpha=0.5, c='k', label='data', s=16)
mask = data.ivars[r][n] <= 1.e-8
ax.scatter(xs[mask], np.exp(data.ys[r][n,mask]), marker=".", alpha=1., c='white', s=8)
ax.plot(xs,
np.exp(results.star_ys_predicted[r][n]),
color='r', label='star model', lw=1.5, alpha=0.7)
ax.plot(xs,
np.exp(results.tellurics_ys_predicted[r][n]),
color='b', label='tellurics model', lw=1.5, alpha=0.7)
ax.set_xticklabels([])
ax.set_ylabel('Normalized Flux', fontsize=14)
resids = np.exp(data.ys[r][n]) - np.exp(results.star_ys_predicted[r][n]
+ results.tellurics_ys_predicted[r][n])
ax2.scatter(xs, resids, marker=".", alpha=0.5, c='k')
ax2.set_ylim([-0.1, 0.1])
ax2.set_xlabel(r'Wavelength ($\AA$)', fontsize=14)
ax2.set_ylabel('Resids', fontsize=14)
ax.legend(fontsize=12)
ax.set_title(title, fontsize=12)
fig.tight_layout()
fig.subplots_adjust(hspace=0.05)
plt.savefig('{0}.png'.format(basename))
xlim = [np.percentile(xs, 20) - 7.5, np.percentile(xs, 20) + 7.5] # 15A near-ish the edge of the order
ax.set_xlim(xlim)
ax.set_xticklabels([])
ax2.set_xlim(xlim)
plt.savefig('{0}_zoom.png'.format(basename))
plt.close(fig)
def plot_pars_from_file(filename, basename, orders=np.arange(72)):
"""Takes an HDF5 file and automatically creates overview plots of regularization amplitudes"""
with h5py.File(filename, 'r') as f:
for key in list(f.keys()):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_yscale('log')
ax.plot(orders, np.array(f[key])[orders], 'o')
ax.set_xlabel('Order #')
ax.set_ylabel('Regularization Amplitude')
ax.set_title(key)
ax.set_xlim([-3,75])
fig.tight_layout()
plt.savefig(basename+'_{0}.png'.format(key))
plt.close(fig)
if __name__ == "__main__":
# change these keywords:
starname = 'barnards'
orders = np.arange(72)
K_star = 0 # number of variable components for stellar spectrum
K_t = 0 # number of variable components for telluric spectrum
tellurics_template_fixed = False
plot = True
verbose = True
# create directory for plots if it doesn't exist:
plot_dir = '../regularization/{0}_Kstar{1}_Kt{2}/'.format(starname, K_star, K_t)
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
# create HDF5 files if they don't exist:
regularization_par = ['L1_template', 'L2_template',
'L1_basis_vectors', 'L2_basis_vectors', 'L2_basis_weights']
R = len(orders)
star_filename = '../wobble/regularization/{0}_star_K{1}.hdf5'.format(starname, K_star)
if not os.path.isfile(star_filename):
with h5py.File(star_filename,'w') as f:
f.create_dataset('L1_template', data=np.zeros(R)+1.e-2)
f.create_dataset('L2_template', data=np.zeros(R)+1.e2)
if K_star > 0:
f.create_dataset('L1_basis_vectors', data=np.zeros(R)+1.e5)
f.create_dataset('L2_basis_vectors', data=np.zeros(R)+1.e6)
f.create_dataset('L2_basis_weights', data=np.ones(R)) # never tuned, just need to pass to wobble
tellurics_filename = '../wobble/regularization/{0}_t_K{1}.hdf5'.format(starname, K_t)
if not os.path.isfile(tellurics_filename):
with h5py.File(tellurics_filename,'w') as f:
if tellurics_template_fixed:
f.create_dataset('L1_template', data=np.zeros(R))
f.create_dataset('L2_template', data=np.zeros(R))
else:
f.create_dataset('L1_template', data=np.zeros(R)+1.e4)
f.create_dataset('L2_template', data=np.zeros(R)+1.e6)
if K_t > 0:
f.create_dataset('L1_basis_vectors', data=np.zeros(R)+1.e3)
f.create_dataset('L2_basis_vectors', data=np.zeros(R)+1.e8)
f.create_dataset('L2_basis_weights', data=np.ones(R)) # never tuned, just need to pass to wobble
# set up training & validation data sets:
if True:
data = wobble.Data(starname+'_e2ds.hdf5', filepath='data/', orders=orders, min_snr=3) # to get N_epochs
validation_epochs = np.random.choice(data.N, data.N//8, replace=False) # 12.5% of epochs will be validation set
training_epochs = np.delete(np.arange(data.N), validation_epochs)
else: # HACK for HD 189733
e = np.asarray([ 0, 1, 6, 7, 9, 17, 18, 19, 21, 23, 24, 26, 30, 33, 34, 35, 36,
37, 38, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 56, 61,
66, 69, 70, 72, 73, 75]) # night of August 28, 2007
validation_epochs = np.random.choice(e, len(e)//8, replace=False)
training_epochs = np.delete(e, validation_epochs)
training_data = wobble.Data(starname+'_e2ds.hdf5', filepath='../data/', orders=orders,
epochs=training_epochs, min_snr=3)
training_results = wobble.Results(training_data)
validation_data = wobble.Data(starname+'_e2ds.hdf5', filepath='../data/', orders=training_data.orders,
epochs=validation_epochs, min_snr=1) # HACK
validation_results = wobble.Results(validation_data)
assert len(training_data.orders) == len(validation_data.orders), "Number of orders used is not the same between training and validation data."
orders = training_data.orders
# improve each order's regularization:
for r,o in enumerate(orders): # r is an index into the (cleaned) data. o is an index into the 72 orders (and the file tracking them).
if verbose:
print('---- STARTING ORDER {0} ----'.format(o))
print("starting values:")
print("star:")
with h5py.File(star_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
print("tellurics:")
with h5py.File(tellurics_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
improve_order_regularization(r, o, star_filename, tellurics_filename,
training_data, training_results,
validation_data, validation_results,
verbose=verbose, plot=plot,
basename='{0}o{1}'.format(plot_dir, o),
K_star=K_star, K_t=K_t, L1=True, L2=True,
tellurics_template_fixed=tellurics_template_fixed)
if verbose:
print('---- ORDER {0} COMPLETE ({1}/{2}) ----'.format(o,r,len(orders)-1))
print("best values:")
print("star:")
with h5py.File(star_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
print("tellurics:")
with h5py.File(tellurics_filename, 'r') as f:
for key in list(f.keys()):
print("{0}: {1:.0e}".format(key, f[key][o]))
# save some summary plots:
plot_pars_from_file(star_filename, 'regularization/{0}_star_Kstar{1}_Kt{2}'.format(starname, K_star, K_t), orders=orders)
plot_pars_from_file(tellurics_filename, 'regularization/{0}_tellurics_Kstar{1}_Kt{2}'.format(starname, K_star, K_t), orders=orders)
```
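As a usage note, the per-order summary plots can also be regenerated on their own from a previously tuned regularization file; a minimal sketch with a placeholder path and output prefix:
```python
# Hypothetical standalone call to the plotting helper defined above.
import numpy as np
from regularization import plot_pars_from_file

star_filename = '../wobble/regularization/barnards_star_K0.hdf5'  # placeholder
plot_pars_from_file(star_filename, 'plots/barnards_star_Kstar0_Kt0',
                    orders=np.arange(72))
```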
|
{
"source": "jfarid27/augur-1",
"score": 2
}
|
#### File: augur-core/tests/test_eth_exchange.py
```python
from eth_tester.exceptions import TransactionFailed
from pytest import raises, fixture as pytest_fixture
from utils import stringToBytes, AssertLog, PrintGasUsed
def test_eth_exchange(localFixture, augur, cash, ethExchange):
account = localFixture.accounts[0]
# Add liquidity to suggest the price is 1 ETH = 100 Cash
cashAmount = 1000 * 10**18
ethAmount = 10 * 10**18
addLiquidity(localFixture, ethExchange, cash, cashAmount, ethAmount, account)
# Now we can buy ETH
cashAmount = 10 * 10**18 # Trade 10 DAI for ~.1 ETH
expectedEthAmount = 10**17
assert roughlyEqual(ethExchange.getTokenPurchaseCost(expectedEthAmount), cashAmount, 2 * 10**17)
initialETH = localFixture.ethBalance(account)
buyEth(localFixture, ethExchange, cash, cashAmount, account)
assert roughlyEqual(initialETH + expectedEthAmount, localFixture.ethBalance(account))
# Buy Dai
ethAmount = 1 * 10**17 # Trade .1 ETH for ~10 DAI
initialCash = cash.balanceOf(account)
sellEth(localFixture, ethExchange, ethAmount, account)
assert roughlyEqual(initialCash + 10**19, cash.balanceOf(account), 10**17)
# Confirm that our estimate functions match
cashAmount = ethExchange.getTokenPurchaseCost(ethAmount)
assert ethExchange.getCashSaleProceeds(cashAmount) == ethAmount
def addLiquidity(fixture, exchange, cash, cashAmount, ethAmount, address):
cash.faucet(cashAmount)
cash.transfer(exchange.address, cashAmount)
fixture.sendEth(address, exchange.address, ethAmount)
assert exchange.getTokenBalance() == ethAmount
exchange.publicMint(address)
def buyEth(fixture, exchange, cash, cashAmount, address):
cash.faucet(cashAmount)
with PrintGasUsed(fixture, "Transfer Cash"):
cash.transfer(exchange.address, cashAmount)
with PrintGasUsed(fixture, "Buy ETH"):
exchange.buyToken(address)
def sellEth(fixture, exchange, ethAmount, address):
fixture.sendEth(address, exchange.address, ethAmount)
exchange.sellToken(address)
def roughlyEqual(amount1, amount2, tolerance=10**16):
return abs(amount1 - amount2) < tolerance
@pytest_fixture(scope="session")
def localSnapshot(fixture, augurInitializedSnapshot):
fixture.resetToSnapshot(augurInitializedSnapshot)
return augurInitializedSnapshot
@pytest_fixture
def localFixture(fixture, localSnapshot):
fixture.resetToSnapshot(localSnapshot)
return fixture
@pytest_fixture
def augur(localFixture, localSnapshot):
return localFixture.contracts["Augur"]
@pytest_fixture
def ethExchange(localFixture, localSnapshot):
return localFixture.contracts["EthExchange"]
```
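The tolerances above follow from simple pool arithmetic: with 1000 Cash and 10 ETH of liquidity, spending 10 Cash buys just under 0.1 ETH. A back-of-the-envelope check assuming a fee-less constant-product (x*y=k) model, which is only an approximation of whatever pricing the EthExchange contract actually implements:
```python
# Rough constant-product estimate of the trade exercised in test_eth_exchange
# (assumes a fee-less x*y=k pool, hence the approximate comparisons in the test).
cash_reserve = 1000 * 10**18
eth_reserve = 10 * 10**18
cash_in = 10 * 10**18

k = cash_reserve * eth_reserve
eth_out = eth_reserve - k // (cash_reserve + cash_in)
print(eth_out / 10**18)  # ~0.099 ETH, close to the expected 0.1 ETH
```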
|
{
"source": "jfarmer08/hassio",
"score": 2
}
|
#### File: site-packages/aiohttp_cors/mixin.py
```python
import asyncio
import collections
from aiohttp import hdrs, web
from .preflight_handler import _PreflightHandler
def custom_cors(config):
def wrapper(function):
name = "{}_cors_config".format(function.__name__)
setattr(function, name, config)
return function
return wrapper
class CorsViewMixin(_PreflightHandler):
cors_config = None
@classmethod
def get_request_config(cls, request, request_method):
try:
from . import APP_CONFIG_KEY
cors = request.app[APP_CONFIG_KEY]
except KeyError:
raise ValueError("aiohttp-cors is not configured.")
method = getattr(cls, request_method.lower(), None)
if not method:
raise KeyError()
config_property_key = "{}_cors_config".format(request_method.lower())
custom_config = getattr(method, config_property_key, None)
if not custom_config:
custom_config = {}
class_config = cls.cors_config
if not class_config:
class_config = {}
return collections.ChainMap(custom_config, class_config, cors.defaults)
@asyncio.coroutine
def _get_config(self, request, origin, request_method):
return self.get_request_config(request, request_method)
@asyncio.coroutine
def options(self):
response = yield from self._preflight_handler(self.request)
return response
```
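For context, a hedged sketch of how `CorsViewMixin` and `custom_cors` are typically wired into an application; the view, route, and resource options are illustrative, and registration details can differ between aiohttp-cors versions:
```python
# Illustrative wiring of the mixin above (names and options are examples).
import aiohttp_cors
from aiohttp import web
from aiohttp_cors.mixin import CorsViewMixin, custom_cors


class CardsView(web.View, CorsViewMixin):
    # Class-wide config consulted by get_request_config() for every method.
    cors_config = {
        "*": aiohttp_cors.ResourceOptions(allow_credentials=True,
                                          allow_headers="*"),
    }

    async def get(self):
        return web.json_response({"cards": []})

    # Stored as post.post_cors_config and layered over cors_config + app defaults.
    @custom_cors({"*": aiohttp_cors.ResourceOptions(allow_headers=("X-Token",))})
    async def post(self):
        return web.json_response({"ok": True})


app = web.Application()
cors = aiohttp_cors.setup(app)  # stores the CorsConfig under APP_CONFIG_KEY
cors.add(app.router.add_route("*", "/cards", CardsView))
```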
#### File: site-packages/aiohttp/signals.py
```python
import asyncio
from itertools import count
from aiohttp.frozenlist import FrozenList
from aiohttp.helpers import isfuture
class BaseSignal(FrozenList):
__slots__ = ()
@asyncio.coroutine
def _send(self, *args, **kwargs):
for receiver in self:
res = receiver(*args, **kwargs)
if asyncio.iscoroutine(res) or isfuture(res):
yield from res
class Signal(BaseSignal):
"""Coroutine-based signal implementation.
To connect a callback to a signal, use any list method.
Signals are fired using the :meth:`send` coroutine, which takes named
arguments.
"""
__slots__ = ('_app', '_name', '_pre', '_post')
def __init__(self, app):
super().__init__()
self._app = app
klass = self.__class__
self._name = klass.__module__ + ':' + klass.__qualname__
self._pre = app.on_pre_signal
self._post = app.on_post_signal
@asyncio.coroutine
def send(self, *args, **kwargs):
"""
Sends data to all registered receivers.
"""
if self:
ordinal = None
debug = self._app._debug
if debug:
ordinal = self._pre.ordinal()
yield from self._pre.send(
ordinal, self._name, *args, **kwargs)
yield from self._send(*args, **kwargs)
if debug:
yield from self._post.send(
ordinal, self._name, *args, **kwargs)
class FuncSignal(BaseSignal):
"""Callback-based signal implementation.
To connect a callback to a signal, use any list method.
Signals are fired using the :meth:`send` method, which takes named
arguments.
"""
__slots__ = ()
def send(self, *args, **kwargs):
"""
Sends data to all registered receivers.
"""
for receiver in self:
receiver(*args, **kwargs)
class DebugSignal(BaseSignal):
__slots__ = ()
@asyncio.coroutine
def send(self, ordinal, name, *args, **kwargs):
yield from self._send(ordinal, name, *args, **kwargs)
class PreSignal(DebugSignal):
__slots__ = ('_counter',)
def __init__(self):
super().__init__()
self._counter = count(1)
def ordinal(self):
return next(self._counter)
class PostSignal(DebugSignal):
__slots__ = ()
```
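A small example of the callback-based variant: `FuncSignal` is just a freezable list of receivers, so subscribing is an `append` and firing is a synchronous `send`, as the docstring above describes:
```python
# Minimal FuncSignal usage (synchronous receivers, fired with named arguments).
from aiohttp.signals import FuncSignal

def on_event(value, source=None):
    print('received', value, 'from', source)

signal = FuncSignal()
signal.append(on_event)            # subscribe with any list method
signal.send(42, source='example')  # calls every registered receiver
```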
#### File: curve25519/test/test_curve25519.py
```python
import unittest
from curve25519 import Private, Public
from hashlib import sha1, sha256
from binascii import hexlify
class Basic(unittest.TestCase):
def test_basic(self):
secret1 = b"<KEY>"
self.assertEqual(len(secret1), 32)
secret2 = b"<KEY>"
self.assertEqual(len(secret2), 32)
priv1 = Private(secret=secret1)
pub1 = priv1.get_public()
priv2 = Private(secret=secret2)
pub2 = priv2.get_public()
shared12 = priv1.get_shared_key(pub2)
e = b"b0818125eab42a8ac1af5e8b9b9c15ed2605c2bbe9675de89e5e6e7f442b9598"
self.assertEqual(hexlify(shared12), e)
shared21 = priv2.get_shared_key(pub1)
self.assertEqual(shared12, shared21)
pub2a = Public(pub2.serialize())
shared12a = priv1.get_shared_key(pub2a)
self.assertEqual(hexlify(shared12a), e)
def test_errors(self):
priv1 = Private()
self.assertRaises(ValueError, priv1.get_shared_key, priv1)
def test_seed(self):
# use 32-byte secret
self.assertRaises(TypeError, Private, secret=123)
self.assertRaises(TypeError, Private, secret=b"too short")
secret1 = b"<KEY>"
assert len(secret1) == 32
priv1 = Private(secret=secret1)
priv1a = Private(secret=secret1)
priv1b = Private(priv1.serialize())
self.assertEqual(priv1.serialize(), priv1a.serialize())
self.assertEqual(priv1.serialize(), priv1b.serialize())
e = b"6062636465666768696a6b6c6d6e6f707172737475767778797a313233343576"
self.assertEqual(hexlify(priv1.serialize()), e)
# the private key is a clamped form of the secret, so they won't
# quite be the same
p = Private(secret=b"\x00"*32)
self.assertEqual(hexlify(p.serialize()), b"00"*31+b"40")
p = Private(secret=b"\xff"*32)
self.assertEqual(hexlify(p.serialize()), b"f8"+b"ff"*30+b"7f")
# use arbitrary-length seed
self.assertRaises(TypeError, Private, seed=123)
priv1 = Private(seed=b"abc")
priv1a = Private(seed=b"abc")
priv1b = Private(priv1.serialize())
self.assertEqual(priv1.serialize(), priv1a.serialize())
self.assertEqual(priv1.serialize(), priv1b.serialize())
self.assertRaises(AssertionError, Private, seed=b"abc", secret=b"no")
priv1 = Private(seed=b"abc")
priv1a = Private(priv1.serialize())
self.assertEqual(priv1.serialize(), priv1a.serialize())
self.assertRaises(AssertionError, Private, seed=b"abc", secret=b"no")
# use built-in os.urandom
priv2 = Private()
priv2a = Private(priv2.private)
self.assertEqual(priv2.serialize(), priv2a.serialize())
# attempt to use both secret= and seed=, not allowed
self.assertRaises(AssertionError, Private, seed=b"abc", secret=b"no")
def test_hashfunc(self):
priv1 = Private(seed=b"abc")
priv2 = Private(seed=b"def")
shared_sha256 = priv1.get_shared_key(priv2.get_public())
e = b"da959ffe77ebeb4757fe5ba310e28ede425ae0d0ff5ec9c884e2d08f311cf5e5"
self.assertEqual(hexlify(shared_sha256), e)
# confirm the hash function remains what we think it is
def myhash(shared_key):
return sha256(b"curve25519-shared:"+shared_key).digest()
shared_myhash = priv1.get_shared_key(priv2.get_public(), myhash)
self.assertEqual(hexlify(shared_myhash), e)
def hexhash(shared_key):
return sha1(shared_key).hexdigest().encode()
shared_hexhash = priv1.get_shared_key(priv2.get_public(), hexhash)
self.assertEqual(shared_hexhash,
b"80eec98222c8edc4324fb9477a3c775ce7c6c93a")
if __name__ == "__main__":
unittest.main()
```
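A condensed sketch of the key-agreement flow the tests above exercise, using freshly generated keys instead of the fixed test vectors:
```python
# Basic curve25519 Diffie-Hellman exchange with the same API as the tests.
from binascii import hexlify
from curve25519 import Private, Public

alice = Private()   # random secret drawn from os.urandom
bob = Private()

# Each side serializes its public key and sends it to the other party.
bob_pub_wire = bob.get_public().serialize()
alice_shared = alice.get_shared_key(Public(bob_pub_wire))
bob_shared = bob.get_shared_key(alice.get_public())

assert alice_shared == bob_shared   # both sides derive the same hashed key
print(hexlify(alice_shared))
```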
#### File: python3.5/site-packages/DirectPy.py
```python
import requests
class DIRECTV:
"""DirectPy.py by <NAME> (github.com/sentry07)
Control a DirecTV receiver over the network using
DirecTV's SHEF protocol. For more information on
enabling the SHEF interface, please see this PDF:
https://www.satinstalltraining.com/homeautomation/DTV-MD-0359-DIRECTV_SHEF_Command_Set-V1.3.C.pdf
The clientAddr parameter of the class is used for
Genie systems that have a server receiver and client
receivers. To control a client receiver, you must
know the MAC address and reference it without colons.
EX: DIRECTV('192.168.1.10',clientAddr='000A959D6816')
"""
def __init__(self, ip, port=8080, clientAddr='0'):
self.ip = ip
self.port = port
self.clientAddr = clientAddr
self.standby = False
self.channel = '0'
self.valid_keys = ['power', 'poweron', 'poweroff', 'format', 'pause', 'rew', 'replay', 'stop',
'advance', 'ffwd', 'record', 'play', 'guide', 'active', 'list', 'exit',
'back', 'menu', 'info', 'up', 'down', 'left', 'right', 'select', 'red',
'green', 'yellow', 'blue', 'chanup', 'chandown', 'prev', '0', '1', '2',
'3', '4', '5', '6', '7', '8', '9', 'dash', 'enter']
self.base_url = 'http://%s:%s' % (ip,port)
self.get_standby()
self.get_tuned()
@staticmethod
def _parse_channel(channel):
"""Return major and minor channel numbers for given channel"""
try:
major, minor = channel.split('-')
except ValueError:
major = channel
minor = 65535
return major,minor
@staticmethod
def _combine_channel(major,minor):
"""Return the combined channel number. If minor == 65535, there is no minor channel number."""
if minor == 65535:
return str(major)
else:
return '%d-%d' % (major,minor)
def get_standby(self):
"""Return standby status of the receiver."""
jResp = requests.get('%s/info/mode?clientAddr=%s' % (self.base_url,self.clientAddr)).json()
if jResp['status']['code'] == 200:
self.standby = (jResp['mode'] == 1)
return self.standby
def get_channel(self, channel:"'###' or '###-#'"):
"""Return program information for a channel."""
if not type(channel) is str:
raise TypeError('Channel should be a string')
major,minor = self._parse_channel(channel)
jResp = requests.get('%s/tv/getProgInfo?major=%s&minor=%s&clientAddr=%s' % (self.base_url,major,minor,self.clientAddr)).json()
return jResp
def get_tuned(self):
"""Returns the channel and program information of the current channel."""
jResp = requests.get('%s/tv/getTuned?clientAddr=%s' % (self.base_url,self.clientAddr)).json()
self.channel = self._combine_channel(jResp['major'],jResp['minor'])
return jResp
def tune_channel(self, channel:"'###' or '###-#'"):
"""Change the channel on the receiver."""
if not type(channel) is str:
raise TypeError('Channel should be a string')
major,minor = self._parse_channel(channel)
jResp = requests.get('%s/tv/tune?major=%s&minor=%s&clientAddr=%s' % (self.base_url,major,minor,self.clientAddr)).json()
if jResp['status']['code'] == 200:
self.channel = channel
return jResp
def key_press(self, key:str):
"""Emulate pressing a key on the remote. See help() for supported keys.
Supported keys: power, poweron, poweroff, format,
pause, rew, replay, stop, advance, ffwd, record,
play, guide, active, list, exit, back, menu, info,
up, down, left, right, select, red, green, yellow,
blue, chanup, chandown, prev, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, dash, enter
"""
if not type(key) is str:
raise TypeError('Key should be a string')
if not key.lower() in self.valid_keys:
raise ValueError('Invalid key: ' + key)
jResp = requests.get('%s/remote/processKey?key=%s&hold=keyPress&clientAddr=%s' % (self.base_url,key,self.clientAddr)).json()
return jResp
def get_locations(self):
"""Returns the clientAddr for all devices."""
jResp = requests.get('%s/info/getLocations' % (self.base_url)).json()
return jResp
```
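A brief usage sketch based on the docstring above; the IP address, client MAC, and channel numbers are placeholders for whatever receiver is on the local network:
```python
# Hypothetical DIRECTV usage (IP, MAC, and channels are placeholders).
from DirectPy import DIRECTV

dtv = DIRECTV('192.168.1.10')  # server receiver on the default port 8080
# dtv = DIRECTV('192.168.1.10', clientAddr='000A959D6816')  # Genie client by MAC

print(dtv.get_standby())   # True if the receiver is in standby
print(dtv.get_tuned())     # program info for the currently tuned channel

dtv.tune_channel('249')    # plain major channel...
dtv.tune_channel('4-1')    # ...or major-minor form
dtv.key_press('guide')     # emulate a remote key press
```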
#### File: site-packages/nest/command_line.py
```python
from __future__ import print_function
import argparse
import os
import sys
from . import nest
from . import utils
from . import helpers
def parse_args():
prog = os.path.basename(sys.argv[0])
config_file = os.path.sep.join(('~', '.config', 'nest', 'config'))
token_cache = os.path.sep.join(('~', '.config', 'nest', 'token_cache'))
conf_parser = argparse.ArgumentParser(prog=prog, add_help=False)
conf_parser.add_argument('--conf', default=config_file,
help='config file (default %s)' % config_file,
metavar='FILE')
args, remaining_argv = conf_parser.parse_known_args()
defaults = helpers.get_config(config_path=args.conf)
description = 'Command line interface to Nest™ Thermostats'
parser = argparse.ArgumentParser(description=description,
parents=[conf_parser])
parser.add_argument('--token-cache', dest='token_cache',
default=token_cache,
help='auth access token cache file',
metavar='TOKEN_CACHE_FILE')
parser.add_argument('-t', '--token', dest='token',
help='auth access token', metavar='TOKEN')
parser.add_argument('--client-id', dest='client_id',
help='product id on developer.nest.com', metavar='ID')
parser.add_argument('--client-secret', dest='client_secret',
help='product secret for nest.com', metavar='SECRET')
parser.add_argument('-c', '--celsius', dest='celsius', action='store_true',
                        help='use celsius instead of fahrenheit')
parser.add_argument('-s', '--serial', dest='serial',
help='optional, specify serial number of nest '
'thermostat to talk to')
parser.add_argument('-S', '--structure', dest='structure',
                        help='optional, specify structure name to '
'scope device actions')
parser.add_argument('-i', '--index', dest='index', default=0, type=int,
help='optional, specify index number of nest to '
'talk to')
subparsers = parser.add_subparsers(dest='command',
help='command help')
temp = subparsers.add_parser('temp', help='show/set temperature')
temp.add_argument('temperature', nargs='*', type=float,
                      help='target temperature to set device to')
fan = subparsers.add_parser('fan', help='set fan "on" or "auto"')
fan_group = fan.add_mutually_exclusive_group()
fan_group.add_argument('--auto', action='store_true', default=False,
help='set fan to auto')
fan_group.add_argument('--on', action='store_true', default=False,
help='set fan to on')
mode = subparsers.add_parser('mode', help='show/set current mode')
mode_group = mode.add_mutually_exclusive_group()
mode_group.add_argument('--cool', action='store_true', default=False,
help='set mode to cool')
mode_group.add_argument('--heat', action='store_true', default=False,
help='set mode to heat')
mode_group.add_argument('--eco', action='store_true', default=False,
help='set mode to eco')
mode_group.add_argument('--range', action='store_true', default=False,
help='set mode to range')
mode_group.add_argument('--off', action='store_true', default=False,
help='set mode to off')
away = subparsers.add_parser('away', help='show/set current away status')
away_group = away.add_mutually_exclusive_group()
away_group.add_argument('--away', action='store_true', default=False,
help='set away status to "away"')
away_group.add_argument('--home', action='store_true', default=False,
help='set away status to "home"')
subparsers.add_parser('target', help='show current temp target')
subparsers.add_parser('humid', help='show current humidity')
target_hum = subparsers.add_parser('target_hum',
                                       help='show/set target humidity')
target_hum.add_argument('humidity', nargs='*',
help='specify target humidity value or auto '
'to auto-select a humidity based on outside '
'temp')
subparsers.add_parser('show', help='show everything')
parser.set_defaults(**defaults)
return parser.parse_args()
def main():
args = parse_args()
def _identity(x):
return x
display_temp = _identity
cmd = args.command
if args.client_id is None or args.client_secret is None:
print("Missing client and secret. Either call with --client-id "
"and --client-secret or add to config as client_id and "
"client_secret")
return
token_cache = os.path.expanduser(args.token_cache)
with nest.Nest(client_id=args.client_id, client_secret=args.client_secret,
access_token=args.token,
access_token_cache_file=token_cache) as napi:
if napi.authorization_required:
print('Go to ' + napi.authorize_url +
' to authorize, then enter PIN below')
pin = input("PIN: ")
napi.request_token(pin)
if cmd == 'away':
structure = None
if args.structure:
struct = [s for s in napi.structures
if s.name == args.structure]
if struct:
structure = struct[0]
else:
if args.serial:
serial = args.serial
else:
serial = napi.thermostats[args.index]._serial
struct = [s for s in napi.structures for d in s.thermostats
if d._serial == serial]
if struct:
structure = struct[0]
if not structure:
structure = napi.structures[0]
if args.away:
structure.away = True
elif args.home:
structure.away = False
print(structure.away)
return
if args.serial:
device = nest.Thermostat(args.serial, napi)
elif args.structure:
struct = [s for s in napi.structures if s.name == args.structure]
if struct:
device = struct[0].thermostats[args.index]
else:
device = napi.structures[0].thermostats[args.index]
else:
device = napi.thermostats[args.index]
        if args.celsius and device.temperature_scale == 'F':
            display_temp = utils.f_to_c
        elif not args.celsius and device.temperature_scale == 'C':
            display_temp = utils.c_to_f
if cmd == 'temp':
if args.temperature:
if len(args.temperature) > 1:
if device.mode != 'range':
device.mode = 'range'
device.temperature = args.temperature
else:
device.temperature = args.temperature
print('%0.1f' % display_temp(device.temperature))
elif cmd == 'fan':
if args.auto:
device.fan = False
elif args.on:
device.fan = True
print(device.fan)
elif cmd == 'mode':
if args.cool:
device.mode = 'cool'
elif args.heat:
device.mode = 'heat'
elif args.eco:
device.mode = 'eco'
elif args.range:
device.mode = 'range'
elif args.off:
device.mode = 'off'
print(device.mode)
elif cmd == 'humid':
print(device.humidity)
elif cmd == 'target':
target = device.target
if isinstance(target, tuple):
print('Lower: %0.1f' % display_temp(target[0]))
print('Upper: %0.1f' % display_temp(target[1]))
else:
print('%0.1f' % display_temp(target))
elif cmd == 'show':
            # TODO: pad the keys so the ':' separators line up (old code padded to column 35)
print('Device: %s' % device.name)
print('Where: %s' % device.where)
print('Away : %s' % device.structure.away)
print('Mode : %s' % device.mode)
print('State : %s' % device.hvac_state)
print('Fan : %s' % device.fan)
print('Temp : %0.1f%s' % (device.temperature,
device.temperature_scale))
print('Humidity : %0.1f%%' % device.humidity)
if isinstance(device.target, tuple):
print('Target : %0.1f-%0.1f%s' % (
display_temp(device.target[0]),
display_temp(device.target[1]),
device.temperature_scale))
else:
print('Target : %0.1f%s' % (display_temp(device.target),
device.temperature_scale))
print('Away Heat: %0.1fC' % device.eco_temperature[0])
print('Away Cool: %0.1fC' % device.eco_temperature[1])
print('Has Leaf : %s' % device.has_leaf)
if __name__ == '__main__':
main()
```
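Beyond the CLI, the flow in `main()` can be reused programmatically; a minimal sketch, assuming a product ID/secret from developer.nest.com and a writable token-cache path (all placeholders):
```python
# Hypothetical programmatic use mirroring main() above (credentials are placeholders).
import nest

with nest.Nest(client_id='YOUR_CLIENT_ID', client_secret='YOUR_CLIENT_SECRET',
               access_token_cache_file='/tmp/nest_token_cache') as napi:
    if napi.authorization_required:
        print('Go to ' + napi.authorize_url + ' to authorize, then enter PIN below')
        napi.request_token(input('PIN: '))

    thermostat = napi.thermostats[0]
    print(thermostat.name, thermostat.temperature, thermostat.mode)
    thermostat.temperature = 21  # sets the target temperature, like the 'temp' command
```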
#### File: site-packages/nest/nest.py
```python
import collections
import copy
import datetime
import hashlib
import time
import os
import uuid
import weakref
from dateutil.parser import parse as parse_time
import requests
from requests import auth
from requests import adapters
from requests.compat import json
ACCESS_TOKEN_URL = 'https://api.home.nest.com/oauth2/access_token'
AUTHORIZE_URL = 'https://home.nest.com/login/oauth2?client_id={0}&state={1}'
API_URL = 'https://developer-api.nest.com'
LOGIN_URL = 'https://home.nest.com/user/login'
SIMULATOR_SNAPSHOT_URL = \
'https://developer.nest.com' \
'/simulator/api/v1/nest/devices/camera/snapshot'
SIMULATOR_SNAPSHOT_PLACEHOLDER_URL = \
'https://media.giphy.com/media/WCwFvyeb6WJna/giphy.gif'
AWAY_MAP = {'on': 'away',
'away': 'away',
'off': 'home',
'home': 'home',
True: 'away',
False: 'home'}
FAN_MAP = {'auto on': False,
'on': True,
'auto': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False}
LowHighTuple = collections.namedtuple('LowHighTuple', ('low', 'high'))
DEVICES = 'devices'
METADATA = 'metadata'
STRUCTURES = 'structures'
THERMOSTATS = 'thermostats'
SMOKE_CO_ALARMS = 'smoke_co_alarms'
CAMERAS = 'cameras'
# https://developers.nest.com/documentation/api-reference/overview#targettemperaturef
MINIMUM_TEMPERATURE_F = 50
MAXIMUM_TEMPERATURE_F = 90
# https://developers.nest.com/documentation/api-reference/overview#targettemperaturec
MINIMUM_TEMPERATURE_C = 9
MAXIMUM_TEMPERATURE_C = 32
class APIError(Exception):
def __init__(self, response):
if response.content != b'':
message = response.json()['error']
else:
message = "Authorization failed"
# Call the base class constructor with the parameters it needs
super(APIError, self).__init__(message)
self.response = response
class AuthorizationError(Exception):
def __init__(self, response):
if response.content != b'':
message = response.json().get(
'error_description',
"Authorization Failed")
else:
message = "Authorization failed"
# Call the base class constructor with the parameters it needs
super(AuthorizationError, self).__init__(message)
self.response = response
class NestAuth(auth.AuthBase):
def __init__(self, auth_callback=None, session=None,
client_id=None, client_secret=None,
access_token=None, access_token_cache_file=None):
self._res = {}
self.auth_callback = auth_callback
self.pin = None
self._access_token_cache_file = access_token_cache_file
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
if (access_token_cache_file is not None and
access_token is None and
os.path.exists(access_token_cache_file)):
with open(access_token_cache_file, 'r') as f:
self._res = json.load(f)
self._callback(self._res)
if session is not None:
session = weakref.ref(session)
self._session = session
self._adapter = adapters.HTTPAdapter()
def _cache(self):
if self._access_token_cache_file is not None:
with os.fdopen(os.open(self._access_token_cache_file,
os.O_WRONLY | os.O_CREAT, 0o600),
'w') as f:
json.dump(self._res, f)
def _callback(self, res):
if self.auth_callback is not None and isinstance(self.auth_callback,
collections.Callable):
self.auth_callback(self._res)
def login(self, headers=None):
data = {'client_id': self._client_id,
'client_secret': self._client_secret,
'code': self.pin,
'grant_type': 'authorization_code'}
post = requests.post
if self._session:
session = self._session()
post = session.post
response = post(ACCESS_TOKEN_URL, data=data, headers=headers)
if response.status_code != 200:
raise AuthorizationError(response)
self._res = response.json()
self._cache()
self._callback(self._res)
@property
def access_token(self):
return self._res.get('access_token', self._access_token)
def __call__(self, r):
if self.access_token:
r.headers['Authorization'] = 'Bearer ' + self.access_token
return r
class NestBase(object):
def __init__(self, serial, nest_api):
self._serial = serial
self._nest_api = nest_api
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._repr_name)
def _set(self, what, data):
path = '/%s/%s' % (what, self._serial)
response = self._nest_api._put(path=path, data=data)
self._nest_api._bust_cache()
return response
@property
def _weather(self):
raise NotImplementedError("Deprecated Nest API")
# merge_code = self.postal_code + ',' + self.country_code
# return self._nest_api._weather[merge_code]
@property
def weather(self):
raise NotImplementedError("Deprecated Nest API")
# return Weather(self._weather, self._local_time)
@property
def serial(self):
return self._serial
@property
def _repr_name(self):
return self.serial
class Device(NestBase):
@property
def _device(self):
raise NotImplementedError("Implemented by sublass")
@property
def _devices(self):
return self._nest_api._devices
@property
def _repr_name(self):
if self.name:
return self.name
return self.where
@property
def name(self):
return self._device.get('name')
@name.setter
def name(self, value):
raise NotImplementedError("Needs updating with new API")
# self._set('shared', {'name': value})
@property
def name_long(self):
return self._device.get('name_long')
@property
def online(self):
return self._device.get('is_online')
@property
def structure(self):
return Structure(self._device['structure_id'],
self._nest_api)
@property
def where(self):
if self.where_id is not None:
return self.structure.wheres[self.where_id]['name']
@property
def where_id(self):
return self._device.get('where_id')
@where.setter
def where(self, value):
value = value.lower()
ident = self.structure.wheres.get(value)
if ident is None:
self.structure.add_where(value)
ident = self.structure.wheres[value]
self._set('device', {'where_id': ident})
@property
def description(self):
return self._device['name_long']
@property
def is_thermostat(self):
return False
@property
def is_camera(self):
return False
@property
def is_smoke_co_alarm(self):
return False
class Thermostat(Device):
@property
def is_thermostat(self):
return True
@property
def _device(self):
return self._devices[THERMOSTATS][self._serial]
@property
def _shared(self):
raise NotImplementedError("Deprecated Nest API")
# return self._nest_api._status['shared'][self._serial]
@property
def _track(self):
raise NotImplementedError("Deprecated Nest API")
# return self._nest_api._status['track'][self._serial]
@property
def fan(self):
# FIXME confirm this is the same as old havac_fan_state
return self._device.get('fan_timer_active')
@fan.setter
def fan(self, value):
mapped_value = FAN_MAP.get(value, False)
if mapped_value is None:
raise ValueError("Only True and False supported")
self._set('devices/thermostats', {'fan_timer_active': mapped_value})
@property
def humidity(self):
return self._device.get('humidity')
@property
def target_humidity(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['target_humidity']
@target_humidity.setter
def target_humidity(self, value):
raise NotImplementedError("No longer available in Nest API")
# if value == 'auto':
# if self._weather['current']['temp_c'] >= 4.44:
# hum_value = 45
# elif self._weather['current']['temp_c'] >= -1.11:
# hum_value = 40
# elif self._weather['current']['temp_c'] >= -6.67:
# hum_value = 35
# elif self._weather['current']['temp_c'] >= -12.22:
# hum_value = 30
# elif self._weather['current']['temp_c'] >= -17.78:
# hum_value = 25
# elif self._weather['current']['temp_c'] >= -23.33:
# hum_value = 20
# elif self._weather['current']['temp_c'] >= -28.89:
# hum_value = 15
# elif self._weather['current']['temp_c'] >= -34.44:
# hum_value = 10
# else:
# hum_value = value
# if float(hum_value) != self._device['target_humidity']:
# self._set('device', {'target_humidity': float(hum_value)})
@property
def mode(self):
# FIXME confirm same as target_temperature_type
return self._device.get('hvac_mode')
@mode.setter
def mode(self, value):
self._set('devices/thermostats', {'hvac_mode': value.lower()})
@property
def has_leaf(self):
return self._device.get('has_leaf')
@property
def hvac_ac_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_ac_state']
@property
def hvac_cool_x2_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_cool_x2_state']
@property
def hvac_heater_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_heater_state']
@property
def hvac_aux_heater_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_aux_heater_state']
@property
def hvac_heat_x2_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_heat_x2_state']
@property
def hvac_heat_x3_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_heat_x3_state']
@property
def hvac_alt_heat_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_alt_heat_state']
@property
def hvac_alt_heat_x2_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._shared['hvac_alt_heat_x2_state']
@property
def hvac_emer_heat_state(self):
raise NotImplementedError(
"No longer available in Nest API. See "
"is_using_emergency_heat instead")
# return self._shared['hvac_emer_heat_state']
@property
def is_using_emergency_heat(self):
return self._device.get('is_using_emergency_heat')
@property
def local_ip(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['local_ip']
@property
def last_ip(self):
raise NotImplementedError("No longer available in Nest API")
# return self._track['last_ip']
@property
def last_connection(self):
# TODO confirm this does get set, or if the API documentation is wrong
return self._device.get('last_connection')
@property
def error_code(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['error_code']
@property
def battery_level(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['battery_level']
@property
def battery_health(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['battery_health']
@property
def postal_code(self):
return self.structure.postal_code
# return self._device['postal_code']
def _temp_key(self, key):
return "%s_%s" % (key, self.temperature_scale.lower())
def _round_temp(self, temp):
if self.temperature_scale == 'C':
return round(temp * 2) / 2
else:
# F goes to nearest degree
return int(round(temp))
@property
def temperature_scale(self):
return self._device['temperature_scale']
@property
def is_locked(self):
return self._device.get('is_locked')
@property
def locked_temperature(self):
low = self._device.get(self._temp_key('locked_temp_min'))
high = self._device.get(self._temp_key('locked_temp_max'))
return LowHighTuple(low, high)
@property
def temperature(self):
return self._device.get(self._temp_key('ambient_temperature'))
@property
def min_temperature(self):
if self.is_locked:
return self.locked_temperature[0]
else:
if self.temperature_scale == 'C':
return MINIMUM_TEMPERATURE_C
else:
return MINIMUM_TEMPERATURE_F
@property
def max_temperature(self):
if self.is_locked:
return self.locked_temperature[1]
else:
if self.temperature_scale == 'C':
return MAXIMUM_TEMPERATURE_C
else:
return MAXIMUM_TEMPERATURE_F
@temperature.setter
def temperature(self, value):
self.target = value
@property
def target(self):
if self.mode == 'heat-cool':
low = self._device[self._temp_key('target_temperature_low')]
high = self._device[self._temp_key('target_temperature_high')]
return LowHighTuple(low, high)
return self._device[self._temp_key('target_temperature')]
@target.setter
def target(self, value):
data = {}
if self.mode == 'heat-cool':
rounded_low = self._round_temp(value[0])
rounded_high = self._round_temp(value[1])
data[self._temp_key('target_temperature_low')] = rounded_low
data[self._temp_key('target_temperature_high')] = rounded_high
else:
rounded_temp = self._round_temp(value)
data[self._temp_key('target_temperature')] = rounded_temp
self._set('devices/thermostats', data)
@property
def away_temperature(self):
# see https://nestdevelopers.io/t/new-things-for-fall/226
raise NotImplementedError(
"Deprecated Nest API, use eco_temperature instead")
@away_temperature.setter
def away_temperature(self, value):
# see https://nestdevelopers.io/t/new-things-for-fall/226
raise NotImplementedError(
"Deprecated Nest API, use eco_temperature instead")
@property
def eco_temperature(self):
# use get, since eco_temperature isn't always filled out
low = self._device.get(self._temp_key('eco_temperature_low'))
high = self._device.get(self._temp_key('eco_temperature_high'))
return LowHighTuple(low, high)
@eco_temperature.setter
def eco_temperature(self, value):
low, high = value
data = {}
if low is not None:
data[self._temp_key('eco_temperature_low')] = low
if high is not None:
data[self._temp_key('eco_temperature_high')] = high
self._set('devices/thermostats', data)
@property
def can_heat(self):
return self._device.get('can_heat')
@property
def can_cool(self):
return self._device.get('can_cool')
@property
def has_humidifier(self):
return self._device.get('has_humidifier')
@property
def has_dehumidifier(self):
return self._device.get('has_dehumidifier')
@property
def has_fan(self):
return self._device.get('has_fan')
@property
def has_hot_water_control(self):
return self._device.get('has_hot_water_control')
@property
def hot_water_temperature(self):
return self._device.get('hot_water_temperature')
@property
def hvac_state(self):
return self._device.get('hvac_state')
@property
def eco(self):
raise NotImplementedError("Deprecated Nest API")
# eco_mode = self._device['eco']['mode']
# # eco modes can be auto-eco or manual-eco
# return eco_mode.endswith('eco')
@eco.setter
def eco(self, value):
raise NotImplementedError("Deprecated Nest API")
# data = {'eco': self._device['eco']}
# if value:
# data['eco']['mode'] = 'manual-eco'
# else:
# data['eco']['mode'] = 'schedule'
# data['eco']['mode_update_timestamp'] = time.time()
# self._set('device', data)
class SmokeCoAlarm(Device):
@property
def is_smoke_co_alarm(self):
return True
@property
def _device(self):
return self._devices[SMOKE_CO_ALARMS][self._serial]
@property
def auto_away(self):
raise NotImplementedError("No longer available in Nest API.")
# return self._device['auto_away']
@property
def battery_health(self):
return self._device.get('battery_health')
@property
def battery_health_state(self):
raise NotImplementedError("use battery_health instead")
@property
def battery_level(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['battery_level']
@property
def capability_level(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['capability_level']
@property
def certification_body(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['certification_body']
@property
def co_blame_duration(self):
raise NotImplementedError("No longer available in Nest API")
# if 'co_blame_duration' in self._device:
# return self._device['co_blame_duration']
@property
def co_blame_threshold(self):
raise NotImplementedError("No longer available in Nest API")
# if 'co_blame_threshold' in self._device:
# return self._device['co_blame_threshold']
@property
def co_previous_peak(self):
raise NotImplementedError("No longer available in Nest API")
# if 'co_previous_peak' in self._device:
# return self._device['co_previous_peak']
@property
def co_sequence_number(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['co_sequence_number']
@property
def co_status(self):
# TODO deprecate for new name
return self._device.get('co_alarm_state')
@property
def component_als_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_als_test_passed']
@property
def component_co_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_co_test_passed']
@property
def component_heat_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_heat_test_passed']
@property
def component_hum_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_hum_test_passed']
@property
def component_led_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_led_test_passed']
@property
def component_pir_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_pir_test_passed']
@property
def component_smoke_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_smoke_test_passed']
@property
def component_temp_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_temp_test_passed']
@property
def component_us_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_us_test_passed']
@property
def component_wifi_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_wifi_test_passed']
@property
def creation_time(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['creation_time']
@property
def device_external_color(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['device_external_color']
@property
def device_locale(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['device_locale']
@property
def fabric_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['fabric_id']
@property
def factory_loaded_languages(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['factory_loaded_languages']
@property
def gesture_hush_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['gesture_hush_enable']
@property
def heads_up_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['heads_up_enable']
@property
def home_alarm_link_capable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['home_alarm_link_capable']
@property
def home_alarm_link_connected(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['home_alarm_link_connected']
@property
def home_alarm_link_type(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['home_alarm_link_type']
@property
def hushed_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['hushed_state']
@property
def installed_locale(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['installed_locale']
@property
def kl_software_version(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['kl_software_version']
@property
def latest_manual_test_cancelled(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['latest_manual_test_cancelled']
@property
def latest_manual_test_end_utc_secs(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['latest_manual_test_end_utc_secs']
@property
def latest_manual_test_start_utc_secs(self):
# TODO confirm units, deprecate for new method name
return self._device.get('last_manual_test_time')
@property
def last_manual_test_time(self):
# TODO parse time, check that it's in the dict
return self._device.get('last_manual_test_time')
@property
def line_power_present(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['line_power_present']
@property
def night_light_continuous(self):
raise NotImplementedError("No longer available in Nest API")
# if 'night_light_continuous' in self._device:
# return self._device['night_light_continuous']
@property
def night_light_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['night_light_enable']
@property
def ntp_green_led_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['ntp_green_led_enable']
@property
def product_id(self):
return self._device.get('product_id')
@property
def replace_by_date_utc_secs(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['replace_by_date_utc_secs']
@property
def resource_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['resource_id']
@property
def smoke_sequence_number(self):
return self._device['smoke_sequence_number']
@property
def smoke_status(self):
return self._device['smoke_alarm_state']
@property
def software_version(self):
return self._device['software_version']
@property
def spoken_where_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['spoken_where_id']
@property
def steam_detection_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['steam_detection_enable']
@property
def thread_mac_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['thread_mac_address']
@property
def wifi_ip_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_ip_address']
@property
def wifi_mac_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_mac_address']
@property
def wifi_regulatory_domain(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_regulatory_domain']
@property
def wired_led_enable(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wired_led_enable']
@property
def wired_or_battery(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wired_or_battery']
class ActivityZone(NestBase):
def __init__(self, camera, zone_id):
self.camera = camera
NestBase.__init__(self, camera.serial, camera._nest_api)
# camera's activity_zone dict has int, but an event's list of
# activity_zone ids is strings `\/0_0\/`
self._zone_id = int(zone_id)
@property
def _camera(self):
return self.camera._device
@property
def _repr_name(self):
return self.name
@property
def _activity_zone(self):
return next(
z for z in self._camera['activity_zones']
if z['id'] == self.zone_id)
@property
def zone_id(self):
return self._zone_id
@property
def name(self):
return self._activity_zone['name']
class CameraEvent(NestBase):
def __init__(self, camera):
NestBase.__init__(self, camera.serial, camera._nest_api)
self.camera = camera
@property
def _camera(self):
return self.camera._device
@property
def _event(self):
return self._camera.get('last_event')
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def activity_in_zone(self, zone_id):
if 'activity_zone_ids' in self._event:
return str(zone_id) in self._event['activity_zone_ids']
return False
@property
def activity_zones(self):
if 'activity_zone_ids' in self._event:
return [ActivityZone(self, z)
for z in self._event['activity_zone_ids']]
@property
def animated_image_url(self):
return self._event.get('animated_image_url')
@property
def app_url(self):
return self._event.get('app_url')
@property
def has_motion(self):
return self._event.get('has_motion')
@property
def has_person(self):
return self._event.get('has_person')
@property
def has_sound(self):
return self._event.get('has_sound')
@property
def image_url(self):
return self._event.get('image_url')
@property
def start_time(self):
if 'start_time' in self._event:
return parse_time(self._event['start_time'])
@property
def end_time(self):
if 'end_time' in self._event:
return parse_time(self._event['end_time'])
@property
def urls_expire_time(self):
if 'urls_expire_time' in self._event:
return parse_time(self._event['urls_expire_time'])
@property
def web_url(self):
return self._event.get('web_url')
@property
def is_ongoing(self):
if self.end_time is not None:
            # sometimes an existing event is updated with a new start time
            # that falls after the recorded end_time, which implies new activity
if self.start_time > self.end_time:
return True
now = datetime.datetime.now(self.end_time.tzinfo)
            # if the end time is still in the future, the event is ongoing
return self.end_time > now
# no end_time implies it's ongoing
return True
def has_ongoing_motion_in_zone(self, zone_id):
if self.is_ongoing and self.has_motion:
return self.activity_in_zone(zone_id)
def has_ongoing_sound(self):
if self.is_ongoing:
return self.has_sound
def has_ongoing_motion(self):
if self.is_ongoing:
return self.has_motion
def has_ongoing_person(self):
if self.is_ongoing:
return self.has_person
class Camera(Device):
@property
def is_camera(self):
return True
@property
def _device(self):
return self._devices[CAMERAS][self._serial]
@property
def ongoing_event(self):
if self.last_event is not None and self.last_event.is_ongoing:
return self.last_event
def has_ongoing_motion_in_zone(self, zone_id):
if self.ongoing_event is not None:
return self.last_event.has_ongoing_motion_in_zone(zone_id)
return False
@property
def sound_detected(self):
if self.ongoing_event is not None:
return self.last_event.has_ongoing_sound()
return False
@property
def motion_detected(self):
if self.ongoing_event is not None:
return self.last_event.has_ongoing_motion()
return False
@property
def person_detected(self):
if self.ongoing_event is not None:
return self.last_event.has_ongoing_person()
return False
@property
def activity_zones(self):
return [ActivityZone(self, z['id'])
for z in self._device.get('activity_zones', [])]
@property
def last_event(self):
if 'last_event' in self._device:
return CameraEvent(self)
@property
def is_streaming(self):
return self._device.get('is_streaming')
@property
def is_video_history_enabled(self):
return self._device.get('is_video_history_enabled')
@property
def is_audio_enabled(self):
return self._device.get('is_audio_input_enabled')
@property
def is_public_share_enabled(self):
return self._device.get('is_public_share_enabled')
@property
def capabilities(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['capabilities']
@property
def cvr(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['cvr_enrolled']
@property
def nexustalk_host(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['direct_nexustalk_host']
@property
def download_host(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['download_host']
@property
def last_connected(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['last_connected_time']
@property
def last_cuepoint(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['last_cuepoint']
@property
def live_stream(self):
# return self._device['live_stream_host']
raise NotImplementedError("No longer available in Nest API")
@property
def mac_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['mac_address']
@property
def model(self):
return self._device['model']
@property
def nexus_api_http_server_url(self):
# return self._device['nexus_api_http_server_url']
raise NotImplementedError("No longer available in Nest API")
@property
def streaming_state(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['streaming_state']
@property
def component_hum_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_hum_test_passed']
@property
def component_led_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_led_test_passed']
@property
def component_pir_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_pir_test_passed']
@property
def component_smoke_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_smoke_test_passed']
@property
def component_temp_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_temp_test_passed']
@property
def component_us_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_us_test_passed']
@property
def component_wifi_test_passed(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['component_wifi_test_passed']
@property
def creation_time(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['creation_time']
@property
def device_external_color(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['device_external_color']
@property
def device_locale(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['device_locale']
@property
def fabric_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['fabric_id']
@property
def factory_loaded_languages(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['factory_loaded_languages']
@property
def installed_locale(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['installed_locale']
@property
def kl_software_version(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['kl_software_version']
@property
def product_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['product_id']
@property
def resource_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['resource_id']
@property
def software_version(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['software_version']
@property
def spoken_where_id(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['spoken_where_id']
@property
def thread_mac_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['thread_mac_address']
@property
def where_id(self):
return self._device['where_id']
@property
def wifi_ip_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_ip_address']
@property
def wifi_mac_address(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_mac_address']
@property
def wifi_regulatory_domain(self):
raise NotImplementedError("No longer available in Nest API")
# return self._device['wifi_regulatory_domain']
@property
def snapshot_url(self):
if self._device['snapshot_url'] != SIMULATOR_SNAPSHOT_URL:
return self._device['snapshot_url']
else:
return SIMULATOR_SNAPSHOT_PLACEHOLDER_URL
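# Illustrative use of the Camera helpers above (`napi` stands in for a
# hypothetical nest.Nest instance):
#     for cam in napi.cameras:
#         if cam.motion_detected or cam.person_detected:
#             print(cam.last_event.image_url)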
class Structure(NestBase):
@property
def _structure(self):
return self._nest_api._status[STRUCTURES][self._serial]
def _set_away(self, value, auto_away=False):
self._set('structures', {'away': AWAY_MAP[value]})
@property
def away(self):
return self._structure['away']
@away.setter
def away(self, value):
self._set_away(value)
@property
def country_code(self):
return self._structure.get('country_code')
@property
def devices(self):
raise NotImplementedError("Use thermostats instead")
@property
def thermostats(self):
if THERMOSTATS in self._structure:
return [Thermostat(devid, self._nest_api)
for devid in self._structure[THERMOSTATS]]
else:
return []
@property
def protectdevices(self):
raise NotImplementedError("Use smoke_co_alarms instead")
@property
def smoke_co_alarms(self):
if SMOKE_CO_ALARMS in self._structure:
return [SmokeCoAlarm(devid, self._nest_api)
for devid in self._structure[SMOKE_CO_ALARMS]]
else:
return []
@property
def cameradevices(self):
raise NotImplementedError("Use cameras instead")
@property
def cameras(self):
if CAMERAS in self._structure:
return [Camera(devid, self._nest_api)
for devid in self._structure[CAMERAS]]
else:
return []
@property
def dr_reminder_enabled(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['dr_reminder_enabled']
@property
def emergency_contact_description(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['emergency_contact_description']
@property
def emergency_contact_type(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['emergency_contact_type']
@property
def emergency_contact_phone(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['emergency_contact_phone']
@property
def enhanced_auto_away_enabled(self):
        # FIXME there is probably an equivalent thing for this
raise NotImplementedError("Deprecated Nest API")
# return self._structure['topaz_enhanced_auto_away_enabled']
@property
def eta_preconditioning_active(self):
        # FIXME there is probably an equivalent thing for this
# or something that can be recommended
raise NotImplementedError("Deprecated Nest API")
# return self._structure['eta_preconditioning_active']
@property
def house_type(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['house_type']
@property
def hvac_safety_shutoff_enabled(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['hvac_safety_shutoff_enabled']
@property
def name(self):
return self._structure['name']
@name.setter
def name(self, value):
self._set('structure', {'name': value})
@property
def location(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure.get('location')
@property
def address(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure.get('street_address')
@property
def num_thermostats(self):
if THERMOSTATS in self._structure:
return len(self._structure[THERMOSTATS])
else:
return 0
@property
def num_cameras(self):
if CAMERAS in self._structure:
return len(self._structure[CAMERAS])
else:
return 0
@property
def num_smokecoalarms(self):
if SMOKE_CO_ALARMS in self._structure:
return len(self._structure[SMOKE_CO_ALARMS])
else:
return 0
@property
def measurement_scale(self):
raise NotImplementedError(
"Deprecated Nest API, see temperature_scale on "
"thermostats instead")
# return self._structure['measurement_scale']
@property
def postal_code(self):
# TODO check permissions if this is empty?
return self._structure.get('postal_code')
@property
def renovation_date(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['renovation_date']
@property
def structure_area(self):
raise NotImplementedError("Deprecated Nest API")
# return self._structure['structure_area']
@property
def time_zone(self):
if 'time_zone' in self._structure:
return self._structure['time_zone']
@property
def peak_period_start_time(self):
if 'peak_period_start_time' in self._structure:
return parse_time(self._structure['peak_period_start_time'])
@property
def peak_period_end_time(self):
if 'peak_period_end_time' in self._structure:
return parse_time(self._structure['peak_period_end_time'])
@property
def eta_begin(self):
if 'eta_begin' in self._structure:
return parse_time(self._structure['eta_begin'])
@property
def wheres(self):
return self._structure['wheres']
@wheres.setter
def wheres(self, value):
self._set('where', {'wheres': value})
def add_where(self, name, ident=None):
name = name.lower()
if name in self.wheres:
return self.wheres[name]
name = ' '.join([n.capitalize() for n in name.split()])
wheres = copy.copy(self.wheres)
if ident is None:
ident = str(uuid.uuid4())
wheres.append({'name': name, 'where_id': ident})
self.wheres = wheres
return self.add_where(name)
def remove_where(self, name):
name = name.lower()
if name not in self.wheres:
return None
ident = self.wheres[name]
wheres = [w for w in copy.copy(self.wheres)
if w['name'] != name and w['where_id'] != ident]
self.wheres = wheres
return ident
class Nest(object):
def __init__(self, username=None, password=<PASSWORD>, cache_ttl=270,
user_agent=None,
access_token=None, access_token_cache_file=None,
local_time=False,
client_id=None, client_secret=None,
product_version=None):
self._urls = {}
self._limits = {}
self._user = None
self._userid = None
self._weave = None
self._staff = False
self._superuser = False
self._email = None
self._cache_ttl = cache_ttl
self._cache = (None, 0)
if local_time:
raise ValueError("local_time no longer supported")
if user_agent:
raise ValueError("user_agent no longer supported")
def auth_callback(result):
self._access_token = result['access_token']
self._access_token = access_token
self._client_id = client_id
self._client_secret = client_secret
self._product_version = product_version
self._session = requests.Session()
auth = NestAuth(client_id=self._client_id,
client_secret=self._client_secret,
session=self._session, access_token=access_token,
access_token_cache_file=access_token_cache_file)
self._session.auth = auth
@property
def authorization_required(self):
return self.never_authorized or \
self.invalid_access_token or \
self.client_version_out_of_date
@property
def never_authorized(self):
return self.access_token is None
@property
def invalid_access_token(self):
try:
self._status
return False
except AuthorizationError:
return True
@property
def client_version_out_of_date(self):
if self._product_version is not None:
self._bust_cache()
try:
return self.client_version < self._product_version
# an error means they need to authorize anyways
except AuthorizationError:
return True
return False
@property
def authorize_url(self):
state = hashlib.md5(os.urandom(32)).hexdigest()
return AUTHORIZE_URL.format(self._client_id, state)
def request_token(self, pin):
self._session.auth.pin = pin
self._session.auth.login()
@property
def access_token(self):
return self._access_token or self._session.auth.access_token
def _request(self, verb, path="/", data=None):
url = "%s%s" % (API_URL, path)
if data is not None:
data = json.dumps(data)
response = self._session.request(verb, url,
allow_redirects=False,
data=data)
if response.status_code == 200:
return response.json()
if response.status_code == 401:
raise AuthorizationError(response)
if response.status_code != 307:
raise APIError(response)
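        # Anything that reaches this point was a 307 redirect: re-issue the
        # same request manually against the Location target, since redirects
        # are not followed automatically above.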
redirect_url = response.headers['Location']
response = self._session.request(verb, redirect_url,
allow_redirects=False,
data=data)
# TODO check for 429 status code for too frequent access.
# see https://developers.nest.com/documentation/cloud/data-rate-limits
if 400 <= response.status_code < 600:
raise APIError(response)
return response.json()
def _get(self, path="/"):
return self._request('GET', path)
def _put(self, path="/", data=None):
return self._request('PUT', path, data=data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return False
@property
def _status(self):
value, last_update = self._cache
now = time.time()
if not value or now - last_update > self._cache_ttl:
value = self._get("/")
self._cache = (value, now)
return value
@property
def _metadata(self):
return self._status[METADATA]
@property
def client_version(self):
return self._metadata['client_version']
@property
def _devices(self):
return self._status[DEVICES]
def _bust_cache(self):
self._cache = (None, 0)
@property
def devices(self):
raise NotImplementedError("Use thermostats instead")
@property
def thermostats(self):
return [Thermostat(devid, self)
for devid in self._devices.get(THERMOSTATS, [])]
@property
def protectdevices(self):
raise NotImplementedError("Use smoke_co_alarms instead")
@property
def smoke_co_alarms(self):
return [SmokeCoAlarm(devid, self)
for devid in self._devices.get(SMOKE_CO_ALARMS, [])]
@property
def cameradevices(self):
raise NotImplementedError("Use cameras instead")
@property
def cameras(self):
return [Camera(devid, self)
for devid in self._devices.get(CAMERAS, [])]
@property
def structures(self):
return [Structure(stid, self)
for stid in self._status[STRUCTURES]]
@property
def urls(self):
raise NotImplementedError("Deprecated Nest API")
@property
def user(self):
raise NotImplementedError("Deprecated Nest API")
```
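A minimal usage sketch of the `Nest` client defined above, assuming the package is importable as `nest` and that OAuth client credentials plus a token cache file are available; `napi` and the credential strings are illustrative placeholders, not values from this document.
```python
import nest  # assumes the package above is installed and importable as `nest`

napi = nest.Nest(client_id='YOUR-CLIENT-ID',          # placeholder
                 client_secret='YOUR-CLIENT-SECRET',  # placeholder
                 access_token_cache_file='nest_token.json')

# One-time PIN-based authorization, as exposed by authorize_url/request_token.
if napi.authorization_required:
    print('Authorize at:', napi.authorize_url)
    napi.request_token(input('PIN: '))

for structure in napi.structures:
    print(structure.name, structure.away)
    for thermostat in structure.thermostats:
        print(thermostat.temperature, thermostat.target, thermostat.hvac_state)
        # heat-cool mode expects a (low, high) pair; other modes take one value
        if thermostat.mode != 'heat-cool':
            thermostat.target = thermostat.temperature
```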
#### File: site-packages/nest/utils.py
```python
import decimal
CELSIUS = 'C'
FAHRENHEIT = 'F'
_THIRTYTWO = decimal.Decimal(32)
_ONEPOINTEIGHT = decimal.Decimal(18) / decimal.Decimal(10)
_TENPOINTSEVENSIXFOUR = decimal.Decimal(10764) / decimal.Decimal(1000)
def f_to_c(temp):
temp = decimal.Decimal(temp)
return float((temp - _THIRTYTWO) / _ONEPOINTEIGHT)
def c_to_f(temp):
temp = decimal.Decimal(temp)
return float(temp * _ONEPOINTEIGHT + _THIRTYTWO)
def ft2_to_m2(area):
area = decimal.Decimal(area)
return float(area / _TENPOINTSEVENSIXFOUR)
def m2_to_ft2(area):
area = decimal.Decimal(area)
return float(area * _TENPOINTSEVENSIXFOUR)
```
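A quick sketch of the converters above; the module path follows the file header, and the expected values are simply what the Decimal-based formulas yield.
```python
from nest.utils import c_to_f, f_to_c, m2_to_ft2

print(c_to_f(21.0))   # 69.8
print(m2_to_ft2(50))  # 538.2
print(f_to_c(69.8))   # back to (approximately) 21.0
```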
#### File: netdisco/discoverables/belkin_wemo.py
```python
from . import SSDPDiscoverable
from ..const import ATTR_MAC_ADDRESS
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Belkin WeMo platform devices."""
def info_from_entry(self, entry):
"""Return most important info from a uPnP entry."""
info = super().info_from_entry(entry)
device = entry.description['device']
info[ATTR_MAC_ADDRESS] = device.get('macAddress', '')
return info
def get_entries(self):
"""Return all Belkin Wemo entries."""
return self.find_by_device_description(
{'manufacturer': 'Belkin International Inc.'})
```
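These discoverable classes are not used directly; netdisco loads them by module name. A rough consumption sketch, assuming netdisco's top-level `NetworkDiscovery` interface (treat the import path and method names as assumptions if your version differs):
```python
from netdisco.discovery import NetworkDiscovery

netdis = NetworkDiscovery()
netdis.scan()                           # run the SSDP/mDNS/other scanners
for device_type in netdis.discover():   # e.g. 'belkin_wemo', 'daikin', ...
    print(device_type, netdis.get_info(device_type))
netdis.stop()
```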
#### File: netdisco/discoverables/daikin.py
```python
from . import BaseDiscoverable
class Discoverable(BaseDiscoverable):
"""Add support for discovering a Daikin device."""
def __init__(self, netdis):
"""Initialize the Daikin discovery."""
self._netdis = netdis
def get_entries(self):
"""Get all the Daikin details."""
return self._netdis.daikin.entries
```
#### File: netdisco/discoverables/frontier_silicon.py
```python
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering frontier silicon devices."""
def get_entries(self):
"""Get all the frontier silicon uPnP entries."""
return [entry for entry in self.netdis.ssdp.all()
if entry.st and 'fsapi' in entry.st and
'urn:schemas-frontier-silicon-com' in entry.st]
```
#### File: netdisco/discoverables/harmony.py
```python
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Harmony Hub remotes"""
def get_entries(self):
"""Get all the Harmony uPnP entries."""
return self.find_by_device_description({
"manufacturer": "Logitech",
"deviceType": "urn:myharmony-com:device:harmony:1"
})
```
#### File: netdisco/discoverables/ikea_tradfri.py
```python
from . import MDNSDiscoverable
# pylint: disable=too-few-public-methods
class Discoverable(MDNSDiscoverable):
"""Add support for discovering Ikea Tradfri devices."""
def __init__(self, nd):
"""Initialize the Cast discovery."""
super(Discoverable, self).__init__(nd, '_coap._udp.local.')
```
#### File: netdisco/discoverables/netgear_router.py
```python
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Netgear routers."""
def get_entries(self):
"""Get all the Netgear uPnP entries."""
return self.find_by_device_description({
"manufacturer": "NETGEAR, Inc.",
"deviceType": "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
})
```
#### File: netdisco/discoverables/roku.py
```python
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Roku media players."""
def get_entries(self):
"""Get all the Roku entries."""
return self.find_by_st("roku:ecp")
```
#### File: netdisco/discoverables/samsung_tv.py
```python
from . import SSDPDiscoverable
from ..const import ATTR_NAME
# For some models, Samsung forces a [TV] prefix to the user-specified name.
FORCED_NAME_PREFIX = '[TV]'
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Samsung Smart TV services."""
def get_entries(self):
"""Get all the Samsung RemoteControlReceiver entries."""
return self.find_by_st(
"urn:samsung.com:device:RemoteControlReceiver:1")
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
info = super().info_from_entry(entry)
# Strip the forced prefix, if present
if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX):
info[ATTR_NAME] = info[ATTR_NAME][len(FORCED_NAME_PREFIX):].strip()
return info
```
#### File: netdisco/discoverables/sonos.py
```python
from . import SSDPDiscoverable
# pylint: disable=too-few-public-methods
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Sonos devices."""
def get_entries(self):
"""Get all the Sonos device uPnP entries."""
return self.find_by_st("urn:schemas-upnp-org:device:ZonePlayer:1")
```
#### File: netdisco/discoverables/tellstick.py
```python
from . import BaseDiscoverable
class Discoverable(BaseDiscoverable):
"""Add support for discovering a Tellstick device."""
def __init__(self, netdis):
"""Initialize the Tellstick discovery."""
self._netdis = netdis
def get_entries(self):
"""Get all the Tellstick details."""
return self._netdis.tellstick.entries
```
#### File: netdisco/discoverables/yeelight.py
```python
import logging
from . import MDNSDiscoverable
from ..const import ATTR_DEVICE_TYPE
# pylint: disable=too-few-public-methods
class Discoverable(MDNSDiscoverable):
"""Add support for discovering Yeelight."""
def __init__(self, nd):
"""Initialize the Yeelight discovery."""
super(Discoverable, self).__init__(nd, '_miio._udp.local.')
def info_from_entry(self, entry):
"""Return most important info from mDNS entries."""
info = super().info_from_entry(entry)
device_type = "UNKNOWN"
if entry.name.startswith("yeelink-light-color1_"):
device_type = "rgb"
elif entry.name.startswith("yeelink-light-mono1_"):
device_type = "white"
elif entry.name.startswith("yeelink-light-strip1_"):
device_type = "strip"
elif entry.name.startswith("yeelink-light-bslamp1_"):
device_type = "bedside"
else:
logging.warning("Unknown miio device found: %s", entry)
info[ATTR_DEVICE_TYPE] = device_type
return info
def get_entries(self):
""" Return yeelight devices. """
return self.find_by_device_name('yeelink-light-')
```
#### File: site-packages/openzwave/controller.py
```python
import os, sys
import six
if six.PY3:
from pydispatch import dispatcher
from urllib.request import urlopen
else:
from louie import dispatcher
from urllib2 import urlopen
import zipfile
import tempfile
import threading
import shutil
import time
from openzwave.object import ZWaveObject, deprecated
from libopenzwave import PyStatDriver, PyControllerState
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
"""NullHandler logger for python 2.6"""
def emit(self, record):
pass
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
class ZWaveController(ZWaveObject):
'''
The controller manager.
    Allows you to retrieve information about the library, statistics, ...
Also used to send commands to the controller
Commands :
- Driver::ControllerCommand_AddController : Add a new secondary controller to the Z-Wave network.
- Driver::ControllerCommand_AddDevice : Add a new device (but not a controller) to the Z-Wave network.
- Driver::ControllerCommand_CreateNewPrimary : (Not yet implemented)
- Driver::ControllerCommand_ReceiveConfiguration :
- Driver::ControllerCommand_RemoveController : remove a controller from the Z-Wave network.
- Driver::ControllerCommand_RemoveDevice : remove a device (but not a controller) from the Z-Wave network.
- Driver::ControllerCommand_RemoveFailedNode : move a node to the controller's list of failed nodes. The node must actually
have failed or have been disabled since the command will fail if it responds. A node must be in the controller's failed nodes list
    for ControllerCommand_ReplaceFailedNode to work.
- Driver::ControllerCommand_HasNodeFailed : Check whether a node is in the controller's failed nodes list.
- Driver::ControllerCommand_ReplaceFailedNode : replace a failed device with another. If the node is not in
the controller's failed nodes list, or the node responds, this command will fail.
    - Driver::ControllerCommand_TransferPrimaryRole : (Not yet implemented) - Add a new controller to the network and
make it the primary. The existing primary will become a secondary controller.
- Driver::ControllerCommand_RequestNetworkUpdate : Update the controller with network information from the SUC/SIS.
- Driver::ControllerCommand_RequestNodeNeighborUpdate : Get a node to rebuild its neighbour list. This method also does ControllerCommand_RequestNodeNeighbors afterwards.
- Driver::ControllerCommand_AssignReturnRoute : Assign a network return route to a device.
- Driver::ControllerCommand_DeleteAllReturnRoutes : Delete all network return routes from a device.
- Driver::ControllerCommand_CreateButton : Create a handheld button id.
- Driver::ControllerCommand_DeleteButton : Delete a handheld button id.
Callbacks :
- Driver::ControllerState_Waiting : The controller is waiting for a user action. A notice should be displayed
to the user at this point, telling them what to do next.
For the add, remove, replace and transfer primary role commands, the user needs to be told to press the
inclusion button on the device that is going to be added or removed. For ControllerCommand_ReceiveConfiguration,
they must set their other controller to send its data, and for ControllerCommand_CreateNewPrimary, set the other
controller to learn new data.
- Driver::ControllerState_InProgress : the controller is in the process of adding or removing the chosen node. It is now too late to cancel the command.
- Driver::ControllerState_Complete : the controller has finished adding or removing the node, and the command is complete.
- Driver::ControllerState_Failed : will be sent if the command fails for any reason.
'''
#@deprecated
SIGNAL_CTRL_NORMAL = 'Normal'
#@deprecated
SIGNAL_CTRL_STARTING = 'Starting'
#@deprecated
SIGNAL_CTRL_CANCEL = 'Cancel'
#@deprecated
SIGNAL_CTRL_ERROR = 'Error'
#@deprecated
SIGNAL_CTRL_WAITING = 'Waiting'
#@deprecated
SIGNAL_CTRL_SLEEPING = 'Sleeping'
#@deprecated
SIGNAL_CTRL_INPROGRESS = 'InProgress'
#@deprecated
SIGNAL_CTRL_COMPLETED = 'Completed'
#@deprecated
SIGNAL_CTRL_FAILED = 'Failed'
#@deprecated
SIGNAL_CTRL_NODEOK = 'NodeOK'
#@deprecated
SIGNAL_CTRL_NODEFAILED = 'NodeFailed'
STATE_NORMAL = 'Normal'
STATE_STARTING = 'Starting'
STATE_CANCEL = 'Cancel'
STATE_ERROR = 'Error'
STATE_WAITING = 'Waiting'
STATE_SLEEPING = 'Sleeping'
STATE_INPROGRESS = 'InProgress'
STATE_COMPLETED = 'Completed'
STATE_FAILED = 'Failed'
STATE_NODEOK = 'NodeOK'
STATE_NODEFAILED = 'NodeFailed'
INT_NORMAL = 0
INT_STARTING = 1
INT_CANCEL = 2
INT_ERROR = 3
INT_WAITING = 4
INT_SLEEPING = 5
INT_INPROGRESS = 6
INT_COMPLETED = 7
INT_FAILED = 8
INT_NODEOK = 9
INT_NODEFAILED = 10
#@deprecated
SIGNAL_CONTROLLER = 'Message'
SIGNAL_CONTROLLER_STATS = 'ControllerStats'
#@deprecated
CMD_NONE = 0
#@deprecated
CMD_ADDDEVICE = 1
#@deprecated
CMD_CREATENEWPRIMARY = 2
#@deprecated
CMD_RECEIVECONFIGURATION = 3
#@deprecated
CMD_REMOVEDEVICE = 4
#@deprecated
CMD_REMOVEFAILEDNODE = 5
#@deprecated
CMD_HASNODEFAILED = 6
#@deprecated
CMD_REPLACEFAILEDNODE = 7
#@deprecated
CMD_TRANSFERPRIMARYROLE = 8
#@deprecated
CMD_REQUESTNETWORKUPDATE = 9
#@deprecated
CMD_REQUESTNODENEIGHBORUPDATE = 10
#@deprecated
CMD_ASSIGNRETURNROUTE = 11
#@deprecated
CMD_DELETEALLRETURNROUTES = 12
#@deprecated
CMD_SENDNODEINFORMATION = 13
#@deprecated
CMD_REPLICATIONSEND = 14
#@deprecated
CMD_CREATEBUTTON = 15
#@deprecated
CMD_DELETEBUTTON = 16
def __init__(self, controller_id, network, options=None):
"""
Initialize controller object
:param controller_id: The Id of the controller
:type controller_id: int
:param network: The network the controller is attached to
:type network: ZwaveNetwork
:param options: options of the manager
:type options: str
"""
if controller_id is None:
controller_id = 1
ZWaveObject.__init__(self, controller_id, network)
self._node = None
self._options = options
self._library_type_name = None
self._library_version = None
self._python_library_version = None
self._timer_statistics = None
self._interval_statistics = 0.0
self._ctrl_lock = threading.Lock()
#~ self._manager_last = None
self._ctrl_last_state = self.STATE_NORMAL
self._ctrl_last_stateint = self.INT_NORMAL
#~ self._ctrl_last_message = ""
self.STATES_LOCKED = [self.STATE_STARTING, self.STATE_WAITING, self.STATE_SLEEPING, self.STATE_INPROGRESS]
self.STATES_UNLOCKED = [self.STATE_NORMAL, self.STATE_CANCEL, self.STATE_ERROR, self.STATE_COMPLETED, self.STATE_FAILED, self.STATE_NODEOK, self.STATE_NODEFAILED]
def stop(self):
"""
Stop the controller and all this threads.
"""
self.cancel_command()
if self._timer_statistics is not None:
self._timer_statistics.cancel()
for i in range(0, 60):
if self.send_queue_count <= 0:
break
else:
try:
self._network.network_event.wait(1.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
self.kill_command()
logger.debug(u"Wait for empty send_queue during %s second(s).", i)
def __str__(self):
"""
The string representation of the node.
:rtype: str
"""
node_name = ""
product_name = ""
if self._node is not None:
node_name = self._node.name
product_name = self._node.product_name
return u'home_id: [%s] id: [%s] name: [%s] product: [%s] capabilities: %s library: [%s]' % \
(self._network.home_id_str, self._object_id, node_name, product_name, self.capabilities, self.library_description)
@property
def node(self):
"""
The node controller on the network.
:return: The node controller on the network
:rtype: ZWaveNode
"""
return self._node
@node.setter
def node(self, value):
"""
The node controller on the network.
:param value: The node of the controller on the network
:type value: ZWaveNode
"""
self._node = value
@property
def node_id(self):
"""
The node Id of the controller on the network.
:return: The node id of the controller on the network
:rtype: int
"""
if self.node is not None:
return self.node.object_id
else:
return None
@property
def name(self):
"""
The node name of the controller on the network.
:return: The node's name of the controller on the network
:rtype: str
"""
if self.node is not None:
return self.node.name
else:
return None
@property
def library_type_name(self):
"""
The name of the library.
:return: The cpp library name
:rtype: str
"""
return self._network.manager.getLibraryTypeName(self._network.home_id)
@property
def library_description(self):
"""
The description of the library.
:return: The library description (name and version)
:rtype: str
"""
return '%s version %s' % (self.library_type_name, self.library_version)
@property
def library_version(self):
"""
The version of the library.
:return: The cpp library version
:rtype: str
"""
return self._network.manager.getLibraryVersion(self._network.home_id)
@property
def python_library_flavor(self):
"""
The flavor of the python library.
:return: The python library flavor
:rtype: str
"""
return self._network.manager.getPythonLibraryFlavor()
@property
def python_library_version(self):
"""
The version of the python library.
:return: The python library version
:rtype: str
"""
return self._network.manager.getPythonLibraryVersionNumber()
@property
def python_library_config_version(self):
"""
The version of the config for python library.
:return: The python library config version
:rtype: str
"""
tversion = "Original %s" % self.library_version
fversion = os.path.join(self.library_config_path, 'pyozw_config.version')
if os.path.isfile(fversion):
with open(fversion, 'r') as f:
val = f.read()
tversion = "Git %s" % val
return tversion
@property
def ozw_library_version(self):
"""
The version of the openzwave library.
:return: The openzwave library version
:rtype: str
"""
return self._network.manager.getOzwLibraryVersion()
@property
def library_config_path(self):
"""
The library Config path.
:return: The library config directory
:rtype: str
"""
if self._options is not None:
return self._options.config_path
else:
return None
@property
def library_user_path(self):
"""
The library User path.
:return: The user directory to store user configuration
:rtype: str
"""
if self._options is not None:
return self._options.user_path
else:
return None
@property
def device(self):
"""
The device path.
:return: The device (ie /dev/zwave)
:rtype: str
"""
if self._options is not None:
return self._options.device
else:
return None
@property
def options(self):
"""
The starting options of the manager.
:return: The options used to start the manager
:rtype: ZWaveOption
"""
return self._options
@property
def stats(self):
"""
Retrieve statistics from driver.
Statistics:
* s_SOFCnt : Number of SOF bytes received
* s_ACKWaiting : Number of unsolicited messages while waiting for an ACK
* s_readAborts : Number of times read were aborted due to timeouts
* s_badChecksum : Number of bad checksums
* s_readCnt : Number of messages successfully read
* s_writeCnt : Number of messages successfully sent
* s_CANCnt : Number of CAN bytes received
* s_NAKCnt : Number of NAK bytes received
* s_ACKCnt : Number of ACK bytes received
* s_OOFCnt : Number of bytes out of framing
* s_dropped : Number of messages dropped & not delivered
* s_retries : Number of messages retransmitted
* s_controllerReadCnt : Number of controller messages read
* s_controllerWriteCnt : Number of controller messages sent
:return: Statistics of the controller
:rtype: dict()
"""
return self._network.manager.getDriverStatistics(self.home_id)
def get_stats_label(self, stat):
"""
Retrieve label of the statistic from driver.
:param stat: The code of the stat label to retrieve.
:type stat:
:return: The label or the stat.
:rtype: str
"""
#print "stat = %s" % stat
return PyStatDriver[stat]
def do_poll_statistics(self):
"""
Timer based polling system for statistics
"""
self._timer_statistics = None
stats = self.stats
dispatcher.send(self.SIGNAL_CONTROLLER_STATS, \
**{'controller':self, 'stats':stats})
self._timer_statistics = threading.Timer(self._interval_statistics, self.do_poll_statistics)
self._timer_statistics.start()
@property
def poll_stats(self):
"""
The interval for polling statistics
:return: The interval in seconds
:rtype: float
"""
return self._interval_statistics
@poll_stats.setter
def poll_stats(self, value):
"""
The interval for polling statistics
:return: The interval in seconds
:rtype: ZWaveNode
:param value: The interval in seconds
:type value: float
"""
if value != self._interval_statistics:
if self._timer_statistics is not None:
self._timer_statistics.cancel()
if value != 0:
self._interval_statistics = value
self._timer_statistics = threading.Timer(self._interval_statistics, self.do_poll_statistics)
self._timer_statistics.start()
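    # Note: do_poll_statistics() re-arms its own threading.Timer, so once
    # poll_stats is set to a non-zero interval, SIGNAL_CONTROLLER_STATS keeps
    # firing every `poll_stats` seconds until stop() cancels the timer.
    # Illustrative use (listener name is hypothetical):
    #     network.controller.poll_stats = 60.0
    #     dispatcher.connect(on_controller_stats,
    #                        ZWaveController.SIGNAL_CONTROLLER_STATS)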
@property
def capabilities(self):
"""
The capabilities of the controller.
:return: The capabilities of the controller
:rtype: set
"""
caps = set()
if self.is_primary_controller:
caps.add('primaryController')
if self.is_static_update_controller:
caps.add('staticUpdateController')
if self.is_bridge_controller:
caps.add('bridgeController')
return caps
@property
def is_primary_controller(self):
"""
Is this node a primary controller of the network.
:rtype: bool
"""
return self._network.manager.isPrimaryController(self.home_id)
@property
def is_static_update_controller(self):
"""
Is this controller a static update controller (SUC).
:rtype: bool
"""
return self._network.manager.isStaticUpdateController(self.home_id)
@property
def is_bridge_controller(self):
"""
Is this controller using the bridge controller library.
:rtype: bool
"""
return self._network.manager.isBridgeController(self.home_id)
@property
def send_queue_count(self):
"""
Get count of messages in the outgoing send queue.
:return: The count of messages in the outgoing send queue.
:rtype: int
"""
if self.home_id is not None:
return self._network.manager.getSendQueueCount(self.home_id)
return -1
def hard_reset(self):
"""
Hard Reset a PC Z-Wave Controller.
Resets a controller and erases its network configuration settings.
The controller becomes a primary controller ready to add devices to a new network.
This command fires a lot of louie signals.
Louie's clients must disconnect from nodes and values signals
.. code-block:: python
dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED, **{'network': self._network})
"""
self._network.state = self._network.STATE_RESETTED
dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED, \
**{'network':self._network})
self._network.manager.resetController(self._network.home_id)
try:
self.network.network_event.wait(5.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
def soft_reset(self):
"""
Soft Reset a PC Z-Wave Controller.
Resets a controller without erasing its network configuration settings.
"""
self._network.manager.softResetController(self.home_id)
def create_new_primary(self):
'''Create a new primary controller when old primary fails. Requires SUC.
This command creates a new Primary Controller when the Old Primary has Failed. Requires a SUC on the network to function.
        Results of the CreateNewPrimary Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'create_new_primary')
return self._network.manager.createNewPrimary(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'create_new_primary')
return False
def transfer_primary_role(self):
'''
Add a new controller to the network and make it the primary.
The existing primary will become a secondary controller.
        Results of the TransferPrimaryRole Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'transfer_primary_role')
return self._network.manager.transferPrimaryRole(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'create_new_primary')
return False
def receive_configuration(self):
'''Receive network configuration information from primary controller. Requires secondary.
        This command prepares the controller to receive Network Configuration from a Secondary Controller.
        Results of the ReceiveConfiguration Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'receive_configuration')
return self._network.manager.receiveConfiguration(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'receive_configuration')
return False
def add_node(self, doSecurity=False):
'''Start the Inclusion Process to add a Node to the Network.
The Status of the Node Inclusion is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
        Results of the AddNode Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param doSecurity: Whether to initialize the Network Key on the device if it supports the Security CC
:type doSecurity: bool
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : secure : %s', 'add_node', doSecurity)
return self._network.manager.addNode(self.home_id, doSecurity)
else:
logger.warning(u"Can't lock controller for command : %s", 'add_node')
return False
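    # Illustrative use (assuming an initialized ZWaveNetwork `network`):
    #     if network.controller.add_node(doSecurity=True):
    #         pass  # then watch Notification::Type_ControllerCommand for progress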
def remove_node(self):
'''Remove a Device from the Z-Wave Network
The Status of the Node Removal is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
        Results of the RemoveNode Command will be sent as a Notification with the Notification type as
        Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'remove_node')
return self._network.manager.removeNode(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'remove_node')
return False
def remove_failed_node(self, nodeid):
'''Remove a Failed Device from the Z-Wave Network
        This Command will remove a failed node from the network. The Node should be on the Controller's Failed
Node List, otherwise this command will fail. You can use the HasNodeFailed function below to test if the Controller
believes the Node has Failed.
The Status of the Node Removal is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
        Results of the RemoveFailedNode Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'remove_failed_node', nodeid)
return self._network.manager.removeFailedNode(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'remove_failed_node')
return False
def has_node_failed(self, nodeid):
'''Check if the Controller Believes a Node has Failed.
        This is different from the IsNodeFailed call in that we test the Controller's Failed Node List, whereas IsNodeFailed is testing
our list of Failed Nodes, which might be different.
The Results will be communicated via Notifications. Specifically, you should monitor the ControllerCommand notifications
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'has_node_failed', nodeid)
return self._network.manager.hasNodeFailed(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'has_node_failed')
return False
def request_node_neighbor_update(self, nodeid):
'''Ask a Node to update its Neighbor Tables
This command will ask a Node to update its Neighbor Tables.
        Results of the RequestNodeNeighborUpdate Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'request_node_neighbor_update', nodeid)
return self._network.manager.requestNodeNeighborUpdate(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'request_node_neighbor_update')
return False
def assign_return_route(self, nodeid):
        '''Ask a Node to update its Return Route to the Controller
        This command will ask a Node to update its Return Route to the Controller
        Results of the AssignReturnRoute Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'assign_return_route', nodeid)
return self._network.manager.assignReturnRoute(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'assign_return_route')
return False
def delete_all_return_routes(self, nodeid):
        '''Ask a Node to delete all Return Routes.
        This command will ask a Node to delete all its return routes, and will rediscover when needed.
        Results of the DeleteAllReturnRoutes Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'delete_all_return_routes', nodeid)
return self._network.manager.deleteAllReturnRoutes(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'delete_all_return_routes')
return False
def send_node_information(self, nodeid):
'''Send a NIF frame from the Controller to a Node.
        This command sends a NIF frame from the Controller to a Node
        Results of the SendNodeInformation Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'send_node_information', nodeid)
return self._network.manager.sendNodeInformation(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'send_node_information')
return False
def replace_failed_node(self, nodeid):
'''Replace a failed device with another.
If the node is not in the controller's failed nodes list, or the node responds, this command will fail.
You can check if a Node is in the Controllers Failed node list by using the HasNodeFailed method.
        Results of the ReplaceFailedNode Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'replace_failed_node', nodeid)
return self._network.manager.replaceFailedNode(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'replace_failed_node')
return False
def request_network_update(self, nodeid):
'''Update the controller with network information from the SUC/SIS.
        Results of the RequestNetworkUpdate Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'request_network_update', nodeid)
return self._network.manager.requestNetworkUpdate(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'request_network_update')
return False
def replication_send(self, nodeid):
'''Send information from primary to secondary
        Results of the ReplicationSend Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'replication_send', nodeid)
return self._network.manager.replicationSend(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'replication_send')
return False
def create_button(self, nodeid, buttonid):
'''Create a handheld button id.
Only intended for Bridge Firmware Controllers.
        Results of the CreateButton Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s, button : %s', 'create_button', nodeid, buttonid)
return self._network.manager.createButton(self.home_id, nodeid, buttonid)
else:
logger.warning(u"Can't lock controller for command : %s", 'create_button')
return False
def delete_button(self, nodeid, buttonid):
'''Delete a handheld button id.
Only intended for Bridge Firmware Controllers.
        Results of the DeleteButton Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s, button : %s', 'delete_button', nodeid, buttonid)
return self._network.manager.deleteButton(self.home_id, nodeid, buttonid)
else:
logger.warning(u"Can't lock controller for command : %s", 'delete_button')
return False
def _handle_controller_command(self, args):
"""
        Called when a message from the controller is received.
        The state can be obtained here:
dispatcher.send(self.SIGNAL_CONTROLLER_WAITING, \
**{'network': self, 'controller': self.controller,
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
})
And the full command here :
dispatcher.send(self.SIGNAL_CONTROLLER_COMMAND, \
**{'network': self, 'controller': self.controller,
'node':self.nodes[args['nodeId']] if args['nodeId'] in self.nodes else None, 'node_id' : args['nodeId'],
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
'error_int': args['controllerErrorInt'], 'error': args['controllerError'], 'error_full': args['controllerErrorDoc'],
})
:param args: data sent by the notification
:type args: dict()
"""
logger.debug(u'Z-Wave ControllerCommand : %s', args)
if args['controllerState'] == self.STATE_WAITING:
dispatcher.send(self._network.SIGNAL_CONTROLLER_WAITING, \
**{'network': self._network, 'controller': self,
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
})
if args['controllerState'] in self.STATES_UNLOCKED:
try:
self._ctrl_lock.release()
except threading.ThreadError:
pass
self._ctrl_last_state = args['controllerState']
self._ctrl_last_stateint = args['controllerStateInt']
dispatcher.send(self._network.SIGNAL_CONTROLLER_COMMAND, \
**{'network': self._network, 'controller': self,
'node':self._network.nodes[args['nodeId']] if args['nodeId'] in self._network.nodes else None, 'node_id' : args['nodeId'],
'state_int': args['controllerStateInt'], 'state': args['controllerState'], 'state_full': args['controllerStateDoc'],
'error_int': args['controllerErrorInt'], 'error': args['controllerError'], 'error_full': args['controllerErrorDoc'],
})
def _lock_controller(self):
"""Try to lock the controller and generate a notification if fails
"""
if self._ctrl_lock.acquire(False):
return True
else:
dispatcher.send(self._network.SIGNAL_CONTROLLER_COMMAND, \
**{'network': self._network, 'controller': self,
'node':self, 'node_id' : self.node_id,
'state_int': self.INT_INPROGRESS, 'state': PyControllerState[self.INT_INPROGRESS], 'state_full': PyControllerState[self.INT_INPROGRESS].doc,
'error_int': self.STATE_ERROR, 'error': 'Locked', 'error_full': "Can't lock controller because a command is already in progress",
})
def request_controller_status(self):
"""
Generate a notification with the current status of the controller.
You can check the lock in your code using something like this:
if controllerState in network.controller.STATES_UNLOCKED:
hide_cancel_button()
show_command_buttons()
else:
show_cancel_button()
hide_command_buttons()
"""
dispatcher.send(self._network.SIGNAL_CONTROLLER_COMMAND, \
**{'network': self._network, 'controller': self,
'node':self, 'node_id' : self.node_id,
'state_int': self._ctrl_last_stateint, 'state': PyControllerState[self._ctrl_last_stateint], 'state_full': PyControllerState[self._ctrl_last_stateint].doc,
'error_int': 0, 'error': "None", 'error_full': "None",
})
return True
@property
def is_locked(self):
"""
        Check whether the controller is locked. Should not be used directly:
        listen to notifications and use request_controller_status to retrieve the status of the controller.
"""
return self._ctrl_lock.locked()
def cancel_command(self):
"""
Cancels any in-progress command running on a controller.
"""
try:
self._ctrl_lock.release()
except threading.ThreadError:
pass
if self.home_id is not None:
return self._network.manager.cancelControllerCommand(self.home_id)
return False
def kill_command(self):
"""
        Cancels any in-progress command running on a controller and releases the lock.
"""
try:
self._ctrl_lock.release()
except threading.ThreadError:
pass
if self.home_id is not None:
return self._network.manager.cancelControllerCommand(self.home_id)
return False
def to_dict(self, extras=['all']):
"""Return a dict representation of the controller.
        :param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret = self.node.to_dict(extras=extras)
if 'all' in extras:
extras = ['kvals', 'capabilities', 'neighbors']
if 'capabilities' in extras:
ret['capabilities'].update(dict.fromkeys(self.capabilities, 0))
ret["zw_version"] = self.library_version
ret["zw_description"] = self.library_description
ret["oz_version"] = self.ozw_library_version
ret["py_version"] = self.python_library_version
return ret
@deprecated
def begin_command_send_node_information(self, node_id):
"""
Send a node information frame.
        :param node_id: The ID of the node to send the node information frame to.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_SENDNODEINFORMATION, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_replication_send(self, high_power=False):
"""
Send information from primary to secondary.
:param high_power: Usually when adding or removing devices, the controller operates at low power so that the controller must
be physically close to the device for security reasons. If _highPower is true, the controller will
operate at normal power levels instead. Defaults to false.
:type high_power: bool
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REPLICATIONSEND, self.zwcallback, highPower=high_power)
@deprecated
def begin_command_request_network_update(self):
"""
Update the controller with network information from the SUC/SIS.
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REQUESTNETWORKUPDATE, self.zwcallback)
@deprecated
def begin_command_add_device(self, high_power=False):
"""
Add a new device to the Z-Wave network.
:param high_power: Used only with the AddDevice, AddController, RemoveDevice and RemoveController commands.
Usually when adding or removing devices, the controller operates at low power so that the controller must
be physically close to the device for security reasons. If _highPower is true, the controller will
operate at normal power levels instead. Defaults to false.
:type high_power: bool
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_ADDDEVICE, self.zwcallback, highPower=high_power)
@deprecated
def begin_command_remove_device(self, high_power=False):
"""
Remove a device from the Z-Wave network.
:param high_power: Used only with the AddDevice, AddController, RemoveDevice and RemoveController commands.
Usually when adding or removing devices, the controller operates at low power so that the controller must
be physically close to the device for security reasons. If _highPower is true, the controller will
operate at normal power levels instead. Defaults to false.
:type high_power: bool
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REMOVEDEVICE, self.zwcallback, highPower=high_power)
@deprecated
def begin_command_remove_failed_node(self, node_id):
"""
Move a node to the controller's list of failed nodes. The node must
actually have failed or have been disabled since the command
will fail if it responds. A node must be in the controller's
failed nodes list for ControllerCommand_ReplaceFailedNode to work.
        :param node_id: The ID of the failed node to remove.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REMOVEFAILEDNODE, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_has_node_failed(self, node_id):
"""
Check whether a node is in the controller's failed nodes list.
        :param node_id: The ID of the node to check.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_HASNODEFAILED, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_replace_failed_node(self, node_id):
"""
Replace a failed device with another. If the node is not in
the controller's failed nodes list, or the node responds, this command will fail.
:param node_id: Used only with the ReplaceFailedNode command, to specify the node that is going to be replaced.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REPLACEFAILEDNODE, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_request_node_neigbhor_update(self, node_id):
"""
Get a node to rebuild its neighbors list.
This method also does ControllerCommand_RequestNodeNeighbors afterwards.
        :param node_id: The ID of the node whose neighbor list will be rebuilt.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_REQUESTNODENEIGHBORUPDATE, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_create_new_primary(self):
"""
Add a new controller to the Z-Wave network. Used when old primary fails. Requires SUC.
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_CREATENEWPRIMARY, self.zwcallback)
@deprecated
def begin_command_transfer_primary_role(self, high_power=False):
"""
Make a different controller the primary.
The existing primary will become a secondary controller.
:param high_power: Used only with the AddDevice, AddController, RemoveDevice and RemoveController commands.
Usually when adding or removing devices, the controller operates at low power so that the controller must
be physically close to the device for security reasons. If _highPower is true, the controller will
operate at normal power levels instead. Defaults to false.
:type high_power: bool
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_TRANSFERPRIMARYROLE, self.zwcallback, highPower=high_power)
@deprecated
def begin_command_receive_configuration(self):
"""
        Receive network configuration information from the primary controller. Requires a secondary controller.
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_RECEIVECONFIGURATION, self.zwcallback)
@deprecated
def begin_command_assign_return_route(self, from_node_id, to_node_id):
"""
Assign a network return route from a node to another one.
        :param from_node_id: The node that will use the route.
        :type from_node_id: int
        :param to_node_id: The destination node of the return route.
        :type to_node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_ASSIGNRETURNROUTE, self.zwcallback, nodeId=from_node_id, arg=to_node_id)
@deprecated
def begin_command_delete_all_return_routes(self, node_id):
"""
Delete all network return routes from a device.
        :param node_id: The ID of the node whose return routes will be deleted.
:type node_id: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_DELETEALLRETURNROUTES, self.zwcallback, nodeId=node_id)
@deprecated
def begin_command_create_button(self, node_id, arg=0):
"""
Create a handheld button id
        :param node_id: The ID of the node on which to create the button.
:type node_id: int
:param arg:
:type arg: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_CREATEBUTTON, self.zwcallback, nodeId=node_id, arg=arg)
@deprecated
def begin_command_delete_button(self, node_id, arg=0):
"""
Delete a handheld button id.
        :param node_id: The ID of the node from which to delete the button.
:type node_id: int
:param arg:
:type arg: int
:return: True if the command was accepted and has started.
:rtype: bool
"""
return self._network.manager.beginControllerCommand(self.home_id, \
self.CMD_DELETEBUTTON, self.zwcallback, nodeId=node_id, arg=arg)
@deprecated
def zwcallback(self, args):
"""
        The Callback Handler used when sending commands to the controller.
        Dispatch a louie message.
        To do : add node in signal when necessary
        :param args: A dict containing information about the state of the controller
:type args: dict()
"""
logger.debug(u'Controller state change : %s', args)
state = args['state']
message = args['message']
self.ctrl_last_state = state
self.ctrl_last_message = message
if state == self.SIGNAL_CTRL_WAITING:
dispatcher.send(self.SIGNAL_CTRL_WAITING, \
**{'state': state, 'message': message, 'network': self._network, 'controller': self})
dispatcher.send(self.SIGNAL_CONTROLLER, \
**{'state': state, 'message': message, 'network': self._network, 'controller': self})
def update_ozw_config(self):
"""
Update the openzwave config from github.
        Not available for the 'shared' flavor, as we don't want to overwrite the precompiled configuration.
"""
if self.python_library_flavor in ['shared']:
logger.warning(u"Can't update_ozw_config for this flavor (%s)."%self.python_library_flavor)
return
logger.info(u'Update_ozw_config from github.')
dest = tempfile.mkdtemp()
dest_file = os.path.join(dest, 'open-zwave.zip')
req = urlopen('https://codeload.github.com/OpenZWave/open-zwave/zip/master')
with open(dest_file, 'wb') as f:
f.write(req.read())
zip_ref = zipfile.ZipFile(dest_file, 'r')
zip_ref.extractall(dest)
zip_ref.close()
os.system("cp -rf %s %s"%(os.path.join(dest, 'open-zwave-master', 'config'), self.library_config_path))
with open(os.path.join(self.library_config_path, 'pyozw_config.version'), 'w') as f:
f.write(time.strftime("%Y-%m-%d %H:%M"))
shutil.rmtree(dest)
```
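The controller methods above share one pattern: `_lock_controller()` guards against concurrent commands, the request is handed to the OpenZWave manager, and the outcome arrives asynchronously through `SIGNAL_CONTROLLER_COMMAND` notifications. The sketch below is a minimal, hypothetical usage example rather than part of the library; the serial device path and the `ZWaveOption`/`ZWaveNetwork` wiring are assumptions based on the option and network modules.
```python
# Hypothetical sketch: start a network, watch controller-command notifications,
# then start the inclusion process with add_node().
from pydispatch import dispatcher          # dispatcher used by python-openzwave
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork

def on_controller_command(**kwargs):
    # kwargs mirror what _handle_controller_command dispatches:
    # network, controller, node, node_id, state_int, state, state_full, ...
    print("controller state: %s (%s)" % (kwargs.get('state'), kwargs.get('state_full')))

options = ZWaveOption(device='/dev/ttyUSB0',   # assumed device path
                      config_path=None, user_path='.', cmd_line='')
options.set_console_output(False)
options.lock()

network = ZWaveNetwork(options, autostart=True)
dispatcher.connect(on_controller_command,
                   signal=ZWaveNetwork.SIGNAL_CONTROLLER_COMMAND)

# add_node() returns True only if the request was sent; the real result
# arrives later as ControllerCommand notifications.
if network.controller.add_node(doSecurity=False):
    print("Inclusion started; activate pairing on the device.")
```
`cancel_command()` can be called at any time to abort an in-progress inclusion.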
#### File: site-packages/openzwave/group.py
```python
from openzwave.object import ZWaveObject
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
"""NullHandler logger for python 2.6"""
def emit(self, record):
pass
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
class ZWaveGroup(ZWaveObject):
"""
    Represents an association group of a Z-Wave node.
    Holds the group's label and its list of associations,
    and is used to add or remove associated nodes.
"""
def __init__(self, group_index, network=None, node_id=None):
"""
        Initialize the group object
:param group_index: index of the group
:type group_index: int
:param network: The network object to access the manager
:type network: ZWaveNetwork
:param node_id: ID of node
:type node_id: int
"""
ZWaveObject.__init__(self, group_index, network)
self._node_id = node_id
self._index = group_index
def __str__(self):
"""
The string representation of the group.
:rtype: str
"""
return 'index: [%s] label: [%s]' % (self.index, self.label)
@property
def index(self):
"""
The index of the group.
:rtype: int
"""
return self._index
@property
def label(self):
"""
The label of the group.
        :rtype: str
"""
return self._network.manager.getGroupLabel(self.home_id, self._node_id, self.index)
@property
def max_associations(self):
"""
The number of associations.
:rtype: int
"""
return self._network.manager.getMaxAssociations(self.home_id, self._node_id, self.index)
@property
def associations(self):
"""
The members of associations.
:rtype: set()
"""
return self._network.manager.getAssociations(self.home_id, self._node_id, self.index)
@property
def associations_instances(self):
"""
        The members of associations with their instances.
        Nodes that do not support multi-instance have an instance id equal to 0.
:rtype: set() of tuples (nodeid,instanceid)
"""
return self._network.manager.getAssociationsInstances(self.home_id, self._node_id, self.index)
def add_association(self, target_node_id, instance=0x00):
"""
Adds a node to an association group.
Due to the possibility of a device being asleep, the command is assumed to
complete with success, and the association data held in this class is updated directly. This
will be reverted by a future Association message from the device if the Z-Wave
message actually failed to get through. Notification callbacks will be sent in
both cases.
:param target_node_id: Identifier for the node that will be added to the association group.
:type target_node_id: int
:param instance: The instance that will be added to the association group.
:type instance: int
"""
self._network.manager.addAssociation(self.home_id, self._node_id, self.index, target_node_id, instance)
def remove_association(self, target_node_id, instance=0x00):
"""
Removes a node from an association group.
Due to the possibility of a device being asleep, the command is assumed to
succeed, and the association data held in this class is updated directly. This
will be reverted by a future Association message from the device if the Z-Wave
message actually failed to get through. Notification callbacks will be sent
in both cases.
:param target_node_id: Identifier for the node that will be removed from the association group.
:type target_node_id: int
        :param instance: The instance that will be removed from the association group.
:type instance: int
"""
self._network.manager.removeAssociation(self._network.home_id, self._node_id, self.index, target_node_id, instance)
def to_dict(self, extras=['all']):
"""
Return a dict representation of the group.
        :param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
if 'all' in extras:
extras = ['associations']
ret = {}
ret['label'] = self.label
if 'associations' in extras:
ret['associations'] = dict.fromkeys(self.associations, 0)
return ret
```
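As a quick illustration of how `ZWaveGroup` is used in practice, the sketch below lists a node's groups and associates the controller with group 1. It assumes an already-running `ZWaveNetwork` named `network` and a valid `node_id`; both are placeholders, not defined in this file, and it relies on the `ZWaveNode.groups` property shown in `node.py` below.
```python
# Hypothetical sketch: inspect and modify a node's association groups.
node = network.nodes[node_id]

for index, group in node.groups.items():
    print("group %d: label=%r max=%d members=%s"
          % (index, group.label, group.max_associations, group.associations))

lifeline = node.groups.get(1)    # Z-Wave group indexes start at 1
if lifeline is not None and network.controller.node_id not in lifeline.associations:
    # add_association() updates the cached data optimistically; a later
    # Association report from the device reverts it if the message was lost.
    lifeline.add_association(network.controller.node_id)
```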
#### File: site-packages/openzwave/node.py
```python
import sys
from openzwave.object import ZWaveObject
from openzwave.group import ZWaveGroup
from openzwave.value import ZWaveValue
from openzwave.command import ZWaveNodeBasic, ZWaveNodeSwitch
from openzwave.command import ZWaveNodeSensor, ZWaveNodeThermostat
from openzwave.command import ZWaveNodeSecurity, ZWaveNodeDoorLock
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
"""NullHandler logger for python 2.6"""
def emit(self, record):
pass
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
class ZWaveNode(ZWaveObject,
ZWaveNodeBasic, ZWaveNodeSwitch,
ZWaveNodeSensor, ZWaveNodeThermostat,
ZWaveNodeSecurity, ZWaveNodeDoorLock):
"""
Represents a single Node within the Z-Wave Network.
"""
_isReady = False
def __init__(self, node_id, network):
"""
Initialize zwave node
:param node_id: ID of the node
:type node_id: int
:param network: The network object to access the manager
:type network: ZWaveNetwork
"""
logger.debug("Create object node (node_id:%s)", node_id)
ZWaveObject.__init__(self, node_id, network)
#No cache management for values in nodes
self.values = dict()
self._is_locked = False
self._isReady = False
def __str__(self):
"""
The string representation of the node.
:rtype: str
"""
try:
return u'home_id: [%s] id: [%s] name: [%s] model: [%s]' % \
(self._network.home_id_str, self._object_id, self.name, self.product_name)
except UnicodeDecodeError:
return u'home_id: [%s] id: [%s] name: [%s] model: [%s]' % \
(self._network.home_id_str, self._object_id, self.name.decode('utf-8', 'ignore'), self.product_name.decode('utf-8', 'ignore'))
@property
def node_id(self):
"""
The id of the node.
:rtype: int
"""
return self._object_id
@property
def name(self):
"""
The name of the node.
:rtype: str
"""
return self._network.manager.getNodeName(self.home_id, self.object_id)
@name.setter
def name(self, value):
"""
Set the name of the node.
:param value: The new name of the node
:type value: str
"""
self._network.manager.setNodeName(self.home_id, self.object_id, value)
@property
def location(self):
"""
The location of the node.
:rtype: str
"""
return self._network.manager.getNodeLocation(self.home_id, self.object_id)
@location.setter
def location(self, value):
"""
Set the location of the node.
:param value: The new location of the node
:type value: str
"""
self._network.manager.setNodeLocation(self.home_id, self.object_id, value)
@property
def product_name(self):
"""
The product name of the node.
:rtype: str
"""
return self._network.manager.getNodeProductName(self.home_id, self.object_id)
@product_name.setter
def product_name(self, value):
"""
Set the product name of the node.
:param value: The new name of the product
:type value: str
"""
self._network.manager.setNodeProductName(self.home_id, self.object_id, value)
@property
def product_type(self):
"""
The product type of the node.
:rtype: str
"""
return self._network.manager.getNodeProductType(self.home_id, self.object_id)
@property
def product_id(self):
"""
The product Id of the node.
:rtype: str
"""
return self._network.manager.getNodeProductId(self.home_id, self.object_id)
@property
def device_type(self):
"""
The device_type of the node.
:rtype: str
"""
return self._network.manager.getNodeDeviceTypeString(self.home_id, self.object_id)
@property
def role(self):
"""
The role of the node.
:rtype: str
"""
return self._network.manager.getNodeRoleString(self.home_id, self.object_id)
def to_dict(self, extras=['all']):
"""
Return a dict representation of the node.
        :param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
if 'all' in extras:
extras = ['kvals', 'capabilities', 'neighbors', 'groups', 'values']
ret={}
ret['name'] = self.name
ret['location'] = self.location
ret['product_type'] = self.product_type
ret['product_name'] = self.product_name
ret['node_id'] = self.node_id
if 'values' in extras :
ret['values'] = self.values_to_dict(extras=extras)
if 'groups' in extras :
ret['groups'] = self.groups_to_dict(extras=extras)
if 'neighbors' in extras :
ret['neighbors'] = dict.fromkeys(self.neighbors, 0)
if 'capabilities' in extras :
ret['capabilities'] = dict.fromkeys(self.capabilities, 0)
if 'kvals' in extras and self.network.dbcon is not None:
vals = self.kvals
for key in vals.keys():
ret[key]=vals[key]
return ret
@property
def capabilities(self):
"""
The capabilities of the node.
:rtype: set()
"""
caps = set()
if self.is_routing_device:
caps.add('routing')
if self.is_listening_device:
caps.add('listening')
if self.is_frequent_listening_device:
caps.add('frequent')
if self.is_security_device:
caps.add('security')
if self.is_beaming_device:
caps.add('beaming')
if self.node_id == self._network.controller.node_id:
for cap in self._network.controller.capabilities:
caps.add(cap)
return caps
@property
def neighbors(self):
"""
The neighbors of the node.
:rtype: set()
"""
return self._network.manager.getNodeNeighbors(self.home_id, self.object_id)
@property
def num_groups(self):
"""
Gets the number of association groups reported by this node.
:rtype: int
"""
return self._network.manager.getNumGroups(self.home_id, self.object_id)
def get_max_associations(self, groupidx):
"""
Gets the maximum number of associations for a group.
:param groupidx: The group to query
:type groupidx: int
:rtype: int
"""
return self._network.manager.getMaxAssociations(self.home_id, self.node_id, groupidx)
@property
def groups(self):
"""
Get the association groups reported by this node
In Z-Wave, groups are numbered starting from one. For example, if a call to
GetNumGroups returns 4, the _groupIdx value to use in calls to GetAssociations
AddAssociation and RemoveAssociation will be a number between 1 and 4.
:rtype: dict()
"""
groups = dict()
groups_added = 0
i = 1
while groups_added < self.num_groups and i<256:
if self.get_max_associations(i) > 0:
groups[i] = ZWaveGroup(i, network=self._network, node_id=self.node_id)
groups_added += 1
i += 1
return groups
def groups_to_dict(self, extras=['all']):
"""
Return a dict representation of the groups.
        :param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
groups = self.groups
ret={}
for gid in groups.keys():
ret[gid] = groups[gid].to_dict(extras=extras)
return ret
@property
def command_classes(self):
"""
The commandClasses of the node.
:rtype: set()
"""
command_classes = set()
for cls in self._network.manager.COMMAND_CLASS_DESC:
if self._network.manager.getNodeClassInformation(self.home_id, self.object_id, cls):
command_classes.add(cls)
return command_classes
@property
def command_classes_as_string(self):
"""
Return the command classes of the node as string.
:rtype: set()
"""
commands = self.command_classes
command_str = set()
for cls in commands:
command_str.add(self._network.manager.COMMAND_CLASS_DESC[cls])
return command_str
def get_command_class_as_string(self, class_id):
"""
Return the command class representation as string.
:param class_id: the COMMAND_CLASS to get string representation
:type class_id: hexadecimal code
:rtype: str
"""
return self._network.manager.COMMAND_CLASS_DESC[class_id]
def get_command_class_genres(self):
"""
Return the list of genres of command classes
:rtype: set()
"""
return ['User', 'Basic', 'Config', 'System']
def get_values_by_command_classes(self, genre='All', \
type='All', readonly='All', writeonly='All'):
"""
        Retrieve values in a dict() of dicts(). The dict is indexed on the COMMAND_CLASS.
        This allows browsing values grouped by COMMAND_CLASS. You can optionally filter for a command class,
        a genre and/or a type. You can also filter readonly and writeonly params.
        This method always filters the values.
        If you want to get all the node's values, use the property self.values instead.
:param genre: the genre of value
:type genre: 'All' or PyGenres
:param type: the type of value
:type type: 'All' or PyValueTypes
:param readonly: Is this value readonly
:type readonly: 'All' or True or False
:param writeonly: Is this value writeonly
:type writeonly: 'All' or True or False
:rtype: dict(command_class : dict(valueids))
"""
values = dict()
for value in self.values:
if (genre == 'All' or self.values[value].genre == genre) and \
(type == 'All' or self.values[value].type == type) and \
(readonly == 'All' or self.values[value].is_read_only == readonly) and \
(writeonly == 'All' or self.values[value].is_write_only == writeonly):
if self.values[value].command_class not in values:
values[self.values[value].command_class] = dict()
values[self.values[value].command_class][value] = self.values[value]
return values
def get_values_for_command_class(self, class_id):
"""
Retrieve the set of values for a command class.
Deprecated
For backward compatibility only.
Use get_values instead
:param class_id: the COMMAND_CLASS to get values
:type class_id: hexadecimal code or string
:type writeonly: 'All' or True or False
:rtype: set() of classId
"""
#print class_id
return self.get_values(class_id=class_id)
def get_values(self, class_id='All', genre='All', type='All', \
readonly='All', writeonly='All', index='All', label='All'):
"""
        Retrieve the set of values. You can optionally filter for a command class,
        a genre and/or a type. You can also filter readonly and writeonly params.
        This method always filters the values.
        If you want to get all the node's values, use self.values instead.
:param class_id: the COMMAND_CLASS to get values
:type class_id: hexadecimal code or string
:param genre: the genre of value
:type genre: 'All' or PyGenres
:param type: the type of value
:type type: 'All' or PyValueTypes
:param readonly: Is this value readonly
:type readonly: 'All' or True or False
:param writeonly: Is this value writeonly
:type writeonly: 'All' or True or False
:param index: Index of value within all the values
:type index: int
:param label: Label of the value as set by openzwave
:type label: str
:rtype: set() of Values
"""
ret = dict()
valkeys = self.values.keys()
for value in valkeys:
if (class_id == 'All' or self.values[value].command_class == class_id) and \
(genre == 'All' or self.values[value].genre == genre) and \
(type == 'All' or self.values[value].type == type) and \
(readonly == 'All' or self.values[value].is_read_only == readonly) and \
(writeonly == 'All' or self.values[value].is_write_only == writeonly) and \
(index == 'All' or self.values[value].index == index) and \
(label == 'All' or self.values[value].label == label):
ret[value] = self.values[value]
return ret
def values_to_dict(self, extras=['all']):
"""
Return a dict representation of the values.
        :param extras: The extra information to add
:type extras: []
:returns: A dict
:rtype: dict()
"""
ret={}
for vid in self.values.keys():
ret[vid] = self.values[vid].to_dict(extras=extras)
return ret
def add_value(self, value_id):
"""
Add a value to the node
:param value_id: The id of the value to add
:type value_id: int
"""
value = ZWaveValue(value_id, network=self.network, parent=self)
self.values[value_id] = value
def change_value(self, value_id):
"""
Change a value of the node.
Not implemented
:param value_id: The id of the value to change
:type value_id: int
"""
pass
def refresh_value(self, value_id):
"""
Refresh a value of the node.
        :param value_id: The id of the value to refresh
:type value_id: int
"""
return self._network.manager.refreshValue(value_id)
def remove_value(self, value_id):
"""
        Remove a value from the node.
        :param value_id: The id of the value to remove
:type value_id: int
:return: The result of the operation
:rtype: bool
"""
if value_id in self.values:
logger.debug("Remove value : %s", self.values[value_id])
del self.values[value_id]
return True
return False
def set_field(self, field, value):
"""
A helper to set a writable field : name, location, product_name, ...
:param field: The field to set : name, location, product_name, manufacturer_name
:type field: str
:param value: The value to set
:type value: str
:rtype: bool
"""
if field == "name":
self.name = value
elif field == "location":
self.location = value
elif field == "product_name":
self.product_name = value
elif field == "manufacturer_name":
self.manufacturer_name = value
def has_command_class(self, class_id):
"""
Check that this node use this commandClass.
:param classId: the COMMAND_CLASS to check
:type classId: hexadecimal code
:rtype: bool
"""
return class_id in self.command_classes
@property
def manufacturer_id(self):
"""
The manufacturer id of the node.
:rtype: str
"""
return self._network.manager.getNodeManufacturerId(self.home_id, self.object_id)
@property
def manufacturer_name(self):
"""
The manufacturer name of the node.
:rtype: str
"""
return self._network.manager.getNodeManufacturerName(self.home_id, self.object_id)
@manufacturer_name.setter
def manufacturer_name(self, value):
"""
Set the manufacturer name of the node.
:param value: The new manufacturer name of the node
:type value: str
"""
self._network.manager.setNodeManufacturerName(self.home_id, self.object_id, value)
@property
def generic(self):
"""
The generic type of the node.
:rtype: int
"""
return self._network.manager.getNodeGeneric(self.home_id, self.object_id)
@property
def basic(self):
"""
The basic type of the node.
:rtype: int
"""
return self._network.manager.getNodeBasic(self.home_id, self.object_id)
@property
def specific(self):
"""
The specific type of the node.
:return: The specific type of the node
:rtype: int
"""
return self._network.manager.getNodeSpecific(self.home_id, self.object_id)
@property
def security(self):
"""
The security type of the node.
:return: The security type of the node
:rtype: int
"""
return self._network.manager.getNodeSecurity(self.home_id, self.object_id)
@property
def version(self):
"""
The version of the node.
:return: The version of the node
:rtype: int
"""
return self._network.manager.getNodeVersion(self.home_id, self.object_id)
@property
def is_listening_device(self):
"""
Is this node a listening device.
:rtype: bool
"""
return self._network.manager.isNodeListeningDevice(self.home_id, self.object_id)
@property
def is_beaming_device(self):
"""
Is this node a beaming device.
:rtype: bool
"""
return self._network.manager.isNodeBeamingDevice(self.home_id, self.object_id)
@property
def is_frequent_listening_device(self):
"""
Is this node a frequent listening device.
:rtype: bool
"""
return self._network.manager.isNodeFrequentListeningDevice(self.home_id, self.object_id)
@property
def is_security_device(self):
"""
Is this node a security device.
:rtype: bool
"""
return self._network.manager.isNodeSecurityDevice(self.home_id, self.object_id)
@property
def is_routing_device(self):
"""
Is this node a routing device.
:rtype: bool
"""
return self._network.manager.isNodeRoutingDevice(self.home_id, self.object_id)
@property
def is_zwave_plus(self):
"""
        Is this node a Z-Wave Plus device.
:rtype: bool
"""
return self._network.manager.isNodeZWavePlus(self.home_id, self.object_id)
@property
def is_locked(self):
"""
Is this node locked.
:rtype: bool
"""
return self._is_locked
@property
def is_sleeping(self):
"""
Is this node sleeping.
:rtype: bool
"""
return not self.is_awake
# @property
# def level(self):
# """
# The level of the node.
# Todo
# """
# values = self._getValuesForCommandClass(0x26) # COMMAND_CLASS_SWITCH_MULTILEVEL
# if values:
# for value in values:
# vdic = value.value_data
# if vdic and vdic.has_key('type') and vdic['type'] == 'Byte' and vdic.has_key('value'):
# return int(vdic['value'])
# return 0
# @property
# def is_on(self):
# """
# Is this node On.
# Todo
# """
# values = self._getValuesForCommandClass(0x25) # COMMAND_CLASS_SWITCH_BINARY
# if values:
# for value in values:
# vdic = value.value_data
# if vdic and vdic.has_key('type') and vdic['type'] == 'Bool' and vdic.has_key('value'):
# return vdic['value'] == 'True'
# return False
# @property
# def signal_strength(self):
# """
# The signal strenght of this node.
# Todo
# """
# return 0
@property
def max_baud_rate(self):
"""
Get the maximum baud rate of a node
"""
return self._network.manager.getNodeMaxBaudRate(self.home_id, self.object_id)
def heal(self, upNodeRoute=False):
"""
        Heal a network node by requesting that it rediscovers its neighbors.
        Sends a ControllerCommand_RequestNodeNeighborUpdate to the node.
        :param upNodeRoute: Optional. Whether to perform return route initialization (default = False).
        :type upNodeRoute: bool
        :return: True if the ControllerCommand is sent, False otherwise
:rtype: bool
"""
if self.is_awake == False:
            logger.warning(u'Node must be awake before it can be healed')
return False
self._network.manager.healNetworkNode(self.home_id, self.object_id, upNodeRoute)
return True
def test(self, count=1):
"""
Send a number of test messages to node and record results.
:param count: The number of test messages to send.
:type count: int
"""
self._network.manager.testNetworkNode(self.home_id, self.object_id, count)
def assign_return_route(self):
        '''Ask the node to update its Return Route to the Controller
        This command will ask a Node to update its Return Route to the Controller
        Results of the AssignReturnRoute Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug('assign_return_route for node [%s]', self.object_id)
return self._network.controller.assign_return_route(self.object_id)
def refresh_info(self):
"""
Trigger the fetching of fixed data about a node.
Causes the nodes data to be obtained from the Z-Wave network in the same way
as if it had just been added. This method would normally be called
automatically by OpenZWave, but if you know that a node has been changed,
calling this method will force a refresh of the data held by the library. This
can be especially useful for devices that were asleep when the application was
first run.
:rtype: bool
"""
logger.debug(u'refresh_info for node [%s]', self.object_id)
return self._network.manager.refreshNodeInfo(self.home_id, self.object_id)
def request_state(self):
"""
Trigger the fetching of just the dynamic value data for a node.
Causes the node's values to be requested from the Z-Wave network. This is the
same as the query state starting from the dynamic state.
:rtype: bool
"""
logger.debug(u'request_state for node [%s]', self.object_id)
return self._network.manager.requestNodeState(self.home_id, self.object_id)
def send_information(self):
'''Send a NIF frame from the Controller to a Node.
        This command sends a NIF frame from the Controller to a Node
        Results of the SendNodeInformation Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'send_information for node [%s]', self.object_id)
return self._network.controller.send_node_information(self.object_id)
def network_update(self):
'''Update the controller with network information from the SUC/SIS.
        Results of the RequestNetworkUpdate Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'network_update for node [%s]', self.object_id)
return self._network.controller.request_network_update(self.object_id)
def neighbor_update(self):
'''Ask a Node to update its Neighbor Tables
This command will ask a Node to update its Neighbor Tables.
        Results of the RequestNodeNeighborUpdate Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'neighbor_update for node [%s]', self.object_id)
return self._network.controller.request_node_neighbor_update(self.object_id)
def create_button(self, buttonid):
'''Create a handheld button id.
Only intended for Bridge Firmware Controllers.
        Results of the CreateButton Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'create_button for node [%s]', self.object_id)
return self._network.controller.create_button(self.object_id, buttonid)
def delete_button(self, buttonid):
'''Delete a handheld button id.
Only intended for Bridge Firmware Controllers.
        Results of the DeleteButton Command will be sent as a Notification with the Notification type as
Notification::Type_ControllerCommand
:param buttonid: the ID of the Button to query.
:type buttonid: int
:return: True if the request was sent successfully.
:rtype: bool
'''
logger.debug(u'delete_button for node [%s]', self.object_id)
return self._network.controller.delete_button(self.object_id, buttonid)
def request_all_config_params(self):
"""
Request the values of all known configurable parameters from a device.
"""
logger.debug(u'Requesting config params for node [%s]', self.object_id)
self._network.manager.requestAllConfigParams(self.home_id, self.object_id)
def request_config_param(self, param):
"""
Request the value of a configurable parameter from a device.
Some devices have various parameters that can be configured to control the
device behaviour. These are not reported by the device over the Z-Wave network
        but can usually be found in the device's user manual. This method requests
the value of a parameter from the device, and then returns immediately,
without waiting for a response. If the parameter index is valid for this
device, and the device is awake, the value will eventually be reported via a
ValueChanged notification callback. The ValueID reported in the callback will
have an index set the same as _param and a command class set to the same value
as returned by a call to Configuration::StaticGetCommandClassId.
:param param: The param of the node.
:type param:
"""
logger.debug(u'Requesting config param %s for node [%s]', param, self.object_id)
self._network.manager.requestConfigParam(self.home_id, self.object_id, param)
def set_config_param(self, param, value, size=2):
"""
Set the value of a configurable parameter in a device.
Some devices have various parameters that can be configured to control the
device behaviour. These are not reported by the device over the Z-Wave network
        but can usually be found in the device's user manual. This method returns
immediately, without waiting for confirmation from the device that the change
has been made.
:param param: The param of the node.
:type param:
:param value: The value of the param.
:type value:
:param size: Is an optional number of bytes to be sent for the parameter value. Defaults to 2.
:type size: int
:return:
:rtype: bool
"""
logger.debug(u'Set config param %s for node [%s]', param, self.object_id)
return self._network.manager.setConfigParam(self.home_id, self.object_id, param, value, size)
# def setNodeOn(self, node):
# """
# """
# self._log.debug('Requesting setNodeOn for node {0}'.format(node.id))
# self._manager.setNodeOn(node.home_id, node.id)
# def setNodeOff(self, node):
# """
# """
# self._log.debug('Requesting setNodeOff for node {0}'.format(node.id))
# self._manager.setNodeOff(node.home_id, node.id)
# def setNodeLevel(self, node, level):
# """
# """
# self._log.debug('Requesting setNodeLevel for node {0} with new level {1}'.format(node.id, level))
# self._manager.setNodeLevel(node.home_id, node.id, level)
@property
def is_awake(self):
"""
        Is this node awake.
:rtype: bool
"""
return self._network.manager.isNodeAwake(self.home_id, self.object_id)
@property
def is_failed(self):
"""
        Is this node presumed failed.
:rtype: bool
"""
return self._network.manager.isNodeFailed(self.home_id, self.object_id)
@property
def query_stage(self):
"""
        The current query stage of the node.
        :rtype: str
"""
return self._network.manager.getNodeQueryStage(self.home_id, self.object_id)
@property
def is_ready(self):
"""
Get whether the node is ready to operate (QueryStage Completed).
:rtype: bool
"""
return self._isReady
@is_ready.setter
def is_ready(self, value):
"""
Set whether the node is ready to operate.
automatically set to True by notification SIGNAL_NODE_QUERIES_COMPLETE
:param value: is node ready
:type value: bool
"""
self._isReady = value
@property
def is_info_received(self):
"""
        Get whether the node information has been received. Returns True once the node information has been received.
:rtype: bool
"""
return self._network.manager.isNodeInfoReceived(self.home_id, self.object_id)
@property
def type(self):
"""
Get a human-readable label describing the node
:rtype: str
"""
return self._network.manager.getNodeType(self.home_id, self.object_id)
```
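To show how the value-filtering helpers in `ZWaveNode` fit together, here is a short, hypothetical sketch. `network` and `node_id` are assumed to come from an already-started `ZWaveNetwork`, and the configuration parameter number is purely illustrative; real parameter numbers and sizes are device-specific.
```python
# Hypothetical sketch: browse a node's user-genre values and set a config param.
node = network.nodes[node_id]

# get_values() always filters; here we keep user-facing, read-only values.
for value_id, value in node.get_values(genre='User', readonly=True).items():
    print("%s = %s %s" % (value.label, value.data, value.units))

# The same values can be grouped per COMMAND_CLASS.
by_cc = node.get_values_by_command_classes(genre='User')
print("command classes with user values:", sorted(by_cc.keys()))

# Example call shape for set_config_param(): parameter 1, value 99, 1 byte.
node.set_config_param(1, 99, size=1)
```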
#### File: site-packages/openzwave/option.py
```python
import os
from platform import system as platform_system
import libopenzwave
from libopenzwave import PyLogLevels
from openzwave.object import ZWaveException
from openzwave.singleton import Singleton
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
"""NullHandler logger for python 2.6"""
def emit(self, record):
pass
logger = logging.getLogger('openzwave')
logger.addHandler(NullHandler())
class ZWaveOption(libopenzwave.PyOptions):
"""
Represents a Zwave option used to start the manager.
"""
def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
"""
Create an option object and check that parameters are valid.
:param device: The device to use
:type device: str
:param config_path: The openzwave config directory. If None, try to configure automatically.
:type config_path: str
:param user_path: The user directory
:type user_path: str
:param cmd_line: The "command line" options of the openzwave library
:type cmd_line: str
"""
if platform_system() == 'Windows':
self._device = device
else:
#For linux
try:
if os.path.exists(device):
if os.access(device, os.R_OK) and os.access(device, os.W_OK):
self._device = device
else:
import sys, traceback
raise ZWaveException(u"Can't write to device %s : %s" % (device, traceback.format_exception(*sys.exc_info())))
else:
import sys, traceback
raise ZWaveException(u"Can't find device %s : %s" % (device, traceback.format_exception(*sys.exc_info())))
except:
import sys, traceback
raise ZWaveException(u"Error when retrieving device %s : %s" % (device, traceback.format_exception(*sys.exc_info())))
libopenzwave.PyOptions.__init__(self, config_path=config_path, user_path=user_path, cmd_line=cmd_line)
def set_log_file(self, logfile):
"""
Set the log file location.
:param logfile: The location of the log file
:type logfile: str
"""
return self.addOptionString("LogFileName", logfile, False)
def set_logging(self, status):
"""
Set the status of logging.
:param status: True to activate logs, False to disable
:type status: bool
"""
return self.addOptionBool("Logging", status)
def set_append_log_file(self, status):
"""
Append new session logs to existing log file (false = overwrite).
:param status:
:type status: bool
"""
return self.addOptionBool("AppendLogFile", status)
def set_console_output(self, status):
"""
Display log information on console (as well as save to disk).
:param status:
:type status: bool
"""
return self.addOptionBool("ConsoleOutput", status)
def set_save_log_level(self, level):
"""
Save (to file) log messages equal to or above LogLevel_Detail.
:param level:
:type level: PyLogLevels
* 'None':"Disable all logging"
* 'Always':"These messages should always be shown"
* 'Fatal':"A likely fatal issue in the library"
* 'Error':"A serious issue with the library or the network"
* 'Warning':"A minor issue from which the library should be able to recover"
* 'Alert':"Something unexpected by the library about which the controlling application should be aware"
* 'Info':"Everything Is working fine...these messages provide streamlined feedback on each message"
* 'Detail':"Detailed information on the progress of each message" /
* 'Debug':"Very detailed information on progress that will create a huge log file quickly"
* 'StreamDetail':"Will include low-level byte transfers from controller to buffer to application and back"
* 'Internal':"Used only within the log class (uses existing timestamp, etc.)"
"""
return self.addOptionInt("SaveLogLevel", PyLogLevels[level]['value'])
def set_queue_log_level(self, level):
"""
Save (in RAM) log messages equal to or above LogLevel_Debug.
:param level:
:type level: PyLogLevels
* 'None':"Disable all logging"
* 'Always':"These messages should always be shown"
* 'Fatal':"A likely fatal issue in the library"
* 'Error':"A serious issue with the library or the network"
* 'Warning':"A minor issue from which the library should be able to recover"
* 'Alert':"Something unexpected by the library about which the controlling application should be aware"
* 'Info':"Everything Is working fine...these messages provide streamlined feedback on each message"
* 'Detail':"Detailed information on the progress of each message" /
* 'Debug':"Very detailed information on progress that will create a huge log file quickly"
* 'StreamDetail':"Will include low-level byte transfers from controller to buffer to application and back"
* 'Internal':"Used only within the log class (uses existing timestamp, etc.)"
"""
return self.addOptionInt("QueueLogLevel", PyLogLevels[level])
def set_dump_trigger_level(self, level):
"""
Default is to never dump RAM-stored log messages.
:param level:
:type level: PyLogLevels
* 'None':"Disable all logging"
* 'Always':"These messages should always be shown"
* 'Fatal':"A likely fatal issue in the library"
* 'Error':"A serious issue with the library or the network"
* 'Warning':"A minor issue from which the library should be able to recover"
* 'Alert':"Something unexpected by the library about which the controlling application should be aware"
* 'Info':"Everything Is working fine...these messages provide streamlined feedback on each message"
* 'Detail':"Detailed information on the progress of each message" /
* 'Debug':"Very detailed information on progress that will create a huge log file quickly"
* 'StreamDetail':"Will include low-level byte transfers from controller to buffer to application and back"
* 'Internal':"Used only within the log class (uses existing timestamp, etc.)"
"""
return self.addOptionInt("DumpTriggerLevel", PyLogLevels[level])
def set_associate(self, status):
"""
Enable automatic association of the controller with group one of every device.
:param status: True to enable logs, False to disable
:type status: bool
"""
return self.addOptionBool("Associate", status)
def set_exclude(self, commandClass):
"""
        Remove support for the command classes listed here.
:param commandClass: The command class to exclude
:type commandClass: str
"""
return self.addOptionString("Exclude", commandClass, True)
def set_include(self, commandClass):
"""
        Only handle the specified command classes. The Exclude option is ignored if anything is set here.
:param commandClass: The location of the log file
:type commandClass: str
"""
return self.addOptionString("Include", commandClass, True)
def set_notify_transactions(self, status):
"""
Notifications when transaction complete is reported.
:param status: True to enable, False to disable
:type status: bool
"""
return self.addOptionBool("NotifyTransactions", status)
def set_interface(self, port):
"""
Identify the serial port to be accessed (TODO: change the code so more than one serial port can be specified and HID).
:param port: The serial port
:type port: str
"""
return self.addOptionString("Interface", port, True)
def set_save_configuration(self, status):
"""
Save the XML configuration upon driver close.
:param status: True to enable, False to disable
:type status: bool
"""
return self.addOptionBool("SaveConfiguration", status)
def set_driver_max_attempts(self, attempts):
"""
Set the driver max attempts before raising an error.
:param attempts: Number of attempts
:type attempts: int
"""
return self.addOptionInt("DriverMaxAttempts", attempts)
def set_poll_interval(self, interval):
"""
        Set the time period between polls of a node's state. The default is 30 seconds (roughly 30 values can be polled in this time; ~120 values is the effective limit for 30 seconds).
:param interval: interval in seconds
:type interval: int
"""
return self.addOptionInt("PollInterval", interval)
def set_interval_between_polls(self, status):
"""
        Set whether the poll interval applies between individual polls or to the entire poll set.
:param status: if false, try to execute the entire poll set within the PollInterval time frame. If true, wait for PollInterval milliseconds between polls
:type status: bool
"""
return self.addOptionBool("IntervalBetweenPolls", status)
def set_suppress_value_refresh(self, status):
"""
        If true, notifications for refreshed (but unchanged) values will not be sent.
:param status: True to enable, False to disable
:type status: bool
"""
return self.addOptionBool("SuppressValueRefresh", status)
def set_security_strategy(self, strategy='SUPPORTED'):
"""
Should we encrypt CC's that are available via both clear text and Security CC?
:param strategy: The security strategy : SUPPORTED|ESSENTIAL|CUSTOM
:type strategy: str
"""
return self.addOptionString("SecurityStrategy", strategy, False)
def set_custom_secured_cc(self, custom_cc='0x62,0x4c,0x63'):
"""
        Set the list of custom command classes (CC) that should always be encrypted when SecurityStrategy is CUSTOM.
:param custom_cc: List of Custom CC
:type custom_cc: str
"""
return self.addOptionString("CustomSecuredCC", custom_cc, False)
@property
def device(self):
"""
The device used by the controller.
:rtype: str
"""
return self._device
@property
def config_path(self):
"""
The config path.
:rtype: str
"""
return self._config_path
@property
def user_path(self):
"""
        The user path.
:rtype: str
"""
return self._user_path
class ZWaveOptionSingleton(ZWaveOption):
"""
Represents a singleton Zwave option used to start the manager.
"""
__metaclass__ = Singleton
```
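For reference, a minimal usage sketch (not part of the source) that exercises only the setters shown above. How the ZWaveOption instance itself is constructed is outside this excerpt, so it is taken as a given `options` argument:

```python
def configure_zwave_options(options):
    """Apply a typical set of options to an existing ZWaveOption-like object.

    Hedged sketch: only methods documented in the excerpt above are used.
    """
    options.set_associate(True)                 # auto-associate with group 1 of every device
    options.set_save_configuration(True)        # write the XML configuration on driver close
    options.set_suppress_value_refresh(True)    # skip notifications for unchanged values
    options.set_poll_interval(30)               # poll interval, in seconds per the docstring
    options.set_interval_between_polls(False)   # spread the whole poll set over PollInterval
    options.set_driver_max_attempts(5)
    return options
```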
#### File: warrant/django/forms.py
```python
from django import forms
class ProfileForm(forms.Form):
first_name = forms.CharField(max_length=200,required=True)
last_name = forms.CharField(max_length=200,required=True)
email = forms.EmailField(required=True)
phone_number = forms.CharField(max_length=30,required=True)
gender = forms.ChoiceField(choices=(('female','Female'),('male','Male')),required=True)
address = forms.CharField(max_length=200,required=True)
preferred_username = forms.CharField(max_length=200,required=True)
api_key = forms.CharField(max_length=200, required=False)
api_key_id = forms.CharField(max_length=200, required=False)
class APIKeySubscriptionForm(forms.Form):
plan = forms.ChoiceField(required=True)
def __init__(self, plans=[], users_plans=[], *args, **kwargs):
self.base_fields['plan'].choices = [(p.get('id'),p.get('name')) for p in plans if not p.get('id') in users_plans]
super(APIKeySubscriptionForm, self).__init__(*args, **kwargs)
```
#### File: site-packages/websockets/headers.py
```python
import re
from .exceptions import InvalidHeader
__all__ = [
'parse_extension_list', 'build_extension_list',
'parse_protocol_list', 'build_protocol_list',
]
# To avoid a dependency on a parsing library, we implement manually the ABNF
# described in https://tools.ietf.org/html/rfc6455#section-9.1 with the
# definitions from https://tools.ietf.org/html/rfc7230#appendix-B.
def peek_ahead(string, pos):
"""
Return the next character from ``string`` at the given position.
Return ``None`` at the end of ``string``.
We never need to peek more than one character ahead.
"""
return None if pos == len(string) else string[pos]
_OWS_re = re.compile(r'[\t ]*')
def parse_OWS(string, pos):
"""
Parse optional whitespace from ``string`` at the given position.
Return the new position.
The whitespace itself isn't returned because it isn't significant.
"""
# There's always a match, possibly empty, whose content doesn't matter.
match = _OWS_re.match(string, pos)
return match.end()
_token_re = re.compile(r'[-!#$%&\'*+.^_`|~0-9a-zA-Z]+')
def parse_token(string, pos):
"""
Parse a token from ``string`` at the given position.
Return the token value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
match = _token_re.match(string, pos)
if match is None:
raise InvalidHeader("Expected token", string=string, pos=pos)
return match.group(), match.end()
_quoted_string_re = re.compile(
r'"(?:[\x09\x20-\x21\x23-\x5b\x5d-\x7e]|\\[\x09\x20-\x7e\x80-\xff])*"')
_unquote_re = re.compile(r'\\([\x09\x20-\x7e\x80-\xff])')
def parse_quoted_string(string, pos):
"""
Parse a quoted string from ``string`` at the given position.
Return the unquoted value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
match = _quoted_string_re.match(string, pos)
if match is None:
raise InvalidHeader("Expected quoted string", string=string, pos=pos)
return _unquote_re.sub(r'\1', match.group()[1:-1]), match.end()
def parse_extension_param(string, pos):
"""
Parse a single extension parameter from ``string`` at the given position.
Return a ``(name, value)`` pair and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
# Extract parameter name.
name, pos = parse_token(string, pos)
pos = parse_OWS(string, pos)
# Extract parameter string, if there is one.
if peek_ahead(string, pos) == '=':
pos = parse_OWS(string, pos + 1)
if peek_ahead(string, pos) == '"':
pos_before = pos # for proper error reporting below
value, pos = parse_quoted_string(string, pos)
# https://tools.ietf.org/html/rfc6455#section-9.1 says: the value
# after quoted-string unescaping MUST conform to the 'token' ABNF.
if _token_re.fullmatch(value) is None:
raise InvalidHeader("Invalid quoted string content",
string=string, pos=pos_before)
else:
value, pos = parse_token(string, pos)
pos = parse_OWS(string, pos)
else:
value = None
return (name, value), pos
def parse_extension(string, pos):
"""
Parse an extension definition from ``string`` at the given position.
Return an ``(extension name, parameters)`` pair, where ``parameters`` is a
list of ``(name, value)`` pairs, and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
# Extract extension name.
name, pos = parse_token(string, pos)
pos = parse_OWS(string, pos)
# Extract all parameters.
parameters = []
while peek_ahead(string, pos) == ';':
pos = parse_OWS(string, pos + 1)
parameter, pos = parse_extension_param(string, pos)
parameters.append(parameter)
return (name, parameters), pos
def parse_extension_list(string, pos=0):
"""
Parse a ``Sec-WebSocket-Extensions`` header.
The string is assumed not to start or end with whitespace.
Return a value with the following format::
[
(
'extension name',
[
('parameter name', 'parameter value'),
....
]
),
...
]
Parameter values are ``None`` when no value is provided.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
# Per https://tools.ietf.org/html/rfc7230#section-7, "a recipient MUST
# parse and ignore a reasonable number of empty list elements"; hence
# while loops that remove extra delimiters.
# Remove extra delimiters before the first extension.
while peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
extensions = []
while True:
# Loop invariant: an extension starts at pos in string.
extension, pos = parse_extension(string, pos)
extensions.append(extension)
# We may have reached the end of the string.
if pos == len(string):
break
# There must be a delimiter after each element except the last one.
if peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
else:
raise InvalidHeader("Expected comma", string=string, pos=pos)
# Remove extra delimiters before the next extension.
while peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
# We may have reached the end of the string.
if pos == len(string):
break
# Since we only advance in the string by one character with peek_ahead()
# or with the end position of a regex match, we can't overshoot the end.
assert pos == len(string)
return extensions
def build_extension(name, parameters):
"""
Build an extension definition.
This is the reverse of :func:`parse_extension`.
"""
return '; '.join([name] + [
# Quoted strings aren't necessary because values are always tokens.
name if value is None else '{}={}'.format(name, value)
for name, value in parameters
])
def build_extension_list(extensions):
"""
Unparse a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension_list`.
"""
return ', '.join(
build_extension(name, parameters)
for name, parameters in extensions
)
def parse_protocol(string, pos):
"""
Parse a protocol definition from ``string`` at the given position.
Return the protocol and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
name, pos = parse_token(string, pos)
pos = parse_OWS(string, pos)
return name, pos
def parse_protocol_list(string, pos=0):
"""
Parse a ``Sec-WebSocket-Protocol`` header.
The string is assumed not to start or end with whitespace.
Return a list of protocols.
Raise :exc:`~websockets.exceptions.InvalidHeader` on invalid inputs.
"""
# Per https://tools.ietf.org/html/rfc7230#section-7, "a recipient MUST
# parse and ignore a reasonable number of empty list elements"; hence
# while loops that remove extra delimiters.
# Remove extra delimiters before the first extension.
while peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
protocols = []
while True:
# Loop invariant: a protocol starts at pos in string.
protocol, pos = parse_protocol(string, pos)
protocols.append(protocol)
# We may have reached the end of the string.
if pos == len(string):
break
# There must be a delimiter after each element except the last one.
if peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
else:
raise InvalidHeader("Expected comma", string=string, pos=pos)
# Remove extra delimiters before the next protocol.
while peek_ahead(string, pos) == ',':
pos = parse_OWS(string, pos + 1)
# We may have reached the end of the string.
if pos == len(string):
break
# Since we only advance in the string by one character with peek_ahead()
# or with the end position of a regex match, we can't overshoot the end.
assert pos == len(string)
return protocols
def build_protocol_list(protocols):
"""
Unparse a ``Sec-WebSocket-Protocol`` header.
This is the reverse of :func:`parse_protocol_list`.
"""
return ', '.join(protocols)
```
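As a quick illustration (hedged, not part of the module), parsing and rebuilding a typical `Sec-WebSocket-Extensions` value with the functions above; the round-trip property mirrors what the test suite below asserts:

```python
from websockets.headers import parse_extension_list, build_extension_list

header = ('permessage-deflate; client_max_window_bits, '
          'permessage-deflate; server_max_window_bits=10')
extensions = parse_extension_list(header)
# -> [('permessage-deflate', [('client_max_window_bits', None)]),
#     ('permessage-deflate', [('server_max_window_bits', '10')])]
rebuilt = build_extension_list(extensions)
assert parse_extension_list(rebuilt) == extensions
```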
#### File: site-packages/websockets/protocol.py
```python
import asyncio
import asyncio.queues
import codecs
import collections
import enum
import logging
import random
import struct
from .compatibility import asyncio_ensure_future
from .exceptions import (
ConnectionClosed, InvalidState, PayloadTooBig, WebSocketProtocolError
)
from .framing import *
from .handshake import *
__all__ = ['WebSocketCommonProtocol']
logger = logging.getLogger(__name__)
# A WebSocket connection goes through the following four states, in order:
class State(enum.IntEnum):
CONNECTING, OPEN, CLOSING, CLOSED = range(4)
# In order to ensure consistency, the code always checks the current value of
# WebSocketCommonProtocol.state before assigning a new value and never yields
# between the check and the assignment.
class WebSocketCommonProtocol(asyncio.StreamReaderProtocol):
"""
This class implements common parts of the WebSocket protocol.
It assumes that the WebSocket connection is established. The handshake is
managed in subclasses such as
:class:`~websockets.server.WebSocketServerProtocol` and
:class:`~websockets.client.WebSocketClientProtocol`.
It runs a task that stores incoming data frames in a queue and deals with
control frames automatically. It sends outgoing data frames and performs
the closing handshake.
On Python ≥ 3.6, :class:`WebSocketCommonProtocol` instances support
asynchronous iteration::
async for message in websocket:
await process(message)
The iterator yields incoming messages. It exits normally when the
connection is closed with the status code 1000 OK. It raises a
:exc:`~websockets.exceptions.ConnectionClosed` exception when the
connection is closed with any other status code.
The ``host``, ``port`` and ``secure`` parameters are simply stored as
attributes for handlers that need them.
The ``timeout`` parameter defines the maximum wait time in seconds for
completing the closing handshake and, only on the client side, for
terminating the TCP connection. :meth:`close()` will complete in at most
``4 * timeout`` on the server side and ``5 * timeout`` on the client side.
The ``max_size`` parameter enforces the maximum size for incoming messages
in bytes. The default value is 1MB. ``None`` disables the limit. If a
message larger than the maximum size is received, :meth:`recv()` will
raise :exc:`~websockets.exceptions.ConnectionClosed` and the connection
will be closed with status code 1009.
The ``max_queue`` parameter sets the maximum length of the queue that holds
incoming messages. The default value is 32. 0 disables the limit. Messages
are added to an in-memory queue when they're received; then :meth:`recv()`
pops from that queue. In order to prevent excessive memory consumption when
messages are received faster than they can be processed, the queue must be
bounded. If the queue fills up, the protocol stops processing incoming data
until :meth:`recv()` is called. In this situation, various receive buffers
(at least in ``asyncio`` and in the OS) will fill up, then the TCP receive
window will shrink, slowing down transmission to avoid packet loss.
Since Python can use up to 4 bytes of memory to represent a single
character, each websocket connection may use up to ``4 * max_size *
max_queue`` bytes of memory to store incoming messages. By default,
this is 128MB. You may want to lower the limits, depending on your
application's requirements.
The ``read_limit`` argument sets the high-water limit of the buffer for
incoming bytes. The low-water limit is half the high-water limit. The
default value is 64kB, half of asyncio's default (based on the current
implementation of :class:`~asyncio.StreamReader`).
The ``write_limit`` argument sets the high-water limit of the buffer for
outgoing bytes. The low-water limit is a quarter of the high-water limit.
The default value is 64kB, equal to asyncio's default (based on the
current implementation of ``FlowControlMixin``).
As soon as the HTTP request and response in the opening handshake are
processed, the request path is available in the :attr:`path` attribute,
and the request and response HTTP headers are available:
* as a :class:`~http.client.HTTPMessage` in the :attr:`request_headers`
and :attr:`response_headers` attributes
* as an iterable of (name, value) pairs in the :attr:`raw_request_headers`
and :attr:`raw_response_headers` attributes
These attributes must be treated as immutable.
If a subprotocol was negotiated, it's available in the :attr:`subprotocol`
attribute.
Once the connection is closed, the status code is available in the
:attr:`close_code` attribute and the reason in :attr:`close_reason`.
"""
# There are only two differences between the client-side and the server-
# side behavior: masking the payload and closing the underlying TCP
# connection. Set is_client and side to pick a side.
is_client = None
side = 'undefined'
def __init__(self, *,
host=None, port=None, secure=None,
timeout=10, max_size=2 ** 20, max_queue=2 ** 5,
read_limit=2 ** 16, write_limit=2 ** 16,
loop=None, legacy_recv=False):
self.host = host
self.port = port
self.secure = secure
self.timeout = timeout
self.max_size = max_size
self.max_queue = max_queue
self.read_limit = read_limit
self.write_limit = write_limit
# Store a reference to loop to avoid relying on self._loop, a private
# attribute of StreamReaderProtocol, inherited from FlowControlMixin.
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
self.legacy_recv = legacy_recv
# Configure read buffer limits. The high-water limit is defined by
# ``self.read_limit``. The ``limit`` argument controls the line length
# limit and half the buffer limit of :class:`~asyncio.StreamReader`.
# That's why it must be set to half of ``self.read_limit``.
stream_reader = asyncio.StreamReader(limit=read_limit // 2, loop=loop)
super().__init__(stream_reader, self.client_connected, loop)
self.reader = None
self.writer = None
self._drain_lock = asyncio.Lock(loop=loop)
# This class implements the data transfer and closing handshake, which
# are shared between the client-side and the server-side.
# Subclasses implement the opening handshake and, on success, execute
# :meth:`connection_open()` to change the state to OPEN.
self.state = State.CONNECTING
# HTTP protocol parameters.
self.path = None
self.request_headers = None
self.raw_request_headers = None
self.response_headers = None
self.raw_response_headers = None
# WebSocket protocol parameters.
self.extensions = []
self.subprotocol = None
# The close code and reason are set when receiving a close frame or
# losing the TCP connection.
self.close_code = None
self.close_reason = ''
# Completed when the connection state becomes CLOSED. Translates the
# :meth:`connection_lost()` callback to a :class:`~asyncio.Future`
# that can be awaited. (Other :class:`~asyncio.Protocol` callbacks are
# translated by ``self.stream_reader``).
self.connection_lost_waiter = asyncio.Future(loop=loop)
# Queue of received messages.
self.messages = asyncio.queues.Queue(max_queue, loop=loop)
# Mapping of ping IDs to waiters, in chronological order.
self.pings = collections.OrderedDict()
# Task running the data transfer.
self.transfer_data_task = None
# Task closing the TCP connection.
self.close_connection_task = None
def client_connected(self, reader, writer):
"""
Callback when the TCP connection is established.
Record references to the stream reader and the stream writer to avoid
using private attributes ``_stream_reader`` and ``_stream_writer`` of
:class:`~asyncio.StreamReaderProtocol`.
"""
self.reader = reader
self.writer = writer
def connection_open(self):
"""
Callback when the WebSocket opening handshake completes.
Enter the OPEN state and start the data transfer phase.
"""
# 4.1. The WebSocket Connection is Established.
assert self.state is State.CONNECTING
self.state = State.OPEN
# Start the task that receives incoming WebSocket messages.
self.transfer_data_task = asyncio_ensure_future(
self.transfer_data(), loop=self.loop)
# Start the task that eventually closes the TCP connection.
self.close_connection_task = asyncio_ensure_future(
self.close_connection(), loop=self.loop)
# Public API
@property
def local_address(self):
"""
Local address of the connection.
This is a ``(host, port)`` tuple or ``None`` if the connection hasn't
been established yet.
"""
if self.writer is None:
return None
return self.writer.get_extra_info('sockname')
@property
def remote_address(self):
"""
Remote address of the connection.
This is a ``(host, port)`` tuple or ``None`` if the connection hasn't
been established yet.
"""
if self.writer is None:
return None
return self.writer.get_extra_info('peername')
@property
def open(self):
"""
This property is ``True`` when the connection is usable.
It may be used to detect disconnections but this is discouraged per
the EAFP_ principle. When ``open`` is ``False``, using the connection
raises a :exc:`~websockets.exceptions.ConnectionClosed` exception.
.. _EAFP: https://docs.python.org/3/glossary.html#term-eafp
"""
return self.state is State.OPEN
@asyncio.coroutine
def recv(self):
"""
This coroutine receives the next message.
It returns a :class:`str` for a text frame and :class:`bytes` for a
binary frame.
When the end of the message stream is reached, :meth:`recv` raises
:exc:`~websockets.exceptions.ConnectionClosed`. This can happen after
a normal connection closure, a protocol error or a network failure.
.. versionchanged:: 3.0
:meth:`recv` used to return ``None`` instead. Refer to the
changelog for details.
"""
# Don't yield from self.ensure_open() here because messages could be
# available in the queue even if the connection is closed.
# Return any available message
try:
return self.messages.get_nowait()
except asyncio.queues.QueueEmpty:
pass
# Don't yield from self.ensure_open() here because messages could be
# received before the closing frame even if the connection is closing.
# Wait for a message until the connection is closed
next_message = asyncio_ensure_future(
self.messages.get(), loop=self.loop)
try:
done, pending = yield from asyncio.wait(
[next_message, self.transfer_data_task],
loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
except asyncio.CancelledError:
# Propagate cancellation to avoid leaking the next_message Task.
next_message.cancel()
raise
# Now there's no need to yield from self.ensure_open(). Either a
# message was received or the connection was closed.
if next_message in done:
return next_message.result()
else:
next_message.cancel()
if not self.legacy_recv:
raise ConnectionClosed(self.close_code, self.close_reason)
@asyncio.coroutine
def send(self, data):
"""
This coroutine sends a message.
It sends :class:`str` as a text frame and :class:`bytes` as a binary
frame. It raises a :exc:`TypeError` for other inputs.
"""
yield from self.ensure_open()
if isinstance(data, str):
opcode = 1
data = data.encode('utf-8')
elif isinstance(data, bytes):
opcode = 2
else:
raise TypeError("data must be bytes or str")
yield from self.write_frame(opcode, data)
@asyncio.coroutine
def close(self, code=1000, reason=''):
"""
This coroutine performs the closing handshake.
It waits for the other end to complete the handshake and for the TCP
connection to terminate.
It doesn't do anything once the connection is closed. In other words
        it's idempotent.
It's safe to wrap this coroutine in :func:`~asyncio.ensure_future`
since errors during connection termination aren't particularly useful.
``code`` must be an :class:`int` and ``reason`` a :class:`str`.
"""
if self.state is State.OPEN:
# 7.1.2. Start the WebSocket Closing Handshake
# 7.1.3. The WebSocket Closing Handshake is Started
frame_data = serialize_close(code, reason)
try:
yield from asyncio.wait_for(
self.write_frame(OP_CLOSE, frame_data),
self.timeout, loop=self.loop)
except asyncio.TimeoutError:
# If the close frame cannot be sent because the send buffers
# are full, the closing handshake won't complete anyway.
# Cancel the data transfer task to shut down faster.
# Cancelling a task is idempotent.
self.transfer_data_task.cancel()
# If no close frame is received within the timeout, wait_for() cancels
# the data transfer task and raises TimeoutError. Then transfer_data()
# catches CancelledError and exits without an exception.
# If close() is called multiple times concurrently and one of these
# calls hits the timeout, other calls will resume executing without an
# exception, so there's no need to catch CancelledError here.
try:
# If close() is cancelled during the wait, self.transfer_data_task
# is cancelled before the timeout elapses (on Python ≥ 3.4.3).
# This helps closing connections when shutting down a server.
yield from asyncio.wait_for(
self.transfer_data_task, self.timeout, loop=self.loop)
except asyncio.TimeoutError:
pass
# Wait for the close connection task to close the TCP connection.
yield from asyncio.shield(self.close_connection_task)
@asyncio.coroutine
def ping(self, data=None):
"""
This coroutine sends a ping.
It returns a :class:`~asyncio.Future` which will be completed when the
corresponding pong is received and which you may ignore if you don't
want to wait.
A ping may serve as a keepalive or as a check that the remote endpoint
received all messages up to this point::
pong_waiter = await ws.ping()
await pong_waiter # only if you want to wait for the pong
By default, the ping contains four random bytes. The content may be
overridden with the optional ``data`` argument which must be of type
:class:`str` (which will be encoded to UTF-8) or :class:`bytes`.
"""
yield from self.ensure_open()
if data is not None:
data = encode_data(data)
# Protect against duplicates if a payload is explicitly set.
if data in self.pings:
raise ValueError("Already waiting for a pong with the same data")
# Generate a unique random payload otherwise.
while data is None or data in self.pings:
data = struct.pack('!I', random.getrandbits(32))
self.pings[data] = asyncio.Future(loop=self.loop)
yield from self.write_frame(OP_PING, data)
return asyncio.shield(self.pings[data])
@asyncio.coroutine
def pong(self, data=b''):
"""
This coroutine sends a pong.
An unsolicited pong may serve as a unidirectional heartbeat.
The content may be overridden with the optional ``data`` argument
which must be of type :class:`str` (which will be encoded to UTF-8) or
:class:`bytes`.
"""
yield from self.ensure_open()
data = encode_data(data)
yield from self.write_frame(OP_PONG, data)
# Private methods - no guarantees.
@asyncio.coroutine
def ensure_open(self):
"""
Check that the WebSocket connection is open.
Raise :exc:`~websockets.exceptions.ConnectionClosed` if it isn't.
"""
# Handle cases from most common to least common for performance.
if self.state is State.OPEN:
return
if self.state is State.CLOSED:
raise ConnectionClosed(self.close_code, self.close_reason)
if self.state is State.CLOSING:
# If we started the closing handshake, wait for its completion to
# get the proper close code and status. self.close_connection_task
# will complete within 4 or 5 * timeout after calling close().
# The CLOSING state also occurs when failing the connection. In
# that case self.close_connection_task will complete even faster.
if self.close_code is None:
yield from asyncio.shield(self.close_connection_task)
raise ConnectionClosed(self.close_code, self.close_reason)
# Control may only reach this point in buggy third-party subclasses.
assert self.state is State.CONNECTING
raise InvalidState("WebSocket connection isn't established yet")
@asyncio.coroutine
def transfer_data(self):
"""
Read incoming messages and put them in a queue.
This coroutine runs in a task until the closing handshake is started.
"""
try:
while True:
msg = yield from self.read_message()
# Exit the loop when receiving a close frame.
if msg is None:
break
yield from self.messages.put(msg)
except asyncio.CancelledError:
# This happens if self.close() cancels self.transfer_data_task.
pass
except WebSocketProtocolError:
yield from self.fail_connection(1002)
except asyncio.IncompleteReadError:
yield from self.fail_connection(1006)
except UnicodeDecodeError:
yield from self.fail_connection(1007)
except PayloadTooBig:
yield from self.fail_connection(1009)
except Exception:
logger.warning("Error in data transfer", exc_info=True)
yield from self.fail_connection(1011)
@asyncio.coroutine
def read_message(self):
"""
Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return ``None`` when the closing handshake is started.
"""
frame = yield from self.read_data_frame(max_size=self.max_size)
# A close frame was received.
if frame is None:
return
if frame.opcode == OP_TEXT:
text = True
elif frame.opcode == OP_BINARY:
text = False
else: # frame.opcode == OP_CONT
raise WebSocketProtocolError("Unexpected opcode")
# Shortcut for the common case - no fragmentation
if frame.fin:
return frame.data.decode('utf-8') if text else frame.data
# 5.4. Fragmentation
chunks = []
max_size = self.max_size
if text:
decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
if max_size is None:
def append(frame):
nonlocal chunks
chunks.append(decoder.decode(frame.data, frame.fin))
else:
def append(frame):
nonlocal chunks, max_size
chunks.append(decoder.decode(frame.data, frame.fin))
max_size -= len(frame.data)
else:
if max_size is None:
def append(frame):
nonlocal chunks
chunks.append(frame.data)
else:
def append(frame):
nonlocal chunks, max_size
chunks.append(frame.data)
max_size -= len(frame.data)
append(frame)
while not frame.fin:
frame = yield from self.read_data_frame(max_size=max_size)
if frame is None:
raise WebSocketProtocolError("Incomplete fragmented message")
if frame.opcode != OP_CONT:
raise WebSocketProtocolError("Unexpected opcode")
append(frame)
return ('' if text else b'').join(chunks)
@asyncio.coroutine
def read_data_frame(self, max_size):
"""
Read a single data frame from the connection.
Process control frames received before the next data frame.
Return ``None`` if a close frame is encountered before any data frame.
"""
# 6.2. Receiving Data
while True:
frame = yield from self.read_frame(max_size)
# 5.5. Control Frames
if frame.opcode == OP_CLOSE:
# Make sure the close frame is valid before echoing it.
code, reason = parse_close(frame.data)
# 7.1.5. The WebSocket Connection Close Code
# 7.1.6. The WebSocket Connection Close Reason
self.close_code, self.close_reason = code, reason
if self.state is State.OPEN:
# 7.1.3. The WebSocket Closing Handshake is Started
yield from self.write_frame(OP_CLOSE, frame.data)
return
elif frame.opcode == OP_PING:
# Answer pings.
yield from self.pong(frame.data)
elif frame.opcode == OP_PONG:
# Do not acknowledge pings on unsolicited pongs.
if frame.data in self.pings:
# Acknowledge all pings up to the one matching this pong.
ping_id = None
while ping_id != frame.data:
ping_id, pong_waiter = self.pings.popitem(0)
pong_waiter.set_result(None)
# 5.6. Data Frames
else:
return frame
@asyncio.coroutine
def read_frame(self, max_size):
"""
Read a single frame from the connection.
"""
frame = yield from Frame.read(
self.reader.readexactly,
mask=not self.is_client,
max_size=max_size,
extensions=self.extensions,
)
logger.debug("%s < %s", self.side, frame)
return frame
@asyncio.coroutine
def write_frame(self, opcode, data=b''):
# Defensive assertion for protocol compliance.
if self.state is not State.OPEN: # pragma: no cover
raise InvalidState("Cannot write to a WebSocket "
"in the {} state".format(self.state.name))
# Make sure no other frame will be sent after a close frame. Do this
# before yielding control to avoid sending more than one close frame.
if opcode == OP_CLOSE:
self.state = State.CLOSING
frame = Frame(True, opcode, data)
logger.debug("%s > %s", self.side, frame)
frame.write(
self.writer.write,
mask=self.is_client,
extensions=self.extensions,
)
# Backport of https://github.com/python/asyncio/pull/280.
# Remove when dropping support for Python < 3.6.
if self.writer.transport is not None: # pragma: no cover
if self.writer_is_closing():
yield
try:
# drain() cannot be called concurrently by multiple coroutines:
# http://bugs.python.org/issue29930. Remove this lock when no
            # version of Python where this bug exists is supported anymore.
with (yield from self._drain_lock):
# Handle flow control automatically.
yield from self.writer.drain()
except ConnectionError:
# Terminate the connection if the socket died.
yield from self.fail_connection(1006)
# And raise an exception, since the frame couldn't be sent.
raise ConnectionClosed(self.close_code, self.close_reason)
def writer_is_closing(self):
"""
Backport of https://github.com/python/asyncio/pull/291.
Replace with ``self.writer.transport.is_closing()`` when dropping
support for Python < 3.6 and with ``self.writer.is_closing()`` when
https://bugs.python.org/issue31491 is fixed.
"""
transport = self.writer.transport
try:
return transport.is_closing()
except AttributeError: # pragma: no cover
# This emulates what is_closing would return if it existed.
try:
return transport._closing
except AttributeError:
return transport._closed
@asyncio.coroutine
def close_connection(self, after_handshake=True):
"""
7.1.1. Close the WebSocket Connection
When the opening handshake succeeds, :meth:`connection_open` starts
this coroutine in a task. It waits for the data transfer phase to
complete then it closes the TCP connection cleanly.
When the opening handshake fails, the client or the server runs this
coroutine with ``after_handshake=False`` to close the TCP connection.
"""
try:
# Wait for the data transfer phase to complete.
if after_handshake:
yield from self.transfer_data_task
# A client should wait for a TCP Close from the server.
if self.is_client and after_handshake:
if (yield from self.wait_for_connection_lost()):
return
logger.debug(
"%s ! timed out waiting for TCP close", self.side)
# Half-close the TCP connection if possible (when there's no TLS).
if self.writer.can_write_eof():
logger.debug(
"%s x half-closing TCP connection", self.side)
self.writer.write_eof()
if (yield from self.wait_for_connection_lost()):
return
logger.debug(
"%s ! timed out waiting for TCP close", self.side)
finally:
# The try/finally ensures that the transport never remains open,
# even if this coroutine is cancelled (for example).
# Closing a transport is idempotent. If the transport was already
# closed, for example from eof_received(), it's fine.
# Close the TCP connection. Buffers are flushed asynchronously.
logger.debug(
"%s x closing TCP connection", self.side)
self.writer.close()
if (yield from self.wait_for_connection_lost()):
return
logger.debug(
"%s ! timed out waiting for TCP close", self.side)
# Abort the TCP connection. Buffers are discarded.
logger.debug(
"%s x aborting TCP connection", self.side)
self.writer.transport.abort()
# connection_lost() is called quickly after aborting.
yield from self.wait_for_connection_lost()
@asyncio.coroutine
def wait_for_connection_lost(self):
"""
Wait until the TCP connection is closed or ``self.timeout`` elapses.
Return ``True`` if the connection is closed and ``False`` otherwise.
"""
if not self.connection_lost_waiter.done():
try:
yield from asyncio.wait_for(
asyncio.shield(self.connection_lost_waiter),
self.timeout, loop=self.loop)
except asyncio.TimeoutError:
pass
# Re-check self.connection_lost_waiter.done() synchronously because
# connection_lost() could run between the moment the timeout occurs
# and the moment this coroutine resumes running.
return self.connection_lost_waiter.done()
@asyncio.coroutine
def fail_connection(self, code=1011, reason=''):
"""
7.1.7. Fail the WebSocket Connection
"""
logger.debug(
"%s ! failing WebSocket connection: %d %s",
self.side, code, reason,
)
# Don't send a close frame if the connection is broken.
if self.state is State.OPEN and code != 1006:
frame_data = serialize_close(code, reason)
yield from self.write_frame(OP_CLOSE, frame_data)
# asyncio.StreamReaderProtocol methods
def connection_made(self, transport):
"""
Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it.
"""
logger.debug("%s - connection_made(%s)", self.side, transport)
transport.set_write_buffer_limits(self.write_limit)
super().connection_made(transport)
def eof_received(self):
"""
Close the transport after receiving EOF.
        Since Python 3.5, :meth:`~StreamReaderProtocol.eof_received` returns
``True`` on non-TLS connections.
See http://bugs.python.org/issue24539 for more information.
This is inappropriate for websockets for at least three reasons:
1. The use case is to read data until EOF with self.reader.read(-1).
Since websockets is a TLV protocol, this never happens.
2. It doesn't work on TLS connections. A falsy value must be
returned to have the same behavior on TLS and plain connections.
3. The websockets protocol has its own closing handshake. Endpoints
close the TCP connection after sending a Close frame.
As a consequence we revert to the previous, more useful behavior.
"""
logger.debug("%s - eof_received()", self.side)
super().eof_received()
return
def connection_lost(self, exc):
"""
7.1.4. The WebSocket Connection is Closed.
"""
logger.debug("%s - connection_lost(%s)", self.side, exc)
self.state = State.CLOSED
if self.close_code is None:
self.close_code = 1006
# If self.connection_lost_waiter isn't pending, that's a bug, because:
# - it's set only here in connection_lost() which is called only once;
# - it must never be cancelled.
self.connection_lost_waiter.set_result(None)
super().connection_lost(exc)
try:
from .py36.protocol import __aiter__
except (SyntaxError, ImportError): # pragma: no cover
pass
else:
WebSocketCommonProtocol.__aiter__ = __aiter__
```
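A hedged consumer-side sketch of the public API documented above (`recv()`, `send()`, `ping()` and the `ConnectionClosed` behaviour). The `websocket` argument is assumed to be an already-connected protocol instance; the function name is illustrative only:

```python
import asyncio
from websockets.exceptions import ConnectionClosed

async def echo_until_closed(websocket):
    """Echo incoming messages back until the peer closes the connection."""
    pong_waiter = await websocket.ping()   # keepalive, as in the ping() docstring
    await pong_waiter                      # only needed if you want to wait for the pong
    try:
        while True:
            message = await websocket.recv()   # str for text frames, bytes for binary
            await websocket.send(message)
    except ConnectionClosed:
        # recv() raises ConnectionClosed once the closing handshake completes
        pass
```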
#### File: site-packages/websockets/test_headers.py
```python
import unittest
from .exceptions import InvalidHeader
from .headers import *
class HeadersTests(unittest.TestCase):
def test_parse_extension_list(self):
for header, parsed in [
# Synthetic examples
(
'foo',
[('foo', [])],
),
(
'foo, bar',
[('foo', []), ('bar', [])],
),
(
'foo; name; token=token; quoted-string="quoted-string", '
'bar; quux; quuux',
[
('foo', [('name', None), ('token', 'token'),
('quoted-string', 'quoted-string')]),
('bar', [('quux', None), ('quuux', None)]),
],
),
# Pathological examples
(
',\t, , ,foo ;bar = 42,, baz,,',
[('foo', [('bar', '42')]), ('baz', [])],
),
# Realistic use cases for permessage-deflate
(
'permessage-deflate',
[('permessage-deflate', [])],
),
(
'permessage-deflate; client_max_window_bits',
[('permessage-deflate', [('client_max_window_bits', None)])],
),
(
'permessage-deflate; server_max_window_bits=10',
[('permessage-deflate', [('server_max_window_bits', '10')])],
),
]:
with self.subTest(header=header):
self.assertEqual(parse_extension_list(header), parsed)
# Also ensure that build_extension_list round-trips cleanly.
unparsed = build_extension_list(parsed)
self.assertEqual(parse_extension_list(unparsed), parsed)
def test_parse_extension_list_invalid_header(self):
for header in [
# Truncated examples
'',
',\t,'
'foo;',
'foo; bar;',
'foo; bar=',
'foo; bar="baz',
# Wrong delimiter
'foo, bar, baz=quux; quuux',
# Value in quoted string parameter that isn't a token
'foo; bar=" "',
]:
with self.subTest(header=header):
with self.assertRaises(InvalidHeader):
parse_extension_list(header)
def test_parse_protocol_list(self):
for header, parsed in [
# Synthetic examples
(
'foo',
['foo'],
),
(
'foo, bar',
['foo', 'bar'],
),
# Pathological examples
(
',\t, , ,foo ,, bar,baz,,',
['foo', 'bar', 'baz'],
),
]:
with self.subTest(header=header):
self.assertEqual(parse_protocol_list(header), parsed)
# Also ensure that build_protocol_list round-trips cleanly.
unparsed = build_protocol_list(parsed)
self.assertEqual(parse_protocol_list(unparsed), parsed)
def test_parse_protocol_list_invalid_header(self):
for header in [
# Truncated examples
'',
',\t,'
# Wrong delimiter
'foo; bar',
]:
with self.subTest(header=header):
with self.assertRaises(InvalidHeader):
parse_protocol_list(header)
```
#### File: site-packages/yarl/quoting.py
```python
import re
from string import ascii_letters, ascii_lowercase, digits
BASCII_LOWERCASE = ascii_lowercase.encode('ascii')
BPCT_ALLOWED = {'%{:02X}'.format(i).encode('ascii') for i in range(256)}
GEN_DELIMS = ":/?#[]@"
SUB_DELIMS_WITHOUT_QS = "!$'()*,"
SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+&=;'
RESERVED = GEN_DELIMS + SUB_DELIMS
UNRESERVED = ascii_letters + digits + '-._~'
ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
_IS_HEX = re.compile(b'[A-Z0-9][A-Z0-9]')
class _PyQuoter:
def __init__(self, *, safe='', protected='', qs=False):
self._safe = safe
self._protected = protected
self._qs = qs
def __call__(self, val):
if val is None:
return None
if not isinstance(val, str):
raise TypeError("Argument should be str")
if not val:
return ''
val = val.encode('utf8', errors='ignore')
ret = bytearray()
pct = b''
safe = self._safe
safe += ALLOWED
if not self._qs:
safe += '+&=;'
safe += self._protected
bsafe = safe.encode('ascii')
idx = 0
while idx < len(val):
ch = val[idx]
idx += 1
if pct:
if ch in BASCII_LOWERCASE:
ch = ch - 32 # convert to uppercase
pct.append(ch)
if len(pct) == 3: # pragma: no branch # peephole optimizer
pct = bytes(pct)
buf = pct[1:]
if not _IS_HEX.match(buf):
ret.extend(b'%25')
pct = b''
idx -= 2
continue
try:
unquoted = chr(int(pct[1:].decode('ascii'), base=16))
except ValueError:
ret.extend(b'%25')
pct = b''
idx -= 2
continue
if unquoted in self._protected:
ret.extend(pct)
elif unquoted in safe:
ret.append(ord(unquoted))
else:
ret.extend(pct)
pct = b''
# special case, if we have only one char after "%"
elif len(pct) == 2 and idx == len(val):
ret.extend(b'%25')
pct = b''
idx -= 1
continue
elif ch == ord('%'):
pct = bytearray()
pct.append(ch)
# special case if "%" is last char
if idx == len(val):
ret.extend(b'%25')
continue
if self._qs:
if ch == ord(' '):
ret.append(ord('+'))
continue
if ch in bsafe:
ret.append(ch)
continue
ret.extend(('%{:02X}'.format(ch)).encode('ascii'))
return ret.decode('ascii')
class _PyUnquoter:
def __init__(self, *, unsafe='', qs=False):
self._unsafe = unsafe
self._qs = qs
self._quoter = _Quoter()
self._qs_quoter = _Quoter(qs=True)
def __call__(self, val):
if val is None:
return None
if not isinstance(val, str):
raise TypeError("Argument should be str")
if not val:
return ''
pct = ''
last_pct = ''
pcts = bytearray()
ret = []
for ch in val:
if pct:
pct += ch
if len(pct) == 3: # pragma: no branch # peephole optimizer
pcts.append(int(pct[1:], base=16))
last_pct = pct
pct = ''
continue
if pcts:
try:
unquoted = pcts.decode('utf8')
except UnicodeDecodeError:
pass
else:
if self._qs and unquoted in '+=&;':
ret.append(self._qs_quoter(unquoted))
elif unquoted in self._unsafe:
ret.append(self._quoter(unquoted))
else:
ret.append(unquoted)
del pcts[:]
if ch == '%':
pct = ch
continue
if pcts:
ret.append(last_pct) # %F8ab
last_pct = ''
if ch == '+':
if not self._qs or ch in self._unsafe:
ret.append('+')
else:
ret.append(' ')
continue
if ch in self._unsafe:
ret.append('%')
h = hex(ord(ch)).upper()[2:]
for ch in h:
ret.append(ch)
continue
ret.append(ch)
if pcts:
try:
unquoted = pcts.decode('utf8')
except UnicodeDecodeError:
ret.append(last_pct) # %F8
else:
if self._qs and unquoted in '+=&;':
ret.append(self._qs_quoter(unquoted))
elif unquoted in self._unsafe:
ret.append(self._quoter(unquoted))
else:
ret.append(unquoted)
return ''.join(ret)
try:
from ._quoting import _Quoter, _Unquoter
except ImportError: # pragma: no cover
_Quoter = _PyQuoter
_Unquoter = _PyUnquoter
```
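A hedged illustration (not from the source) of the quoter/unquoter call protocol defined above: both are constructed with keyword-only flags and are then simple callables on `str`. `_Quoter`/`_Unquoter` resolve to the C implementation when available, otherwise to the pure-Python classes shown here:

```python
from yarl.quoting import _Quoter, _Unquoter

quote_qs = _Quoter(qs=True)     # query-string mode: '&', '=', ';' are percent-encoded, space -> '+'
quote_path = _Quoter(safe='/')  # keep '/' literal when quoting path segments
unquote = _Unquoter(qs=False)

encoded = quote_qs('city=Boise & state=ID')     # '=' and '&' become %3D / %26
decoded = unquote(quote_path('café/menu'))      # percent-encodes 'é', then decodes it back
```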
|
{
"source": "jfarmer08/ha-wyzeapi",
"score": 2
}
|
#### File: custom_components/wyzeapi/__init__.py
```python
import logging
import voluptuous as vol
from .wyzeapi.wyzeapi import WyzeApi
from homeassistant.const import (
CONF_DEVICES, CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'wyzeapi'
CONF_SENSORS = "sensors"
CONF_LIGHT = "light"
CONF_SWITCH = "switch"
CONF_LOCK = "lock"
CONF_CAMERAS = "camera"
CAMERA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_LIGHT, default=True): cv.boolean,
vol.Optional(CONF_SWITCH, default=True): cv.boolean,
vol.Optional(CONF_LOCK, default=True): cv.boolean,
vol.Optional(CONF_CAMERAS, default={}): {cv.string: CAMERA_SCHEMA}
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the WyzeApi parent component."""
_LOGGER.debug("""
-------------------------------------------------------------------
Wyze Bulb and Switch Home Assistant Integration
Version: v0.4.3-beta.2
This is a custom integration
If you have any issues with this you need to open an issue here:
https://github.com/JoshuaMulliken/ha-wyzeapi/issues
-------------------------------------------------------------------""")
_LOGGER.debug("""Creating new WyzeApi component""")
wyzeapi_account = WyzeApi(config[DOMAIN].get(CONF_USERNAME),
config[DOMAIN].get(CONF_PASSWORD))
await wyzeapi_account.async_init()
sensor_support = config[DOMAIN].get(CONF_SENSORS)
light_support = config[DOMAIN].get(CONF_LIGHT)
switch_support = config[DOMAIN].get(CONF_SWITCH)
lock_support = config[DOMAIN].get(CONF_LOCK)
camera_support = config[DOMAIN].get(CONF_CAMERAS)
_LOGGER.debug(str(camera_support))
if not wyzeapi_account.is_valid_login():
_LOGGER.error("Not connected to Wyze account. Unable to add devices. Check your configuration.")
return False
_LOGGER.debug("Connected to Wyze account")
wyzeapi_devices = await wyzeapi_account.async_get_devices()
# Store the logged in account object for the platforms to use.
hass.data[DOMAIN] = {
"wyzeapi_account": wyzeapi_account
}
# Start up lights and switch components
if wyzeapi_devices:
_LOGGER.debug("Starting WyzeApi components")
        if light_support:
await discovery.async_load_platform(hass, "light", DOMAIN, {}, config)
_LOGGER.debug("Starting WyzeApi Lights")
        if switch_support:
await discovery.async_load_platform(hass, "switch", DOMAIN, {}, config)
_LOGGER.debug("Starting WyzeApi switchs")
        if sensor_support:
await discovery.async_load_platform(hass, "binary_sensor", DOMAIN, {}, config)
_LOGGER.debug("Starting WyzeApi Sensors")
        if lock_support:
await discovery.async_load_platform(hass, "lock", DOMAIN, {}, config)
_LOGGER.debug("Starting WyzeApi lock")
        if camera_support:
await discovery.async_load_platform(hass, "camera", DOMAIN, {}, config)
_LOGGER.debug("Starting WyzeApi Camera")
else:
_LOGGER.error("WyzeApi authenticated but could not find any devices.")
return True
```
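For reference, a hedged example (not from the source) of the kind of mapping that CONFIG_SCHEMA above accepts, written as a plain Python dict. The literal "username"/"password" keys are assumed from Home Assistant's CONF_* constants; the camera entry follows CAMERA_SCHEMA:

```python
# Hypothetical values for illustration only.
example_config = {
    "wyzeapi": {
        "username": "user@example.com",
        "password": "example-password",
        "sensors": True,
        "light": True,
        "switch": True,
        "lock": False,
        "camera": {
            "front_door": {
                "username": "user@example.com",
                "password": "example-password",
            },
        },
    },
}
# CONFIG_SCHEMA(example_config) would validate this and fill in the defaults.
```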
#### File: custom_components/wyzeapi/lock.py
```python
import logging
from datetime import timedelta
from .wyzeapi.wyzeapi import WyzeApi
from . import DOMAIN
import voluptuous as vol
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED, ATTR_ATTRIBUTION
# Import the device class from the component that you want to support
from homeassistant.components.lock import LockEntity
#import homeassistant.components.lock.LockEntity
from homeassistant.core import callback
# Added to support a quicker update time. Is this too fast?
SCAN_INTERVAL = timedelta(seconds=5)
ATTRIBUTION = "Data provided by Wyze"
ATTR_STATE ="state"
ATTR_AVAILABLE = "available"
ATTR_DEVICE_MODEL = "device model"
ATTR_OPEN_CLOSE_STATE = "door"
ATTR_DOOR_STATE_OPEN = "open"
ATTR_DOOR_STATE_CLOSE = "closed"
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Wyze binary_sensor platform."""
_LOGGER.debug("""Creating new WyzeApi Lock component""")
async_add_entities([WyzeLock(lock) for lock in await hass.data[DOMAIN]["wyzeapi_account"].async_list_lock()], True)
class WyzeLock(LockEntity):
"""Representation of a Wyze binary_sensor."""
def __init__(self, lock):
"""Initialize a Wyze binary_sensor."""
self._lock = lock
self._name = lock._friendly_name
self._state = lock._state
self._avaliable = True
self._device_mac = lock._device_mac
self._device_model = lock._device_model
self._open_close_state = lock._open_close_state
@property
def name(self):
"""Return the display name of this sensor."""
return self._name
@property
def available(self):
"""Return the connection status of this sensor"""
return self._avaliable
@property
def is_locked(self):
"""Return true if sensor is on."""
return self._state
@property
def unique_id(self):
return self._device_mac
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_STATE : self._state,
ATTR_AVAILABLE : self._avaliable,
ATTR_DEVICE_MODEL : self._device_model,
ATTR_OPEN_CLOSE_STATE : self.get_door_state()
}
def get_door_state(self):
return ATTR_DOOR_STATE_OPEN if self._open_close_state == True else ATTR_DOOR_STATE_CLOSE
@property
def should_poll(self):
"""We always want to poll for sensors."""
return True
#This is not working.
async def async_lock(self, **kwargs):
"""Lock all or specified locks. A code to lock the lock with may optionally be specified."""
await self._lock.async_lock()
    # This is not working.
async def async_unlock(self, **kwargs):
"""Unlock all or specified locks. A code to unlock the lock with may optionally be specified."""
await self._lock.async_unlock()
async def async_update(self):
"""Fetch new state data for this sensor.
This is the only method that should fetch new data for Home Assistant.
"""
_LOGGER.debug("""Binary Locks doing a update.""")
await self._lock.async_update()
self._state = self._lock._state
self._open_close_state = self._lock._open_close_state
```
|
{
"source": "jfarnsworth95/Flask-Weather-App",
"score": 3
}
|
#### File: jfarnsworth95/Flask-Weather-App/__init__.py
```python
from flask import Flask, redirect, url_for, render_template, request, session, flash
from datetime import timedelta
from json_interpreter import *
from api_caller import *
app = Flask(__name__)
app.secret_key = "supersecretkeyrighthere"
app.permanent_session_lifetime = timedelta(hours=1) # enable if session is permanent below, this just sets time
# Just making it easy to keep track of state values
imperial = "imperial"
metric = "metric"
view_today = 0
view_5_day = 1
view_5_day_graph = 2
# Templates must be in a "templates" folder in the same dir as this py file
@app.route("/")
def home():
return render_template("home.html")
@app.route("/search", methods=["POST", "GET"])
def search_page():
if request.method == "POST":
session.permanent = True
if "zip_code" not in session and "country_name" not in session:
session["unit"] = imperial
session["view"] = view_5_day
print(request.form)
zip_code = request.form["myZip"]
session["zip_code"] = zip_code
country_name = request.form["myCountry"]
session["country_name"] = country_name
return redirect(url_for("weather_home"))
else:
country_name = ""
if "country_name" in session:
country_name = session["country_name"]
zip_code = ""
if "zip_code" in session:
zip_code = session["zip_code"]
return render_template("search.html", zip_code=zip_code, country_name=country_name)
@app.route("/weather", methods=["POST", "GET"])
def weather_home():
# if user hasn't provided location data, redirect to search page until they do
if "country_name" not in session or "zip_code" not in session:
flash("Enter your Zip and Country so we can find out what it's looking like out there", "info")
return redirect(url_for("search_page"))
else:
temp_data = {}
country_name = session["country_name"]
zip_code = session["zip_code"]
unit = session["unit"]
view = session["view"]
interpreter = json_interpreter()
caller = api_caller()
if view == view_5_day:
if "last_update_today" not in session or "forecast_5_day" not in session or can_i_refresh(session["last_update_5_day"]) :
interval_forecasts = interpreter.lazy_pass_in(caller.get_5_day_forecast(country_name, zip_code, unit))
if interval_forecasts is not None:
for key in interval_forecasts.keys():
if interval_forecasts[key]["timestamp_adjusted"].split("@ ")[1] == "12:00:00":
print(interval_forecasts[key]["timestamp"])
temp_data[interval_forecasts[key]["timestamp"]] = interval_forecasts[key]
session["last_update_5_day"] = datetime.datetime.now()
session["forecast_5_day"] = temp_data
else:
flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure your "
+ "API key is up to date, and servers are reachable.")
else:
temp_data = session["forecast_5_day"]
elif view == view_today:
if "forecast_today" not in session or "last_update_today" not in session or can_i_refresh(session["last_update_today"]):
temp_data = interpreter.lazy_pass_in(caller.get_weather_today(country_name, zip_code, unit))
if temp_data is None:
flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure your "
+ "API key is up to date, and servers are reachable.")
else:
session["last_update_today"] = datetime.datetime.now()
session["forecast_today"] = temp_data
else:
temp_data = session["forecast_today"]
else:
if "graph_points" not in session or "last_update_graph" not in session or can_i_refresh(session["last_update_graph"]):
api_return = caller.get_5_day_forecast(country_name, zip_code, unit)
if api_return is None:
flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure "
+ "your API key is up to date, and servers are reachable.")
else:
api_return["graph"] = True
temp_data = interpreter.lazy_pass_in(api_return)
session["last_update_graph"] = datetime.datetime.now()
session["graph_points"] = temp_data
else:
temp_data = session["graph_points"]
# Allow switch between "Today" and "5 Day Forecast"
return render_template("weather.html", unit=unit, temp_data=temp_data, view=view)
@app.route("/toggle_unit")
def toggle_unit():
if "unit" in session:
if session["unit"] == imperial:
session["unit"] = metric
else:
session["unit"] = imperial
return redirect(url_for("weather_home"))
@app.route("/toggle_view", methods=["POST"])
def toggle_view():
if request.method == "POST" and "new_view_id" in request.form:
new_view_id = int(request.form["new_view_id"])
if new_view_id == view_today:
session["view"] = view_today
elif new_view_id == view_5_day:
session["view"] = view_5_day
else:
session["view"] = view_5_day_graph
return redirect(url_for("weather_home"))
@app.route("/contact")
def contact_page():
return redirect("https://github.com/jfarnsworth95")
@app.route("/clear_session")
def clear_session():
session.pop("country_name", None)
session.pop("zip_code", None)
session.pop("unit", None)
session.pop("view", None)
session.pop("can_update", None)
session.pop("last_update_5_day", None)
session.pop("last_update_today", None)
session.pop("last_update_graph", None)
session.pop("forecast_today", None)
session.pop("forecast_5_day", None)
session.pop("graph_points", None)
flash("Your session has been successfully purged... It had a family, you monster.")
return redirect(url_for("home"))
# Just catches any unknown paths to provide a cleaner experience
@app.route("/<unknown>")
def unknown(unknown):
return redirect(url_for("home"))
def can_i_refresh(last_updated):
if last_updated is None:
return True
difference = datetime.datetime.now() - last_updated
if difference.total_seconds() / 60 > 5:
return True
return False
if __name__ == "__main__":
app.run(debug=True) # debug=True will allow code to update once saved without requiring a restart
```
|
{
"source": "jfarrell-bsu/CS111-resources",
"score": 4
}
|
#### File: CS111-resources/archived/cs111-project3-component3-stub.py
```python
def playGameOfBlackjack(name):
# Step 2: The game should begin by displaying a welcome message including the
# name of the game (Blackjack) and the players name.
# Step 3: Declare two integer variables to track the number of points in
# the player's hand and the dealer's hand respectively.
# What should the initial values be?
# Step 4: Use randint() function to "draw" two cards for player's hand
# Step 5: Use randint() function to "draw" two cards for dealer's hand
# Step 6: Use a while loop that will prompt the player to draw an additional
# card or stand. The loop ends when the player stands or if the points
# in the player's hand exceeds 21 (busts).
# Step 6.1: Print the current point value of the players hand
# Step 6.2: Use a while loop to prompt the player to Please (H)it or (S)tand
# and validate input. If the player chooses to draw a card, use the
# random() function to "draw" a card and add the points to the player's hand.
# Step 6.3: Check if the points in the player's hand exceeds 21 points.
# If player busts, print player loses message and return False
# Step 7: Use a while loop that will check the point total in the dealer's hand.
# If the points in the dealer's hand are less than 17, the dealer must draw an additional
# card. If the dealer has 17 or more points, the dealer stands. The loop ends when the
# dealer stands or if the points in the dealer's hand exceeds 21 (busts).
# Step 7.1: Print the current point value of the dealers hand
# Step 7.2: If current point value in hand is < 17,
# randomly generate an integer value for the houses draw.
# Otherwise, the dealer stands.
# Step 7.3: If dealer busts, print dealer loses message and return True
    # Step 8: Compare the player's and dealer's hands. If the player's hand is > the house's hand and <= 21,
# print winner message, including name and return True. Otherwise, print player loses and return False
######################################################################
# The code below this comment is what runs the program. Please #
# take the time to look at how your function is being called and #
# what is being done with the return value, but you do not need #
# to modify this code to complete the component. #
######################################################################
# Setup a default player
playerName = "Bob"
# Call the function and store the result
playerWins = playGameOfBlackjack(playerName)
# Display the winner!
if playerWins == True:
winnerString = "* " + playerName + " Wins! *"
else:
winnerString = "* House Wins! *"
starBorder = "*" * len(winnerString)
print(starBorder)
print(winnerString)
print(starBorder)
```
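For orientation, a minimal sketch of what Steps 4 and 6 of the stub above could look like, assuming a simplified card model in which every randint() draw is worth 2-11 points; this is an illustration only, not the assignment's official solution:
```python
from random import randint

def draw_card():
    # Simplified card model: every card is worth 2-11 points
    return randint(2, 11)

def player_turn(player_points):
    # Step 6: keep prompting until the player stands or busts
    while player_points <= 21:
        print("Your hand is worth", player_points, "points")
        choice = ""
        while choice not in ("H", "S"):          # Step 6.2: validate the input
            choice = input("Please (H)it or (S)tand: ").strip().upper()
        if choice == "S":
            break
        player_points += draw_card()             # draw a card and add it to the hand
    return player_points

points = draw_card() + draw_card()               # Step 4: two starting cards
points = player_turn(points)
print("Final hand:", points, "(bust)" if points > 21 else "")
```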
#### File: CS111-resources/archived/cs111-project3-component4-stub.py
```python
def getBetFromUser(maxBet):
validResponse = False
bet = 0
while validResponse == False:
response = raw_input("Please enter your bet up to $" + str(maxBet) + ": ")
if response.isdigit() and int(response) <= maxBet:
validResponse = True
bet = int(response)
else:
print("Please enter a valid bet")
return bet
#
# Display the winner for the current game
#
# Parameter
# winnerName - The name of the winner
#
def displayWinner(winnerName):
# Display the winner!
winnerString = "* " + winnerName + " Wins! *"
starBorder = "*" * len(winnerString)
print(starBorder)
print(winnerString)
print(starBorder)
#
# Primary Game Loop and Selection Menu
#
# The selection menu will be core component that ties all three individual games
# into a single program. It contains the primary game loop that will continue
# running until the player either selects (Q)uit from the selection menu or the
# player runs out of money. Selection menu contains options for (D)ice, (S)lots,
# and (B)lackjack. When the player selects a game, this component will prompt
# them for the bet amount and will add or subtract that amount from the their
# balance depending upon whether they win or lose. The player begins with $100.
# Step 5: Declare an integer variable called balance and set the initial value to 100
# Step 6: Prompt the player for their name
# Step 7: Print a message welcoming the player by name to Jackpot Challenge.
# It should also display their current balance.
# Step 8: Setup the Game Loop so that the game will continue running while the
# player's balance is > 0 and they have not selected to quit the game.
# Step 8.1: Display Game Menu
# Step 8.2: Prompt user for selection and validate input
# Step 8.3: Use if and elif statements to run a particular game based upon the
# player's selection from the menu. Call the provided getBetFromUser() function
# before starting each game and store the bet amount to a variable. If the
# player wins the game, add the bet amount to their balance. If the player
# loses the game, deduct the amount from their balance. End the game if the
# user selects (Q)uit
#
# Step 9: Game Over. To reach this point in the game, either the player has run
# out of money (balance == 0), or the player has selected quit from the menu.
# Display three different messages to the user depending upon their remaining
# balance. These messages should find ways to provide a supportive message
# to the player.
#
# balance > 100: - Display Message 1
# balance > 0 and balance <= 100 = Display Message 2
# balance <= 0 - Display Message 3.
#
```
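A rough sketch of the game loop described in Steps 5-8 above. It reuses the getBetFromUser() helper defined in this stub and assumes hypothetical playGameOfDice(), playGameOfSlots() and playGameOfBlackjack() functions from the other components, each returning True when the player wins; the menu text and messages are placeholders rather than the assignment's required wording:
```python
def playJackpotChallenge(playGameOfDice, playGameOfSlots, playGameOfBlackjack):
    balance = 100                                      # Step 5
    playerName = input("What is your name? ")          # Step 6
    print("Welcome to Jackpot Challenge, " + playerName + "! Balance: $" + str(balance))  # Step 7
    games = {"D": playGameOfDice, "S": playGameOfSlots, "B": playGameOfBlackjack}
    selection = ""
    while balance > 0 and selection != "Q":            # Step 8
        print("(D)ice  (S)lots  (B)lackjack  (Q)uit")  # Step 8.1
        selection = input("Your choice: ").strip().upper()  # Step 8.2
        if selection in games:                         # Step 8.3
            bet = getBetFromUser(balance)
            if games[selection](playerName):
                balance += bet
            else:
                balance -= bet
    # Step 9: a supportive goodbye that depends on the remaining balance
    if balance > 100:
        print("You leave richer than you arrived - well played, " + playerName + "!")
    elif balance > 0:
        print("You held on to some of your money - not bad, " + playerName + ".")
    else:
        print("The house took it all this time. Better luck next round, " + playerName + ".")
```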
#### File: CS111-resources/examples/cs111-text-file-write.py
```python
def writeListToFile(outputFile, dataList):
try:
destination = open(outputFile,"w")
destination.writelines(dataList)
destination.close()
except IOError:
print("Unable to write to output file" + outputFile)
# Create a string with the name of the file to write to
myFile = "names.txt"
# List of Strings to write to file, to ensure write the values to separate lines
# in the files, each string must include a newline.
#
# How can we modify the writeListToFile() function to do this for us automatically?
#
# nameList = ["Robin", "Lily", "Nora", "Patrice", "Zoey", "Quinn","Ted", "Marshall", "Barney", "Ranjit", "Carl", "Linus"]
nameList = ["Robin\n", "Lily\n", "Nora\n", "Patrice\n", "Zoey\n", "Quinn\n","Ted\n", "Marshall\n", "Barney\n", "Ranjit\n", "Carl\n", "Linus\n"]
# Call the above function to build a list of strings from the file
writeListToFile(myFile,nameList)
```
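One possible answer to the question raised in the comments above: a variant of writeListToFile() that appends the newline itself, so callers can pass plain strings. This is a sketch, not part of the original example:
```python
def writeListToFileWithNewlines(outputFile, dataList):
    try:
        with open(outputFile, "w") as destination:
            # Append the newline here so the caller does not have to embed "\n"
            destination.writelines(item + "\n" for item in dataList)
    except IOError:
        print("Unable to write to output file: " + outputFile)

# The list can now be written without newline characters in every string
writeListToFileWithNewlines("names.txt", ["Robin", "Lily", "Nora"])
```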
#### File: CS111-resources/minitasks/cs111-madlib-stubs.py
```python
import random
#
# This function is an implementation of the <REPLACE WITH MADLIB NAME> Madlib.
# It will dynamically generate a new Madlib each time it is called by
# randomly selecting values from the above lists
# Return
# This function will return a String containing the new Madlib
def generateMadlib():
# Step 1. Define List variables for each category in your Madlib.
# Add additional lists as necessary.
ADJ_LIST = []
NOUN_LIST = []
ADVERB_LIST = []
EXCLAMATION_LIST = []
# Setup the output string that will contain the Madlib
output = ""
# Step 2. Write your Madlib below using String concatenation and the random.choice() function
output += "Use String concatenation to write your madlib.\n"
output += "Replace these lines with your Madlib"
# Return generated Madlib
return output
#
# Generate the Madlib
#
madlib = generateMadlib()
#
# Print the Madlib
#
print(madlib)
```
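For illustration only, a filled-in version of generateMadlib() showing how the lists and random.choice() are meant to be combined; the word lists and the sentence are invented, not taken from the assignment:
```python
import random

def generateMadlibExample():
    ADJ_LIST = ["sparkly", "grumpy", "enormous"]
    NOUN_LIST = ["walrus", "teapot", "professor"]
    ADVERB_LIST = ["loudly", "gracefully", "suspiciously"]
    EXCLAMATION_LIST = ["Wow", "Yikes", "Huzzah"]
    output = ""
    # Each blank is filled by randomly choosing a word from the matching list
    output += random.choice(EXCLAMATION_LIST) + "! A " + random.choice(ADJ_LIST) + " "
    output += random.choice(NOUN_LIST) + " danced " + random.choice(ADVERB_LIST)
    output += " across the cafeteria.\n"
    return output

print(generateMadlibExample())
```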
#### File: CS111-resources/minitasks/cs111-splitting-up-stub.py
```python
singleSongCSV = "<NAME>,Songs You Know by Heart,Cheeseburger in Paradise,172"
#
# List of strings containing CSV formatted song data
#
songList = ['<NAME>,Songs You Know by Heart,Cheeseburger in Paradise,172',
'<NAME>,Songs You Know by Heart,He Went to Paris,209',
'<NAME>,Songs You Know by Heart,Fins,205',
'<NAME>,Songs You Know by Heart,Son of a Son of a Sailor,205',
'<NAME>,Songs You Know by Heart,A Pirate Looks at Forty,232',
'<NAME>,Songs You Know by Heart,Margaritaville,251',
'<NAME>,Songs You Know by Heart,Come Monday,189',
'<NAME>,Songs You Know by Heart,Changes in Latitudes Changes in Attitudes,195',
"<NAME>,Songs You Know by Heart,Why Don't We Get Drunk,162",
'<NAME>,Songs You Know by Heart,Pencil Thin Mustache,170',
'<NAME>,Songs You Know by Heart,Grapefruit-Juicy Fruit,176',
'<NAME>,Songs You Know by Heart,Boat Drinks,157',
'<NAME>,Songs You Know by Heart,Volcano,218']
# Display nicely formatted song details for a
# string provided in the following format:
# "Artist,Album,Title,Duration"
#
# Parameters
# song - String containing comma separated song details
#
# Return
# none
def printSong(song):
    pass  # TODO: split the CSV string and print each field (see the sketch after this file's code)
#
# call printSong() to print singleSongCSV
#
#
# use a for loop and printSong() to print each song in songList
#
```
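A possible implementation of printSong() for the stub above, assuming every string follows the "Artist,Album,Title,Duration" layout exactly, so that splitting on commas yields the four fields in order; the artist below is a placeholder, since the dataset anonymises the real names:
```python
def printSongExample(song):
    # "Artist,Album,Title,Duration" -> four comma-separated fields
    artist, album, title, duration = song.split(",")
    print(title + " by " + artist)
    print("  Album:    " + album)
    print("  Duration: " + duration + " seconds")

printSongExample("Some Artist,Songs You Know by Heart,Margaritaville,251")
```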
|
{
"source": "jfarrelly-voysis/voysis-python",
"score": 2
}
|
#### File: voysis-python/tests/test_audio.py
```python
import io
from voysis.audio.audio import AudioFile
def test_16khz_file():
audio_file = AudioFile(io.BytesIO(b'\x52\x49\x46\x46\x22\xe2\x01\x00\x57\x41\x56\x45\x66\x6d\x74\x20\x10\x00'
b'\x00\x00\x01\x00\x01\x00\x80\x3e\x00\x00\x00\x7d\x00\x00\x02\x00\x10\x00'
b'\x64\x61\x74\x61\xfe\xe1\x01\x00'))
assert audio_file.header.bits_per_sample == 16
assert audio_file.header.sample_rate == 16000
def test_48khz_file():
audio_file = AudioFile(io.BytesIO(b'\x52\x49\x46\x46\x32\x00\x0a\x00\x57\x41\x56\x45\x66\x6d\x74\x20\x12\x00'
b'\x00\x00\x03\x00\x01\x00\x80\xbb\x00\x00\x00\xee\x02\x00\x04\x00\x20\x00'
b'\x00\x00\x66\x61\x63\x74\x04\x00'))
assert audio_file.header.bits_per_sample == 32
assert audio_file.header.sample_rate == 48000
def test_raw_audio():
audio_file = AudioFile(io.BytesIO(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00'))
assert audio_file.header.bits_per_sample == 16
assert audio_file.header.sample_rate == 16000
```
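The byte strings in these tests are ordinary RIFF/WAVE headers. As a side note, this sketch shows how the sample rate and bit depth can be read out of the fmt chunk with the standard struct module, which is presumably close to what AudioFile does internally:
```python
import struct

header = (b'\x52\x49\x46\x46\x22\xe2\x01\x00\x57\x41\x56\x45\x66\x6d\x74\x20\x10\x00'
          b'\x00\x00\x01\x00\x01\x00\x80\x3e\x00\x00\x00\x7d\x00\x00\x02\x00\x10\x00')

# fmt chunk payload starts at byte 20: format, channels, sample rate, byte rate, block align, bits per sample
fmt, channels, sample_rate, byte_rate, block_align, bits = struct.unpack('<HHIIHH', header[20:36])
print(sample_rate, bits)   # 16000 16, matching the assertions in test_16khz_file()
```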
#### File: voysis/client/client.py
```python
import abc
import threading
import uuid
from datetime import datetime
from datetime import timedelta
import six
from dateutil.parser import parse as parsedatetime
from dateutil.tz import tzutc
from voysis.client.user_agent import UserAgent
class ClientError(Exception):
def __init__(self, *args, **kwargs):
super(ClientError, self).__init__(*args, **kwargs)
if args and len(args) > 0:
self.message = args[0]
else:
self.message = None
class ResponseFuture(object):
def __init__(self,
response_code=None,
response_message=None,
response_entity=None,
call_on_complete=None):
self._event = threading.Event()
self._callable = call_on_complete
self._response_entity = None
self.response_code = response_code
self.response_message = response_message
if response_entity:
self.set(response_code, response_entity=response_entity)
def wait_until_complete(self, timeout):
if not self._event.is_set():
if not self._event.wait(timeout):
raise ClientError("Timeout waiting on response.")
def get_entity(self, timeout=None):
self.wait_until_complete(timeout)
return self._response_entity
def set(self, response_code, response_message=None, response_entity=None):
self._response_entity = response_entity
self.response_code = response_code
self.response_message = response_message
self._event.set()
if self._callable:
self._callable(self)
def is_complete(self):
return self._event.is_set()
@six.add_metaclass(abc.ABCMeta)
class Client(object):
def __init__(self, url, user_agent=None, timeout=15):
self._url = url
self.user_agent = user_agent if user_agent else UserAgent()
self.audio_profile_id = str(uuid.uuid4())
self.api_media_type = 'application/vnd.voysisquery.v1+json'
self.ignore_vad = False
self.locale = 'en-US'
self.check_hostname = True
self.auth_token = None
self.timeout = timeout
self.current_conversation_id = None
self.current_context = None
self.app_token_renewal_grace = timedelta(seconds=180)
self._app_token = None
self._app_token_expiry = datetime.now(tzutc())
@abc.abstractmethod
def stream_audio(self, frames_generator, notification_handler=None, audio_type=None):
'''
Stream audio data to the query API, creating a new conversation (if
required) and a new audio query. Raises a ClientError if query
        processing is unsuccessful.
:param frames_generator:
:param notification_handler A callable that will be invoked if
streaming to the server is stopped for any reason. The callable
should accept a single argument, which will be a string indicating
the reason for the stoppage.
:param audio_type The Content-Type to use for the audio
:return: The completed query as a dictionary.
'''
pass
@abc.abstractmethod
def send_text(self, text):
'''
Sends text query to the query API, creating a new conversation (if
required). Raises a ClientError if query
processing is unsuccessful.
:param text
:return: The completed query as a dictionary.
'''
pass
@abc.abstractmethod
def send_request(self, uri, request_entity=None, extra_headers=None, call_on_complete=None, method='POST'):
"""
Send a request to the remote server. Raise an exception if the
request is not successful.
:param uri: The URI to make the request to.
:param request_entity: The entity to send in the body of the request.
:param extra_headers: Any extra headers to include. Every request will
have the standard headers set.
:param call_on_complete: A callable that will be invoked when the response
to the request is completed.
:param method: The HTTP method to use in sending the request. Defaults to POST.
:return: A ResponseFuture instance that can be used to obtain the
response.
"""
pass
def close(self):
"""
Release any resources in use by this client.
:return: None
"""
pass
def create_common_headers(self):
headers = {
'User-Agent': self.user_agent.get(),
'X-Voysis-Audio-Profile-Id': self.audio_profile_id,
'X-Voysis-Ignore-Vad': str(self.ignore_vad),
'Content-Type': 'application/json',
'Accept': self.api_media_type
}
if self._app_token:
headers['Authorization'] = 'Bearer ' + self._app_token
return headers
def send_feedback(self, query_id, rating=None, description=None, durations=None):
"""
Send feedback to the server for the given query.
"""
request_body = {}
if rating:
request_body['rating'] = rating
if description:
request_body['description'] = description
if durations:
request_body['durations'] = durations
if len(request_body) < 1:
return None
uri = "/queries/{query_id}/feedback".format(
query_id=query_id
)
return self.send_request(uri, request_body, method='PATCH').get_entity()
def refresh_app_token(self, force=False):
delta_to_expiry = self._app_token_expiry - datetime.now(tzutc())
if self.auth_token and (force or delta_to_expiry < self.app_token_renewal_grace):
auth_headers = {
'Authorization': 'Bearer ' + self.auth_token,
'Accept': 'application/json'
}
response_future = self.send_request(
'/tokens', extra_headers=auth_headers, call_on_complete=self._update_app_token
)
if not self._app_token:
response_future.wait_until_complete(self.timeout)
return self._app_token
def _create_audio_query_entity(self, audio_type='audio/pcm;bits=16;rate=16000'):
entity = {
'locale': self.locale,
'queryType': 'audio',
'audioQuery': {
'mimeType': audio_type
}
}
if self.current_conversation_id:
entity['conversationId'] = self.current_conversation_id
if self.current_context:
entity['context'] = self.current_context.copy()
return entity
def _create_text_query_entity(self, text):
entity = {
'locale': self.locale,
'queryType': 'text',
'textQuery': {
'text': text
}
}
if self.current_conversation_id:
entity['conversationId'] = self.current_conversation_id
if self.current_context:
entity['context'] = self.current_context.copy()
return entity
def _update_current_context(self, query):
if 'context' in query:
self.current_context = query['context'].copy()
else:
self.current_context = dict()
def _update_app_token(self, response_future):
if response_future.response_code == 200:
app_token_response = response_future.get_entity()
self._app_token = app_token_response['token']
self._app_token_expiry = parsedatetime(app_token_response['expiresAt'])
```
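A small usage sketch of the ResponseFuture class above: one thread completes the future while another blocks on get_entity() with a timeout. The payload and status values are invented for illustration:
```python
import threading

from voysis.client.client import ResponseFuture

future = ResponseFuture(call_on_complete=lambda f: print("completed with", f.response_code))

def worker():
    # Simulate the client receiving a server response and completing the future
    future.set(200, response_message="OK", response_entity={"id": "q-123"})

threading.Thread(target=worker).start()
entity = future.get_entity(timeout=5)   # blocks until set() runs, or raises ClientError after 5s
print(entity["id"])
```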
#### File: voysis/device/device.py
```python
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Device(object):
def __init__(self, **kwargs):
self.chunk_size = kwargs.get('chunk_size', 1024)
@abc.abstractmethod
def stream(self, client, recording_stopper):
pass
@abc.abstractmethod
def start_recording(self):
pass
@abc.abstractmethod
def stop_recording(self):
pass
@abc.abstractmethod
def is_recording(self):
pass
@abc.abstractmethod
def generate_frames(self):
pass
@abc.abstractmethod
def audio_type(self):
pass
```
#### File: voysis/device/file_device.py
```python
import datetime
import sys
import time
from voysis.audio.audio import AudioFile
from voysis.device.device import Device
if sys.version[0] == '2':
import Queue as queue
else:
import queue
class FileDevice(Device):
def __init__(self, audio_file=None, **kwargs):
        Device.__init__(self, **kwargs)  # forward kwargs so a custom chunk_size is honoured
self.time_between_chunks = kwargs.get('time_between_chunks', 0.08)
self._queue = queue.Queue()
self._last_chunk_time = datetime.datetime.utcfromtimestamp(0)
self.audio_file = AudioFile(audio_file)
def stream(self, client, recording_stopper):
self.start_recording()
recording_stopper.started()
query = client.stream_audio(self.generate_frames(), notification_handler=recording_stopper.stop_recording,
audio_type=self.audio_type())
recording_stopper.stop_recording(None)
return query
def start_recording(self):
self._queue.queue.clear()
self.audio_to_frames()
def stop_recording(self):
self._queue.queue.clear()
def is_recording(self):
return not (self._queue.empty())
def generate_frames(self):
while not self._queue.empty():
data = self._queue.get_nowait()
if data:
now = datetime.datetime.utcnow()
seconds_since_last = (now - self._last_chunk_time).total_seconds()
if seconds_since_last < self.time_between_chunks:
time.sleep(self.time_between_chunks - seconds_since_last)
self._last_chunk_time = now
yield data
def audio_to_frames(self):
while True:
data = self.audio_file.read(self.chunk_size)
if not data:
break
self._queue.put(data)
def audio_type(self):
return 'audio/pcm;bits={};rate={}'.format(self.audio_file.header.bits_per_sample,
self.audio_file.header.sample_rate)
```
#### File: voysis/device/mic_device.py
```python
import sys
import threading
from select import select
import pyaudio
from voysis.device.device import Device
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as Queue
else:
import queue as Queue
class MicDevice(Device):
def __init__(self, **kwargs):
        Device.__init__(self, **kwargs)  # forward kwargs so a custom chunk_size is honoured
self.pyaudio_instance = pyaudio.PyAudio()
self.queue = Queue.Queue()
self.quit_event = threading.Event()
self.channels = kwargs.get('channels', 1)
self.sample_rate = kwargs.get('sample_rate', 16000)
self.audio_format = kwargs.get('audio_format', pyaudio.paInt16)
self.device_index = None
def _callback(self, in_data, frame_count, time_info, status):
self.queue.put(in_data)
return None, pyaudio.paContinue
def stream(self, client, recording_stopper):
print("Ready to capture your voice query")
input("Press ENTER to start recording")
query = None
self.start_recording()
recording_stopper.started()
try:
def keyboard_stop():
print("Press ENTER to stop recording (or wait for VAD)")
while self.is_recording():
res = select([sys.stdin], [], [], 1)
for sel in res[0]:
if sel == sys.stdin:
recording_stopper.stop_recording('user_stop')
keyboard_thread = threading.Thread(target=keyboard_stop)
keyboard_thread.daemon = True
keyboard_thread.start()
query = client.stream_audio(self.generate_frames(), notification_handler=recording_stopper.stop_recording,
audio_type=self.audio_type())
recording_stopper.stop_recording(None)
except ValueError:
pass
return query
def start_recording(self):
self.stream = self.pyaudio_instance.open(
input=True,
start=False,
format=self.audio_format,
channels=self.channels,
rate=self.sample_rate,
frames_per_buffer=self.chunk_size,
stream_callback=self._callback,
input_device_index=self.device_index
)
self.quit_event.clear()
self.queue.queue.clear()
self.stream.start_stream()
def stop_recording(self):
self.stream.stop_stream()
self.quit_event.set()
def is_recording(self):
return not(self.quit_event.is_set())
def generate_frames(self):
self.quit_event.clear()
try:
while not self.quit_event.is_set():
try:
frames = self.queue.get(block=False)
if not frames:
break
yield frames
except Queue.Empty:
pass
except StopIteration:
self.stream.close()
self.pyaudio_instance.terminate()
raise
        return  # end the generator normally; raising StopIteration here is a RuntimeError under PEP 479 (Python 3.7+)
def audio_type(self):
return "audio/pcm;bits={};rate={}".format(
pyaudio.get_sample_size(self.audio_format) * 8,
self.sample_rate)
```
|
{
"source": "jfarrugia-uom/hyperstar",
"score": 3
}
|
#### File: hyperstar/batch_sim/nn_vec.py
```python
from .argmaxk import argmaxk_rows
import numpy as np
import psutil
from sys import stderr
__author__ = 'test'
def nn_vec_basic(arr1, arr2, topn, sort=True, return_sims=False, nthreads=8):
"""
For each row in arr1 (m1 x d) find topn most similar rows from arr2 (m2 x d). Similarity is defined as dot product.
Please note, that in the case of normalized rows in arr1 and arr2 dot product will be equal to cosine and will be
    monotonically decreasing function of Euclidean distance.
:param arr1: array of vectors to find nearest neighbours for
:param arr2: array of vectors to search for nearest neighbours in
:param topn: number of nearest neighbours
:param sort: indices in i-th row of returned array should sort corresponding rows of arr2 in descending order of
    similarity to i-th row of arr1
:param return_sims: return similarities along with indices of nearest neighbours
:param nthreads:
:return: array (m1 x topn) where i-th row contains indices of rows in arr2 most similar to i-th row of m1, and, if
return_sims=True, an array (m1 x topn) of corresponding similarities.
"""
sims = np.dot(arr1, arr2.T)
best_inds = argmaxk_rows(sims, topn, sort=sort, nthreads=nthreads)
if not return_sims:
return best_inds
# generate row indices corresponding to best_inds (just current row id in each row) (m x k)
rows = np.arange(best_inds.shape[0], dtype=np.intp)[:, np.newaxis].repeat(best_inds.shape[1], axis=1)
return best_inds, sims[rows, best_inds]
def nn_vec(m1, m2, topn, sort=True, return_sims=False, nthreads=8, USE_MEM_PERCENT = 0.3,verbose=True):
ndists = m1.shape[0] * m2.shape[0] # number of distances
if m1.shape[0] < 2 or ndists < 10**7: # cannot or need not split m1 into batches
return nn_vec_basic(m1, m2, topn=topn, sort=sort, return_sims=return_sims, nthreads=nthreads)
# estimate memory required to store results:
# best_inds: m1.shape[0] * topn * tmp1.itemsize, dists: m1.shape[0] * topn * tmp2.itemsize
tmp_inds, tmp_dists = nn_vec_basic(m1[:2,:], m2[:2,:], topn=2, sort=False, return_sims=True, nthreads=1)
res_mem = m1.shape[0] * topn * ( tmp_inds.itemsize + (tmp_dists.itemsize if return_sims else 0) )
amem = psutil.virtual_memory().available
use_mem = (amem - res_mem) * USE_MEM_PERCENT
dists_mem = ndists * tmp_dists.itemsize # memory required for the whole distances matrix
num_batches = int(np.ceil(dists_mem / use_mem))
batch_size = int(np.ceil(1.0 * m1.shape[0] / num_batches))
if verbose:
print ( 'Full distances matrix will occupy %.2fG; we would like to occupy %.2fG from %.2fG of available memory...' % \
(1.*dists_mem/2**30, 1.*use_mem/2**30, 1.*amem/2**30) )
print ('... processing in %d batches of %d rows' % (num_batches, batch_size) )
res_inds, res_dists = None, None
for st in range(0, m1.shape[0], batch_size):
en = st+batch_size
if verbose:
print ( 'Processing rows %d-%d from %d' % (st, min(en-1, m1.shape[0]), m1.shape[0]) )
res = nn_vec_basic(m1[st:en,:], m2, topn=topn, sort=sort, return_sims=return_sims, nthreads=nthreads)
res0 = res[0] if return_sims else res
res_inds = np.vstack([res_inds, res0]) if res_inds is not None else res0
if return_sims:
res_dists = np.vstack([res_dists, res[1]]) if res_dists is not None else res[1]
return (res_inds, res_dists) if return_sims else res_inds
```
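A tiny usage example for nn_vec_basic() with unit-normalised rows, so the dot products are cosines; the vectors are made up and the batch_sim package is assumed to be importable:
```python
import numpy as np
from batch_sim.nn_vec import nn_vec_basic

queries = np.array([[1.0, 0.0], [0.0, 1.0]])
corpus = np.array([[0.6, 0.8], [1.0, 0.0], [0.0, 1.0]])

inds, sims = nn_vec_basic(queries, corpus, topn=2, return_sims=True, nthreads=1)
# For the first query [1, 0]: nearest corpus rows are 1 (cos=1.0) then 0 (cos=0.6)
print(inds)
print(sims)
```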
#### File: jfarrugia-uom/hyperstar/crim_data.py
```python
import numpy as np
from itertools import cycle
from copy import deepcopy
import random
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from collections import defaultdict
# borrowed from https://github.com/gbcolborne/hypernym_discovery/blob/master/train.py
def make_sampler(things):
""" Make generator that samples randomly from a list of things. """
nb_things = len(things)
shuffled_things = deepcopy(things)
for i in cycle(range(nb_things)):
if i == 0:
random.shuffle(shuffled_things)
yield shuffled_things[i]
class CrimData:
def __init__(self, args):
# embeddings model
self.w2v = args['w2v']
# training tuples
train = args['train']
# test tuples
test = args['test']
# validation tuples
validation = args['validation']
# synonyms
self.synonyms = args['synonyms']
# if set to -1 then we will use full vector space vocab
# otherwise use indicated size
self.limited_vocab_n = args['limited_vocab_n']
if self.limited_vocab_n > -1:
print ("Creating limited vocabulary of %d" % (self.limited_vocab_n))
# collect words for exercise
flat_synonym = [word for v in self.synonyms.values() for word in v]
hyponyms = list(set([x for x,y in train + test + validation] ))
hypernyms = list(set([y for x,y in train + test + validation] ))
# dataset set vocab
vocab = list(set(hyponyms + hypernyms + flat_synonym))
vocab_len = len(vocab)
print ("Dataset vocabulary size is %d" % (vocab_len))
model_words = list(self.w2v.vocab.keys())
# sample words from vector space; sample more words than requested to handle collisions with dataset words
random_words = np.random.choice(model_words, (self.limited_vocab_n+10000), replace=False)
vocab = vocab + [w for w in random_words.tolist() if w not in vocab][:self.limited_vocab_n - vocab_len]
print ("Truncated vocab length is %d" % (len(vocab)))
else:
# choose all words in vector space
vocab = list(self.w2v.vocab.keys())
# create tokenizer from embeddings model
self.tokenizer = Tokenizer(filters='', lower=False)
# fit on vocab
self.tokenizer.fit_on_texts(vocab)
print ("Vocab size is %d words" % (len(self.tokenizer.index_word)))
# initialise negative word sampler
print ("Initialising negative sampler")
self.negative_sampler = make_sampler(list(self.tokenizer.word_index.values()))
print ("Tokenising all dataset tuples")
# tokenize dataset -> convert to numbers which will serve as embeddings lookup keys
self.all_data_token = self.tokenizer.texts_to_sequences([[x,y] for x, y in train + test + validation])
# create hypernym dictionary lookup
self.hypernym_id_lookup = defaultdict(list)
for x, y in self.all_data_token:
self.hypernym_id_lookup[x].append(y)
# disable default factory
self.hypernym_id_lookup.default_factory = None
print ("Creating embeddings matrix")
# create embeddings matrix
self.embeddings_matrix = np.zeros( (len(self.tokenizer.index_word) + 1, 300) )
for k, v in self.tokenizer.index_word.items():
self.embeddings_matrix[k] = self.w2v[v]
            # vectors should already be normalised
#self.embeddings_matrix[k] /= np.linalg.norm(emb_matrix[k])
print ("Done!")
# get list of padded synonyms
def sample_synonyms(self, word_id, sample_length):
        # convert word_id to the word to look up in the synonym dictionary
word = self.tokenizer.index_word[word_id]
if word in self.synonyms:
_syn = self.synonyms[word]
else:
_syn = []
# convert list to embeddings index array
syn_list = np.asarray(self.tokenizer.texts_to_sequences([_syn])[0])
result = np.asarray([])
# if we have enough synonyms, we can randomly sample length-1 from list and add the hyponym itself to
# the list
if (sample_length > 1 and len(syn_list) >= (sample_length-1)):
result = np.random.choice(syn_list, sample_length-1, replace=False)
result = np.append(result, word_id)
        # otherwise, we pick all synonyms and pad the sequence to match the model's fixed-size input
else:
result = np.append(syn_list, word_id)
result = pad_sequences([result], sample_length, padding='post', value=word_id)
# we're expecting 1-D vector
return result.flatten()
def get_negative_random(self, word_id, neg_count):
neg_samples = []
while len(neg_samples) < neg_count:
tmp_neg = next(self.negative_sampler)
if tmp_neg not in self.hypernym_id_lookup[word_id]:
neg_samples.append(tmp_neg)
return neg_samples
def get_augmented_batch(self, query_batch, neg_count, syn_count):
# create synonym equivalent in ids, prepending the hyponym to the list of synonyms
query_input = np.zeros((len(query_batch) * (neg_count+1), 1), dtype='int32')
hyper_input = np.zeros((len(query_batch) * (neg_count+1), 1), dtype='int32')
synonym_input = np.zeros((len(query_batch) * (neg_count+1), syn_count), dtype='int32')
y_input = np.zeros(len(query_batch) * (neg_count+1))
for idx, (query, hyper) in enumerate(query_batch):
query_input[idx * (neg_count+1)] = np.asarray(query)
hyper_input[idx * (neg_count+1)] = np.asarray(hyper)
synonym_input[idx * (neg_count+1)] = self.sample_synonyms(query, syn_count)
y_input[idx * (neg_count+1)] = 1
if neg_count > 0:
negatives = self.get_negative_random(word_id=query, neg_count=neg_count)
for m, neg in enumerate(negatives):
query_input[(idx * (neg_count+1)) + (m + 1)] = np.asarray(query)
hyper_input[(idx * (neg_count+1)) + (m + 1)] = np.asarray(neg)
synonym_input[(idx * (neg_count+1)) + (m + 1)] = self.sample_synonyms(query, syn_count)
return query_input, hyper_input, synonym_input, y_input
def token_to_words(self, dataset):
_q = self.tokenizer.sequences_to_texts(dataset[:,0].reshape(-1,1))
_h = self.tokenizer.sequences_to_texts(dataset[:,1].reshape(-1,1))
return list(zip(_q, _h))
```
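A quick illustration of make_sampler() from the module above: it cycles through a private shuffled copy of the list and reshuffles every time the cycle wraps, so each block of len(things) draws is a full permutation:
```python
from crim_data import make_sampler

sampler = make_sampler([1, 2, 3, 4])
draws = [next(sampler) for _ in range(8)]
# e.g. [3, 1, 4, 2, 2, 4, 1, 3] - every block of four draws covers all four values
print(draws)
```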
#### File: jfarrugia-uom/hyperstar/evaluate.py
```python
from batch_sim.nn_vec import nn_vec
import argparse
import csv
import glob
import os
import pickle
import re
import sys
#from gensim.models.word2vec import Word2Vec
from gensim.models import KeyedVectors
from collections import defaultdict
import numpy as np
from projlearn import MODELS
from multiprocessing import cpu_count
#parser = argparse.ArgumentParser(description='Evaluation.')
#parser.add_argument('--w2v', default='all.norm-sz100-w10-cb0-it1-min100.w2v', nargs='?', help='Path to the #word2vec model.')
#parser.add_argument('--test', default='test.npz', nargs='?', help='Path to the test set.')
#parser.add_argument('--subsumptions', default='subsumptions-test.txt', nargs='?', help='Path to the test subsumptions.')
#parser.add_argument('--non_optimized', action='store_true', help='Disable most similar words calculation optimization.')
#parser.add_argument('--threads', nargs='?', type=int, default=cpu_count(), help='Number of threads.')
#parser.add_argument('path', nargs='*', help='List of the directories with results.')
#args = vars(parser.parse_args())
#if not len(sys.argv) > 1:
# print('Usage: %s path...' % (sys.argv[0]), file=sys.stderr)
# sys.exit(1)
def extract(clusters, Y_hat_clusters):
cluster_indices = {cluster: 0 for cluster in Y_hat_clusters}
Y_all_hat = []
for cluster in clusters:
Y_hat = Y_hat_clusters[cluster][cluster_indices[cluster]]
cluster_indices[cluster] += 1
Y_all_hat.append(Y_hat)
assert sum(cluster_indices.values()) == len(clusters)
return np.array(Y_all_hat)
# NOTE: legacy helpers kept from the original command-line script; compute_ats reads a module-level
# `subsumptions_test` list, which is only defined in the commented-out code further below.
def compute_ats(measures):
    return [sum(measures[j].values()) / len(subsumptions_test) for j in range(len(measures))]
def compute_auc(ats):
return sum([ats[j] + ats[j + 1] for j in range(0, len(ats) - 1)]) / 2
class Evaluator:
def __init__(self, args):
self.args = args;
# load test vectors
with np.load(self.args['test']) as npz:
self.X_index_test = npz['X_index']
self.Y_all_test = npz['Y_all']
self.Z_all_test = npz['Z_all']
self.X_all_test = self.Z_all_test[self.X_index_test[:, 0], :]
# load reference for gold-standard validation data fold
self.subsumptions_test = self.args['subsumptions']
# load reference to embeddings
self.w2v = self.args['w2v']
print (len(self.subsumptions_test), self.X_all_test.shape[0])
# confirm that validation gold data is the same size as equivalent vector data
assert len(self.subsumptions_test) == self.X_all_test.shape[0]
def __call__(self, model_name):
predictions = {}
print('Evaluating "%s" on "%s".' % (model_name, self.args['test']), flush=True)
# load k-means model
kmeans = pickle.load(open(os.path.join(".", 'kmeans.pickle'), 'rb'))
print('The number of clusters is %d.' % (kmeans.n_clusters), flush=True)
# partition test data according to k-means model
clusters_test = kmeans.predict(self.Y_all_test - self.X_all_test)
try:
with np.load(os.path.join(".", '%s.test.npz') % model_name) as npz:
Y_hat_clusters = {int(cluster): npz[cluster] for cluster in npz.files}
except FileNotFoundError:
Y_hat_clusters = {}
if kmeans.n_clusters != len(Y_hat_clusters):
print('Missing the output for the model "%s"!' % model_name, file=sys.stderr, flush=True)
return predictions
# get estimated hypernyms for each term in test/validation set
Y_all_hat = extract(clusters_test, Y_hat_clusters)
# ensure we have the same number of estimates as we have of test terms
assert len(self.subsumptions_test) == Y_all_hat.shape[0]
# compute unit-norm of hypernym estimates
Y_all_hat_norm = Y_all_hat / np.linalg.norm(Y_all_hat,axis=1)[:,np.newaxis]
# find similar words
print('nn_vec...')
similar_indices = nn_vec(Y_all_hat_norm, self.w2v.syn0norm, topn=15, sort=True, return_sims=False,
nthreads=self.args['threads'], verbose=False)
        print('converting nn_vec results...')
similar_words = [[self.w2v.index2word[ind] for ind in row] for row in similar_indices]
print('done')
for i, (hyponym, hypernym) in enumerate(self.subsumptions_test):
predictions[hyponym] = similar_words[i]
return predictions
"""
We can comment out loading the embeddings since we're keeping a reference of them in the test harness
WD = os.path.dirname(os.path.realpath(__file__))
w2v = KeyedVectors.load_word2vec_format(os.path.join(WD, args['w2v']),
binary=True, unicode_errors='ignore')
w2v.init_sims(replace=True)
"""
"""
We can comment this out because: i) we have already removed the OOV terms in the test harness; ii) we're passing directly a memory reference to a test fold
with open(args['subsumptions']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
subsumptions_test.append((row[0], row[1]))
# remove out-of-vocab entries
def confirmVocab(wordList):
return [*filter(lambda x: x[0] in w2v.vocab and x[1] in w2v.vocab, wordList)]
subsumptions_test = confirmVocab(subsumptions_test)
"""
"""
for path in args['path']:
print('Doing "%s" on "%s" and "%s".' % (path, args['test'], args['subsumptions']), flush=True)
kmeans = pickle.load(open(os.path.join(path, 'kmeans.pickle'), 'rb'))
print('The number of clusters is %d.' % (kmeans.n_clusters), flush=True)
clusters_test = kmeans.predict(Y_all_test - X_all_test)
for model in MODELS:
try:
with np.load(os.path.join(path, '%s.test.npz') % model) as npz:
Y_hat_clusters = {int(cluster): npz[cluster] for cluster in npz.files}
except FileNotFoundError:
Y_hat_clusters = {}
if kmeans.n_clusters != len(Y_hat_clusters):
print('Missing the output for the model "%s"!' % model, file=sys.stderr, flush=True)
continue
Y_all_hat = extract(clusters_test, Y_hat_clusters)
assert len(subsumptions_test) == Y_all_hat.shape[0]
measures = [{} for _ in range(10)]
if not args['non_optimized']:
# normalize Y_all_hat to make dot product equeal to cosine and monotonically decreasing function of euclidean distance
Y_all_hat_norm = Y_all_hat / np.linalg.norm(Y_all_hat,axis=1)[:,np.newaxis]
print('nn_vec...')
similar_indices = nn_vec(Y_all_hat_norm, w2v.syn0norm, topn=10, sort=True, return_sims=False, nthreads=args['threads'], verbose=False)
print('nn_vec results covert...')
similar_words = [[w2v.index2word[ind] for ind in row] for row in similar_indices]
print('done')
for i, (hyponym, hypernym) in enumerate(subsumptions_test):
if args['non_optimized']:
Y_hat = Y_all_hat[i].reshape(X_all_test.shape[1],)
actual = [w for w, _ in w2v.most_similar(positive=[Y_hat], topn=10)]
else:
actual = similar_words[i]
for j in range(0, len(measures)):
measures[j][(hyponym, hypernym)] = 1. if hypernym in actual[:j + 1] else 0.
if (i + 1) % 100 == 0:
ats = compute_ats(measures)
auc = compute_auc(ats)
ats_string = ', '.join(['A@%d=%.6f' % (j + 1, ats[j]) for j in range(len(ats))])
print('%d examples out of %d done for "%s/%s": %s. AUC=%.6f.' % (
i + 1,
len(subsumptions_test),
path,
model,
ats_string,
auc),
file=sys.stderr, flush=True)
ats = compute_ats(measures)
auc = compute_auc(ats)
ats_string = ', '.join(['A@%d=%.4f' % (j + 1, ats[j]) for j in range(len(ats))])
print('For "%s/%s": overall %s. AUC=%.6f.' % (
path,
model,
ats_string,
auc),
flush=True)
"""
```
#### File: jfarrugia-uom/hyperstar/multiprojection_transfer.py
```python
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Embedding, Dot, Flatten, Concatenate, Dropout, Lambda, Subtract
from tensorflow.keras.initializers import Initializer, RandomNormal, Zeros, Ones
from tensorflow.keras.regularizers import l2, l1, l1_l2
from tensorflow.keras.constraints import UnitNorm, MinMaxNorm
from tensorflow.keras.optimizers import Adam, Adadelta
from tensorflow.python.framework import dtypes
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
import semeval_eval
import crim_evaluator
import multiprojection_model
# we'll model the embeddings layer as a separate model which we can reuse against the feature extraction
# elements downstream
def get_trainable_embeddings_model(embeddings_matrix, synonym_sample_n):
hypo_input = Input(shape=(1,), name='Hyponym')
# include this input for backward compatability only
neg_input = Input(shape=(1,), name='Negative')
hyper_input = Input(shape=(1,), name='Hypernym')
embeddings_layer_1 = Embedding(embeddings_matrix.shape[0],
embeddings_matrix.shape[1],
input_length=1, name='TermEmbedding',
embeddings_constraint = UnitNorm(axis=1))
hypo_embedding = embeddings_layer_1(hypo_input)
hyper_embedding = embeddings_layer_1(hyper_input)
# negative input will actually be ignored
embedding_model = Model(inputs=[hypo_input, neg_input, hyper_input],
outputs=[hypo_embedding, hyper_embedding])
    # inject pre-trained embeddings into this mini, reusable model/layer
embedding_model.get_layer(name='TermEmbedding').set_weights([embeddings_matrix])
embedding_model.get_layer(name='TermEmbedding').trainable = True
return embedding_model
class TransferModel(multiprojection_model.MultiProjModel):
def __init__(self, args):
# contains important bits such as the tokeniser, batch augmentation logic, etc.
self.data = args['data']
# model parameters
self.embeddings_layer = args['embeddings_layer']
self.batch_size = args['batch_size']
self.phi_k = args['phi_k']
self.lambda_c = args['lambda_c']
self.epochs = args['epochs']
self.negative_sample_n = args['negative_sample_n']
self.synonym_sample_n = args['synonym_sample_n']
self.lr = args['lr']
self.beta1 = args['beta1']
self.beta2 = args['beta2']
self.clip_value = args['clip_value']
# set patience > epochs to avoid early stop
self.patience = args['patience']
self.minimum_patience = args['minimum_patience']
# this model requires a load path to know where to load the weights from
self.save_path = args['save_path']
self.load_path = args['load_path']
self.eval_after_epoch = args['eval_after_epoch']
self.dropout_rate = args['dropout_rate']
# build new model + compile
# the new model will have frozen projections and trainable embeddings
self.model = self.build_model()
# this object generates the predictions from a model's learned parameters
self.evaluator = crim_evaluator.CrimEvaluator(self.data, self.model)
# now load the weights of some other model
self.load_base_model()
# maintain history object which contains training metrics
self.history = {metric:[] for metric in ['epoch', 'loss', 'test_loss', 'MAP', 'MRR']}
def reset_model(self, args):
# reset model parameters
self.save_path = args['save_path']
# modification that computes similarity of each synonymy vector against every projection
# and then calculates average similarity and regularises that
def build_model(self):
hypo_input = Input(shape=(1,), name='Hyponym')
neg_input = Input(shape=(self.synonym_sample_n,), name='Negative')
hyper_input = Input(shape=(1,), name='Hypernym')
hypo_embedding, hyper_embedding = self.embeddings_layer([hypo_input, neg_input, hyper_input])
hypo_embedding = Dropout(rate=self.dropout_rate, name='Dropout_Hypo')(hypo_embedding)
hyper_embedding = Dropout(rate=self.dropout_rate, name='Dropout_Hyper')(hyper_embedding)
phi_layer = []
for i in range(self.phi_k):
phi_layer.append(Dense(self.data.embeddings_matrix.shape[1],
activation=None, use_bias=False,
kernel_initializer = multiprojection_model.RandomIdentity(),
name='Phi%d' % (i)) (hypo_embedding))
# either merge phi layers in 1 or flatten single phi projection
if self.phi_k == 1:
# flatten tensors
phi = Flatten(name='Flatten_Phi')(phi_layer[0])
#hyper_embedding = Flatten(name='Flatten_Hyper')(hyper_embedding)
else:
phi = Concatenate(axis=1)(phi_layer)
phi = Dropout(rate=self.dropout_rate, name='Dropout_Phi')(phi)
# compute hypernym similarity to each projection
phi_hyper = Dot(axes=-1, normalize=True, name='SimHyper')([phi, hyper_embedding])
if self.phi_k > 1:
phi_hyper = Flatten(name='Flatten_PhiHyper')(phi_hyper)
prediction = Dense(1,
activation="sigmoid", name='Prediction',
use_bias=True,
kernel_initializer='random_normal', bias_initializer=Zeros(),
) (phi_hyper)
model = Model(inputs=[hypo_input, neg_input, hyper_input], outputs=prediction)
# freeze projection layer
for phi_projection in [l for l in model.layers if l.name.startswith('Phi')]:
phi_projection.trainable = False
adam = Adam(lr = self.lr, beta_1 = self.beta1, beta_2 = self.beta2, clipnorm=self.clip_value)
#adam = Adadelta()
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
return model
def save_model(self):
# load all weights including embeddings which would have been modified
print ("Saving model to %s now..." % (self.save_path))
weights = self.model.get_weights()
np.savez_compressed(self.save_path, weights=weights)
print ("Saving model to %s complete." % (self.save_path))
def load_model(self):
print ("Loading saved model from %s now..." % (self.save_path))
weights = np.load(self.save_path)
self.model.set_weights(weights['weights'].tolist())
# this object generates the predictions from a model's learned parameters
self.evaluator.set_model(self.model)
def load_base_model(self):
weights = np.load(self.load_path)
self.model.set_weights([self.data.embeddings_matrix]*1 + weights['weights'].tolist())
# this object generates the predictions from a model's learned parameters
self.evaluator.set_model(self.model)
```
#### File: hyperstar/projlearn/frobenius_loss.py
```python
import tensorflow as tf
from .baseline import Baseline
class FrobeniusLoss(Baseline):
"""
Using the Frobenius norm as the loss function for the baseline approach.
"""
def __init__(self, x_size, y_size, w_stddev, **kwargs):
super().__init__(x_size, y_size, w_stddev, **kwargs)
self.F_norm = tf.sqrt(tf.trace(tf.matmul(self.Y_error, tf.transpose(self.Y_error))))
self.loss = self.F_norm
```
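For reference, the quantity built above is the matrix Frobenius norm, ||E||_F = sqrt(trace(E E^T)) = sqrt(sum of squared entries); a quick numpy sanity check of that identity:
```python
import numpy as np

E = np.array([[1.0, -2.0], [3.0, 0.5]])
frobenius_via_trace = np.sqrt(np.trace(E @ E.T))
print(np.isclose(frobenius_via_trace, np.linalg.norm(E, 'fro')))   # True
```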
#### File: jfarrugia-uom/hyperstar/semeval_eval.py
```python
import sys
import numpy as np
class HypernymEvaluation:
def __init__(self, dataset):
self.dataset = dataset
def convert_hypernyms_to_one_line(self):
#ordered_queries = sorted(list(set(self.dataset[0])))
ordered_queries = sorted(list(set([x for (x,y) in self.dataset])))
one_line = {}
for w in ordered_queries:
word_hypernyms = [h for q, h in self.dataset if q == w]
one_line[w] = word_hypernyms
return one_line
# taken from task_scorer.py provided with shared task resources
def mean_reciprocal_rank(self, r):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
r = np.asarray(r).nonzero()[0]
return 1. / (r[0] + 1) if r.size else 0.
def precision_at_k(self, r, k, n):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return (np.mean(r)*k)/min(k,n)
# Modified from the first version. Now the gold elements are taken into account
def average_precision(self, r,n):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [self.precision_at_k(r, k + 1, n) for k in range(r.size)]
#Modified from the first version (removed "if r[k]"). All elements (zero and nonzero) are taken into account
if not out:
return 0.
return np.mean(out)
def mean_average_precision(self, r, n):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return self.average_precision(r,n)
# predictions is a dictionary whereby key is query term and value is a list of ranked hypernym predictions
def get_evaluation_scores(self, predictions):
all_scores = []
scores_names = ['MRR', 'MAP', 'P@1', 'P@5', 'P@10']
for query, gold_hyps in self.convert_hypernyms_to_one_line().items():
avg_pat1 = []
avg_pat2 = []
avg_pat3 = []
pred_hyps = predictions[query]
gold_hyps_n = len(gold_hyps)
r = [0 for i in range(15)]
for j in range(len(pred_hyps)):
# I believe it's not fair to bias evaluation on how many hypernyms were found in gold set
# if anything a shorter list (ex. because a hypernym is very particular) will already make
# it harder for a match to be found but if system returns correct hypernym in second place
# why should it be ignored?
if j < gold_hyps_n:
pred_hyp = pred_hyps[j]
if pred_hyp in gold_hyps:
r[j] = 1
avg_pat1.append(self.precision_at_k(r,1,gold_hyps_n))
avg_pat2.append(self.precision_at_k(r,5,gold_hyps_n))
avg_pat3.append(self.precision_at_k(r,10,gold_hyps_n))
mrr_score_numb = self.mean_reciprocal_rank(r)
map_score_numb = self.mean_average_precision(r,gold_hyps_n)
avg_pat1_numb = sum(avg_pat1)/len(avg_pat1)
avg_pat2_numb = sum(avg_pat2)/len(avg_pat2)
avg_pat3_numb = sum(avg_pat3)/len(avg_pat3)
score_results = [mrr_score_numb, map_score_numb, avg_pat1_numb, avg_pat2_numb, avg_pat3_numb]
all_scores.append(score_results)
return scores_names, all_scores
```
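A small worked example of the ranking metrics above. With relevance vector r = [0, 1, 0, ...] the first correct hypernym is ranked second, so MRR is 0.5; with two gold hypernyms, P@1 = 0 and P@5 = (0.2 * 5) / min(5, 2) = 0.5. The toy dataset passed to the constructor is made up:
```python
from semeval_eval import HypernymEvaluation

evaluator = HypernymEvaluation([("dog", "animal"), ("dog", "mammal")])  # toy dataset
r = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]   # correct hypernym ranked 2nd
print(evaluator.mean_reciprocal_rank(r))   # 0.5
print(evaluator.precision_at_k(r, 1, 2))   # 0.0
print(evaluator.precision_at_k(r, 5, 2))   # 0.5
```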
#### File: jfarrugia-uom/hyperstar/train.py
```python
import datetime
import glob
import os
import sys
import shutil
import pickle
import random
import numpy as np
import tensorflow as tf
from projlearn import *
#flags = tf.app.flags
#FLAGS = flags.FLAGS
#flags.DEFINE_string( 'model', 'baseline', 'Model name.')
#flags.DEFINE_string( 'train', 'train.npz', 'Training set.')
#flags.DEFINE_string( 'test', 'test.npz', 'Test set.')
#flags.DEFINE_float( 'stddev', .01, 'Value of stddev for matrix initialization.')
#flags.DEFINE_float( 'lambdac', .10, 'Value of lambda.')
#flags.DEFINE_integer('seed', 228, 'Random seed.')
#flags.DEFINE_integer('num_epochs', 300, 'Number of training epochs.')
#flags.DEFINE_integer('batch_size', 2048, 'Batch size.')
#flags.DEFINE_boolean('gpu', True, 'Try using GPU.')
class Trainer:
def train(self, config, model, data, callback=lambda: None):
train_op = tf.train.AdamOptimizer(epsilon=1.).minimize(model.loss)
train_losses, test_losses = [], []
train_times = []
with tf.Session(config=config) as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
feed_dict_train, feed_dict_test = {
model.X: data.X_train,
model.Y: data.Y_train,
model.Z: data.Z_train
}, {
model.X: data.X_test,
model.Y: data.Y_test,
model.Z: data.Z_test
}
steps = max(data.Y_train.shape[0] // self.FLAGS['batch_size'], 1)
print('Cluster %d: %d train items and %d test items available; using %d steps of %d items.' % (
data.cluster + 1,
data.X_train.shape[0],
data.X_test.shape[0],
steps,
min(self.FLAGS['batch_size'], data.X_train.shape[0])),
flush=True)
for epoch in range(self.FLAGS['num_epochs']):
X, Y, Z = data.train_shuffle()
for step in range(steps):
head = step * self.FLAGS['batch_size']
tail = (step + 1) * self.FLAGS['batch_size']
feed_dict = {
model.X: X[head:tail, :],
model.Y: Y[head:tail, :],
model.Z: Z[head:tail, :]
}
t_this = datetime.datetime.now()
sess.run(train_op, feed_dict=feed_dict)
t_last = datetime.datetime.now()
train_times.append(t_last - t_this)
if (epoch + 1) % 10 == 0 or (epoch == 0):
train_losses.append(sess.run(model.loss, feed_dict=feed_dict_train))
test_losses.append(sess.run(model.loss, feed_dict=feed_dict_test))
print('Cluster %d: epoch = %05d, train loss = %f, test loss = %f.' % (
data.cluster + 1,
epoch + 1,
train_losses[-1] / data.X_train.shape[0],
test_losses[-1] / data.X_test.shape[0]),
file=sys.stderr, flush=True)
t_delta = sum(train_times, datetime.timedelta())
print('Cluster %d done in %s.' % (data.cluster + 1, str(t_delta)), flush=True)
#callback(sess)
return sess.run(model.Y_hat, feed_dict=feed_dict_test)
def __init__(self, flags):
# initialise all variables
self.FLAGS = flags
random.seed(self.FLAGS['seed'])
tf.set_random_seed(self.FLAGS['seed'])
self.config = tf.ConfigProto()
with np.load(self.FLAGS['train']) as npz:
self.X_index_train = npz['X_index']
self.Y_all_train = npz['Y_all']
self.Z_all_train = npz['Z_all']
with np.load(self.FLAGS['test']) as npz:
self.X_index_test = npz['X_index']
self.Y_all_test = npz['Y_all']
self.Z_all_test = npz['Z_all']
self.X_all_train = self.Z_all_train[self.X_index_train[:, 0], :]
self.X_all_test = self.Z_all_test[self.X_index_test[:, 0], :]
self.kmeans = pickle.load(open('kmeans.pickle', 'rb'))
self.clusters_train = self.kmeans.predict(self.Y_all_train - self.X_all_train)
self.clusters_test = self.kmeans.predict(self.Y_all_test - self.X_all_test)
self.model = MODELS[self.FLAGS['model']](x_size=self.Z_all_train.shape[1],
y_size=self.Y_all_train.shape[1],
w_stddev=self.FLAGS['stddev'],
lambda_=self.FLAGS['lambdac'])
print(self.model, flush=True)
def __call__(self):
for path in glob.glob('%s.k*.trained*' % self.FLAGS['model']):
print('Removing a stale file: "%s".' % path, flush=True)
os.remove(path)
if os.path.isfile('%s.test.npz' % self.FLAGS['model']):
print('Removing a stale file: "%s".' % ('%s.test.npz' % self.FLAGS['model']), flush=True)
os.remove('%s.test.npz' % self.FLAGS['model'])
Y_hat_test = {}
for cluster in range(self.kmeans.n_clusters):
data = Data(
cluster, self.clusters_train, self.clusters_test,
self.X_index_train, self.Y_all_train, self.Z_all_train,
self.X_index_test, self.Y_all_test, self.Z_all_test
)
#saver = tf.train.Saver()
#saver_path = './%s.k%d.trained' % (self.FLAGS['model'], cluster + 1)
Y_hat_test[str(cluster)] = self.train(self.config, self.model, data)
#,callback=lambda sess: saver.save(sess, saver_path))
#print('Writing the output model to "%s".' % saver_path, flush=True)
test_path = '%s.test.npz' % self.FLAGS['model']
np.savez_compressed(test_path, **Y_hat_test)
print('Writing the test data to "%s".' % test_path)
"""
def main(_):
random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
if not FLAGS.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
config = tf.ConfigProto()
with np.load(FLAGS.train) as npz:
X_index_train = npz['X_index']
Y_all_train = npz['Y_all']
        Z_all_train = npz['Z_all']
with np.load(FLAGS.test) as npz:
X_index_test = npz['X_index']
Y_all_test = npz['Y_all']
Z_all_test = npz['Z_all']
X_all_train = Z_all_train[X_index_train[:, 0], :]
X_all_test = Z_all_test[X_index_test[:, 0], :]
kmeans = pickle.load(open('kmeans.pickle', 'rb'))
clusters_train = kmeans.predict(Y_all_train - X_all_train)
clusters_test = kmeans.predict(Y_all_test - X_all_test)
model = MODELS[FLAGS.model](x_size=Z_all_train.shape[1], y_size=Y_all_train.shape[1], w_stddev=FLAGS.stddev, lambda_=FLAGS.lambdac)
print(model, flush=True)
for path in glob.glob('%s.k*.trained*' % FLAGS.model):
print('Removing a stale file: "%s".' % path, flush=True)
os.remove(path)
if os.path.isfile('%s.test.npz' % FLAGS.model):
print('Removing a stale file: "%s".' % ('%s.test.npz' % FLAGS.model), flush=True)
os.remove('%s.test.npz' % FLAGS.model)
Y_hat_test = {}
for cluster in range(kmeans.n_clusters):
data = Data(
cluster, clusters_train, clusters_test,
X_index_train, Y_all_train, Z_all_train,
X_index_test, Y_all_test, Z_all_test
)
saver = tf.train.Saver()
saver_path = './%s.k%d.trained' % (FLAGS.model, cluster + 1)
Y_hat_test[str(cluster)] = train(config, model, data, callback=lambda sess: saver.save(sess, saver_path))
print('Writing the output model to "%s".' % saver_path, flush=True)
test_path = '%s.test.npz' % FLAGS.model
np.savez_compressed(test_path, **Y_hat_test)
print('Writing the test data to "%s".' % test_path)
# Added by jfarr on 8-10-2018
# move generated checkpoint files,
#directory = './ft-300-k%02d-l%.1f/' % (cluster+1, lambdac)
directory = './glove840b-k%02d-l%.1f/' % (cluster+1, FLAGS.lambdac)
# create directory if it doesn't exist already
if not os.path.exists(directory):
print ("Create directory %s" % (directory))
os.makedirs(directory)
# move all interim checkpoints and final model to designated directory
output_files = glob.glob('%s.*.trained*'%(FLAGS.model))
if os.path.exists('%s.test.npz'%(FLAGS.model)):
output_files.append('%s.test.npz'%(FLAGS.model))
for file in output_files:
os.rename("./%s"%(file), "%s%s"%(directory, file))
# copy cluster pickle to directory as well
shutil.copy2('./kmeans.pickle', directory)
if __name__ == '__main__':
tf.app.run()
"""
```
|
{
"source": "JFASOF/EduProject_Django3",
"score": 2
}
|
#### File: EduProject_con/courses/models.py
```python
from django.db import models
# each model class corresponds to a database table
class Course(models.Model):
name=models.CharField(max_length=200,unique=True,verbose_name="Kurs Adı :",help_text="Bu Kursun Adını Giriniz.")
    desc=models.TextField(blank=True,null=True,verbose_name="Kurs Açıklaması :")  # blank applies to forms (end-user input), null to the database column
image=models.ImageField(verbose_name="Kurs Görseli:",upload_to="courses/%Y/%m/%d/",default="courses/default_course.jpg")
    date=models.DateTimeField(auto_now=True)  # auto_now refreshes the timestamp on every save
available=models.BooleanField(verbose_name="Kurs Erişimi:",default=True)
def __str__(self):
return self.name
```
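A short shell-style sketch of how this model might be used (for instance from python manage.py shell), assuming the app is registered as courses and migrations have been applied; the field values are made up:
```python
from courses.models import Course

# Create a course; the image falls back to the default set on the field
course = Course.objects.create(name="Python 101", desc="Introductory Python course")
print(course)                                      # __str__ returns the course name
print(Course.objects.filter(available=True).count())
```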
|
{
"source": "JFASOF/KodluyoruzPythonEgitimi",
"score": 4
}
|
#### File: JFASOF/KodluyoruzPythonEgitimi/W3-OOPandRepeat.py
```python
x='global x'
def func():
#local scope
x='local x'
print(x)
func()
print(x)
isim='Tülin'
def degisIsim(yeni_isim):
global isim
isim=yeni_isim
print(isim)
degisIsim('Tuğçe')
print(isim)
def square(num):
return num ** 2
print(square(6))
square=lambda num:num ** 2
print(square)
TulinHesap={
'adsoyad':' <NAME>',
'hesapNo': '13245678',
'bakiye': 3000,
'ekHesap': 2000
}
MelekHesap={
'adsoyad':' <NAME>',
'hesapNo': '13245678',
'bakiye': 2000,
'ekHesap': 1000
}
def paracekme(hesap,miktar):
print(f"Merhaba {hesap['adsoyad']}")
if(hesap['bakiye']>=miktar):
hesap['bakiye']-= miktar
print("Paranızı Alabilirsiniz.")
bakiyem(hesap)
else:
toplam_bakiye=hesap['bakiye']+hesap['ekHesap']
if(toplam_bakiye>=miktar):
ekHesapKullanilsinmi=input('Ek hesap kullanmak istiyor musunuz ? (e/h)')
if ekHesapKullanilsinmi=='e':
ekHesapKullanilacakMiktar=miktar-hesap['bakiye']
hesap['bakiye']=0
hesap['ekHesap']-=ekHesapKullanilacakMiktar
print("Paranızı Alabilirsiniz.")
bakiyem(hesap)
else:
print(f"{hesap['hesapNo']} nolu hesabınızda {hesap['bakiye']} bulunmamaktadır.")
else:
print('üzgünüz bakiye yetersiz')
bakiyem(hesap)
def bakiyem(hesap):
print(f"{hesap['hesapNo']} nolu hesabınızda {hesap['bakiye']} TL bulunmaktadır. Ek hesap limitiniz ise {hesap['ekHesap']} TL bulunmaktadır.")
paracekme(TulinHesap,3000)
paracekme(TulinHesap,1700)
class Dizi():
def __init__(self,diziAd,diziYonetmen,diziZaman):
self.diziAd=diziAd
self.diziYonetmen=diziYonetmen
self.diziZaman=diziZaman
def __str__(self):
return f"{self.diziAd} by {self.diziYonetmen}"
def __del__(self):
print('film objesi silindi')
dizi=Dizi('The 100','....',50)
class Circle:
#class object attribute
pi=3.14
def __init__(self,yaricap=1):
self.yaricap=yaricap
def cevrehesap(self):
return 2*self.pi*self.yaricap
def alanhesap(self):
return self.pi*(self.yaricap**2)
c1=Circle()
c2=Circle(5)
print(f'c1:alan={c1.alanhesap()} çevre={c1.cevrehesap()}')
print(f'c2:alan={c2.alanhesap()} çevre={c2.cevrehesap()}')
class Person():
def __init__(self, fname, lname):
self.firstName = fname
self.lastName = lname
print('Person Created')
def who_am_i(self):
print('I am a person')
def eat(self):
print('I am eating')
p1=Person('Tülin','Kışlak')
print(p1.firstName + ' ' + p1.lastName)
class Student(Person):
def __init__(self,fname,lname,number):
Person.__init__(self,fname,lname)
self.number=number
print('Student Created')
def who_am_i(self):
print('I am a Student')
def merhaba(self):
print('Hello I am a Student')
s1=Student('Mehmet','Bülbül',134)
print(s1.firstName + ' ' + s1.lastName+ ' '+ str(s1.number))
s1.merhaba()
s1.who_am_i()
#OOP
```
|
{
"source": "JFASOF/Plaka_Tespit_ALPR",
"score": 3
}
|
#### File: JFASOF/Plaka_Tespit_ALPR/plaka_tespit.py
```python
import cv2
import pytesseract
import matplotlib.pyplot as plt
# Read the input image
gorsel = cv2.imread('bmw.jpg')
# Convert the image to grayscale
gray_gorsel = cv2.cvtColor(gorsel, cv2.COLOR_BGR2GRAY)
def plot_images(gorsel1,gorsel2,baslik1="",baslik2=""):
fig=plt.figure(figsize=[15,15])
ax1=fig.add_subplot(121)#alt değer
ax1.imshow(gorsel1,cmap="gray")
ax1.set(xticks=[],yticks=[],title=baslik1)
ax2=fig.add_subplot(122)
ax2.imshow(gorsel2,cmap="gray")
ax2.set(xticks=[],yticks=[],title=baslik2)
plot_images(gorsel,gray_gorsel,baslik1="Araç:BMW",baslik2="Araç:BMW")
blur = cv2.bilateralFilter(gray_gorsel, 11,90, 90)
plot_images(gray_gorsel, blur)
# Canny edge detection
canny_edge = cv2.Canny(gray_gorsel, 170, 200)
# Find the contours along the detected edges
contours, new = cv2.findContours(canny_edge.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours=sorted(contours, key = cv2.contourArea, reverse = True)[:30]
# Variables for the licence plate contour and its x, y coordinates
plaka_lisans_kontur = None
plaka_lisans = None
x = None
y = None
w = None
h = None
# Find the contour with 4 potential corners and build the ROI (region of interest) around it
for contour in contours:
    # Compute the contour perimeter; the contour must be closed
    perimeter = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)
    if len(approx) == 4:  # check whether the contour is a rectangle
plaka_lisans_kontur = approx
x, y, w, h = cv2.boundingRect(contour)
plaka_lisans = gray_gorsel[y:y + h, x:x + w]
break
# Remove unneeded detail from the detected plate region before sending it to Tesseract
plaka_lisans = cv2.bilateralFilter(plaka_lisans,11, 17, 17)
(thresh, plaka_lisans) = cv2.threshold(plaka_lisans, 150, 180, cv2.THRESH_BINARY)
# Text recognition: point pytesseract at the Tesseract executable
pytesseract.pytesseract.tesseract_cmd=r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'
text = pytesseract.image_to_string(plaka_lisans)
#plakayı çizme ve yazma
gorsel2 = cv2.rectangle(gorsel, (x,y), (x+w,y+h), (0,0,255), 3)
gorsel2 = cv2.putText(gorsel, text, (x-20,y-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 3, cv2.LINE_AA)
print("Araç Plakası :", text)
cv2.imshow("Arac Plaka Tanima",gorsel2)
cv2.imshow("Arac GrayScale",gray_gorsel)
blur = cv2.bilateralFilter(gray_gorsel, 11,90, 90)
plot_images(gray_gorsel, blur)
cv2.imshow("<NAME>",blur)
#Canny <NAME>
canny_edge = cv2.Canny(gray_gorsel, 170, 200)
cv2.imshow("Arac Canny_Kenarlik_Cizimi",canny_edge)
cv2.waitKey(0)
```
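One portability note: `cv2.findContours` returns two values in OpenCV 2.4 and 4.x (as the script above assumes) but three in OpenCV 3.x. A small version-agnostic helper, sketched below as an optional addition rather than part of the original script, sidesteps the unpacking error.

```python
# Optional sketch: contour extraction that works across OpenCV versions.
import cv2

def find_contours(edges):
    result = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3.x returns (image, contours, hierarchy); 2.4/4.x return (contours, hierarchy)
    return result[0] if len(result) == 2 else result[1]
```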
|
{
"source": "jfathi/document-understanding-solution",
"score": 2
}
|
#### File: lambda/apiprocessor/documents.py
```python
# Licensed under the Apache License, Version 2.0 (the License). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
#####################################################################################################################
import boto3
def scanDocuments(table, pageSize, nextToken=None):
if(nextToken is not None):
return table.scan(
Limit=pageSize,
ExclusiveStartKey={"documentId": nextToken}
)
return table.scan(
Limit=pageSize,
)
def scanDocumentTotals(table, nextToken=None):
if(nextToken is not None):
return table.scan(
Select="COUNT",
ExclusiveStartKey={"documentId": nextToken}
)
return table.scan(
Select="COUNT",
)
def paginateDocuments(table, pageSize, nextToken=None):
scanning = True
documents = []
total = 0
nextCountToken = None
while(scanning is True):
response = scanDocumentTotals(table, nextCountToken)
total += response["Count"]
if("LastEvaluatedKey" in response):
nextCountToken = response["LastEvaluatedKey"]["documentId"]
else:
scanning = False
scanning = True
while(scanning is True):
limit = pageSize - len(documents)
response = scanDocuments(table, limit, nextToken)
if("Items" in response):
documents = documents + response["Items"]
if(len(documents) == pageSize):
scanning = False
if("LastEvaluatedKey" in response):
nextToken = response["LastEvaluatedKey"]["documentId"]
else:
scanning = False
nextToken = None
if(len(documents) == total):
scanning = False
nextToken = None
out = {
"documents": documents,
"Total": total
}
if(nextToken is not None):
out["nextToken"] = nextToken
return out
def getDocuments(request):
pageSize = 25
documentsTable = request["documentsTable"] if "documentsTable" in request else None
response = {}
if(documentsTable is not None):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(documentsTable)
nextToken = request["nextToken"] if "nextToken" in request else None
response = paginateDocuments(table, pageSize, nextToken)
if("Items" in response):
print("Total items in response {}".format(len(response["Items"])))
return response
```
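A rough sketch of how `getDocuments` could be exercised locally; the `moto` mock, region, table name and module import path are assumptions added for illustration, not part of the original Lambda.

```python
# Hedged usage sketch: table name, region and the moto mock are illustrative
# assumptions only.
import os
os.environ.setdefault('AWS_DEFAULT_REGION', 'us-east-1')

import boto3
from moto import mock_dynamodb2

from documents import getDocuments  # assumes this module is importable as 'documents'


@mock_dynamodb2
def demo():
    dynamodb = boto3.client('dynamodb', region_name='us-east-1')
    dynamodb.create_table(
        TableName='DocumentsTestTable',  # hypothetical table name
        KeySchema=[{'AttributeName': 'documentId', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'documentId', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
    )
    # getDocuments scans up to 25 items per call and adds a nextToken to the
    # response whenever more documents remain.
    response = getDocuments({'documentsTable': 'DocumentsTestTable'})
    print(response['Total'], response.get('nextToken'))


demo()
```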
#### File: lambda/kendraIndexPoller/lambda_function.py
```python
import boto3
import json
import datetime
def convert_datetime_to_string(obj):
if isinstance(obj, datetime.datetime):
return obj.__str__()
def on_create(event, context):
kendra_client = boto3.client('kendra')
# get status of index
response = kendra_client.describe_index(Id=event['PhysicalResourceId'])
is_complete = False
status = response['Status']
if status == "CREATING":
print("Still creating kendra index")
elif status == "ACTIVE":
print("Kendra index is now active")
return { 'IsComplete': True }
elif status == "FAILED":
# throw an error
raise Exception("Kendra index creation failed with reason: {}".format(response['ErrorMessage']))
elif status == "DELETING" or status == "SYSTEM_UPDATING":
raise Exception("Kendra index creation shows inconsistent status code, please fix and try again. Reason:{}".format(response['ErrorMessage']))
return { 'IsComplete': is_complete }
def on_delete(event, context):
kendra_client = boto3.client('kendra')
DUSkendraIndexId = event['PhysicalResourceId']
# check if the list_indices has the index id, if yes, then check status
kendra_indices = kendra_client.list_indices()
kendra_indices = json.loads(json.dumps(kendra_indices,default = convert_datetime_to_string))
kendra_index_ids = []
for index in kendra_indices['IndexConfigurationSummaryItems']:
kendra_index_ids.append(index['Id'])
# if the index id is not present, it has been deleted
if DUSkendraIndexId not in kendra_index_ids:
print("Kendra index with id {} deleted".format(DUSkendraIndexId))
return {'IsComplete': True}
for indexId in kendra_index_ids:
if indexId == DUSkendraIndexId:
response = kendra_client.describe_index(Id=DUSkendraIndexId)
if response['Status'] == "DELETING":
print("DUSKendraIndex still deleting")
return {'IsComplete':False}
if response['Status'] == "FAILED":
# send the response as data to aws cloudformation
print("Delete of Kendra index with id {} failed with response {}".format(DUSkendraIndexId,response))
return {'IsComplete':True,'Data':response}
def on_update(event, context):
kendra_client = boto3.client('kendra')
# get status of index
response = kendra_client.describe_index(Id=event['PhysicalResourceId'])
is_complete = False
status = response['Status']
if status == "UPDATING":
print("Still updating kendra index")
elif status == "ACTIVE":
print("Kendra index is now updated & active")
return { 'IsComplete': True }
elif status == "FAILED":
raise Exception("Kendra index update failed with reason: {}".format(response['ErrorMessage']))
elif status == "DELETING" or status == "SYSTEM_UPDATING":
raise Exception("Kendra index update shows inconsistent status code, please fix and try again. Reason:{}".format(response['ErrorMessage']))
return { 'IsComplete': is_complete }
def lambda_handler(event, context):
print("Event: {}".format(event))
event_type = event['RequestType']
if event_type == 'Create': return on_create(event, context)
if event_type == 'Delete': return on_delete(event, context)
if event_type == 'Update': return on_update(event, context)
```
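For orientation, this poller is driven by CloudFormation's custom-resource framework; the sketch below fakes a single invocation, and the module name, index id and the need for valid AWS credentials plus an existing index are assumptions on my part.

```python
# Illustrative invocation of the isComplete poller; the index id is a
# placeholder and a real call needs AWS credentials and an existing index.
from lambda_function import lambda_handler  # assumed module name

event = {
    'RequestType': 'Create',
    'PhysicalResourceId': '00000000-0000-0000-0000-000000000000',  # hypothetical Kendra index id
}

# Returns {'IsComplete': False} while the index reports CREATING and
# {'IsComplete': True} once describe_index reports ACTIVE.
print(lambda_handler(event, context=None))
```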
#### File: source/test/test_datastore.py
```python
import sys
sys.path.append("./lambda/helper/python")
import boto3
import unittest
from moto import mock_s3
from moto import mock_dynamodb2
import datastore
DOCUMENTS_TABLE_NAME="DocumentsTestTable"
OUTPUT_TABLE_NAME="OutputTestTable"
current_session = boto3.session.Session()
REGION = current_session.region_name
print(f"Test region is {REGION}")
@mock_dynamodb2
class TestDocumentStore(unittest.TestCase):
def setUp(self):
self.conn = boto3.client('dynamodb',region_name=REGION)
self.conn.create_table(
TableName = DOCUMENTS_TABLE_NAME,
KeySchema = [{"AttributeName": "documentId","KeyType":"HASH"}],
AttributeDefinitions=[{"AttributeName": "documentId", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a54fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a99fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.ds = datastore.DocumentStore(DOCUMENTS_TABLE_NAME,OUTPUT_TABLE_NAME)
def test_create_document_success(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a66fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, None)
def test_create_duplicate_document_id_throws_error(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, {'Error': 'Document already exist.'})
def test_update_document_status_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, None)
def test_update_document_status_throws_error_when_document_does_not_exist(self):
documentId = "b1333fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, {'Error': 'Document does not exist.'})
def test_mark_document_complete_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.markDocumentComplete(documentId)
documentStatus = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)['Item']['documentStatus']['S']
self.assertEqual(documentStatus, "SUCCEEDED")
self.assertEqual(response, None)
def test_delete_document_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
self.ds.deleteDocument(documentId)
response = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)
self.assertTrue('Item' not in response)
def test_get_documents(self):
response = self.ds.getDocuments()
self.assertEqual(len(response['documents']),2)
document_ids = []
for document in response['documents']:
document_ids.append(document['documentId'])
self.assertTrue('b1a54fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
self.assertTrue('b1a99fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
def test_get_document_count(self):
response = self.ds.getDocumentCount()
self.assertEqual(response, 2)
def test_get_table(self):
response = self.ds.getTable()
self.assertEqual(response.name,DOCUMENTS_TABLE_NAME)
self.assertTrue("dynamodb.Table" in response.__class__.__name__)
def test_get_document(self):
documentId = 'b1a99fda-1809-49d7-8f19-0d1688eb65b9'
response = self.ds.getDocument(documentId)
self.assertEqual(response['documentStatus'], 'IN_PROGRESS')
self.assertEqual(response['documentId'], documentId)
self.assertEqual(response['bucketName'], "dusstack-sample-s3-bucket")
def tearDown(self):
self.conn.delete_table(TableName=DOCUMENTS_TABLE_NAME)
if __name__=='__main__':
unittest.main()
```
|
{
"source": "jfaubertin/KerasCuriosity",
"score": 3
}
|
#### File: jfaubertin/KerasCuriosity/models.py
```python
import numpy as np
#=========================================================================#
from keras.models import Model, Sequential
from keras.layers import Input, Concatenate, Convolution2D, Flatten, Dense, Reshape #, Activation
from keras import backend as K
#=========================================================================#
# simple action model (far from optimal)
def build_actor_model(state_shape, nb_actions):
model = Sequential()
model.add(Reshape(state_shape[1::], input_shape=state_shape))
# model.add(Reshape([224,256,3], input_shape=state_shape)) # FIXME: temporary patch; shouldn't be so environment dependent
# model.add(Convolution2D(8, (1, 1), strides=(1, 1), name='conv.1x1', padding='same', activation='relu' ))
model.add(Convolution2D(8, (4, 4), strides=(2, 2), name='conv1', padding='same', activation='relu' ))
model.add(Convolution2D(8, (4, 4), strides=(2, 2), name='conv2', padding='same', activation='relu' ))
model.add(Convolution2D(8, (4, 4), strides=(2, 2), name='conv3', padding='same', activation='relu' ))
model.add(Convolution2D(8, (4, 4), strides=(2, 2), name='conv4', padding='same', activation='relu' ))
model.add(Flatten())
# fc1 is intentionally smaller to reduce parameters
# feel free to increase this if you have the hardware
model.add(Dense( 16, name='fc1', activation='relu'))
model.add(Dense(128, name='fc2', activation='relu'))
model.add(Dense(nb_actions, name='output', activation='softmax'))
# print(model.summary())
return model
#=========================================================================#
# state_shape=[224,256,3]
def build_fmap(state_shape, name_prefix='fmap.'): # , output_shape, nb_actions
print("models.build_fmap()")
#state_shape=[224,256,3]
#print "state_shape:"
#print state_shape
# (input) 224x256(x3) / 16x16(via 5 conv layers) = 14x16(x8) = 1792 (output)
# fmap = (input) 224x256(x3) / 16x16(via 5 conv layers) = 14x16(x8) (output)
inputs = Input(shape=state_shape)
x = inputs
# x = Reshape([224,256,3])(x) # FIXME: temporary patch; shouldn't be so environment dependent
# optional - uncomment to scan for 16 colors first
# x = Convolution2D(16, (1, 1), strides=(1, 1), name='conv.1x1', padding='same', activation='relu')(x)
# x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv1', padding='same', activation='relu')(x)
x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv1', padding='same', activation='relu' )(x)
x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv2', padding='same', activation='relu' )(x)
x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv3', padding='same', activation='relu' )(x)
x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv4', padding='same', activation='relu' )(x)
# x = Convolution2D(8, (4, 4), strides=(2, 2), name=name_prefix+'conv5', activation='relu' )(x)
# Flatten so models that include this one don't have to
x = Flatten(name=name_prefix+'flat')(x)
model = Model(inputs, x, name=name_prefix+'feature_map')
return model
#=========================================================================#
# Intrinsic Curiosity Model
# Inverse model: predicts action given past and current state
def build_inverse_model(fmap1, fmap2, num_actions):
print("models.build_inverse_model()")
#======================#
# input = prev state + current state
# concat (prev state + current state)
# output = action taken between states
#======================#
# prepare inputs
obs1=fmap1
obs2=fmap2
x = Concatenate()([obs1.output, obs2.output])
#======================#
# fc1 is intentionally smaller to reduce parameters
# feel free to increase this if you have better hardware
x = Dense(16, name='icm_i.fc1', activation='relu')(x)
x = Dense(128, name='icm_i.fc2', activation='relu')(x)
#======================#
x = Dense(num_actions, name='icm_i.output', activation='sigmoid')(x)
i_model = Model([obs1.input,obs2.input], x, name='icm_inverse_model')
#print(i_model.summary())
return i_model
#=========================================================================#
# Intrinsic Curiosity Model
# Forward model: predicts future state given current state and action
def build_forward_model(fmap, num_actions):
print("models.build_forward_model()")
#======================#
# input = current state + action
# concat (flattened state + action)
# output = next state
#======================#
# prepare inputs
obs1=fmap
act1=Input(shape=(num_actions,))
x = Concatenate()([obs1.output, act1])
#======================#
# fc1 and fc3 are intentionally smaller to reduce parameters
# feel free to increase this if you have better hardware
x = Dense( 32, name='icm_f.fc1', activation='relu')(x)
x = Dense(128, name='icm_f.fc2', activation='relu')(x)
x = Dense( 32, name='icm_f.fc3', activation='relu')(x)
#======================#
output_shape = obs1.output_shape[1]
x = Dense( output_shape, name='icm_f.output', activation='linear')(x)
f_model = Model([obs1.input,act1], x, name='icm_forward_model')
#print(f_model.summary())
return f_model
#=========================================================================#
if __name__ == "__main__":
print("models.main()")
state_shape=(224,256,3)
nb_actions=24
# CREATE FEATURE MAP
fmap = build_fmap(state_shape)
fmap2 = build_fmap(state_shape, name_prefix='fmap2.')
print "feature map: "
print(fmap.summary())
# exit()
# CREATE MODELS
print "CREATE MODELS..."
inv_model = build_inverse_model(fmap, fmap2, nb_actions)
print "inv_model: "
print(inv_model.summary())
fwd_model = build_forward_model(fmap, nb_actions)
print "fwd_model: "
print(fwd_model.summary())
actor_model = build_actor_model((1,)+state_shape, nb_actions)
print "actor_model: "
print(actor_model.summary())
# exit()
# TEST MODELS
print "TEST MODELS..."
obs1 = np.random.rand( 224,256,3 )
obs2 = np.random.rand( 224,256,3 )
icm_action = np.zeros(nb_actions)
icm_action[1]=1
print "icm_action: "
print icm_action
print icm_action.shape
print "inv_model prediction: "
print inv_model.predict([np.expand_dims(obs1,0),np.expand_dims(obs2,0)]) # output = icm_action
print "fwd_model prediction: "
print fwd_model.predict([np.expand_dims(obs1,0),np.expand_dims(icm_action,0)]) # output = obs2
print "act_model prediction: "
print actor_model.predict([np.expand_dims(np.expand_dims(obs1,0),0)]) # output = action
# exit()
print("Done.")
exit()
#=========================================================================#
```
|
{
"source": "jfaulkner/googleAPIUtil",
"score": 3
}
|
#### File: googleAPIUtil/email/emailNotifier2.py
```python
import threading, feedparser, time
#import RPi_GPIO as GPIO
import urllib, pycurl, os
from time import strftime
from datetime import datetime
from time import sleep
USERNAME="piwoof"
RED_LED=14
def downloadFile(url, fileName):
fp=open(fileName, "wb")
curl=pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.WRITEDATA, fp)
curl.perform()
curl.close()
fp.close()
def getGoogleSpeechURL(phrase):
googleTranslateURL = "http://translate.google.com/translate_tts?tl=en&"
    parameters = {'q': phrase}
data = urllib.urlencode(parameters)
googleTranslateURL = "%s%s" % (googleTranslateURL,data)
return googleTranslateURL
def speakSpeechFromText(phrase):
googleSpeechURL = getGoogleSpeechURL(phrase)
print googleSpeechURL
downloadFile(googleSpeechURL,"tts.mp3")
sleep(10)
os.system("mplayer tts.mp3 -af extrastereo=0 &")
sleep(10)
os.remove("tts.mp3")
if __name__=='__main__':
x=0
password = ''
homedir=os.path.expanduser('~')
with open(os.path.abspath(homedir+'/.credentials/woof.txt')) as myfile:
password=myfile.read().rstrip()
while x<1:
newmails=int(feedparser.parse("https://"+USERNAME+":"+password+"@mail.google.com/gmail/feed/atom")["feed"]["fullcount"])
        mailStr = "You have %d new messages" % newmails
print mailStr
speakSpeechFromText(mailStr)
time.sleep(5)
x=x+1
```
#### File: googleAPIUtil/traffic/trafficNotifier.py
```python
from datetime import datetime
from datetime import timedelta
import time
import json
import googlemaps
import os
HOMEDIR=os.path.expanduser('~')
def commuteEstimate(client, subject, d1, d2, time_threshold):
now = datetime.now()
alternates=False
routes = client.directions(d1, d2,
mode="driving",
traffic_model="best_guess",
departure_time=now)
route = routes[0]
distance_to_work=route['legs'][0]['distance']['text'].replace('mi', 'miles')
time_to_work=route['legs'][0]['duration']['text'].replace('mins', 'minutes')
time_int = int(float(time_to_work.split(" ")[0]))
time_str='{0} - Approximately {1} to work. {2} total.'.format(subject, time_to_work, distance_to_work)
if(time_int >= time_threshold):
time_str = time_str + " Recommend you take an alternate route."
alternates=True
#if(len(route['warnings']) == 0):
# warnings='No warnings'
#else:
# warnings=route['warnings']
send_str = time_str# + " " + warnings
voicedir = os.path.abspath(HOMEDIR + '/voices/fel')
cmd_str = ('echo "{0}" | ' + voicedir + ' >/dev/null').format(send_str)
print(time_str)
os.system(cmd_str)
if alternates:
alternatives(client, d1, d2)
def alternatives(client, d1, d2):
routes = client.directions(d1, d2,
alternatives=True)
for route in routes:
summary=route['summary']
duration=route['legs'][0]['duration']['text'].replace('mins', 'minutes')
alternatives=summary + " " + duration
print(alternatives)
if __name__=='__main__':
api_key=''
with open(os.path.abspath(HOMEDIR+'/.credentials/trafficAPIKey2.txt')) as myfile:
api_key=myfile.read().rstrip()
client = googlemaps.Client(key=api_key)
d1='PVCC Charlottesville, VA 22902'
d2=''
commuteEstimate(client, 'driver1', d1, '1954 Swanson Drive Charlottesville, VA 22901', 20)
commuteEstimate(client, 'driver2', d1, '3263 Proffit Rd, Charlottesville, VA 22911', 30)
```
#### File: googleAPIUtil/weather/weatherNotifier.py
```python
from pprint import pprint
import requests
from urllib.request import urlopen
import json
import os
APIKEY=''
ZIP='22902'
def currentWeather():
url = 'http://api.wunderground.com/api/' + APIKEY + '/geolookup/conditions/q/VA/' + ZIP + '.json'
f = urlopen(url)
json_string = f.read().decode('utf-8')
parsed_json = json.loads(json_string)
city = parsed_json['location']['city']
state = parsed_json['location']['state']
weather = parsed_json['current_observation']['weather']
temperature_string = parsed_json['current_observation']['temperature_string']
feelslike_string = parsed_json['current_observation']['feelslike_string']
print('Weather in ' + city + ', ' + state + ': ' + weather.lower() + '. The temperature is ' + temperature_string + ' but it feels like ' + feelslike_string + '.')
f.close()
def forecast10day():
url = 'http://api.wunderground.com/api/' + APIKEY + '/geolookup/forecast10day/q/' + ZIP + '.json'
f = urlopen(url)
json_string = f.read().decode('utf-8')
parsed_json = json.loads(json_string)
for day in parsed_json['forecast']['simpleforecast']['forecastday']:
print(day['date']['weekday'] + ' (' + day['date']['pretty'] + '):')
print(' Conditions: ' + day['conditions'])
print(' High:' + day['high']['fahrenheit'] + 'F')
print(' Low: ' + day['low']['fahrenheit'] + 'F')
f.close()
def alerts():
    url = 'http://api.wunderground.com/api/' + APIKEY + '/alerts/q/VA/' + ZIP + '.json'
f = urlopen(url)
json_string = f.read().decode('utf-8')
parsed_json = json.loads(json_string)
#print(parsed_json)
if len(parsed_json['alerts']) == 0:
print('No alerts received')
else:
for alert in parsed_json['alerts']:
print(' Alert: ' + alert['description'] + '(' + alert['date'] + ')')
print(' Expires: ' + alert['expires'])
            print(' ' + alert['message'])
f.close()
if __name__=='__main__':
homedir=os.path.expanduser('~')
with open(os.path.abspath(homedir+'/.credentials/weatherkey.txt')) as myfile:
APIKEY=myfile.read().rstrip()
currentWeather()
forecast10day()
alerts()
```
|
{
"source": "jfaulkner/piwoof",
"score": 2
}
|
#### File: jfaulkner/piwoof/woof.py
```python
def woof():
import smtplib
import os
fromaddr = '<EMAIL>'
toaddrs = '<EMAIL>,<EMAIL>'
msg = 'Configurable message'
username = '<EMAIL>'
password = ''
homedir=os.path.expanduser('~')
with open(os.path.abspath(homedir+'/.credentials/woof.txt')) as myfile:
password=myfile.read().rstrip()
server = smtplib.SMTP_SSL('smtp.googlemail.com', 465)
server.login(username,password)
server.sendmail(fromaddr,toaddrs,msg)
server.quit()
print 'Email sent to ',toaddrs
if __name__=="__main__":
import sys
woof()
```
|
{
"source": "jfaust/FBX2glTF",
"score": 2
}
|
#### File: jfaust/FBX2glTF/conanfile.py
```python
import os
from conans import ConanFile, CMake
class FBX2glTFConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = (("boost_filesystem/1.69.0@bincrafters/stable"),
("fmt/5.3.0@bincrafters/stable"))
generators = "cmake_find_package", "cmake_paths"
def build(self):
if os.environ.get('FBXSDK_SDKS') == None:
print("Please set the environment variable FBXSDK_SDKS.")
return
cmake = CMake(self)
cmake.definitions["FBXSDK_SDKS"] = os.getenv('FBXSDK_SDKS')
cmake.configure()
cmake.build()
```
|
{
"source": "jfavre/reframe",
"score": 2
}
|
#### File: apps/lammps/lammps_check.py
```python
import os
import reframe as rfm
import reframe.utility.sanity as sn
class LAMMPSBaseCheck(rfm.RunOnlyRegressionTest):
def __init__(self):
super().__init__()
self.valid_prog_environs = ['PrgEnv-gnu']
self.modules = ['LAMMPS']
# Reset sources dir relative to the SCS apps prefix
self.sourcesdir = os.path.join(self.current_system.resourcesdir,
'LAMMPS')
energy_reference = -4.6195
energy = sn.extractsingle(
r'\s+500000(\s+\S+){3}\s+(?P<energy>\S+)\s+\S+\s\n',
self.stdout, 'energy', float)
self.perf_patterns = {
'perf': sn.extractsingle(r'\s+(?P<perf>\S+) timesteps/s',
self.stdout, 'perf', float),
}
energy_diff = sn.abs(energy-energy_reference)
self.sanity_patterns = sn.all([
sn.assert_found(r'Total wall time:', self.stdout),
sn.assert_lt(energy_diff, 6e-4)
])
self.strict_check = False
self.extra_resources = {
'switches': {
'num_switches': 1
}
}
self.tags = {'scs'}
self.maintainers = ['TR', 'VH']
@rfm.required_version('>=2.16')
@rfm.parameterized_test(*([s, v]
for s in ['small', 'large']
for v in ['prod', 'maint']))
class LAMMPSGPUCheck(LAMMPSBaseCheck):
def __init__(self, scale, variant):
super().__init__()
self.valid_systems = ['daint:gpu']
self.executable = 'lmp_mpi'
self.executable_opts = ['-sf gpu', '-pk gpu 1', '-in in.lj.gpu']
self.variables = {'CRAY_CUDA_MPS': '1'}
self.num_gpus_per_node = 1
if scale == 'small':
self.valid_systems += ['dom:gpu']
self.num_tasks = 12
self.num_tasks_per_node = 2
else:
self.num_tasks = 32
self.num_tasks_per_node = 2
references = {
'maint': {
'small': {
'dom:gpu': {'perf': (3457, -0.10, None, 'timesteps/s')},
'daint:gpu': {'perf': (2524, -0.10, None, 'timesteps/s')}
},
'large': {
'daint:gpu': {'perf': (3832, -0.05, None, 'timesteps/s')}
}
},
'prod': {
'small': {
'dom:gpu': {'perf': (3132, -0.05, None, 'timesteps/s')},
'daint:gpu': {'perf': (2524, -0.10, None, 'timesteps/s')}
},
'large': {
'daint:gpu': {'perf': (2382, -0.50, None, 'timesteps/s')}
}
},
}
self.reference = references[variant][scale]
self.tags |= {'maintenance' if variant == 'maint' else 'production'}
@rfm.required_version('>=2.16')
@rfm.parameterized_test(*([s, v]
for s in ['small', 'large']
for v in ['prod']))
class LAMMPSCPUCheck(LAMMPSBaseCheck):
def __init__(self, scale, variant):
super().__init__()
self.valid_systems = ['daint:mc']
self.executable = 'lmp_omp'
self.executable_opts = ['-sf omp', '-pk omp 1', '-in in.lj.cpu']
if scale == 'small':
self.valid_systems += ['dom:mc']
self.num_tasks = 216
self.num_tasks_per_node = 36
else:
self.num_tasks_per_node = 36
self.num_tasks = 576
references = {
'prod': {
'small': {
'dom:mc': {'perf': (4394, -0.05, None, 'timesteps/s')},
'daint:mc': {'perf': (3824, -0.10, None, 'timesteps/s')}
},
'large': {
'daint:mc': {'perf': (5310, -0.65, None, 'timesteps/s')}
}
},
}
self.reference = references[variant][scale]
self.tags |= {'maintenance' if variant == 'maint' else 'production'}
```
#### File: microbenchmarks/dgemm/dgemm.py
```python
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.16-dev0')
@rfm.simple_test
class DGEMMTest(rfm.RegressionTest):
def __init__(self):
super().__init__()
self.descr = 'DGEMM performance test'
self.sourcepath = 'dgemm.c'
self.sanity_patterns = self.eval_sanity()
# the perf patterns are automaticaly generated inside sanity
self.perf_patterns = {}
self.valid_systems = [
'daint:gpu', 'daint:mc',
'dom:gpu', 'dom:mc',
'kesch:cn', 'kesch:pn'
]
if self.current_system.name in ['daint', 'dom']:
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel']
if self.current_system.name == 'kesch':
self.valid_prog_environs = ['PrgEnv-gnu-nompi']
self.num_tasks = 0
self.num_tasks_per_node = 1
self.num_tasks_per_core = 1
self.num_tasks_per_socket = 1
self.use_multithreading = False
self.executable_opts = ['6144', '12288', '3072']
self.build_system = 'SingleSource'
self.build_system.cflags = ['-O3']
self.sys_reference = {
'daint:gpu': (300.0, -0.15, None, 'Gflop/s'),
'daint:mc': (860.0, -0.15, None, 'Gflop/s'),
'dom:gpu': (300.0, -0.15, None, 'Gflop/s'),
'dom:mc': (860.0, -0.15, None, 'Gflop/s'),
'kesch:cn': (300.0, -0.15, None, 'Gflop/s'),
'kesch:pn': (300.0, -0.15, None, 'Gflop/s'),
}
self.maintainers = ['AJ', 'VH', 'VK']
self.tags = {'benchmark', 'diagnostic'}
def setup(self, partition, environ, **job_opts):
if environ.name.startswith('PrgEnv-gnu'):
self.build_system.cflags += ['-fopenmp']
elif environ.name.startswith('PrgEnv-intel'):
self.build_system.cppflags = [
'-DMKL_ILP64', '-I${MKLROOT}/include']
self.build_system.cflags = ['-qopenmp']
self.build_system.ldflags = [
'${MKLROOT}/lib/intel64/libmkl_intel_ilp64.a',
'${MKLROOT}/lib/intel64/libmkl_intel_thread.a',
'${MKLROOT}/lib/intel64/libmkl_core.a',
'-liomp5', '-lpthread', '-lm', '-ldl']
if partition.fullname in ['daint:gpu', 'dom:gpu']:
self.num_cpus_per_task = 12
elif partition.fullname in ['daint:mc', 'dom:mc']:
self.num_cpus_per_task = 36
elif partition.fullname in ['kesch:cn', 'kesch:pn']:
self.num_cpus_per_task = 12
self.build_system.cflags += ['-I$EBROOTOPENBLAS/include']
self.build_system.ldflags = ['-L$EBROOTOPENBLAS/lib', '-lopenblas',
'-lpthread', '-lgfortran']
if self.num_cpus_per_task:
self.variables = {
'OMP_NUM_THREADS': str(self.num_cpus_per_task)
}
super().setup(partition, environ, **job_opts)
@sn.sanity_function
def eval_sanity(self):
all_tested_nodes = sn.evaluate(sn.extractall(
r'(?P<hostname>\S+):\s+Time for \d+ DGEMM operations',
self.stdout, 'hostname'))
num_tested_nodes = len(all_tested_nodes)
failure_msg = ('Requested %s node(s), but found %s node(s)' %
(self.job.num_tasks, num_tested_nodes))
        sn.evaluate(sn.assert_eq(num_tested_nodes, self.job.num_tasks,
                                 msg=failure_msg))
for hostname in all_tested_nodes:
partition_name = self.current_partition.fullname
ref_name = '%s:%s' % (partition_name, hostname)
self.reference[ref_name] = self.sys_reference.get(
partition_name, (0.0, None, None, 'Gflop/s')
)
self.perf_patterns[hostname] = sn.extractsingle(
r'%s:\s+Avg\. performance\s+:\s+(?P<gflops>\S+)'
r'\sGflop/s' % hostname, self.stdout, 'gflops', float)
return True
```
#### File: microbenchmarks/shmem/shmem.py
```python
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.16-dev0')
@rfm.simple_test
class GPUShmemTest(rfm.RegressionTest):
def __init__(self):
super().__init__()
self.sourcepath = 'shmem.cu'
self.build_system = 'SingleSource'
self.valid_systems = ['daint:gpu', 'dom:gpu']
self.valid_prog_environs = ['PrgEnv-gnu']
self.num_tasks = 0
self.num_tasks_per_node = 1
self.num_gpus_per_node = 1
if self.current_system.name in {'daint', 'dom'}:
self.modules = ['craype-accel-nvidia60']
self.sanity_patterns = sn.assert_eq(
sn.count(sn.findall(r'Bandwidth', self.stdout)),
self.num_tasks_assigned * 2)
self.perf_patterns = {
'bandwidth': sn.extractsingle(
r'Bandwidth\(double\) (?P<bw>\S+) GB/s',
self.stdout, 'bw', float)
}
self.reference = {
# theoretical limit for P100:
# 8 [B/cycle] * 1.328 [GHz] * 16 [bankwidth] * 56 [SM] = 9520 GB/s
'dom:gpu': {
'bandwidth': (8850, -0.01, 9520/8850. - 1, 'GB/s')
},
'daint:gpu': {
'bandwidth': (8850, -0.01, 9520/8850. - 1, 'GB/s')
},
'*': {
'bandwidth': (0, None, None, 'GB/s')
}
}
self.maintainers = ['SK']
self.tags = {'benchmark', 'diagnostic'}
@property
@sn.sanity_function
def num_tasks_assigned(self):
return self.job.num_tasks
```
#### File: tools/profiling_and_debugging/intel_advisor_roofline.py
```python
import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.parameterized_test(*[[repeat, toolversion, datalayout]
for repeat in ['100000']
for toolversion in ['597843']
for datalayout in ['G3_AOS_SCALAR', 'G3_SOA_SCALAR',
'G3_AOS_VECTOR', 'G3_SOA_VECTOR']
])
class IntelRooflineAdvisorTest(rfm.RegressionTest):
'''This test checks the values reported by Intel Advisor's roofline model:
https://software.intel.com/en-us/intel-advisor-xe
The roofline model is based on GFLOPS and Arithmetic Intensity (AI):
"Self GFLOPS" = "Self GFLOP" / "Self Elapsed Time"
"Self GB/s" = "Self Memory GB" / "Self Elapsed Time"
"Self AI" = "Self GFLOPS" / "Self GB/s"
While a roofline analysis flag exists ('advixe-cl -collect roofline'), it
may not be used to collect data on MPI applications; in that case, the
survey and flops analysis must be collected separately: first run a survey
analysis ('advixe-cl -collect survey') and then run a tripcounts+flops
analysis ('advixe-cl -collect tripcounts -flop') using the same project
directory for both steps.
Example result on 1 core of Intel Broadwell CPU (E5-2695 v4):
G3_AOS_SCALAR: gflops, 2.79 arithmetic_intensity', 0.166 380ms <- slow
G3_AOS_VECTOR: gflops, 3.79 arithmetic_intensity', 0.125 143ms
G3_SOA_SCALAR: gflops, 2.79 arithmetic_intensity', 0.166 351ms
G3_SOA_VECTOR: gflops, 10.62 arithmetic_intensity', 0.166 57ms <- fast
'''
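    # Worked example of the formulas above, using the illustrative API output
    # quoted further down in this check (not newly measured values):
    #   Self GFLOPS = 0.5312 GFLOP / 0.1 s        = 5.312 GFLOPS
    #   Self GB/s   = 4.2496 GB / 0.1 s           = 42.496 GB/s
    #   Self AI     = 5.312 GFLOPS / 42.496 GB/s  = 0.125 FLOP/byte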
def __init__(self, repeat, toolversion, datalayout):
self.descr = 'Roofline Analysis test with Intel Advisor'
# for reference: advisor/2019 was failing on dom with:
# "Exceeded job memory limit" (webrt#36087)
self.valid_systems = ['daint:mc', 'dom:mc']
# Reporting MFLOPS is not available on Intel Haswell cpus, see
# https://www.intel.fr/content/dam/www/public/us/en/documents/manuals/
# 64-ia-32-architectures-software-developer-vol-1-manual.pdf
self.valid_prog_environs = ['PrgEnv-intel']
self.modules = ['advisor/2019_update4']
# Testing with advisor/2018 (build 551025) fails with:
# roof.dir/nid00753.000/trc000/trc000.advixe
# Application exit code: 139
self.sourcesdir = os.path.join(self.current_system.resourcesdir,
'roofline', 'intel_advisor')
self.build_system = 'SingleSource'
self.sourcepath = '_roofline.cpp'
self.executable = 'advixe-cl'
self.target_executable = './roof.exe'
self.build_system.cppflags = ['-D_ADVISOR',
'-I$ADVISOR_2019_DIR/include']
self.prgenv_flags = {
'PrgEnv-intel': ['-g', '-O2', '-std=c++11', '-restrict'],
}
self.build_system.ldflags = ['-L$ADVISOR_2019_DIR/lib64 -littnotify']
self.roofline_rpt = '%s.rpt' % self.target_executable
self.version_rpt = 'Intel_Advisor_version.rpt'
self.roofline_ref = 'Intel_Advisor_roofline_reference.rpt'
self.prebuild_cmd = [
'patch -s < ADVISOR/roofline_template.patch',
'sed -e "s-XXXX-%s-" -e "s-YYYY-%s-" %s &> %s' %
(repeat, datalayout, 'roofline_template.cpp', '_roofline.cpp')
]
self.exclusive = True
self.num_tasks = 1
self.num_tasks_per_node = 1
self.num_cpus_per_task = 1
self.num_tasks_per_core = 1
self.use_multithreading = False
self.variables = {
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
'CRAYPE_LINK_TYPE': 'dynamic',
}
self.pre_run = [
'mv %s %s' % (self.executable, self.target_executable),
'advixe-cl -help collect | head -20',
]
self.roofdir = './roof.dir'
self.executable_opts = [
'--collect survey --project-dir=%s --search-dir src:rp=. '
'--data-limit=0 --no-auto-finalize --trace-mpi -- %s ' %
(self.roofdir, self.target_executable)
]
# - Reference roofline boundaries for Intel Broadwell CPU (E5-2695 v4):
L1bw = 293 # *1024**3
L2bw = 79 # *1024**3
L3bw = 33 # *1024**3
DPfmabw = 45*1024**3
DPaddbw = 12*1024**3
ScalarAddbw = 3*1024**3
# --- roofline (memory) boundaries from the tool:
# DRAM Bandwidth (single node) 63206331080 memory
# DRAM Bandwidth 125993278750 memory
# DRAM Bandwidth (single-threaded) 12715570803 memory
# L1 Bandwidth 11360856466728 memory
# Scalar L1 Bandwidth 2648216636280 memory
# L1 bandwidth (single-threaded) 315579346298 memory
# ************
# Scalar L1 bandwidth (single-threaded) 73561573230 memory
# L2 Bandwidth 3102773429268 memory
# Scalar L2 Bandwidth 921316779936 memory
# L2 bandwidth (single-threaded) 86188150813 memory
# ***********
# Scalar L2 bandwidth (single-threaded) 25592132776 memory
# L3 Bandwidth 1269637300440 memory
# Scalar L3 Bandwidth 845928498744 memory
# L3 bandwidth (single-threaded) 35267702790 memory
# ***********
# Scalar L3 bandwidth (single-threaded) 23498013854 memory
regex_roof_L1 = (r'^L1\sbandwidth\s\(single-threaded\)\s+(?P<L1bw>\d+)'
r'\s+memory$')
regex_roof_L2 = (r'^L2\sbandwidth\s\(single-threaded\)\s+(?P<L2bw>\d+)'
r'\s+memory$')
regex_roof_L3 = (r'^L3\sbandwidth\s\(single-threaded\)\s+(?P<L3bw>\d+)'
r'\s+memory$')
roof_L1 = sn.round(sn.extractsingle(regex_roof_L1, self.roofline_ref,
'L1bw', int) / 1024**3, 2)
roof_L2 = sn.round(sn.extractsingle(regex_roof_L2, self.roofline_ref,
'L2bw', int) / 1024**3, 3)
roof_L3 = sn.round(sn.extractsingle(regex_roof_L3, self.roofline_ref,
'L3bw', int) / 1024**3, 3)
# --- roofline (compute) boundaries from the tool:
# SP Vector FMA Peak 2759741518342 compute
# SP Vector FMA Peak (single-threaded) 98956234406 compute
# DP Vector FMA Peak 1379752337990 compute
# DP Vector FMA Peak (single-threaded) 49563336304 compute
# ***********
# Scalar Add Peak 93438527464 compute
# Scalar Add Peak (single-threaded) 3289577753 compute
# **********
# SP Vector Add Peak 689944922272 compute
# SP Vector Add Peak (single-threaded) 24691445241 compute
# DP Vector Add Peak 344978547363 compute
# DP Vector Add Peak (single-threaded) 12385333008 compute
# ***********
# Integer Scalar Add Peak 228677310757 compute
# Integer Scalar Add Peak (single-threaded) 8055287031 compute
# Int64 Vector Add Peak 747457604632 compute
# Int64 Vector Add Peak (single-threaded) 26300241032 compute
# Int32 Vector Add Peak 1494880413924 compute
# Int32 Vector Add Peak (single-threaded) 52738180380 compute
regex_roof_dpfma = (r'^DP Vector FMA Peak\s\(single-threaded\)\s+'
r'(?P<DPfmabw>\d+)\s+compute$')
regex_roof_dpadd = (r'^DP Vector Add Peak\s\(single-threaded\)\s+'
r'(?P<DPaddbw>\d+)\s+compute$')
regex_roof_scalaradd = (r'^Scalar Add Peak\s\(single-threaded\)\s+'
r'(?P<ScalarAddbw>\d+)\s+compute$')
roof_dpfma = sn.extractsingle(regex_roof_dpfma, self.roofline_ref,
'DPfmabw', int)
roof_dpadd = sn.extractsingle(regex_roof_dpadd, self.roofline_ref,
'DPaddbw', int)
roof_scalaradd = sn.extractsingle(regex_roof_scalaradd,
self.roofline_ref, 'ScalarAddbw',
int)
# - API output:
# ('self_elapsed_time', 0.1)
# ('self_memory_gb', 4.2496)
# ('self_gb_s', 42.496)
# ('self_gflop', 0.5312)
# ('self_gflops', 5.312)
# ('self_arithmetic_intensity', 0.125)
# ('_self_gb_s', 42.495999999999995, 42.496)
# ('_self_gflops', 5.311999999999999, 5.312)
# ('_self_arithmetic_intensity', 0.125, 0.125)
# ('gap _self_gb_s', -7.105427357601002e-15)
# ('gap _self_gflops', -8.881784197001252e-16)
# ('gap _self_arithmetic_intensity', 0.0)
# returned AI gap = 0.0000000000000000
# returned GFLOPS gap = -0.0000000000000009
regex_ai_gap = r'^returned\sAI\sgap\s=\s(?P<Intensity>.*)'
regex_ai_gflops = r'^returned\sGFLOPS\sgap\s=\s(?P<Flops>.*)'
ai_gap = sn.extractsingle(regex_ai_gap, self.roofline_rpt, 'Intensity',
float)
ai_gflops = sn.extractsingle(regex_ai_gflops, self.roofline_rpt,
'Flops', float)
regex_toolversion = r'I*.\(build\s(?P<version>\d+)\s*.'
found_toolversion = sn.extractsingle(regex_toolversion,
self.version_rpt, 'version')
self.sanity_patterns = sn.all([
# check the job status:
sn.assert_found('loop complete.', self.stdout),
# check the tool's version (2019=591264, 2018=551025):
sn.assert_eq(found_toolversion, toolversion),
# --- roofline boundaries:
# check --report=roofs (L1, L2 and L3 bandwidth):
# sn.assert_reference(roof_L1, L1bw, -0.12, 0.08),
# sn.assert_reference(roof_L2, L2bw, -0.12, 0.08),
# sn.assert_reference(roof_L3, L3bw, -0.12, 0.08),
# check --report=roofs (DP FMA, DP Add and Scalar Add):
sn.assert_reference(roof_dpfma, DPfmabw, -0.12, 0.08),
sn.assert_reference(roof_dpadd, DPaddbw, -0.12, 0.08),
sn.assert_reference(roof_scalaradd, ScalarAddbw, -0.12, 0.08),
# --- check Arithmetic_intensity:
sn.assert_reference(ai_gap, 0.0, -0.01, 0.01),
# --- check GFLOPS:
sn.assert_reference(ai_gflops, 0.0, -0.01, 0.01),
])
# --performance-report:
regex_mseconds = r'elapsed time: (?P<msec>\d+)ms'
regex_ai = r'^\(\'self_arithmetic_intensity\', (?P<AI>\d+.\d+)\)'
regex_gbs = r'^\(\'self_gb_s\', (?P<gbs>\d+.\d+)\)'
regex_gflops = r'^\(\'self_gflops\', (?P<gflops>\d+.\d+)\)'
mseconds = sn.extractsingle(regex_mseconds, self.stdout,
'msec', int)
arithmetic_intensity = sn.extractsingle(regex_ai, self.roofline_rpt,
'AI', float)
bandwidth = sn.extractsingle(regex_gbs, self.roofline_rpt,
'gbs', float)
gflops = sn.extractsingle(regex_gflops, self.roofline_rpt,
'gflops', float)
self.perf_patterns = {
'Elapsed': mseconds,
'ArithmeticIntensity': arithmetic_intensity,
'GFlops': gflops,
'Bandwidth': bandwidth,
'roof_L1': roof_L1,
'roof_L2': roof_L2,
'roof_L3': roof_L3,
}
self.reference = {
'*': {
'Elapsed': (0, None, None, 'ms'),
'ArithmeticIntensity': (0, None, None, ''),
'GFlops': (0, None, None, 'GFLOPs/s'),
'Bandwidth': (0, None, None, 'GB/s'),
'roof_L1': (L1bw, -0.12, 0.08, 'GB/s'),
'roof_L2': (L2bw, -0.12, 0.08, 'GB/s'),
'roof_L3': (L3bw, -0.12, 0.08, 'GB/s'),
}
}
self.maintainers = ['JG']
self.tags = {'production'}
def setup(self, partition, environ, **job_opts):
super().setup(partition, environ, **job_opts)
environ_name = self.current_environ.name
prgenv_flags = self.prgenv_flags[environ_name]
self.build_system.cxxflags = prgenv_flags
launcher_cmd = ' '.join(self.job.launcher.command(self.job))
self.post_run = [
# --- collecting the performance data for the roofline model is a 2
# steps process:
'%s %s --collect tripcounts --flop --project-dir=%s '
'--search-dir src:rp=. --data-limit=0 --no-auto-finalize '
'--trace-mpi -- %s' %
(launcher_cmd, self.executable, self.roofdir,
self.target_executable),
# --- check tool's version:
'advixe-cl -V &> %s' % self.version_rpt,
# "advixe-cl --report" looks for e000/ in the output directory;
# if not found, it will fail with:
# IOError: Survey result cannot be loaded
'cd %s;ln -s nid* e000;cd -' % self.roofdir,
# --- report reference values/boundaries (roofline_ref):
'advixe-cl --report=roofs --project-dir=%s &> %s' %
(self.roofdir, self.roofline_ref),
'python2 API/cscs.py %s &> %s' % (self.roofdir, self.roofline_rpt),
'touch the_end',
# 'advixe-cl --format=csv' seems to be not working (empty report),
# keeping as reference for a future check:
# 'advixe-cl --show-all-columns -csv-delimiter=";"'
# ' --report=tripcounts --format=csv --project-dir=%s &> %s'
# This can be used instead (see advisor/config/report/roofs.tmpl):
# 'advixe-cl --report custom --report-template ./TEMPL/cscs.tmpl'
# ' --project-dir=%s &> %s'
]
```
#### File: tools/profiling_and_debugging/perftools.py
```python
import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.parameterized_test(['Cuda'], ['C++'], ['F90'])
class PerftoolsCheck(rfm.RegressionTest):
def __init__(self, lang):
super().__init__()
self.name = 'jacobi_perftools_%s' % lang.replace('+', 'p')
self.descr = '%s check' % lang
if lang != 'Cuda':
self.valid_systems = ['daint:gpu', 'dom:gpu',
'daint:mc', 'dom:mc']
else:
self.valid_systems = ['daint:gpu', 'dom:gpu']
self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-cray_classic',
'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi']
if lang == 'Cpp':
self.sourcesdir = os.path.join('src', 'C++')
else:
self.sourcesdir = os.path.join('src', lang)
self.modules = ['craype-accel-nvidia60', 'perftools-lite']
self.build_system = 'Make'
# NOTE: Restrict concurrency to allow creation of Fortran modules
if lang == 'F90':
self.build_system.max_concurrency = 1
self.prgenv_flags = {
'PrgEnv-cray': ['-O2', '-g', '-h nomessage=3140',
'-homp' if lang == 'F90' else '-fopenmp'],
'PrgEnv-cray_classic': ['-O2', '-g', '-h nomessage=3140',
'-homp'],
'PrgEnv-gnu': ['-O2', '-g', '-fopenmp'],
'PrgEnv-intel': ['-O2', '-g', '-qopenmp'],
'PrgEnv-pgi': ['-O2', '-g', '-mp']
}
self.num_iterations = 200
if lang == 'Cuda':
self.build_system.options = [
'NVCCFLAGS="-arch=sm_60"',
'DDTFLAGS="-DUSE_MPI -D_CSCS_ITMAX=%s"' % self.num_iterations,
'LIB=-lstdc++']
self.executable = 'jacobi'
# NOTE: Reduce time limit because for PrgEnv-pgi even if the output
# is correct, the batch job uses all the time.
self.time_limit = (0, 5, 0)
self.num_tasks = 3
self.num_tasks_per_node = 3
self.num_cpus_per_task = 4
if lang == 'Cuda':
self.num_gpus_per_node = 1
self.num_tasks = 1
self.num_tasks_per_node = 1
self.num_cpus_per_task = 1
self.variables = {
'ITERATIONS': str(self.num_iterations),
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
'OMP_PROC_BIND': 'true',
'CRAYPE_LINK_TYPE': 'dynamic'
}
if self.num_tasks == 1:
# will be fixed in perftools/7.1
self.variables['PAT_RT_REPORT_METHOD'] = 'pe'
self.sanity_patterns = sn.assert_found('Table 1: Profile by Function',
self.stdout)
self.maintainers = ['MK', 'JG']
self.tags = {'production'}
    def setup(self, partition, environ, **job_opts):
        super().setup(partition, environ, **job_opts)
flags = self.prgenv_flags[self.current_environ.name]
self.build_system.cflags = flags
self.build_system.cxxflags = flags
self.build_system.fflags = flags
```
#### File: tools/profiling_and_debugging/scorep_mpi_omp.py
```python
import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.parameterized_test(['C++'], ['F90'])
class ScorepHybrid(rfm.RegressionTest):
def __init__(self, lang):
super().__init__()
self.name = 'scorep_mpi_omp_%s' % lang.replace('+', 'p')
self.descr = 'SCORE-P %s check' % lang
self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi',
'PrgEnv-cray']
self.prgenv_flags = {
'PrgEnv-cray': ['-g', '-homp'],
'PrgEnv-gnu': ['-g', '-fopenmp'],
'PrgEnv-intel': ['-g', '-openmp'],
'PrgEnv-pgi': ['-g', '-mp']
}
self.sourcesdir = os.path.join('src', lang)
self.executable = 'jacobi'
self.build_system = 'Make'
self.build_system.makefile = 'Makefile_scorep_mpi_omp'
# NOTE: Restrict concurrency to allow creation of Fortran modules
if lang == 'F90':
self.build_system.max_concurrency = 1
self.num_tasks = 3
self.num_tasks_per_node = 3
self.num_cpus_per_task = 4
self.num_iterations = 200
self.variables = {
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
'ITERATIONS': str(self.num_iterations),
'SCOREP_ENABLE_PROFILING': 'false',
'SCOREP_ENABLE_TRACING': 'true',
'OMP_PROC_BIND': 'true',
'SCOREP_TIMER': 'clock_gettime'
}
cpu_count = self.num_cpus_per_task * self.num_tasks_per_node
self.otf2_file = 'otf2.txt'
self.sanity_patterns = sn.all([
sn.assert_found('SUCCESS', self.stdout),
sn.assert_eq(sn.count(sn.extractall(
r'(?P<line>LEAVE.*omp\s+\S+\s+\@_jacobi)', self.otf2_file,
'line')), 4 * self.num_iterations * cpu_count),
sn.assert_not_found('warning|WARNING', self.stderr)
])
self.maintainers = ['MK', 'JG']
self.tags = {'production'}
# additional program call in order to generate the tracing output for
# the sanity check
self.post_run = [
'otf2-print scorep-*/traces.otf2 > %s' % self.otf2_file
]
def setup(self, partition, environ, **job_opts):
scorep_ver = '5.0'
tc_ver = '19.03'
cu_ver = '10.0'
self.scorep_modules = {
'PrgEnv-gnu': ['Score-P/%s-CrayGNU-%s' % (scorep_ver, tc_ver)],
'PrgEnv-intel': ['Score-P/%s-CrayIntel-%s' % (scorep_ver, tc_ver)],
'PrgEnv-pgi': ['Score-P/%s-CrayPGI-%s' % (scorep_ver, tc_ver)],
'PrgEnv-cray': ['Score-P/%s-CrayCCE-%s' % (scorep_ver, tc_ver)]
}
if partition.fullname in ['daint:gpu', 'dom:gpu']:
self.scorep_modules['PrgEnv-gnu'] = [
'Score-P/%s-CrayGNU-%s-cuda-%s' % (scorep_ver, tc_ver, cu_ver)
]
self.modules = self.scorep_modules[environ.name]
super().setup(partition, environ, **job_opts)
prgenv_flags = self.prgenv_flags[self.current_environ.name]
self.build_system.cflags = prgenv_flags
self.build_system.cxxflags = prgenv_flags
self.build_system.fflags = prgenv_flags
self.build_system.ldflags = ['-lm']
self.build_system.options = [
"PREP='scorep --nopreprocess --mpp=mpi --thread=omp'"
]
```
#### File: core/schedulers/__init__.py
```python
import abc
import reframe.core.debug as debug
import reframe.core.environments as env
import reframe.core.fields as fields
import reframe.core.shell as shell
import reframe.utility.typecheck as typ
from reframe.core.exceptions import JobError, JobNotStartedError
from reframe.core.launchers import JobLauncher
from reframe.core.logging import getlogger
class Job(abc.ABC):
'''A job descriptor.
.. caution::
This is an abstract class.
Users may not create jobs directly.
'''
num_tasks = fields.TypedField('num_tasks', int)
num_tasks_per_node = fields.TypedField('num_tasks_per_node',
int, type(None))
num_tasks_per_core = fields.TypedField('num_tasks_per_core',
int, type(None))
num_tasks_per_socket = fields.TypedField('num_tasks_per_socket',
int, type(None))
    num_cpus_per_task = fields.TypedField('num_cpus_per_task',
                                          int, type(None))
use_smt = fields.TypedField('use_smt', bool, type(None))
time_limit = fields.TimerField('time_limit', type(None))
#: Options to be passed to the backend job scheduler.
#:
#: :type: :class:`List[str]`
#: :default: ``[]``
options = fields.TypedField('options', typ.List[str])
#: The parallel program launcher that will be used to launch the parallel
#: executable of this job.
#:
#: :type: :class:`reframe.core.launchers.JobLauncher`
launcher = fields.TypedField('launcher', JobLauncher)
_jobid = fields.TypedField('_jobid', int, type(None))
_exitcode = fields.TypedField('_exitcode', int, type(None))
_state = fields.TypedField('_state', str, type(None))
# The sched_* arguments are exposed also to the frontend
def __init__(self,
name,
launcher,
workdir='.',
num_tasks=1,
num_tasks_per_node=None,
num_tasks_per_core=None,
num_tasks_per_socket=None,
num_cpus_per_task=None,
use_smt=None,
time_limit=None,
script_filename=None,
stdout=None,
stderr=None,
pre_run=[],
post_run=[],
sched_flex_alloc_tasks=None,
sched_access=[],
sched_account=None,
sched_partition=None,
sched_reservation=None,
sched_nodelist=None,
sched_exclude_nodelist=None,
sched_exclusive_access=None,
sched_options=[]):
# Mutable fields
self.num_tasks = num_tasks
self.num_tasks_per_node = num_tasks_per_node
self.num_tasks_per_core = num_tasks_per_core
self.num_tasks_per_socket = num_tasks_per_socket
self.num_cpus_per_task = num_cpus_per_task
self.use_smt = use_smt
self.time_limit = time_limit
self.options = list(sched_options)
self.launcher = launcher
self._name = name
self._workdir = workdir
self._script_filename = script_filename or '%s.sh' % name
self._stdout = stdout or '%s.out' % name
self._stderr = stderr or '%s.err' % name
self._nodelist = None
# Backend scheduler related information
self._sched_flex_alloc_tasks = sched_flex_alloc_tasks
self._sched_access = sched_access
self._sched_nodelist = sched_nodelist
self._sched_exclude_nodelist = sched_exclude_nodelist
self._sched_partition = sched_partition
self._sched_reservation = sched_reservation
self._sched_account = sched_account
self._sched_exclusive_access = sched_exclusive_access
# Live job information; to be filled during job's lifetime by the
# scheduler
self._jobid = None
self._exitcode = None
self._state = None
def __repr__(self):
return debug.repr(self)
# Read-only properties
@property
def exitcode(self):
return self._exitcode
@property
def jobid(self):
return self._jobid
@property
def state(self):
return self._state
@property
def name(self):
return self._name
@property
def workdir(self):
return self._workdir
@property
def script_filename(self):
return self._script_filename
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def sched_flex_alloc_tasks(self):
return self._sched_flex_alloc_tasks
@property
def sched_access(self):
return self._sched_access
@property
def sched_nodelist(self):
return self._sched_nodelist
@property
def sched_exclude_nodelist(self):
return self._sched_exclude_nodelist
@property
def sched_partition(self):
return self._sched_partition
@property
def sched_reservation(self):
return self._sched_reservation
@property
def sched_account(self):
return self._sched_account
@property
def sched_exclusive_access(self):
return self._sched_exclusive_access
def prepare(self, commands, environs=None, **gen_opts):
environs = environs or []
if self.num_tasks <= 0:
num_tasks_per_node = self.num_tasks_per_node or 1
min_num_tasks = (-self.num_tasks if self.num_tasks else
num_tasks_per_node)
try:
guessed_num_tasks = self.guess_num_tasks()
except NotImplementedError as e:
raise JobError('flexible task allocation is not supported by '
'this backend') from e
if guessed_num_tasks < min_num_tasks:
nodes_required = min_num_tasks // num_tasks_per_node
nodes_found = guessed_num_tasks // num_tasks_per_node
raise JobError('could not find enough nodes: '
'required %s, found %s' %
(nodes_required, nodes_found))
self.num_tasks = guessed_num_tasks
getlogger().debug('flex_alloc_tasks: setting num_tasks to %s' %
self.num_tasks)
with shell.generate_script(self.script_filename,
**gen_opts) as builder:
builder.write_prolog(self.emit_preamble())
builder.write(env.emit_load_commands(*environs))
for c in commands:
builder.write_body(c)
@abc.abstractmethod
def emit_preamble(self):
pass
def guess_num_tasks(self):
if isinstance(self.sched_flex_alloc_tasks, int):
if self.sched_flex_alloc_tasks <= 0:
raise JobError('invalid number of flex_alloc_tasks: %s' %
self.sched_flex_alloc_tasks)
return self.sched_flex_alloc_tasks
available_nodes = self.get_all_nodes()
getlogger().debug('flex_alloc_tasks: total available nodes %s ' %
len(available_nodes))
# Try to guess the number of tasks now
available_nodes = self.filter_nodes(available_nodes,
self.sched_access + self.options)
if self.sched_flex_alloc_tasks == 'idle':
available_nodes = {n for n in available_nodes
if n.is_available()}
getlogger().debug(
'flex_alloc_tasks: selecting idle nodes: '
'available nodes now: %s' % len(available_nodes))
num_tasks_per_node = self.num_tasks_per_node or 1
num_tasks = len(available_nodes) * num_tasks_per_node
return num_tasks
@abc.abstractmethod
def get_all_nodes(self):
# Gets all the available nodes
pass
@abc.abstractmethod
def filter_nodes(self, nodes, options):
# Filter nodes according to the scheduler options
pass
@abc.abstractmethod
def submit(self):
pass
@abc.abstractmethod
def wait(self):
if self._jobid is None:
raise JobNotStartedError('cannot wait an unstarted job')
@abc.abstractmethod
def cancel(self):
if self._jobid is None:
raise JobNotStartedError('cannot cancel an unstarted job')
@abc.abstractmethod
def finished(self):
if self._jobid is None:
raise JobNotStartedError('cannot poll an unstarted job')
@property
def nodelist(self):
'''The list of node names assigned to this job.
This attribute is :class:`None` if no nodes are assigned to the job
yet.
This attribute is set reliably only for the ``slurm`` backend, i.e.,
Slurm *with* accounting enabled.
The ``squeue`` scheduler backend, i.e., Slurm *without* accounting,
might not set this attribute for jobs that finish very quickly.
For the ``local`` scheduler backend, this returns an one-element list
containing the hostname of the current host.
This attribute might be useful in a flexible regression test for
determining the actual nodes that were assigned to the test.
For more information on flexible task allocation, please refer to the
corresponding `section <advanced.html#flexible-regression-tests>`__ of
the tutorial.
This attribute is *not* supported by the ``pbs`` scheduler backend.
.. versionadded:: 2.17
'''
return self._nodelist
```
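Because `Job` is abstract, every scheduler backend has to fill in the hooks declared above. The snippet below is only a minimal sketch of such a subclass; the class name and the trivial method bodies are invented for illustration and do not correspond to an actual ReFrame backend.

```python
# Minimal sketch of a scheduler backend; names and bodies are illustrative only.
from reframe.core.schedulers import Job


class EchoJob(Job):
    def emit_preamble(self):
        # A real backend emits scheduler directives (e.g. '#SBATCH ...') here.
        return ['# no scheduler directives for this toy backend']

    def get_all_nodes(self):
        return set()

    def filter_nodes(self, nodes, options):
        return nodes

    def submit(self):
        self._jobid = 1  # pretend the scheduler handed back an id

    def wait(self):
        super().wait()      # raises JobNotStartedError if submit() never ran

    def cancel(self):
        super().cancel()

    def finished(self):
        super().finished()
        return True
```

Constructing such a job still goes through `Job.__init__`, so a `name` and a `JobLauncher` instance have to be supplied by the caller.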
#### File: reframe/unittests/test_containers.py
```python
import abc
import unittest
import pytest
import reframe.core.containers as containers
from reframe.core.exceptions import ContainerError
class _ContainerPlatformTest(abc.ABC):
@abc.abstractmethod
def create_container_platform(self):
pass
@property
@abc.abstractmethod
def exp_cmd_mount_points(self):
pass
@property
@abc.abstractmethod
def exp_cmd_custom_registry(self):
pass
def setUp(self):
self.container_platform = self.create_container_platform()
def test_mount_points(self):
self.container_platform.image = 'name:tag'
self.container_platform.mount_points = [('/path/one', '/one'),
('/path/two', '/two')]
self.container_platform.commands = ['cmd1', 'cmd2']
self.container_platform.workdir = '/stagedir'
assert (self.exp_cmd_mount_points ==
self.container_platform.emit_launch_cmds())
def test_missing_image(self):
self.container_platform.commands = ['cmd']
with pytest.raises(ContainerError):
self.container_platform.validate()
def test_missing_commands(self):
self.container_platform.image = 'name:tag'
with pytest.raises(ContainerError):
self.container_platform.validate()
def test_custom_registry(self):
self.container_platform.registry = 'registry/custom'
self.container_platform.image = 'name:tag'
self.container_platform.commands = ['cmd']
self.container_platform.mount_points = [('/path/one', '/one')]
self.container_platform.workdir = '/stagedir'
assert (self.exp_cmd_custom_registry ==
self.container_platform.emit_launch_cmds())
class TestDocker(_ContainerPlatformTest, unittest.TestCase):
def create_container_platform(self):
return containers.Docker()
@property
def exp_cmd_mount_points(self):
return ('docker run -v "/path/one":"/one" -v "/path/two":"/two" '
"name:tag bash -c 'cd /stagedir; cmd1; cmd2'")
@property
def exp_cmd_custom_registry(self):
return ('docker run -v "/path/one":"/one" registry/custom/name:tag '
"bash -c 'cd /stagedir; cmd'")
```
|
{
"source": "jfayaz/Obtain_Hazard_and_Deaggregation_Data_from_USGS",
"score": 3
}
|
#### File: jfayaz/Obtain_Hazard_and_Deaggregation_Data_from_USGS/process_deag.py
```python
from urllib.request import urlopen
import pandas as pd
import numpy as np
import json
def url_resp_values_deag(url_final):
#deagg capture responses
# Opening url
#print(url_final)
deag_response = urlopen(url_final)
# Converting response to str format
response_1 = deag_response.read()
deag_response.close()
return response_1
def url_deag_process(lm,sfmt,sfmt_2):
### ---------- HAZARD CURVES ---------- ###
Deag_data_avaliable = 'No'
lm['vs30'] = np.int(lm['vs30'])
k,urls = checking_deag_urls(lm,sfmt,sfmt_2)
if k == 0:
Deag_data_avaliable = 'No'
print('\nNo Response from USGS for Deaggregation')
print('\nUSGS Server Busy! No Response from USGS. Please try again after sometime.')
return Deag_data_avaliable,0
else:
params_deag = lm.apply(lambda x: sfmt(**x), 1)
for i,row in enumerate(params_deag.values):
url_deag = urls + row
response_deag = url_resp_values_deag(url_deag)
data = json.loads(response_deag)
if data['status'] == 'success':
Deag_data_avaliable = 'Yes'
return Deag_data_avaliable,data
else:
print('\nNo Response from USGS for Deaggregation')
print('\nUSGS Server Busy! No Response from USGS. Please try again after sometime.')
return Deag_data_avaliable,0
def checking_deag_urls(lm,sfmt,sfmt_2):
url_responses = {}
data = pd.DataFrame()
url_head = ["https://earthquake.usgs.gov/nshmp-haz-ws/deagg/","https://prod01-earthquake.cr.usgs.gov/nshmp-haz-ws/deagg/"]
url_tail_1 = list(lm.apply(lambda x: sfmt(**x), 1))
url_tail_2 = list(lm.apply(lambda x: sfmt_2(**x), 1))
urls = {1:url_head[0]+url_tail_1[0],2:url_head[0]+url_tail_2[0],3:url_head[1]+url_tail_1[0],4:url_head[1]+url_tail_2[0]}
for i in range(1,5):
data = pd.DataFrame()
#print("\n\n Checking deaggregation URL:", i)
#print(urls[i])
df = url_resp_values_deag(urls[i])
data = json.loads(df)
#print("\n Response from URL:", data['status'])
url_responses.update({i:data['status']})
for k, v in url_responses.items():
if "success" == v and k in (1,3):
return k,url_head[0]
elif "success" == v and k in (2,4):
return k,url_head[1]
else:
return 0,url_head[0]
```
|
{
"source": "jfb1121/ds_algo",
"score": 4
}
|
#### File: linked_list/single_linked_list/single_linked_list_utility.py
```python
from copy import copy
def get_no_of_nodes_to_end(node):
"""
a utility function to calculate the no of elements to
end of the linked list from the current node
"""
count = 0
while node.has_next():
node = node.get_next()
count = count + 1
return count
def add_element_at_end(head,
node):
"""
adds a new node at the end of the linked list
"""
last_element = traverse_to_end(head)
last_element.next = node
node.next = None
def traverse_to_end(head):
"""
traverses to the last node in the linked list and returns it
"""
node = copy(head)
while node.has_next():
node = node.get_next()
return node
def add_element_at_a_position(head,
element,
position):
"""
adds a new element to the linked list at the given position if it exists
"""
if position == 0:
head = add_element_at_beginning(head,
element)
else:
try:
node_before_position = traverse_to_position(head,
position - 1)
insert_as_next_node(node_before_position,
element)
except Exception:
return False
return True
def traverse_to_position(head,
position):
"""
    traverses to the node at the given position
"""
if position == 0:
return head
count = 0
node = copy(head)
while count < position:
if node.has_next():
node = node.get_next()
count += 1
else:
raise Exception('position out of reach')
return node
def insert_as_next_node(current_node,
new_node):
"""
inserts the new node that needs to be inserted after the current node
"""
if current_node.has_next():
temp_holder = current_node.get_next()
current_node.set_next(new_node)
new_node.set_next(temp_holder)
else:
current_node.set_next(new_node)
new_node.set_next(None)
def add_element_at_beginning(head,
element):
"""
adds a new element at the beginning and returns it as the new head
"""
element.set_next(head)
return element
```
|
{
"source": "jfb8856606/bpftools",
"score": 3
}
|
#### File: bpftools/bpftools/utils.py
```python
import os
import struct
import subprocess
import sys
from pkg_resources import resource_filename
def find_binary(prefixes, name, args):
for prefix in prefixes:
try:
subprocess.call([os.path.join(prefix, name)] + args)
except OSError, e:
continue
return prefix
print >> sys.stderr, prefix, "%r tool not found in your PATH" % (name,)
os._exit(-2)
def bpf_compile(assembly):
prefixes = [resource_filename(__name__, os.path.join("..","linux_tools")),
resource_filename(__name__, "linux_tools"),
".",
"linux_tools",
os.path.dirname(sys.argv[0]),
os.path.realpath(os.path.dirname(sys.argv[0])),
]
prefix = find_binary(prefixes, "bpf_asm", ['/dev/null'])
out, err = subprocess.Popen([os.path.join(prefix, "bpf_asm")],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate(assembly)
if set(out) - set(" ,0123456789\n") or not out:
print >> sys.stderr, "Compiling failed with:\n%s\n" % (out.strip() + err.strip())
os._exit(-3)
return out.strip()
def _looks_like_ip(l2, off):
ipver, _, total_length = struct.unpack_from('!BBH', l2, off)
if (ipver & 0xF0 == 0x40 and (ipver & 0x0f) >= 5):
return 4
vertos, _, _, pay_len, proto, ttl = struct.unpack_from('!BBHHBB', l2, off)
if (vertos & 0xF0 == 0x60 and pay_len + off + 40 == len(l2)
and ttl > 0):
return 6
return None
def find_ip_offset(l2, max_off=40):
# first look for both ethernet and ip header
for off in xrange(2, max_off+2, 2):
if l2[off-2:off] == '\x08\x00' and _looks_like_ip(l2, off) == 4:
return off
if l2[off-2:off] == '\x86\xdd' and _looks_like_ip(l2, off) == 6:
return off
# okay, just look for ip header
for off in xrange(0, max_off, 2):
if _looks_like_ip(l2, off):
return off
return None
def do_scrub(l2, off):
data = list(l2)
if off not in (14, 16):
raise Exception("off=%i Not ethernet, not sure how to scrub MACS" % off)
for i in xrange(off-2):
data[i] = '\x00'
ipver = ord(data[off])
if ipver & 0xF0 == 0x40:
for i in xrange(off+12, off+12+4+4):
data[i] = '\x00'
elif ipver & 0xF0 == 0x60:
for i in xrange(off+8, off+8+16+16):
data[i] = '\x00'
return ''.join(data)
```
|
{
"source": "jfbaeta/hope",
"score": 2
}
|
#### File: hope/models/Data.py
```python
class Data(object):
'''
Class used for /hana/data attributes.
Attributes and methods are passed to other LVM Classes.
'''
name = 'data'
vg_physical_extent_size = '-s 1M'
vg_data_alignment = '--dataalignment 1M'
vg_args = vg_physical_extent_size + ' ' + vg_data_alignment
lv_stripes = '-i 4'
lv_stripe_size = '-I 256'
lv_size = '-l 100%VG'
lv_args = lv_stripes + ' ' + lv_stripe_size + ' ' + lv_size
fs_block_size = '-b size=4096'
fs_sector_size = '-s size=4096'
fs_type = 'xfs'
fs_mount_point = '/hana/data'
fs_args = fs_block_size + ' ' + fs_sector_size
def __init__(self):
super(Data, self).__init__()
```
#### File: hope/models/Shared.py
```python
class Shared(object):
'''
Class used for /hana/shared attributes.
Attributes and methods are passed to other LVM Classes.
'''
name = 'shared'
vg_physical_extent_size = '-s 1M'
vg_data_alignment = '--dataalignment 1M'
vg_args = vg_physical_extent_size + ' ' + vg_data_alignment
lv_size = '-l 100%VG'
lv_args = lv_size
fs_block_size = '-b size=4096'
fs_sector_size = '-s size=4096'
fs_type = 'xfs'
fs_mount_point = '/hana/shared'
fs_args = fs_block_size + ' ' + fs_sector_size
def __init__(self):
super(Shared, self).__init__()
```
#### File: hope/models/UsrSap.py
```python
class UsrSap(object):
'''
Class used for /usr/sap attributes.
Attributes and methods are passed to other LVM Classes.
'''
name = 'usrsap'
vg_args = ''
lv_size = '-l 100%VG'
lv_args = lv_size
fs_type = 'ext3'
fs_mount_point = '/usr/sap'
fs_args = ''
def __init__(self):
super(UsrSap, self).__init__()
```
#### File: hope/models/VolumeGroup.py
```python
from Formatter import Formatter
from PhysicalVolume import PhysicalVolume
from Root import Root
from UsrSap import UsrSap
from Data import Data
from Log import Log
from Shared import Shared
import json, os, re, subprocess
class VolumeGroup(object):
'''
Class used for List, Creation and Removal of Volume Groups.
Attributes and methods are used by Formatter Class to output results.
'''
general_header = 'Volume Groups:'
index_header = 'Index:'
name_header = 'Name:'
size_header = 'Size:'
free_header = 'Free:'
max_index_header = len(index_header)
max_name_header = len(name_header)
max_size_header = len(size_header)
max_free_header = len(free_header)
list_headers = []
list_headers.append(index_header)
list_headers.append(name_header)
list_headers.append(size_header)
list_headers.append(free_header)
def __init__(self, index='', name='', size='', free=''):
super(VolumeGroup, self).__init__()
self.__list = []
self.__index = index
self.__name = name
self.__size = size
self.__free = free
@property
def index(self):
return self.__index
@property
def name(self):
return self.__name
@property
def size(self):
return self.__size
@property
def free(self):
return self.__free
@property
def all(self):
list_all = []
list_all.append(self.__index)
list_all.append(self.__name)
list_all.append(self.__size)
list_all.append(self.__free)
return list_all
@property
def lengths(self):
self.list_max_lenghts = []
if len(self.__index) > self.max_index_header:
self.max_index_header = len(self.__index)
if len(self.__name) > self.max_name_header:
self.max_name_header = len(self.__name)
if len(self.__size) > self.max_size_header:
self.max_size_header = len(self.__size)
if len(self.__free) > self.max_free_header:
self.max_free_header = len(self.__free)
self.list_max_lenghts.append(self.max_index_header)
self.list_max_lenghts.append(self.max_name_header)
self.list_max_lenghts.append(self.max_size_header)
self.list_max_lenghts.append(self.max_free_header)
return self.list_max_lenghts
@property
def header(self):
return self.general_header
def add(self, resource):
self.__list.append(resource)
def get(self):
return self.__list
def detect(self):
'''
Method to detect current LVM Volume Groups.
It relies on 'vgs' output.
'''
temp_vgs_list = []
reg_exps = [
re.compile(r'(\w+)(?:.*)'),\
re.compile(r'(?::)(.*)(?::)'),\
re.compile(r'(?:.*:)(.*)'),\
]
cmd_vgs_list = subprocess.Popen(['vgs -o vg_name,vg_size,vg_free --noheadings --unbuffered --separator : 2> /dev/null'], stdout=subprocess.PIPE, shell=True).communicate()[0]
for reg_exp in reg_exps:
reg_exp_result = re.findall(reg_exp, cmd_vgs_list)
temp_vgs_list.append(reg_exp_result)
vgs_list = zip(*temp_vgs_list)
vg_index = 0
for vg_list in vgs_list:
vg_name = vg_list[0]
vg_size = vg_list[1]
vg_free = vg_list[2]
self.add(VolumeGroup(index=str(vg_index), name=vg_name, size=vg_size, free=vg_free))
vg_index+=1
def show(self):
'''
Method to show current LVM Volume Groups.
It uses detect method to parse results and Formatter class to output it.
'''
self.detect()
return Formatter().show(self)
def create(self):
'''
Method to create LVM Volume Groups based on interactive user input.
It relies on 'vgcreate' command.
'''
usrsap = UsrSap()
data = Data()
log = Log()
shared = Shared()
pvs = PhysicalVolume()
purposes = [usrsap, data, log, shared]
pvs.show()
for purpose in purposes:
print 'Type Volume Group \033[1mNAME\033[0m for %s:' % (purpose.fs_mount_point),
vg_name = raw_input()
print 'Type Physical Volume \033[1mINDEXES\033[0m for %s (comma-separated):' % (vg_name),
pv_indexes = re.findall('\d+', raw_input())
pv_names = ''
for pv_index in pv_indexes:
for pv in pvs.get():
if pv.index == pv_index:
pv_names += '%s ' % (pv.name)
cmd_vgcreate = 'vgcreate %s %s %s' % (purpose.vg_args, vg_name, pv_names)
os.system(cmd_vgcreate)
self.show()
def create_from_config_file(self):
'''
Method to create LVM Volume Groups based on a JSON config file.
It relies on 'vgcreate' command.
'''
usrsap = UsrSap()
data = Data()
log = Log()
shared = Shared()
purposes = [usrsap, data, log, shared]
with open('/opt/hope/config/config.json', 'r') as config_file:
config = json.load(config_file)
for purpose in purposes:
for purpose_key, purpose_value in config.items():
if purpose_key == purpose.name:
pv_names = ''
for pv in purpose_value['pvs']:
pv_names += '/dev/mapper/%s ' % (pv['alias'])
os.system('vgcreate %s %s %s' % (purpose.vg_args, purpose_value['vg'], pv_names))
self.show()
def remove(self):
'''
Method to remove LVM Volume Groups file and reload multipaths.
It doesn't detect if there's LVM in place neither asks for user confirmation.
'''
vgs = VolumeGroup()
self.show()
print 'Type Volume Group \033[1mINDEXES\033[0m to remove (comma-separated):',
vg_indexes = re.findall('\d+', raw_input())
for vg_index in vg_indexes:
for vg in self.get():
if vg.index == vg_index:
cmd_vgremove = 'vgremove -f %s' % (vg.name)
os.system(cmd_vgremove)
vgs.show()
```
|
{
"source": "jfbastien/pnacl-compiler-rt",
"score": 2
}
|
#### File: builtins/Unit/nacltest.py
```python
import os
import shutil
import subprocess
import sys
import tempfile
def find_run_py():
""" Find run.py somewhere in one of the parents of the current directory."""
dir = os.path.abspath(os.path.dirname(__file__))
while os.path.dirname(dir) != dir:
run_py = os.path.join(dir, 'run.py')
if os.path.isfile(run_py):
return run_py
dir = os.path.dirname(dir)
print 'Could not find run.py'
sys.exit(1)
def parse_args(argv):
cc = argv[1]
tests = argv[2:]
return cc, tests
def get_tests(functions, test_dir):
""" Find the test files for the given functions.
Given a list of functions to test, find each corresponding compiler-rt
unit test, if it exists. The functions may be specified as bare function
names (with no underscores), or source files of the same name.
"""
tests = []
for function in functions:
# If the argument is specified as a source file, strip the suffix.
if function.endswith('.c'):
function = function[:-2]
test_src = os.path.join(test_dir, '%s_test.c' % function)
if os.path.isfile(test_src):
tests.append(test_src)
else:
print 'no test for', function
return tests
def main(argv):
cc, functions = parse_args(argv)
cc = os.path.abspath(cc)
src_root = os.getcwd()
test_src_dir = os.path.join(src_root, 'test', 'builtins', 'Unit')
inc_dir = os.path.join(src_root, 'lib', 'builtins')
tests = get_tests(functions, test_src_dir)
run_py = find_run_py()
failures = 0
workdir = tempfile.mkdtemp()
for test in tests:
flags = ['-lm']
exe_suffix = '.pexe' if 'pnacl-clang' in cc else '.nexe'
nexe = os.path.join(workdir, os.path.basename(test + exe_suffix))
build_cmd = [cc, '-I' + inc_dir, '-o', nexe, test] + flags
run_cmd = [run_py, '-arch', 'env', nexe]
try:
print ' '.join(build_cmd)
subprocess.check_call(build_cmd)
print ' '.join(run_cmd)
subprocess.check_call(run_cmd)
except subprocess.CalledProcessError, e:
print '[ FAILED ]:', test
print e
failures += 1
shutil.rmtree(workdir)
return failures
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
|
{
"source": "jfbermudezbe/Trabajo-1-Sistemas-Operativos",
"score": 3
}
|
#### File: jfbermudezbe/Trabajo-1-Sistemas-Operativos/test.py
```python
import signal
import threading
import time
import sys
import subprocess
import os
proc = None
def mostrarTareas():
x = subprocess.check_output(
"ps -A -o stat,user,ppid,pid,cmd --cols 100 | grep '^S\|^R'", shell=True)
x = x.decode('UTF-8')
print(x)
def imprimirmenu():
return '''
1: Show
2: Create
3: Kill
5: exit
'''
if __name__ == "__main__":
print('''
1: Show
2: Create
3: Kill
5: exit
''')
op = int(input())
while op <= 5 or op > 0:
uid = os.getuid()
if op == 1:
mostrarTareas()
elif op == 2:
print(uid)
result = subprocess.check_output('cut -d: -f1 /etc/passwd', shell=True)
print(str(result).split("\\n"))
            usuario = int(input("Enter the user UID\n"))
try:
os.setuid(usuario)
os.fork()
print(os.getpid())
print("Proceso creado ")
except:
print("no se pudo crear, solo se puede crear 2 procesos(padre e hijo) por cada vez que se ejecuta el programa")
elif op == 3:
x = int(input("Ingrese PID: \n"))
p = os.kill(x, signal.SIGKILL)
print("Eliminado ")
elif op == 5:
break
print(imprimirmenu())
op = int(input())
        # hello
```
|
{
"source": "jfbethlehem/Scripts",
"score": 2
}
|
#### File: jfbethlehem/Scripts/nessus_download_merge_and_upload.py
```python
import requests, json, sys, os, getpass, time, shutil, ssl
import xml.etree.ElementTree as etree
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from socket import error as SocketError
import errno
#=========DEBUG=========
#import logging
#logging.basicConfig(level=logging.DEBUG)
#
#import http.client
#
#http.client.HTTPConnection.debuglevel = 1
#
#logging.basicConfig()
#logging.getLogger().setLevel(logging.DEBUG)
#requests_log = logging.getLogger("requests.packages.urllib3")
#requests_log.setLevel(logging.DEBUG)
#requests_log.propagate = True
#=========END DEBUG===========
url = 'https://host:8834'
verify = False
token = ''
username = 'admin'
password = '<PASSWORD>'
destpath = '/var/log/nessusscans/'
merged_folder_id = 682
def build_url(resource):
return '{0}{1}'.format(url, resource)
def connect(method, resource, data=None):
headers = {'X-Cookie': 'token={0}'.format(token), 'content-type': 'application/json'}
data = json.dumps(data)
if method == 'POST':
r = requests.post(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'PUT':
r = requests.put(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'DELETE':
r = requests.delete(build_url(resource), data=data, headers=headers, verify=verify)
return
else:
r = requests.get(build_url(resource), params=data, headers=headers, verify=verify)
if r.status_code != 200:
e = r.json()
print('Connect: Error: {0}'.format(e['error']))
sys.exit()
if 'download' in resource:
return r.content
else:
return r.json()
def login(usr, pwd):
    login = {'username': usr, 'password': pwd}
data = connect('POST', '/session', data=login)
return data['token']
def logout():
connect('DELETE', '/session')
def list_scan():
data = connect('GET', '/scans')
return data
def count_scan(scans, folder_id):
count = 0
for scan in scans:
if scan['folder_id']==folder_id: count=count+1
return count
def print_scans(data):
for folder in data['folders']:
print("\\{0} - ({1})\\".format(folder['name'], count_scan(data['scans'], folder['id'])))
for scan in data['scans']:
if scan['folder_id']==folder['id']:
print("\t\"{0}\" - uuid: {1}".format(scan['name'].encode('utf-8'), scan['uuid']))
def export_status(scan_id, file_id):
data = connect('GET', '/scans/{0}/export/{1}/status'.format(scan_id, file_id))
return data['status'] == 'ready'
def get_folder_id(serch_folder_name, data):
folder_id = 0;
for folder in data['folders']:
if folder['name']==serch_folder_name:
folder_id = folder['id']
break
return folder_id
def export_folder(folder_name, data):
if folder_name == 'All' or folder_name == 'all':
for scan in data['scans']:
file_id = export(scan['id'])
download(scan['name'], scan['id'], file_id,os.path.join(os.getcwd(),destpath))
else:
folder_id = get_folder_id(folder_name,data)
if count_scan(data['scans'], folder_id)==0:
print("This folder does not contain reports")
return
if folder_id!=0:
for scan in data['scans']:
if scan['folder_id'] == folder_id:
file_id = export(scan['id'])
download(scan['name'], scan['id'], file_id, os.path.join(os.getcwd(),destpath))
else:
print("No such folder...")
def export(scan_id):
data = {'format': 'nessus'}
data = connect('POST', '/scans/{0}/export'.format(scan_id), data=data)
file_id = data['file']
while export_status(scan_id, file_id) is False:
time.sleep(5)
return file_id
def download(report_name, scan_id, file_id, save_path):
if not(os.path.exists(save_path)): os.mkdir(save_path)
data = connect('GET', '/scans/{0}/export/{1}/download'.format(scan_id, file_id))
file_name = 'nessus_{0}_{1}.nessus'.format(report_name.encode('utf-8'), file_id)
file_name = file_name.replace(' ', '_')
file_name = file_name.replace("\'", "")
print('Saving scan results to {0}'.format(file_name))
with open(os.path.join(save_path,file_name), 'wb') as f:
f.write(data)
donefile = '{0}_done'.format(os.path.join(save_path,file_name))
print('Data saved to {0}, writing {1}'.format(file_name, donefile))
with open(donefile, 'wb') as fd:
fd.write(bytes('', 'UTF-8'))
print('Done-file written')
def merge():
print('waiting for 60 seconds before merging and uploading.\n');
for i in range(0,60):
time.sleep(1)
print('.', end='',flush=True)
print('\nDone waiting.')
first = 1
for fileName in os.listdir(destpath):
if ".nessus_processed" in fileName:
print(":: Parsing", fileName)
if first:
mainTree = etree.parse('{0}/{1}'.format(destpath,fileName))
report = mainTree.find('Report')
report.attrib['name'] = 'Merged Report'
first = 0
else:
tree = etree.parse('{0}/{1}'.format(destpath,fileName))
for element in tree.findall('.//ReportHost'):
report.append(element)
print(":: => done.")
if "nss_report" in os.listdir(destpath):
shutil.rmtree('{0}/nss_report'.format(destpath))
os.mkdir('{0}/nss_report'.format(destpath))
mainTree.write('{0}/nss_report/report.nessus_merged'.format(destpath), encoding="utf-8", xml_declaration=True)
def upload(upload_file, count=0):
"""
File uploads don't fit easily into the connect method so build the request
here instead.
"""
try:
params = {'no_enc': 0}
headers = {'X-Cookie': 'token={0}'.format(token)}
filename = os.path.basename(upload_file)
files = {'Filename': (filename, filename),
'Filedata': (filename, open(upload_file, 'r'))}
print('Uploading file now.')
r = requests.post(build_url('/file/upload'), params=params, files=files,
headers=headers, verify=verify)
print('done')
resp = r.json()
print('{0} {1} {2}'.format(count, resp['fileuploaded'], r.status_code))
if r.status_code != 200:
print('Upload: Error: {0}'.format(resp['error']))
if count < 5:
count = count + 1
print('ErrNot200: Retrying upload ({0}/5)'.format(count))
time.sleep(5)
return upload(upload_file, count)
else:
print('Upload failed too often. Aborting.')
                sys.exit()
return resp['fileuploaded']
except SocketError as e:
if count < 5:
count = count + 1
print('SocketErr: Retrying upload ({0}/5) {1}'.format(count, e))
time.sleep(5)
return upload(upload_file, count)
else:
print('Upload failed too often. Aborting.')
            sys.exit()
def import_scan(filename):
im_file = {'file': filename, 'folder_id': merged_folder_id}
print('Importing uploaded report {0} into Nessus'.format(filename))
data = connect('POST', '/scans/import', data=im_file)
print('Done')
scan_name = data['scan']['name']
print('Successfully imported the scan {0}.'.format(scan_name))
for the_file in os.listdir(destpath):
file_path = os.path.join(destpath, the_file)
if os.path.isfile(file_path):
print("Deleting {0}".format(file_path))
os.unlink(file_path)
print("Logging in...")
token = login(username, password)
print("List of reports...")
rep_list = list_scan()
print_scans(rep_list)
print("Exporting reports...")
export_folder('scans', rep_list)
merge()
#fn = upload('{0}/nss_report/report.nessus_merged'.format(destpath))
fn = upload(os.path.join(destpath, 'nss_report/report.nessus_merged'))
if fn != None:
import_scan(fn)
logout()
```
|
{
"source": "jfbg/insight",
"score": 4
}
|
#### File: insight/notebooks/fractions.py
```python
def gcd(m,n):
while m%n != 0:
oldm = m
oldn = n
m = oldn
n = oldm%oldn
return n
class Fraction:
def __init__(self,top,bottom):
self.num = top
self.den = bottom
def show(self):
print(''.join([str(self.num),"/",str(self.den)]))
def __str__(self): #what to return for print(class)
return str(self.num)+"/"+str(self.den)
def __add__(self,otherfraction):
newnum = self.num*otherfraction.den + self.den*otherfraction.num
newden = self.den * otherfraction.den
common = gcd(newnum,newden)
return Fraction(newnum//common,newden//common)
def __mul__(self, otherfraction):
newnum = self.num * otherfraction.num
newden = self.den * otherfraction.den
common = gcd(newnum,newden)
return Fraction(newnum//common,newden//common)
```
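A short usage sketch for the class above, assuming it is run in the same file (note that naming the file fractions.py shadows the standard-library `fractions` module, which can surprise other imports):
```python
half = Fraction(1, 2)
quarter = Fraction(1, 4)
print(half + quarter)   # 3/4
print(half * quarter)   # 1/8
half.show()             # 1/2
```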
|
{
"source": "jfbilodeau/ansible-module-example",
"score": 2
}
|
#### File: jfbilodeau/ansible-module-example/ios_version.py
```python
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ios import run_commands
import re
def main():
module = AnsibleModule(dict())
commands = [ 'show version' ]
responses = run_commands(module, commands)
response = responses[0]
match = re.search('Version ([^(]+)', response)
version = match.group(1)
module.exit_json(changed=False, success=True, responses=responses, version=version)
if __name__ == '__main__':
main()
```
|
{
"source": "jfblanchard/gaussian-beam",
"score": 4
}
|
#### File: jfblanchard/gaussian-beam/gaussian1D_profile.py
```python
import numpy as np
import matplotlib.pyplot as plt
def gaussian_1D_profile(x_min, x_max, x_step, center, sigma, amplitude):
"""Function to create a 1D Gaussian distribution.
Parameters
----------
x_min, x_max, x_step: float, float, float
Creates a sequence (1D ndarray) of points over which to compute the Gaussian
center: float
The center point of the gaussian profile
sigma: float
1/e-squared width of beam
amplitude: float
Amplitude at peak value
Returns
-------
x,y: ndarray
the gaussian profile amplitude values
"""
x = np.arange(x_min, x_max,x_step) #create spatial array
d = 2*float(sigma)
y = amplitude*np.e**(-2*np.power((x-center)/d, 2))
return x,y
# todo: learn how to do proper unit testing...here's some manual checks
# what if center > max(X)? still works, just get the tail end
# what if center, sigma negative? Since is getting squared, doesn't matter
# what if amplitude is neg or zero? Straight line at zero
# what if d = 0? Straight line
# what if the ndarray goes negative? Is ok.
# What if the array is empty or null? should catch an error.
def plot_1d_gaussian(x,y,hold=True):
"""Plot the gaussian profile.
Parameters
----------
x: ndarray
X axis values
y: float
Y axis values
"""
plt.hold = hold
plt.plot(x,y)
plt.xlabel('X axis')
plt.ylabel('Amplitude')
plt.title('Gaussian 1D Profile')
plt.show()
# todo: check if the hold true works or not
if __name__ == '__main__':
x,y = gaussian_1D_profile(-50,50,.2, 0, 10, 1)
plot_1d_gaussian(x,y,True)
```
|
{
"source": "jfblanchard/md5-verification",
"score": 3
}
|
#### File: jfblanchard/md5-verification/verify_md5_qt.py
```python
import os
import sys
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5.QtGui import QColor
from hash_verify_gui import Ui_hashDialog
from PyQt5 import QtWidgets
import hashlib
class HashDialog(QDialog):
def __init__(self):
super(HashDialog, self).__init__()
# Set up the user interface from Designer.
self.ui = Ui_hashDialog()
self.ui.setupUi(self)
self.filename = ''
self.digest = ''
self.original = ''
# Connect up the buttons.
self.ui.browseButton.clicked.connect(self.browse_file)
self.ui.startButton.clicked.connect(self.verify_digest)
self.ui.closeButton.clicked.connect(self.close_me)
self.ui.resetButton.clicked.connect(self.reset_me)
def browse_file(self):
self.filename = QtWidgets.QFileDialog.getOpenFileName(None)[0]
self.ui.inputEdit.setText(self.filename)
def close_me(self):
self.close()
def reset_me(self):
self.ui.digestEdit.setText('')
self.filename = ''
self.ui.inputEdit.setText('')
self.ui.resultsBox.setText('')
self.ui.progressBar.setValue(0)
def verify_digest(self):
""" Function to compute hash sums.
"""
# check if empty strings.
# hashes = hashlib.algorithms_available
original = self.ui.digestEdit.text()
print('original = ' + original)
h = hashlib.new('md5')
# h = hashlib.new('sha256')
if os.path.isfile(self.filename):
bytes_read = 0
block_size = 128 #bytes
file_size = os.path.getsize(self.filename)
with open(self.filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b""):
h.update(block)
bytes_read += block_size
self.ui.progressBar.setValue(100*bytes_read/file_size)
computed_hash = h.hexdigest()
# compare results...move to separate function?
self.ui.resultsBox.setTextColor(QColor(0, 0, 0))
self.ui.resultsBox.setText('Original: ' + original)
self.ui.resultsBox.append('Computed: ' + computed_hash + '\n')
if original == computed_hash:
self.ui.resultsBox.setTextColor(QColor(50, 225, 50))
self.ui.resultsBox.append('Success. File Integrity Verified')
else:
self.ui.resultsBox.setTextColor(QColor(225, 50, 50))
self.ui.resultsBox.append('Verification Failed.')
else:
QtWidgets.QMessageBox.warning(None, 'Warning', 'File not found. Please check file name.')
if __name__ == '__main__':
app = 0
app = QApplication(sys.argv)
window = HashDialog()
window.show()
sys.exit(app.exec_())
# Todo:
# Add more algorithm types
```
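The chunked read in `verify_digest` is the core idea: hash the file block by block so large files never need to fit in memory. A minimal command-line sketch of the same loop without the Qt pieces (the block size below is an arbitrary choice; the 128-byte blocks used above work but are unusually small, so each iteration does very little work):
```python
import hashlib

def file_md5(path, block_size=64 * 1024):
    """Return the hex MD5 digest of a file, read in fixed-size blocks."""
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b""):
            h.update(block)
    return h.hexdigest()

if __name__ == '__main__':
    print(file_md5(__file__))
```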
|
{
"source": "jfblanchard/optical-tools",
"score": 3
}
|
#### File: jfblanchard/optical-tools/draw_lens_plt.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = .5
def draw_arc(loc,R,sa):
surf1_x, surf1_y = loc,0 #location of the surface, assume y=0 (optical axis)
xc,yc = surf1_x + R, 0 #center of sphere
#setup theta ranges and step size
theta_max = np.arcsin(sa/R)
theta_min = -1 * theta_max #symmetric about the origin
theta_steps = 100
theta_vals = np.linspace(theta_min,theta_max,theta_steps)
x = np.zeros(len(theta_vals))
y = np.zeros(len(theta_vals))
for i in range(len(theta_vals)):
x[i] = xc - R*np.cos(theta_vals[i])
y[i] = yc - R*np.sin(theta_vals[i])
plt.plot(x,y,'b')
return x,y
def draw_lens(R1,R2,th,sa,loc):
""" Draw a lens with radii R1 and R2, thickness th, and semi-aperture sa.
Todo: need flats for concave surfaces.
Need index
Make a path instead? Tyr this in draw_lens2
"""
ax = plt.subplot(111)
x1,y1 = draw_arc(loc,R1,sa) #draw surface 1 at origin
x2,y2 = draw_arc(loc+th,R2,sa) #draw surface 2 at th
ax.plot([x1[0],x2[0]],[y1[0],y2[0]],'b') #draw lower edge
ax.plot([x1[-1],x2[-1]],[y1[-1],y2[-1]],'b') #draw lower edge
ax.set_aspect('equal')
#plt.xlim(-20+loc,20+loc)
def draw_lens_path(loc, R1, R2, th, sa):
""" Draw a lens via path """
def draw_triplet():
""" Draw triplet from KDP """
draw_lens(40.91,1e10,8.74,18.5,0) # element 1
draw_lens(-55.65,1e10,2.78,14.5,19.8) # element 2
draw_lens(107.56,-43.33,9.55,15.5,28.6) # element 3
if __name__ == "__main__":
#example lenses
plt.figure()
draw_triplet()
#calculate system length and scale accordingly.
#draw_lens(-50,-20,4,10,10) #later this will come from a spreadsheet, or cmd
#draw_lens(50,-50,4,10,-10) # actually, think of the data structure first. json?
# then the table can read from it, the plot can,
# but want local datastructure too (not alwyas read and write from file.)
#plt.axis('off')
plt.tick_params(
axis='both',
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
labelleft=False,
labelbottom=False) # labels along the bottom edge are off
plt.axis('equal')
plt.show()
# this is where I belong.
```
#### File: jfblanchard/optical-tools/optical_calcs.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as const
import seaborn as sns
sns.set_style('whitegrid')
def diff_limited_spot(wavelength, f,D):
"""Compute the diffraction limited spot size achievable by a lens of
focal length f, wavelength lambda, and collimated input beam diameter D.
Units must match, and will return same in units.
Parameters
----------
wavelength : float
The wavelength in microns
f : float
The focal length of the lens
D: float
The diameter of the collimated input beam
Returns
-------
d: the diffraction limited spot size
"""
d = (4*wavelength*f)/(np.pi*D)
return d
def fnum(efl,diameter):
"""Compute the F-number from the efl and diameter. Both have to be the
same units.
Parameters
----------
efl : float
The focal length of the lens
diameter : float
The diameter of the input beam (in the same units as efl)
Returns
-------
fnum : float
The fnumber of the system
"""
fnum = efl/diameter
return fnum
def half_angle_from_NA(na, n=1,deg=True):
"""Compute the half angle of the cone of light from the NA value. From
the equation NA = n x sin(theta).
Parameters
----------
na : float
The numerical aperture
n : float (optional)
The index of the material. Default is 1.0 (air)
deg : bool (optional)
Return result in degrees or radians. Default is degrees.
Returns
-------
theta : float
The half angle of the cone of light in degrees
"""
if deg==True:
theta = np.rad2deg(np.arcsin(na/n))
else:
theta = np.arcsin(na/n)
return theta
def snells_law(n1,n2,theta1):
"""Compute the refracted ray angle (theta2) from index1,index2,
and angle in (theta1). Angle must be in the range -90 to 90 deg
Parameters
----------
n1 : float
Index of medium for the entering ray
n2 : float
Index of the medium the ray is entering into.
theta1 : float
Incident ray angle (degrees) measured from normal to the surface
Returns
-------
theta2 : float
The exiting angle of the ray after refraction (in degress),
measured from the surface normal.
"""
#need check for within -90 to 90 range, and handle it gracefully
theta1rad = np.deg2rad(theta1)
theta2rad = np.arcsin((n1/n2)*np.sin(theta1rad))
theta2 = np.rad2deg(theta2rad)
return theta2
def fresnel_refl(n1,n2,theta_i):
""" Compute the fresnel reflections at a dielectric surface with incident
index n1, and entering index n2, with incident angle theta_i (in radians).
Returns both the S and P polarized reflections.
"""
sterm1 = n1 * np.cos(theta_i)
sterm2 = n2*np.sqrt(1 - ((n1/n2)*np.sin(theta_i))**2)
Rs = ((sterm1 - sterm2)/(sterm1 + sterm2))**2
pterm1 = n2*np.cos(theta_i)
pterm2 = n1*np.sqrt(1 - ((n1/n2)*np.sin(theta_i))**2)
Rp = ((pterm2 - pterm1)/(pterm2 + pterm1))**2
#tested with 0 deg incidence, correct at 4% Reflection
#T = 1 - R
return Rs,Rp
def braggs_law():
"""Bragg's Law - unimplemented"""
pass
def irradiance(power,diameter,units='mm'):
"""Compute the irradiance (power per unit area 'W/cm*2') on a surface.
Parameters
----------
power : float
Power in watts
diameter : float
Spot size diameter in mm (default)
units : String (optinal)
units, valid = m,mm,um,nm
Returns
-------
irrad : float
The irradiance impinging on the surface in W/cm**2
"""
if units == 'mm':
d = .1*diameter
area = np.pi * d
irr = power/area
return irr
def newton_wedge_fringe_sep(alpha, wavelength):
"""Calculate the separation between fringes for an optical flat with angle
alpha."""
d = wavelength/(2*np.sin(alpha))
return d
def sag_depth(R,h):
""" Calculate sag depth of a shphere at height h. """
if np.abs(h) > np.abs(R):
print('height must be less than the raduis')
return
else:
theta = np.arcsin(h/R)
sag = R*(1-np.cos(theta))
return sag
def abbe_number(nd, nF, nC):
""" Compute the Abbe number (reciprocal dispersion). Using the visible F,
d, and C lines:
F(H): 486.1 nm
d(He): 587.6 nm
C(H): 656.3 nm
nd, nF, and nC are the refractive indicies at each of these three lines.
Todo: Alternately, select a glass type and compute these three n's.
"""
V = (nd - 1)/(nF - nC)
return V
if __name__ == "__main__":
#test some functions here
#test fresnel
theta = np.linspace(0,np.pi/2,100)
Rs,Rp = fresnel_refl(1,1.5,theta)
plt.figure()
plt.plot(np.rad2deg(theta),Rs, label = 'Rs')
plt.plot(np.rad2deg(theta),Rp, label = 'Rp')
plt.title('Fresenel Reflection vs. Angle of incidence')
plt.xlabel('Angle (deg)')
plt.ylabel('Reflection')
plt.legend()
plt.show()
```
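A short worked example with the helpers above (assuming this module is importable as optical_calcs; the numbers are only illustrative): a 10 mm collimated 633 nm beam focused by a 100 mm lens.
```python
from optical_calcs import diff_limited_spot, fnum, half_angle_from_NA

wavelength_mm = 0.633e-3   # 633 nm expressed in mm so all units match
f_mm, D_mm = 100.0, 10.0

spot_mm = diff_limited_spot(wavelength_mm, f_mm, D_mm)
print(round(spot_mm * 1000, 1), 'um spot')         # ~8.1 um
print(fnum(f_mm, D_mm))                            # f/10
print(round(half_angle_from_NA(0.05), 2), 'deg')   # ~2.87 deg
```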
#### File: jfblanchard/optical-tools/read_ohara.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#ohara has 140 glasses in 20171130 version
df = pd.read_csv('OHARA_20171130_6.csv', header=[0, 1])
#glass names are in column 1
glass = df[df.columns[1]].values
for i in range(len(glass)):
glass[i] = glass[i].replace(" ","") #make consitent with no spaces
#Index at sodium d-line (Nd) is in column 16
nd = df[df.columns[16]].values
columns = ['Nd', 'A1', 'A2', 'A3', 'B1', 'B2', 'B3']
# Create a new data frame with just the glass type, Nd, and sellmeiers.
# Todo: maybe add other properties.
# best format - pickled df, json, hdf5, yml?
df_sell = pd.DataFrame(index=glass,columns=columns)
df_sell = df_sell.fillna(0)
abbe = df[df.columns[26]].values
A1 = df[df.columns[60]].values
A2 = df[df.columns[61]].values
A3 = df[df.columns[62]].values
B1 = df[df.columns[63]].values
B2 = df[df.columns[64]].values
B3 = df[df.columns[65]].values
df_sell['Glass'] = glass
df_sell['Abbe'] = abbe
df_sell['Nd'] = nd
df_sell['A1'] = A1
df_sell['A2'] = A2
df_sell['A3'] = A3
df_sell['B1'] = B1
df_sell['B2'] = B2
df_sell['B3'] = B3
#plot
sns.set_style(style='whitegrid')
fig,ax = plt.subplots()
plt.title('Index vs. Abbe Number for Ohara Glass')
plt.ylabel('Refractive Index (Nd)')
plt.xlabel('Abbe Number')
plt.gca().invert_xaxis()
sc = plt.scatter(abbe, nd)
#annotations
annot = ax.annotate("", xy=(0,0), xytext=(10,10),textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
pos = sc.get_offsets()[ind["ind"][0]]
annot.xy = pos
text = "{}, {}".format(" ".join([glass[n] for n in ind["ind"]]),
" ".join(str([nd[n] for n in ind["ind"]])))
annot.set_text(text)
#annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind["ind"][0]])))
annot.get_bbox_patch().set_alpha(0.4)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = sc.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
#connect the hover function and show the plot
fig.canvas.mpl_connect("motion_notify_event", hover)
plt.show()
#save the data frame as json and pickle
path = os.getcwd()
df_sell.to_json(path + '/ohara_glasses.json')
pd.to_pickle(df_sell, path + '/ohara_glasses.pkl')
#later add schott glasses too
#schottdf = pd.read_csv('schott-optical-glass-06032017.csv') #utf-8 error
# Reference: index, glass types
#0 S-FPL51
#1 S-FPL53
#2 S-FPL55
#3 S-FPM2
#4 S-FPM3
#5 S-FSL5
#6 S-BSL7
#7 S-BSM2
#8 S-BSM4
#9 S-BSM10
#10 S-BSM14
#11 S-BSM15
#12 S-BSM16
#13 S-BSM18
#14 S-BSM22
#15 S-BSM25
#16 S-BSM28
#17 S-BSM71
#18 S-BSM81
#19 S-NSL3
#20 S-NSL5
#21 S-NSL36
#22 S-BAL2
#23 S-BAL3
#24 S-BAL12
#25 S-BAL14
#26 S-BAL35
#27 S-BAL41
#28 S-BAL42
#29 S-BAM4
#30 S-BAM12
#31 S-BAH10
#32 S-BAH11
#33 S-BAH27
#34 S-BAH28
#35 S-BAH32
#36 S-PHM52
#37 S-PHM53
#38 S-TIL1
#39 S-TIL2
#40 S-TIL6
#41 S-TIL25
#42 S-TIL26
#43 S-TIL27
#44 S-TIM1
#45 S-TIM2
#46 S-TIM5
#47 S-TIM8
#48 S-TIM22
#49 S-TIM25
#50 S-TIM27
#51 S-TIM28
#52 S-TIM35
#53 S-TIM39
#54 S-TIH1
#55 S-TIH3
#56 S-TIH4
#57 S-TIH6
#58 S-TIH10
#59 S-TIH11
#60 S-TIH13
#61 S-TIH14
#62 S-TIH18
#63 S-TIH23
#64 S-TIH53
#65 S-TIH53W
#66 S-TIH57
#67 S-LAL7
#68 S-LAL8
#69 S-LAL9
#70 S-LAL10
#71 S-LAL12
#72 S-LAL13
#73 S-LAL14
#74 S-LAL18
#75 S-LAL19
#76 S-LAL20
#77 S-LAL54
#78 S-LAL54Q
#79 S-LAL58
#80 S-LAL59
#81 S-LAL61
#82 S-LAM2
#83 S-LAM3
#84 S-LAM7
#85 S-LAM52
#86 S-LAM54
#87 S-LAM55
#88 S-LAM58
#89 S-LAM59
#90 S-LAM60
#91 S-LAM61
#92 S-LAM66
#93 S-LAM73
#94 S-LAH51
#95 S-LAH52
#96 S-LAH52Q
#97 S-LAH53
#98 S-LAH53V
#99 S-LAH55V
#100 S-LAH55VS
#101 S-LAH58
#102 S-LAH59
#103 S-LAH60
#104 S-LAH60V
#105 S-LAH63
#106 S-LAH63Q
#107 S-LAH64
#108 S-LAH65V
#109 S-LAH65VS
#110 S-LAH66
#111 S-LAH71
#112 S-LAH79
#113 S-LAH88
#114 S-LAH89
#115 S-LAH92
#116 S-LAH93
#117 S-LAH95
#118 S-LAH96
#119 S-LAH97
#120 S-YGH51
#121 S-FTM16
#122 S-NBM51
#123 S-NBH5
#124 S-NBH8
#125 S-NBH51
#126 S-NBH52
#127 S-NBH52V
#128 S-NBH53
#129 S-NBH53V
#130 S-NBH55
#131 S-NBH56
#132 S-NBH57
#133 S-NPH1
#134 S-NPH1W
#135 S-NPH2
#136 S-NPH3
#137 S-NPH4
#138 S-NPH5
#139 S-NPH53
# Parsed raw columns from csv ------------------------------------------
#0 ('Unnamed: 0_level_0', 'Unnamed: 0_level_1')
#1 ('Unnamed: 1_level_0', 'Glass ')
#2 ('Unnamed: 2_level_0', 'Code(d)')
#3 ('Unnamed: 3_level_0', 'Code(e)')
#4 ('REFRACTIVE INDICES', 'n2325')
#5 ('Unnamed: 5_level_0', 'n1970')
#6 ('Unnamed: 6_level_0', 'n1530')
#7 ('Unnamed: 7_level_0', 'n1129')
#8 ('REFRACTIVE INDICES', 'nt')
#9 ('Unnamed: 9_level_0', 'ns')
#10 ('Unnamed: 10_level_0', "nA'")
#11 ('Unnamed: 11_level_0', 'nr')
#12 ('REFRACTIVE INDICES', 'nC')
#13 ('Unnamed: 13_level_0', "nC'")
#14 ('Unnamed: 14_level_0', 'nHe-Ne')
#15 ('Unnamed: 15_level_0', 'nD')
#16 ('REFRACTIVE INDICES', 'nd')
#17 ('Unnamed: 17_level_0', 'ne')
#18 ('Unnamed: 18_level_0', 'nF')
#19 ('Unnamed: 19_level_0', "nF'")
#20 ('REFRACTIVE INDICES', 'nHe-Cd')
#21 ('Unnamed: 21_level_0', 'ng')
#22 ('Unnamed: 22_level_0', 'nh')
#23 ('Unnamed: 23_level_0', 'ni')
#24 ('ABBE', '?d')
#25 ('Unnamed: 25_level_0', '?e')
#26 ('ABBE', '?d').1
#27 ('Unnamed: 27_level_0', '?e')
#28 ('DISPERSIONS', 'nF-nC')
#29 ('Unnamed: 29_level_0', 'nF-nC')
#30 ('Unnamed: 30_level_0', "nF'-nC'")
#31 ('PARTIAL DISPERSIONS', 'nC-nt')
#32 ('Unnamed: 32_level_0', "nC-nA'")
#33 ('Unnamed: 33_level_0', 'nd-nC')
#34 ('Unnamed: 34_level_0', 'ne-nC')
#35 ('Unnamed: 35_level_0', 'ng-nd')
#36 ('Unnamed: 36_level_0', 'ng-nF')
#37 ('PARTIAL DISPERSIONS', 'nh-ng')
#38 ('Unnamed: 38_level_0', 'ni-ng')
#39 ('Unnamed: 39_level_0', "nC'-nt")
#40 ('Unnamed: 40_level_0', "ne-nC'")
#41 ('Unnamed: 41_level_0', "nF'-ne")
#42 ('Unnamed: 42_level_0', "ni-nF'")
#43 ('RELATIVE PARTIAL DISPERSIONS', '?C,t')
#44 ('Unnamed: 44_level_0', "?C,A'")
#45 ('Unnamed: 45_level_0', '?d,C')
#46 ('Unnamed: 46_level_0', '?e,C')
#47 ('Unnamed: 47_level_0', '?g,d')
#48 ('Unnamed: 48_level_0', '?g,F')
#49 ('RELATIVE PARTIAL DISPERSIONS', '?h,g')
#50 ('Unnamed: 50_level_0', '?i,g')
#51 ('Unnamed: 51_level_0', "?'C',t")
#52 ('Unnamed: 52_level_0', "?'e,C'")
#53 ('Unnamed: 53_level_0', "?'F',e")
#54 ('Unnamed: 54_level_0', "?'i,F'")
#55 ('Deviation of Relative Partial Dispesions', '??C,t')
#56 ('Unnamed: 56_level_0', "??C,A'")
#57 ('Unnamed: 57_level_0', '??g,d')
#58 ('Unnamed: 58_level_0', '??g,F')
#59 ('Unnamed: 59_level_0', '??i,g')
#60 ('CONSTANTS OF DISPERSION FORMULA (Sellmeier)', 'A1')
#61 ('Unnamed: 61_level_0', 'A2')
#62 ('Unnamed: 62_level_0', 'A3')
#63 ('Unnamed: 63_level_0', 'B1')
#64 ('Unnamed: 64_level_0', 'B2')
#65 ('Unnamed: 65_level_0', 'B3')
#66 ('CONSTANTS OF DISPERSION FORMULA (Cauchy)', 'A0')
#67 ('Unnamed: 67_level_0', 'A1')
#68 ('Unnamed: 68_level_0', 'A2')
#69 ('Unnamed: 69_level_0', 'A3')
#70 ('Unnamed: 70_level_0', 'A4')
#71 ('Unnamed: 71_level_0', 'A5')
#72 ('COLORING', '?80')
#73 ('Unnamed: 73_level_0', '(?70)')
#74 ('Unnamed: 74_level_0', '?5')
#75 ('INTERNAL TRANSMISSION COLORING', '?0.80')
#76 ('Unnamed: 76_level_0', '?0.05')
#77 ('CCI', 'B')
#78 ('Unnamed: 78_level_0', 'G')
#79 ('Unnamed: 79_level_0', 'R')
#80 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '280')
#81 ('Unnamed: 81_level_0', '290')
#82 ('Unnamed: 82_level_0', '300')
#83 ('Unnamed: 83_level_0', '310')
#84 ('Unnamed: 84_level_0', '320')
#85 ('Unnamed: 85_level_0', '330')
#86 ('Unnamed: 86_level_0', '340')
#87 ('Unnamed: 87_level_0', '350')
#88 ('Unnamed: 88_level_0', '360')
#89 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '370')
#90 ('Unnamed: 90_level_0', '380')
#91 ('Unnamed: 91_level_0', '390')
#92 ('Unnamed: 92_level_0', '400')
#93 ('Unnamed: 93_level_0', '420')
#94 ('Unnamed: 94_level_0', '440')
#95 ('Unnamed: 95_level_0', '460')
#96 ('Unnamed: 96_level_0', '480')
#97 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '500')
#98 ('Unnamed: 98_level_0', '550')
#99 ('Unnamed: 99_level_0', '600')
#100 ('Unnamed: 100_level_0', '650')
#101 ('Unnamed: 101_level_0', '700')
#102 ('Unnamed: 102_level_0', '800')
#103 ('Unnamed: 103_level_0', '900')
#104 ('Unnamed: 104_level_0', '1000')
#105 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '1200')
#106 ('Unnamed: 106_level_0', '1400')
#107 ('Unnamed: 107_level_0', '1600')
#108 ('Unnamed: 108_level_0', '1800')
#109 ('Unnamed: 109_level_0', '2000')
#110 ('Unnamed: 110_level_0', '2200')
#111 ('Unnamed: 111_level_0', '2400')
#112 ('dn/dT relative (10-6 / ?)', 't(-40~-20)')
#113 ('Unnamed: 113_level_0', 't(-20~0)')
#114 ('Unnamed: 114_level_0', 't(0~20)')
#115 ('Unnamed: 115_level_0', 't(20~40)')
#116 ('Unnamed: 116_level_0', 't(40~60)')
#117 ('Unnamed: 117_level_0', 't(60~80)')
#118 ('dn/dT relative (10-6 / ?)', "C'(-40~-20)")
#119 ('Unnamed: 119_level_0', "C'(-20~0)")
#120 ('Unnamed: 120_level_0', "C'(0~20)")
#121 ('Unnamed: 121_level_0', "C'(20~40)")
#122 ('Unnamed: 122_level_0', "C'(40~60)")
#123 ('Unnamed: 123_level_0', "C'(60~80)")
#124 ('dn/dT relative (10-6 / ?)', 'He-Ne(-40~-20)')
#125 ('Unnamed: 125_level_0', 'He-Ne(20~0)')
#126 ('Unnamed: 126_level_0', 'He-Ne(0~20)')
#127 ('Unnamed: 127_level_0', 'He-Ne(20~40)')
#128 ('Unnamed: 128_level_0', 'He-Ne(40~60)')
#129 ('Unnamed: 129_level_0', 'He-Ne(60~80)')
#130 ('dn/dT relative (10-6 / ?)', 'D(-40~-20)')
#131 ('Unnamed: 131_level_0', 'D(-20~0)')
#132 ('Unnamed: 132_level_0', 'D(0~20)')
#133 ('Unnamed: 133_level_0', 'D(20~40)')
#134 ('Unnamed: 134_level_0', 'D(40~60)')
#135 ('Unnamed: 135_level_0', 'D(60~80)')
#136 ('dn/dT relative (10-6 / ?)', 'e(-40~-20)')
#137 ('Unnamed: 137_level_0', 'e(-20~0)')
#138 ('Unnamed: 138_level_0', 'e(0~20)')
#139 ('Unnamed: 139_level_0', 'e(20~40)')
#140 ('Unnamed: 140_level_0', 'e(40~60)')
#141 ('Unnamed: 141_level_0', 'e(60~80)')
#142 ('dn/dT relative (10-6 / ?)', "F'(-40~-20)")
#143 ('Unnamed: 143_level_0', "F'(-20~0)")
#144 ('Unnamed: 144_level_0', "F'(0~20)")
#145 ('Unnamed: 145_level_0', "F'(20~40)")
#146 ('Unnamed: 146_level_0', "F'(40~60)")
#147 ('Unnamed: 147_level_0', "F'(60~80)")
#148 ('dn/dT relative (10-6 / ?)', 'g(-40~-20)')
#149 ('Unnamed: 149_level_0', 'g(-20~0)')
#150 ('Unnamed: 150_level_0', 'g(0~20)')
#151 ('Unnamed: 151_level_0', 'g(20~40)')
#152 ('Unnamed: 152_level_0', 'g(40~60)')
#153 ('Unnamed: 153_level_0', 'g(60~80)')
#154 ('Constants of dn/dT', ' D0')
#155 ('Unnamed: 155_level_0', ' D1')
#156 ('Unnamed: 156_level_0', ' D2')
#157 ('Unnamed: 157_level_0', ' E0')
#158 ('Unnamed: 158_level_0', ' E1')
#159 ('Unnamed: 159_level_0', '?TK')
#160 ('Thermal Properties', 'StP(?)')
#161 ('Unnamed: 161_level_0', 'AP(?)')
#162 ('Unnamed: 162_level_0', 'Tg(?)')
#163 ('Unnamed: 163_level_0', 'At(?)')
#164 ('Unnamed: 164_level_0', 'SP(?)')
#165 ('CTE?(10-7/?)', '(-30~+70)')
#166 ('Unnamed: 166_level_0', '(100~300)')
#167 ('Conductivity', 'k(W/m?K)')
#168 ('Mechanical Properties', "Young's (E) ")
#169 ('Unnamed: 169_level_0', 'Rigidity (G)')
#170 ('Unnamed: 170_level_0', "Poisson's(?)")
#171 ('Unnamed: 171_level_0', 'Knoop (Hk)')
#172 ('Unnamed: 172_level_0', 'Group')
#173 ('Unnamed: 173_level_0', 'Abrasion(Aa)')
#174 ('Unnamed: 174_level_0', '?')
#175 ('Chemical Properties', 'RW(P)')
#176 ('Unnamed: 176_level_0', 'RA(P)')
#177 ('Unnamed: 177_level_0', 'W(S)max')
#178 ('Unnamed: 178_level_0', 'W(S)min')
#179 ('Unnamed: 179_level_0', 'SR')
#180 ('Unnamed: 180_level_0', 'PR')
#181 ('Bubble Grp', 'B')
#182 ('Spec. Gravity', 'd')
```
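Once the pickle has been written, the stored A1-A3/B1-B3 coefficients can be used to evaluate the index at other wavelengths. A hedged sketch, assuming the coefficients follow the common three-term Sellmeier form n^2 - 1 = sum_i A_i*L^2/(L^2 - B_i) with the wavelength L in micrometres (worth confirming against the Ohara catalogue before relying on it):
```python
import numpy as np
import pandas as pd

def sellmeier_index(row, wavelength_um):
    """Index from the 3-term Sellmeier coefficients stored in ohara_glasses.pkl."""
    L2 = wavelength_um ** 2
    n2 = 1.0
    for a, b in (('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')):
        n2 += row[a] * L2 / (L2 - row[b])
    return np.sqrt(n2)

glasses = pd.read_pickle('ohara_glasses.pkl')
bsl7 = glasses.loc['S-BSL7']
print(sellmeier_index(bsl7, 0.5876), 'vs catalogue Nd', bsl7['Nd'])
```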
|
{
"source": "jfblanchard/ventilator-pressure-prediction",
"score": 3
}
|
#### File: ventilator-pressure-prediction/scripts/single_breath_PID_opt_trial1.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# load files
train = pd.read_csv(r'F:\Sync\Work\Kaggle Competitions\Ventilator Pressure Prediction\Data\train.csv')
test = pd.read_csv(r'F:\Sync\Work\Kaggle Competitions\Ventilator Pressure Prediction\Data\test.csv')
# function to get single breath (id must exist...there are some that are missing)
def get_breath(df,id):
# generate start stop points
start = 80 * id
stop = start + 80
return train.iloc[start:stop,:]
# Get a single breath
myid = 7
id1 = get_breath(train,myid)
id1 = id1.reset_index()
lag = id1.u_in.shift(1,fill_value = 0)
id1['lag'] = lag
# make a dt column
dt = np.diff(id1.time_step)
dt_list = list(dt)
dt_list.append(dt.mean())
d_uin = np.diff(id1.lag)
d_pressure = np.diff(id1.pressure)
id1['dt'] = dt_list
# make a volume column from u_in
vol = np.zeros(len(id1))
vol[0] = 6 # start with avg starting value of pressure
r = id1.R.iloc[0]
c = id1.C.iloc[0]
for i in range(len(id1)-1):
if id1.u_out.iloc[i] == 0:
vol[i+1] = vol[i] + (id1.dt.iloc[0]*id1.lag.iloc[i+1] * (c/r)) *-.3*np.log(id1.time_step.iloc[i+1]*1)
else:
vol[i+1] = 6 #make this a log too
# plot a single breath
pal1 = sns.color_palette("viridis",3)
sns.set_palette(pal1)
r = id1.R.iloc[0]
c = id1.C.iloc[0]
plt.figure(figsize=(8,5))
plt.plot(id1.pressure,label='pressure')
plt.plot(id1.lag,label='u_in')
plt.plot(id1.u_out,label='u_out')
plt.plot(vol,label='vol',color='red')
plt.title(f'Pressure and u_in for Breath id={myid}, R={r}, C={c}')
plt.legend();
```
|
{
"source": "jfblg/Tracktime",
"score": 3
}
|
#### File: models/categories/views.py
```python
from flask import Blueprint, request, render_template, sessions, redirect, url_for
from src.models.categories.categories import CategoryModel, CategoryAddForm
categories_blueprint = Blueprint('categories', __name__)
@categories_blueprint.route('/list', methods=['GET'])
def list():
loaded_data = [category.json() for category in CategoryModel.list_all()]
category_count = len(loaded_data)
return render_template('categories/categories_list.html', data=loaded_data, category_count=category_count)
@categories_blueprint.route('/add', methods=['GET', 'POST'])
def add():
form = CategoryAddForm(request.form)
if request.method == 'POST' and form.validate():
input_data = dict()
input_data['category_name'] = request.form['category_name'].strip()
input_data['gender'] = request.form['gender']
input_data['year_start'] = request.form['year_start'].strip()
input_data['year_end'] = request.form['year_end'].strip()
new_category = CategoryModel(**input_data)
new_category.save_to_db()
return render_template('categories/categories_add_success.html', form=form, data=input_data)
return render_template('categories/categories_add.html', form=form)
```
#### File: models/participants/mass_import.py
```python
import xlrd
import csv
from os import remove
from os.path import join, abspath, dirname, isfile
from src.models.participants.participants import ParticipantModel
class MassImport:
@staticmethod
def insert_many(path_to_file):
""" Insert loaded data to the db
"""
if isfile(path_to_file):
file_ext = MassImport.get_file_extension(path_to_file)
print("DEBUG: ", file_ext)
if file_ext == "xls" or file_ext == "xlsx":
loaded_data = MassImport.read_wb(path_to_file)
elif file_ext == "csv":
loaded_data = MassImport.read_csv(path_to_file)
# This case should not happen, as MassImport.allowed_file prevents that
else:
return False
for item in loaded_data:
record = ParticipantModel(**item)
record.save_to_db()
remove(path_to_file)
return True
else:
return False
@staticmethod
def read_wb(wb_path):
"""Load participants data from xls data
"""
keys = "first_name last_name gender year".split(" ")
loaded_data = []
xl_workbook = xlrd.open_workbook(wb_path)
xl_sheet = xl_workbook.sheet_by_index(0)
for row_idx in range(1, xl_sheet.nrows):
values = [item.value for item in xl_sheet.row(row_idx)]
# converting year from float to int
values[3] = int(values[3])
values = dict(zip(keys, values))
loaded_data.append(values)
return loaded_data
@staticmethod
def read_csv(path_to_file):
"""Load participants from csv file"""
keys = "first_name last_name gender year".split(" ")
loaded_data = []
with open(path_to_file, newline='') as f:
reader = csv.reader(f)
for row in reader:
values = dict(zip(keys, row))
print(values)
loaded_data.append(values)
return loaded_data
@staticmethod
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ['csv', 'xls', 'xlsx']
@staticmethod
def get_file_extension(filename):
return filename.rsplit('.', 1)[1].lower()
```
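A quick sketch of the flat CSV layout `read_csv` above expects: one participant per row, no header, columns first_name, last_name, gender, year (the paths and names below are made up, and `MassImport` is assumed to be importable from the module above):
```python
import csv
import os
import tempfile
# from src.models.participants.mass_import import MassImport  # per the repo layout above

rows = [["Anna", "Example", "girl", "2008"],
        ["Ben", "Sample", "boy", "2007"]]
path = os.path.join(tempfile.gettempdir(), "participants.csv")
with open(path, "w", newline="") as f:
    csv.writer(f).writerows(rows)

print(MassImport.get_file_extension(path))  # csv
print(MassImport.allowed_file(path))        # True
print(MassImport.read_csv(path))            # list of dicts keyed by the columns
```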
|
{
"source": "jfblg/Tracktime-UZE",
"score": 2
}
|
#### File: Tracktime-UZE/src/app.py
```python
import os
import sys
# If you delete the following line, the flask application can't be executed from the command line
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src.config import *
from threading import Thread
# TODO how to run while loop and Flask at the same time?
# http://stackoverflow.com/questions/23100704/running-infinite-loops-using-threads-in-python?answertab=votes#tab-top
from flask import Flask, render_template, session
from src.models.timedb.timy import Timy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///timetrack.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = "justSom3Kei"
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(os.getcwd()), UPLOAD_FOLDER_NAME)
@app.before_first_request
def create_table():
''' SQLAlchemy creates the tables it sees from the imports above.
'''
db.create_all()
db.session.commit()
@app.route('/')
def home():
return render_template('home.html')
@app.route('/documentation')
def documentation():
"""
    This page shows several schemas.
"""
return render_template('documentation.html')
from src.models.participants.views import participants_blueprint
from src.models.categories.views import categories_blueprint
from src.models.startlist.views import startlist_blueprint
from src.models.timedb.views import timedb_blueprint
# add another models
app.register_blueprint(participants_blueprint, url_prefix="/participants")
app.register_blueprint(categories_blueprint, url_prefix="/categories")
app.register_blueprint(startlist_blueprint, url_prefix="/startlist")
app.register_blueprint(timedb_blueprint, url_prefix="/timedb")
# register another blueprints
if __name__ == "__main__":
from src.common.database import db
db.init_app(app)
app.run(port=4999, debug=True)
```
#### File: models/participants/participants.py
```python
from wtforms import Form, BooleanField, IntegerField, StringField, PasswordField, validators
from wtforms.fields.html5 import EmailField
from src.common.database import db
from sqlalchemy import exc
class RunnerRegistrationForm(Form):
first_name = StringField('First name', [
validators.Length(min=2, max=25),
validators.DataRequired(message="Required")])
last_name = StringField('Last name', [
validators.Length(min=2, max=25)])
gender = StringField('Gender', [
validators.Length(min=2, max=6),
validators.data_required(message="Required. 'boy' or 'girl'")])
year = IntegerField('Year of birth', [
validators.NumberRange(min=1917, max=2017),
validators.data_required(message="Required. Please specify number between 1917 and 2017.")])
class ParticipantModel(db.Model):
__tablename__ = "participants"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(80), nullable=False)
last_name = db.Column(db.String(80), nullable=False)
gender = db.Column(db.String(6), nullable=False)
year = db.Column(db.Integer, nullable=False)
startlist = db.relationship("StartlistModel",
back_populates='participants',
cascade="all, delete, delete-orphan")
__table_args__ = (db.UniqueConstraint('first_name', 'last_name', 'year'),)
def __init__(self, first_name, last_name, gender, year):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.year = int(year)
def json(self):
return {
"first_name": self.first_name,
"last_name": self.last_name,
"gender": self.gender,
"year": self.year,
}
@classmethod
def find_by_year(cls, year):
        # 'query' is a SQLAlchemy query builder
# SELECT FROM items WHERE name=name LIMIT 1
# returned data gets converted into ItemModel object
return cls.query.filter_by(year=int(year))
@classmethod
def find_by_gender_and_year(cls, gender, year):
return cls.query.filter_by(gender=gender, year=year)
def save_to_db(self):
''' Function does update and insert to the DB (upserting)
'''
# SQLAlchemy can translate object into the row
try:
db.session.add(self)
db.session.commit()
except exc.IntegrityError as e:
db.session().rollback()
@classmethod
def get_participants_ordered(cls):
return db.session.query(ParticipantModel.id,
ParticipantModel.last_name,
ParticipantModel.first_name,
ParticipantModel.gender,
ParticipantModel.year).\
order_by(ParticipantModel.last_name).\
order_by(ParticipantModel.first_name).\
all()
@classmethod
def get_by_id(cls, participant_id):
return db.session.query(cls).filter_by(id=participant_id).one()
@staticmethod
def drop_table():
db.drop_all()
@classmethod
def list_all(cls):
return cls.query.all()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
@classmethod
def delete_all_rows(cls):
all_rows = cls.list_all()
for row in all_rows:
row.delete_from_db()
```
#### File: models/startlist/startlist_alg.py
```python
def order_from_the_middle(data_list):
list_length = len(data_list)
if list_length % 2 == 0:
new_index = get_index_even_count(list_length)
else:
new_index = get_index_odd_count(list_length)
new_order_data_list = []
for index in new_index:
new_order_data_list.append(data_list[index])
return [data_list[index] for index in new_index]
def get_index_odd_count(length):
new_index_list = []
x = length - 1
for index in range(0, length):
new_index_list.append(x)
if x == 0:
x += 1
elif x % 2 == 0:
x -= 2
else:
x += 2
return new_index_list
def get_index_even_count(length):
new_index_list = []
x = length
for index in range(0, length):
if x == 0:
x += 1
elif x % 2 == 0:
x -= 2
else:
x += 2
new_index_list.append(x)
return new_index_list
```
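The net effect of the helpers above is to seed the list from the middle outwards, so the earliest entries of the input end up in the centre. A small usage sketch (run in the same module):
```python
print(order_from_the_middle([1, 2, 3, 4, 5]))  # [5, 3, 1, 2, 4]
print(order_from_the_middle([1, 2, 3, 4]))     # [3, 1, 2, 4]
```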
#### File: src/tests_delete_later/flow.py
```python
import random
# 12:23.4456
def main():
print(generate_time())
def generate_time():
"""
Generates a pseudo-random time and returns it as a string
Only temporary solution until not integrated with timy
:return:
string
"""
minutes = random.randrange(12, 15)
seconds = round(random.uniform(10.0, 60.0), 4)
return "{0:01d}:{1}".format(minutes, seconds)
if __name__ == "__main__":
main()
```
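A tiny, hypothetical sanity check of the string format produced above (assumes it runs next to flow.py so the module is importable).
```python
import re

from flow import generate_time  # assumes flow.py is on the import path

# minutes come from randrange(12, 15), seconds from uniform(10.0, 60.0) rounded to 4 digits
sample = generate_time()
print(sample)                                      # e.g. "12:23.4456"
assert re.fullmatch(r"1[234]:\d{2}\.\d+", sample)
```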
|
{
"source": "jfbm74/holberton-system_engineering-devops",
"score": 3
}
|
#### File: holberton-system_engineering-devops/0x16-api_advanced/0-subs.py
```python
import requests
def number_of_subscribers(subreddit):
    """Queries the Reddit API and returns the number of subscribers
    for a given subreddit.
    Args:
        subreddit (string): name of the subreddit
    Returns:
        int: number of subscribers, or 0 if the subreddit does not exist
    """
url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
data = requests.get(url, headers={"User-Agent": "yuyo2211"},
allow_redirects=False)
if data.status_code == 200:
return data.json().get('data').get('subscribers')
else:
return 0
```
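A small, hypothetical driver for the function above; the `__import__` call follows the usual pattern for hyphenated Holberton file names, and the subreddit names are only examples.
```python
#!/usr/bin/python3
"""Quick manual check for 0-subs.py (not part of the task files)."""
number_of_subscribers = __import__('0-subs').number_of_subscribers

if __name__ == '__main__':
    # an existing subreddit prints its subscriber count, a bogus one prints 0
    print(number_of_subscribers('python'))
    print(number_of_subscribers('this_subreddit_should_not_exist_123'))
```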
|
{
"source": "jfboismenu/advent-of-code-2021",
"score": 3
}
|
#### File: jfboismenu/advent-of-code-2021/4-1.py
```python
import pprint
if True:  # True: use the full puzzle input below, False: use the small sample input
values = """6,69,28,50,36,84,49,13,48,90,1,33,71,0,94,59,53,58,60,96,30,34,29,91,11,41,77,95,17,80,85,93,7,9,74,89,18,25,26,8,87,38,68,5,12,43,27,46,62,73,16,55,22,4,65,76,54,52,83,10,21,67,15,47,45,40,35,66,79,51,75,39,64,24,37,72,3,44,82,32,78,63,57,2,86,31,19,92,14,97,20,56,88,81,70,61,42,99,23,98
22 59 7 10 6
33 36 96 55 23
13 85 18 29 28
75 46 83 73 58
34 40 87 56 98
73 96 47 14 10
28 11 79 84 20
74 30 0 59 71
80 93 42 22 17
44 2 81 29 15
73 32 37 93 39
2 87 38 99 97
15 12 42 49 33
9 23 25 94 19
57 4 18 70 79
43 79 59 71 78
51 97 37 28 26
46 21 84 8 86
96 30 17 34 49
55 90 99 75 61
43 86 35 51 24
16 25 29 21 3
2 13 4 36 54
89 27 8 85 34
96 59 65 14 56
5 57 43 4 1
86 80 67 30 20
22 16 99 0 14
40 25 59 91 54
82 95 96 37 24
97 20 87 66 21
25 40 9 78 89
52 75 95 63 62
32 43 13 47 69
53 48 56 29 4
24 4 65 12 8
76 3 81 99 49
45 33 31 66 34
17 94 75 35 88
64 10 7 93 95
88 20 99 15 83
81 40 5 6 98
49 74 18 27 9
43 69 28 37 71
87 82 90 14 47
21 97 92 42 60
11 65 98 95 29
2 49 75 20 74
56 40 78 66 81
68 4 46 77 61
26 14 39 76 11
41 74 34 91 4
51 9 97 29 75
83 86 23 35 77
90 2 49 30 89
34 97 0 22 15
65 60 82 66 47
30 9 49 95 48
54 73 67 17 78
33 96 44 1 32
90 29 10 49 63
8 26 95 61 0
54 13 51 12 44
32 48 16 47 84
55 46 93 14 3
68 55 46 16 54
97 59 49 6 21
52 32 15 96 70
57 56 61 51 0
40 31 62 24 48
75 56 70 42 14
77 9 23 62 25
29 27 54 17 2
55 72 43 15 96
45 30 88 86 1
65 12 22 6 51
77 81 15 91 19
80 71 27 89 52
75 92 41 83 57
40 44 66 84 98
89 80 73 59 18
74 55 67 71 10
78 27 37 57 3
92 26 17 5 72
52 83 28 76 51
64 0 87 4 13
14 43 42 62 31
17 84 72 91 83
53 80 59 61 74
47 22 38 28 77
65 49 91 94 19
25 81 64 57 9
50 95 82 12 66
53 54 85 36 11
31 56 83 18 60
6 12 76 37 33
14 96 16 31 40
17 82 27 94 83
1 50 49 9 54
29 70 59 91 77
85 0 33 41 16
7 86 80 53 65
94 19 28 35 32
4 55 93 97 63
83 22 67 56 36
28 70 65 17 19
88 51 15 73 12
11 49 80 23 42
26 54 90 75 29
74 77 91 86 66
88 22 83 10 43
57 34 15 23 8
94 42 68 67 37
84 3 6 71 62
89 45 72 55 38
68 27 93 7 15
85 96 1 56 6
32 2 30 55 17
99 90 37 80 50
77 78 58 98 57
5 95 22 13 97
19 2 52 92 20
75 49 86 31 41
51 63 56 33 10
82 27 79 94 57
15 44 91 26 4
41 78 53 95 86
90 92 46 56 33
80 93 68 66 70
36 10 38 0 6
11 47 52 64 17
16 60 39 55 78
46 18 33 83 35
13 92 4 23 50
40 5 29 6 90
20 60 2 23 74
43 39 91 75 12
58 67 50 29 79
10 40 92 95 25
13 24 53 72 36
69 64 15 53 86
61 11 48 71 27
26 93 38 70 73
96 29 30 98 45
13 7 31 59 84
13 55 50 54 57
32 24 45 5 77
82 86 0 4 99
75 72 14 40 56
76 94 89 17 30
66 71 86 41 0
80 64 3 29 78
23 21 42 99 87
96 60 75 61 49
2 83 85 12 1
78 96 4 82 6
73 36 57 99 8
11 83 15 35 30
85 87 25 22 66
32 64 16 50 43
6 39 55 2 4
1 61 99 71 80
46 88 22 31 60
62 66 37 97 76
74 34 10 52 24
42 38 13 65 12
4 39 60 62 21
81 99 1 69 40
59 15 11 95 53
91 78 72 30 73
43 68 82 72 2
34 92 33 97 89
63 70 9 0 45
69 10 12 65 21
40 20 55 57 49
41 22 8 16 49
71 64 19 65 92
17 10 26 36 29
30 88 7 54 11
77 55 70 72 47
15 94 90 6 39
99 55 16 54 91
18 7 82 44 51
25 34 83 14 12
30 2 77 27 66
44 50 74 97 58
87 42 56 92 28
49 82 52 17 78
54 89 63 77 27
83 14 24 16 84
47 32 8 55 22
19 14 24 82 4
76 73 9 10 64
40 6 92 67 17
68 44 43 3 54
26 10 9 92 81
46 98 13 0 14
68 99 35 18 72
74 33 22 61 93
80 38 71 6 75
17 23 54 55 22
10 8 60 76 24
71 28 16 62 82
13 12 21 78 39
26 66 89 64 79
72 12 91 79 99
84 18 37 98 41
10 71 88 23 24
11 34 26 83 74
58 27 77 5 8
9 90 13 0 46
6 58 74 92 8
75 77 56 76 80
55 60 44 68 91
67 28 96 66 18
14 26 54 21 35
69 67 16 76 62
78 45 87 44 94
27 89 39 85 28
3 81 93 64 74
10 28 35 84 76
40 11 95 59 57
53 4 24 50 45
7 43 78 17 81
1 74 82 16 27
6 26 72 53 52
51 91 80 11 18
20 63 74 25 33
79 4 8 59 67
3 13 55 81 83
98 85 27 84 42
90 15 17 61 34
40 64 86 96 45
59 47 53 5 35
11 7 41 80 13
47 48 54 31 76
99 32 98 20 15
61 41 30 94 37
34 59 86 55 45
9 83 92 53 3
3 80 24 94 25
17 23 64 76 71
20 97 0 56 72
95 73 28 59 21
14 81 46 67 88
2 95 5 38 90
63 62 11 24 34
19 31 57 84 80
47 86 36 85 74
13 39 73 94 42
28 6 60 34 15
63 36 51 30 92
43 10 7 88 49
78 76 31 19 66
22 20 35 45 79
19 42 49 57 73
99 50 97 93 43
67 52 40 16 33
2 55 0 71 46
21 75 59 66 83
19 86 30 25 3
32 39 65 54 29
38 6 85 52 13
43 95 18 44 15
53 70 16 31 71
68 73 74 83 70
56 15 12 78 4
43 87 63 90 86
41 16 23 17 77
80 14 61 30 50
88 28 45 80 65
64 11 68 33 27
29 70 44 82 37
42 66 9 32 87
10 24 15 3 46
99 81 5 62 97
4 36 23 38 35
42 16 37 98 54
34 41 25 30 48
8 60 63 89 72
25 33 94 23 14
45 10 79 30 3
22 28 95 27 11
74 13 39 84 83
72 88 56 53 97
31 92 91 84 71
54 90 89 80 0
98 96 65 66 68
35 39 70 11 82
15 34 42 52 2
68 71 86 82 37
28 48 12 34 54
62 55 10 25 89
60 4 50 21 22
3 2 18 40 84
18 96 95 47 45
14 51 2 88 43
94 56 19 15 8
48 65 62 6 75
35 28 25 72 30
43 85 69 92 26
61 75 5 73 66
16 87 4 99 48
18 19 79 23 83
37 88 31 38 40
23 10 89 84 76
45 39 62 55 66
25 73 79 43 60
12 69 36 93 71
9 77 14 58 49
9 24 26 53 79
99 15 30 50 16
14 95 12 25 33
54 40 58 46 66
5 23 39 29 48
64 67 11 10 92
59 99 80 97 66
45 51 88 47 82
35 27 72 85 16
54 58 2 1 52
10 97 27 54 66
89 85 39 99 98
8 68 95 51 19
4 17 79 87 3
72 43 76 58 33
95 72 0 18 17
3 42 24 86 34
52 79 46 58 98
76 77 78 19 10
81 61 88 85 54
74 70 15 80 72
77 89 11 19 22
34 59 56 65 91
58 6 50 40 16
93 30 95 26 85
47 62 57 6 25
40 79 22 95 29
42 11 70 10 92
60 53 84 96 17
75 86 74 89 18
30 42 4 19 92
40 58 72 7 70
17 98 45 76 50
93 57 65 79 2
56 94 73 84 62
51 0 14 7 53
63 36 48 81 84
50 22 11 88 6
83 99 21 31 91
86 17 72 42 94
83 0 20 26 7
42 24 37 86 65
73 82 63 21 27
30 35 9 47 80
79 53 3 14 84
50 99 83 86 42
81 36 24 4 76
0 71 66 41 57
7 54 94 78 97
12 8 82 45 31
4 91 57 50 37
22 46 86 24 26
39 54 48 7 42
2 45 95 29 12
38 25 52 0 72
51 94 46 44 62
95 60 0 48 61
38 13 85 32 8
22 56 53 30 80
43 65 58 68 88
49 76 41 43 51
57 46 45 82 90
48 33 36 5 23
81 30 3 61 75
56 70 29 91 59
58 74 50 47 84
2 1 62 36 60
32 16 95 43 27
79 12 39 56 11
9 33 4 25 61
23 84 16 51 39
72 19 53 64 43
9 44 10 52 26
45 68 29 56 74
62 42 46 95 0
16 83 27 85 56
13 41 49 79 53
18 63 7 60 3
45 15 48 69 29
46 86 35 34 32
85 2 96 15 43
33 30 29 53 98
21 55 61 73 40
31 4 66 75 59
26 32 91 38 80
69 81 65 30 77
82 22 83 0 38
2 3 29 47 94
42 55 9 18 97
53 45 90 31 44
23 86 0 35 84
27 80 3 64 12
1 96 48 93 85
69 24 61 15 22
91 72 62 13 76
81 51 67 60 16
65 48 86 39 97
92 93 49 77 59
15 94 88 52 19
80 83 23 61 4
47 84 46 79 55
92 38 65 42 76
9 58 26 95 86
30 49 56 69 59
94 4 25 89 44
73 10 29 0 48
56 40 19 84 61
52 31 25 86 21
79 55 53 51 5
81 9 35 72 15
41 95 30 58 73
26 80 4 21 96
61 92 76 93 74
2 69 60 8 20
46 98 70 72 83
1 99 31 4 86
93 64 8 43 61
33 36 75 90 50
52 13 3 42 34
22 65 60 18 76
17 63 6 66 92
51 67 86 88 18
82 83 32 74 30
5 33 9 28 61
72 75 25 23 60
43 28 40 53 52
54 12 77 10 83
21 44 63 0 1
15 22 33 49 2
80 41 3 46 55
1 97 75 37 4
47 33 13 21 40
27 62 15 90 30
11 83 63 36 35
0 12 60 91 42
0 45 17 88 18
66 10 63 62 8
36 5 47 39 67
21 3 61 29 19
82 58 33 6 59
37 92 69 56 52
46 66 20 78 13
83 99 16 31 0
36 35 2 68 9
70 82 94 96 29
62 65 85 37 3
74 95 34 96 58
15 33 49 21 93
19 83 66 6 25
81 84 23 0 76
95 84 71 92 52
54 36 66 59 82
0 76 32 45 83
69 27 25 88 38
81 96 63 4 61
73 51 28 48 40
3 38 11 14 35
66 91 86 20 81
53 39 46 71 1
97 60 21 93 23
92 70 4 60 95
58 49 20 15 25
55 68 21 84 80
56 41 82 23 19
30 74 65 27 29
81 97 68 46 75
62 73 63 36 41
1 5 91 84 37
45 92 20 49 7
25 26 3 88 56
25 9 94 37 26
44 58 84 91 38
39 46 57 98 50
96 42 73 24 70
71 32 53 48 13
98 72 25 96 77
80 64 88 53 23
21 37 45 24 18
41 86 59 68 5
76 50 36 26 12
77 79 88 74 12
21 9 85 26 68
11 62 64 4 5
47 33 76 63 87
55 19 2 60 95
74 79 30 14 35
90 52 17 29 63
18 69 78 34 26
92 42 85 71 56
12 2 5 0 98"""
else:
values = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
# Grab the numbers and split them into individual numbers
values = values.split("\n")
random_numbers = [int(number) for number in values.pop(0).split(",")]
# parse the 5x5 cards; each cell is stored as [number, marked] so numbers can be crossed off
cards = []
while values:
    if values[0] == "":
        values.pop(0)  # skip the blank separator line between cards, if present
    card = []
    for _ in range(5):
        card.append([[int(number), False] for number in values.pop(0).split()])
    cards.append(card)
# updates the bingo cards to mark the number as crossed.
def update_cards(cards, number):
for card in cards:
for line in card:
for idx, (value, state) in enumerate(line):
if value == number:
line[idx][1] = True
def is_winning(lines):
# check horizontal lines
for line in lines:
for _, state in line:
if state is False:
break
else:
print("horizontal")
return True
# check vertical
for idx in range(5):
for line in lines:
if line[idx][1] is False:
break
else:
print("vertical")
return True
    return False
    # NOTE: the diagonal checks below are unreachable dead code; Advent of Code
    # day 4 only scores complete rows and columns, so they are left disabled.
    # check left to right diagonal
for idx in range(5):
if lines[idx][idx][1] is False:
break
else:
print("diagonal 1")
return True
# check right to left diagonal
for idx in range(5):
if lines[idx][4 - idx][1] is False:
break
else:
print("diagonal 2")
return True
return False
def compute_card_value(card, number):
    # puzzle score: sum of the unmarked numbers on the winning card,
    # multiplied by the number that was just drawn
    unchecked_sum = 0
for line in card:
for value, state in line:
if state is False:
unchecked_sum += value
return unchecked_sum * number
def bingo():
for number in random_numbers:
update_cards(cards, number)
print(number)
for idx, card in enumerate(cards):
if is_winning(card):
pprint.pprint(card)
return compute_card_value(card, number)
print(bingo())
```
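For reference, a standalone toy example of the card representation and scoring logic used above: each card is five rows of `[number, marked]` pairs, a card wins on a complete row or column, and its score is the sum of its unmarked numbers times the last drawn number. The card below is illustrative only, not puzzle data.
```python
# Toy card mirroring the data structure in 4-1.py: 5 rows of [number, marked] pairs.
card = [[[row * 5 + col, False] for col in range(5)] for row in range(5)]
for cell in card[2]:
    cell[1] = True            # mark every number in the third row (10..14)

def row_or_column_complete(card):
    rows = any(all(marked for _, marked in row) for row in card)
    cols = any(all(row[i][1] for row in card) for i in range(5))
    return rows or cols

last_number = card[2][4][0]   # pretend 14 was the number just drawn
unmarked_sum = sum(n for row in card for n, marked in row if not marked)
print(row_or_column_complete(card), unmarked_sum * last_number)  # True 3360
```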
|
{
"source": "jfbu/pygments",
"score": 2
}
|
#### File: tests/snippets/conftest.py
```python
import pytest
from tests.conftest import LexerInlineTestItem
def pytest_collect_file(parent, path):
if path.ext == '.txt':
return LexerTestFile.from_parent(parent, fspath=path)
class LexerTestFile(pytest.File):
def collect(self):
yield LexerInlineTestItem.from_parent(self, name='')
```
|