id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses: 1 value)
---|---|---|
11225260
|
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView)
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpResponseRedirect
from trips.models import Trip
from django.contrib.auth.models import User
from trips.forms import TripForm
class TripList(ListView):
'''Renders all the Trips currently made by site Users.'''
model = Trip
template_name = 'trips/index.html'
def get(self, request):
'''Render a context containing all Trip instances.'''
trips = self.get_queryset().all()
return render(request, self.template_name, {
'trips': trips
})
class TripDetail(DetailView):
'''Displays a page with instructions associated with a specific trip.'''
model = Trip
template_name = 'trips/instructions.html'
def get(self, request, pk):
"""Renders a page to show the boarding instructions for a single Trip.
Parameters:
request(HttpRequest): the GET request sent to the server
pk(int): unique id value of the Trip instance
Returns:
HttpResponse: the view of the detail template
"""
trip = self.get_queryset().get(pk=pk)
context = {
'trip': trip
}
return render(request, self.template_name, context)
class TripCreate(LoginRequiredMixin, CreateView):
'''Allows user to add new Trip instances.'''
model = Trip
form_class = TripForm
template_name = 'trips/create.html'
queryset = Trip.objects.all()
def form_valid(self, form):
'''Initializes the passenger based on who submitted the form.'''
form.instance.passenger = self.request.user
return super().form_valid(form)
class TripUpdate(UserPassesTestMixin, UpdateView):
'''Allows for editing of a trip.'''
model = Trip
form_class = TripForm
template_name = 'trips/update.html'
queryset = Trip.objects.all()
def test_func(self):
'''Ensures the user editing the trip is the passenger who posted it.'''
trip = self.get_object()
return (self.request.user == trip.passenger)
class TripDelete(UserPassesTestMixin, DeleteView):
'''Allows for removal of Trip instances by User.'''
model = Trip
template_name = 'trips/deletion.html'
success_url = reverse_lazy('trips:all-trips')
queryset = Trip.objects.all()
def get(self, request, pk):
"""Renders a page to show the boarding instructions for a single Trip.
Parameters:
request(HttpRequest): the GET request sent to the server
slug(slug): unique slug field value of the Trip instance
Returns:
HttpResponse: the view of the detail template
"""
trip = self.get_queryset().get(pk=pk)
context = {
'trip': trip
}
return render(request, self.template_name, context)
def test_func(self):
'''Ensures the user removing the trip is the one who posted it.'''
trip = self.get_object()
return (self.request.user == trip.passenger)
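# For illustration, a minimal urls.py sketch these class-based views could be
# wired into. Only the 'trips:all-trips' name is implied by TripDelete.success_url
# above; the remaining paths and names are assumptions.
#
# from django.urls import path
# from trips import views
#
# app_name = 'trips'
# urlpatterns = [
#     path('', views.TripList.as_view(), name='all-trips'),
#     path('<int:pk>/', views.TripDetail.as_view(), name='trip-detail'),
#     path('new/', views.TripCreate.as_view(), name='trip-create'),
#     path('<int:pk>/edit/', views.TripUpdate.as_view(), name='trip-update'),
#     path('<int:pk>/delete/', views.TripDelete.as_view(), name='trip-delete'),
# ]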
|
StarcoderdataPython
|
3324083
|
#!/usr/bin/python
from distutils.core import setup, Command
from unittest import TextTestRunner, TestLoader
import fnmatch
import os
import re
import sys
from glob import glob
from os.path import splitext, basename, join as pjoin
## Custom test command for distutils.
## TestCommand based on http://da44en.wordpress.com/2002/11/22/using-distutils/
## Filter code based on http://stackoverflow.com/a/5141829/1432488
#class TestCommand(Command):
# user_options = [ ]
#
# def initialize_options(self):
# self._dir = os.getcwd()
#
# def finalize_options(self):
# pass
#
# def run(self):
# '''
# Finds all the tests modules in tests/, and runs them.
# '''
# testfiles = []
#
# includes = ['*_test.py', '*_tests.py'] # for files only
# excludes = ['__init__.py', 'setup.py'] # for dirs and files
#
# # transform glob patterns to regular expressions
# includes = r'|'.join([fnmatch.translate(x) for x in includes])
# excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
#
# for root, dirs, files in os.walk(self._dir):
#
# # exclude dirs
# dirs[:] = [d for d in dirs if not d == 'build']
#
# # exclude/include files
# #files = [os.path.join(root, f) for f in files]
#
# files = [f for f in files if not re.match(excludes, f)]
# files = [f for f in files if re.match(includes, f)]
#
# for fname in files:
# pypath = root[len(self._dir)+1:]
#
# if pypath != '':
# pyname = fname[0:-3] # convert filename ('file.py') to python mod name ('file')
#
# # convert path to python module ('path/to') to python package name ('path.to')
# pypackage = pypath.replace('/', '.')
#
# # join pypackage to pname ('path.to.file') and add to list of test modules
# testfiles.append('.'.join( [pypackage, pyname] ))
#
# print 'TEST FILES: ' + str(testfiles)
# print sys.path
# print os.getcwd()
# tests = TestLoader().loadTestsFromNames(testfiles)
# t = TextTestRunner(verbosity = 2)
# t.run(tests)
class CleanCommand(Command):
user_options = [ ]
def initialize_options(self):
self._clean_me = [ ]
for root, dirs, files in os.walk('.'):
for f in files:
if f.endswith('.pyc'):
self._clean_me.append(pjoin(root, f))
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except:
pass
class TestCommand(Command):
user_options = [ ]
def initialize_options(self):
self._dir = os.getcwd()
def finalize_options(self):
pass
    def run(self):
        '''
        Finds all the test modules in tests/, and runs them.
        '''
        testfiles = []
        for t in glob(pjoin(self._dir, 'tests', '*.py')):
            if not t.endswith('__init__.py'):
                testfiles.append('.'.join(
                    ['tests', splitext(basename(t))[0]])
                )
        print('TEST FILES: ' + str(testfiles))
        print(sys.path)
        print(os.getcwd())
        tests = TestLoader().loadTestsFromNames(testfiles)
        t = TextTestRunner(verbosity=1)
        t.run(tests)
setup(name='pCacheFS',
version='0.2',
description='Persistent Caching FUSE Filesystem',
keywords=['fuse', 'cache'],
author='<NAME>',
author_email='<EMAIL>',
url='http://code.google.com/p/pcachefs',
license='Apache 2.0',
scripts=['scripts/pcachefs'],
packages=['pcachefs'],
cmdclass = { 'test': TestCommand, 'clean': CleanCommand }
)
|
StarcoderdataPython
|
30131
|
<reponame>mnagaku/ParaMol
# -*- coding: utf-8 -*-
"""
Description
-----------
This module defines the :obj:`ParaMol.Objective_function.Properties.regularization.Regularization` class, which is a ParaMol representation of the regularization property.
"""
import numpy as np
from .property import *
# -----------------------------------------------------------#
# #
# REGULARIZATION #
# #
# -----------------------------------------------------------#
class Regularization(Property):
"""
ParaMol representation of the regularization property.
Parameters
----------
initial_parameters_values : list or np.array of floats
List or np.array containing the initial parameters' values.
prior_widths : list or np.array of floats
List or np.array containing the prior width of each parameter.
method : str
Type of regularization. Options are 'L1', 'L2' or 'hyperbolic' ('hyperbolic' only for RESP calculations)
weight : float
Weight of this property in the objective function.
scaling_factor : float
Scaling factor of the regularization value.
hyperbolic_beta : float
Hyperbolic beta value. Only used if `method` is `hyperbolic`.
Attributes
----------
name : str
'REGULARIZATION'
systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
List of ParaMol Systems. Currently not used and it is set to None.
units : str
'ADIMENSIONAL'
value : float
Current value of this property
weight : float
Weight of this property in the objective function.
"""
def __init__(self, initial_parameters_values, prior_widths, method, weight=1.0, scaling_factor=1.0, hyperbolic_beta=0.01):
self.name = "REGULARIZATION"
self.systems = None
self._regularization_type = method
self._scaling_factor = scaling_factor
self._hyperbolic_beta = hyperbolic_beta
self._initial_parameters_values = initial_parameters_values
self._prior_widths = prior_widths
self.units = 'ADIMENSIONAL'
self.value = None
self.weight = weight
# ------------------------------------------------------------ #
# #
# PUBLIC METHODS #
# #
# ------------------------------------------------------------ #
def set_initial_parameters_values(self, initial_parameters_values):
"""
Method that sets the initial parameters' values as a private attribute of this instance.
Parameters
----------
initial_parameters_values : list or np.array of floats
List or np.array containing the initial parameters' values.
Returns
-------
initial_parameters_values : list of floats
List containing the prior width of each parameter (private attribute).
"""
self._initial_parameters_values = initial_parameters_values
return self._initial_parameters_values
def set_prior_widths(self, prior_widths):
"""
Method that sets the prior widths of the variables as a private attribute of this instance.
Parameters
----------
prior_widths : list or np.array of floats
List or np.array containing the prior width of each parameter.
Returns
-------
prior_widths: list of floats
List containing the prior width of each parameter (private attribute).
"""
self._prior_widths = prior_widths
return self._prior_widths
def calculate_property(self, current_parameters, a=None, b=None):
"""
Method that wraps private regularization methods in order to calculate the regularization term of the objective function.
Parameters
----------
current_parameters : list of floats
Lists containing the optimizable values of the parameters.
a : float, default=`None`
a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
b : float, default=`None`
Hyperbolic beta parameter. If not `None`, instance attribute `self._hyperbolic_beta` is ignored.
Returns
-------
float
Regularization value.
"""
if self._regularization_type.upper() == "L2":
return self._l2_regularization(current_parameters, a)
elif self._regularization_type.upper() == "L1":
return self._l1_regularization(current_parameters, a)
elif self._regularization_type.upper() == "HYPERBOLIC":
return self._hyperbolic_regularization(current_parameters, a, b)
else:
raise NotImplementedError("Regularization {} scheme not implement.".format(self._regularization_type))
# ------------------------------------------------------------ #
# #
# PRIVATE METHODS #
# #
# ------------------------------------------------------------ #
def _l2_regularization(self, current_parameters, a=None):
"""
Method that computes the value of the L2 regularization.
Parameters
----------
current_parameters : list of floats
Lists containing the optimizable values of the parameters.
a : float, default=`None`
a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
Notes
-----
:math:`L2 = a(param-param_0)^2` where a is a scaling factor.
Returns
-------
value : float
Value of the regularization.
"""
if a is None:
a = self._scaling_factor
diff = (np.asarray(current_parameters) - self._initial_parameters_values) / self._prior_widths
reg = np.power(diff, 2)
self.value = a * np.sum(reg)
return self.value
def _l1_regularization(self, current_parameters, a=None):
"""
Method that computes the value of the L1 regularization.
Parameters
----------
current_parameters : list of floats
Lists containing the optimizable values of the parameters.
a : float, default=`None`
a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
Notes
-----
:math:`L1 = a|param-param_0|` where a is a scaling factor.
Returns
-------
value : float
Value of the regularization.
"""
if a is None:
a = self._scaling_factor
diff = (np.asarray(current_parameters) - self._initial_parameters_values) / self._prior_widths
reg = np.abs(diff)
self.value = a * np.sum(reg)
return self.value
def _hyperbolic_regularization(self, current_parameters, a=None, b=None):
"""
Method that computes the value of the hyperbolic regularization.
Parameters
----------
current_parameters : list of floats
Lists containing the optimizable values of the parameters.
a : float, default=`None`
a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
b : float, default=`None`
Hyperbolic beta parameter. If not `None`, instance attribute `self._hyperbolic_beta` is ignored.
Notes
-----
:math:`hyperbolic = a\sum_{m}^{N_{charges}} ((q_m^2 + b^2 )^{1/2} - b)`
Returns
-------
value : float
Value of the regularization.
"""
if a is None:
a = self._scaling_factor
if b is None:
b = self._hyperbolic_beta
reg = np.sum( ((np.asarray(current_parameters) )**2 + b**2)**(1/2.) - b)
self.value = a * reg
return self.value
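# Illustrative usage sketch (the parameter values below are made up); calling
# code elsewhere in ParaMol could evaluate the property like this:
#
# import numpy as np
# reg = Regularization(initial_parameters_values=np.zeros(3),
#                      prior_widths=np.ones(3),
#                      method="L2",
#                      scaling_factor=0.5)
# # 0.5 * sum(((p - p0) / width) ** 2) = 0.5 * (0.01 + 0.04 + 0.09) = 0.07
# print(reg.calculate_property(np.array([0.1, -0.2, 0.3])))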
|
StarcoderdataPython
|
9725133
|
# coding: utf-8
class SinglyLinkedList:
def __init__(self):
self.head = None
def is_empty(self):
        return self.head is None
def length(self):
curr = self.head
len_ = 0
while curr is not None:
curr = curr.next
len_ += 1
return len_
def search(self, item):
curr = self.head
found = False
while not found and curr is not None:
if curr.data == item:
found = True
else:
curr = curr.next
return found
def access(self, pos):
if pos > self.length() or pos < 0:
return 'pos parameter not within bounds of list.'
curr = self.head
index = 0
while index < pos:
curr = curr.next
index += 1
return curr.data
def append(self, item):
temp = Node(item)
temp.next = self.head
self.head = temp
def insert(self, pos, item):
if pos > self.length() or pos < 0:
return 'pos parameter not within bounds of list.'
curr = self.head
prev = None
index = 0
while index < pos:
curr, prev = curr.next, curr
index += 1
temp = Node(item)
if curr is self.head:
if self.head is None:
self.head = temp
else:
temp.next = self.head
self.head = temp
else:
temp.next = prev.next
prev.next = temp
def delete(self, item):
curr = self.head
prev = None
while curr is not None and curr.data != item:
prev = curr
curr = curr.next
if curr is None:
return 'Item not found.'
elif curr is self.head:
self.head = curr.next
else:
prev.next = curr.next
def __iter__(self):
curr = self.head
if curr:
while curr.next is not None:
yield curr
curr = curr.next
yield curr
def __str__(self):
return '(Head) {}-> None'.format('->'.join(map(str, self)))
class DoublyLinkedList:
def __init__(self):
self.head = None
def is_empty(self):
        return self.head is None
def length(self):
curr = self.head
len_ = 0
while curr is not None:
curr = curr.next
len_ += 1
return len_
def search(self, item):
curr = self.head
found = False
while not found and curr is not None:
if curr.data == item:
found = True
else:
curr = curr.next
return found
def access(self, pos):
if pos > self.length() or pos < 0:
return 'pos parameter not within bounds of list.'
curr = self.head
index = 0
while index < pos:
curr = curr.next
index += 1
return curr.data
    def append(self, item):
        temp = Node(item)
        temp.next = self.head
        if self.head is not None:
            # keep the backward link consistent when prepending to a non-empty list
            self.head.prev = temp
        self.head = temp
def insert(self, pos, item):
if pos > self.length() or pos < 0:
return 'pos parameter not within bounds of list.'
curr = self.head
prev = None
index = 0
while index < pos:
curr, prev = curr.next, curr
index += 1
temp = Node(item)
if curr is self.head:
if self.head is None:
self.head = temp
else:
temp.next = self.head
self.head.prev = temp
self.head = temp
else:
temp.next = prev.next
temp.prev = prev
prev.next = temp
def delete(self, item):
curr = self.head
prev = None
while curr is not None and curr.data != item:
prev = curr
curr = curr.next
if curr is None:
return 'Item not found.'
elif curr is self.head:
self.head = curr.next
if self.head is not None:
self.head.prev = None
else:
prev.next = curr.next
if curr.next is not None:
curr.next.prev = prev
def __iter__(self):
curr = self.head
if curr:
while curr.next is not None:
yield curr
curr = curr.next
yield curr
def __str__(self):
return '(Head) {}-> None'.format('<->'.join(map(str, self)))
class Node:
def __init__(self, data, prev = None, next = None):
self.data = data
self.prev = prev
self.next = next
def __str__(self):
return str(self.data)
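# A short usage sketch of the classes above.
if __name__ == '__main__':
    sll = SinglyLinkedList()
    for value in (3, 2, 1):
        sll.append(value)               # append() inserts at the head
    sll.insert(1, 99)                   # 1 -> 99 -> 2 -> 3
    sll.delete(2)                       # 1 -> 99 -> 3
    print(sll)                          # (Head) 1->99->3-> None
    print(sll.length(), sll.search(99), sll.access(1))
    dll = DoublyLinkedList()
    for value in ('c', 'b', 'a'):
        dll.append(value)
    print(dll)                          # (Head) a<->b<->c-> None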
|
StarcoderdataPython
|
1667220
|
import unittest
from runmd import *
class TestCommandBuilder(unittest.TestCase):
def test_append(self):
self.assertEqual(build_command("test", "name"), "test name")
def test_insert(self):
self.assertEqual(build_command("test %s t", "name"), "test name t")
def test_multi_insert(self):
self.assertEqual(
build_command("cmd %s foo %s", "/tmp/hi"), "cmd /tmp/hi foo /tmp/hi"
)
def test_ignore(self):
self.assertEqual(build_command("%s %d %%s %", "test %s"), "test %s %d %s %")
def test_lots_escape(self):
self.assertEqual(build_command("%%%s", "name"), "%name")
if __name__ == "__main__":
unittest.main()
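# The tests above pin down the placeholder behaviour of build_command. A
# hypothetical reimplementation consistent with them (not the actual runmd
# code) could look like this:
#
# def build_command(template, name):
#     filled, used, i = [], False, 0
#     while i < len(template):
#         if template.startswith("%%", i):     # escaped percent
#             filled.append("%")
#             i += 2
#         elif template.startswith("%s", i):    # placeholder to fill
#             filled.append(name)
#             used = True
#             i += 2
#         else:
#             filled.append(template[i])
#             i += 1
#     return "".join(filled) if used else template + " " + name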
|
StarcoderdataPython
|
9671089
|
<gh_stars>0
import os, sys, glob
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../")))
from v2.lib.resource_op import Config
import datetime
import json
import v2.utils.utils as utils
from v2.utils.utils import HttpResponseParser
from v2.lib.exceptions import TestExecError
import v2.lib.manage_data as manage_data
import logging
log = logging.getLogger()
def validate_prefix_rule(bucket, config):
"""
This function is to validate the prefix rule for versioned objects
Parameters:
    bucket: the bucket object being validated (its .name attribute is used)
    config: test configuration object
"""
log.info('verification starts')
op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
op2 = utils.exec_shell_cmd("radosgw-admin bucket list --bucket=%s" % bucket.name)
json_doc = json.loads(op)
json_doc2 = json.loads(op2)
objects = json_doc['usage']['rgw.main']['num_objects']
objs_total = (config.test_ops['version_count']) * (config.objects_count)
objs_ncurr = (config.test_ops['version_count']) * (config.objects_count) - (config.objects_count)
objs_diff = objs_total - objs_ncurr
c1 = 0
if objects == objs_total:
for i, entry in enumerate(json_doc2):
print(entry['tag'])
if entry['tag'] == 'delete-marker':
c1 = c1 + 1
if c1 == (config.objects_count):
log.info('Lifecycle expiration of current object version validated for prefix filter')
if objects == objs_diff:
log.info('Lifecycle expiration of non_current object version validated for prefix filter')
def validate_and_rule(bucket, config):
"""
This function is to validate AND rule
Parameters:
    bucket: the bucket object being validated (its .name attribute is used)
    config: test configuration object
"""
log.info('verification starts')
op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
json_doc = json.loads(op)
objects = json_doc['usage']['rgw.main']['num_objects']
if objects == 0 :
log.info('Lifecycle expiration with And rule validated successfully')
|
StarcoderdataPython
|
6660445
|
<reponame>autobotasia/autoface<gh_stars>1-10
from django.apps import AppConfig
class TaggedImgConfig(AppConfig):
name = 'tools'
|
StarcoderdataPython
|
5062625
|
class PopulateStatus:
status = False
def set_initialized(self):
self.status = False
def set_populated(self):
self.status = True
def get_status(self):
return self.status
|
StarcoderdataPython
|
4929974
|
import sqlite3
#roll identifiers: 1 - solo roll
# 2 - dual roll
def get_all_tiers_at_level(level: int, is_fa: bool, roll_identifier: int) -> tuple:
# At the specified table, return the entire row at the corresponding level
if roll_identifier == 1:
db_name = 'solo_values.db' if not is_fa else 'solo_values_fa.db'
connection = sqlite3.connect(db_name)
table = 'solo_vals' if not is_fa else 'solo_vals_flame_advantaged'
elif roll_identifier == 2:
db_name = 'dual_values.db' if not is_fa else 'dual_values_fa.db'
connection = sqlite3.connect(db_name)
table = 'dual_vals' if not is_fa else 'dual_vals_flame_advantaged'
else:
return ((-1,))
cursor = connection.cursor()
# sql_query = 'SELECT * FROM "{a}" WHERE "{b}" <= ? AND "{c}" >= ?'
result_query = cursor.execute('SELECT * FROM "{}" WHERE "{}" <= ? AND "{}" >= ?'
.format(table, "Min Level".replace('"','""'),
"Max Level".replace('"','""')),(level,level)).fetchone()
connection.close()
return result_query
def get_specific_tier_at_level(level: int, roll_identifier: int, tier: int, is_fa: bool) -> int:
# Return the stat available at the level range at the specific tier
# Min and max tiers are 1 and 7 respectively, (the better the tier, the high the tier number)
if tier not in range(1,8): # stop is exclusive, must stop at 8
return -1
else:
        # the first two values are the level range, so the tier columns start after them (the flame-advantaged tables use a different offset)
return get_all_tiers_at_level(level, is_fa, roll_identifier)[tier + 1] if not is_fa else\
get_all_tiers_at_level(level, is_fa, roll_identifier)[tier - 1]
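# Illustrative usage sketch; it assumes the SQLite files referenced above
# (solo_values.db, dual_values.db, ...) exist with the expected tables.
if __name__ == '__main__':
    print(get_all_tiers_at_level(level=150, is_fa=False, roll_identifier=1))
    # tier 7 stat for a level-150 item on a dual, non flame-advantaged roll
    print(get_specific_tier_at_level(150, roll_identifier=2, tier=7, is_fa=False))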
|
StarcoderdataPython
|
8106274
|
<gh_stars>0
__copyright__ = 'Copyright(c) <NAME> 2017'
""" Facade for a collection of model instances
"""
import logging
from functools import wraps
LOG = logging.getLogger(__name__)
def chainable(generator_method):
""" Decorator for use with Collection class
Allows for chained invocation of filter methods and deferred iteration
:param generator_method callable: generator method to decorate; must consume the iterable
passed in the second parameter
:return: wrapped generator method
"""
@wraps(generator_method)
def wrapped_generator(self, *method_args, **method_kwargs):
# reset flag that indicates that all the items have been processed by the generators
self._filter_applied = False
# self._filtered updated with the new generator
self._filtered = generator_method(self, self._filtered, *method_args, **method_kwargs)
# returning self - the Collection - allows the calls to be chained
return self
return wrapped_generator
class Collection(object):
""" Collection of model instances
Encapsulate implementation of collection operations. In particular, calling code
need not know when the underlying instances are processed. For efficient use of memory
it is often desirable to defer instantiating a collection until the latest possible point.
"""
def __init__(self, items):
self._items = items
self._items_iter = None
self._filtered = iter(items)
self._filter_applied = True
def __iter__(self):
if self._filter_applied:
self._items_iter = iter(self._items)
else:
self._items = []
return self
def __next__(self):
try:
items = self._items_iter if self._filter_applied else self._filtered
item = next(items)
if not self._filter_applied:
self._items.append(item)
return item
except StopIteration:
self._filter_applied = True
raise
def lookup(self, id_to_match, fieldname):
for item in self:
if getattr(item, fieldname) == id_to_match:
yield item
def lookup_map(self, fieldname):
lookup_dict = {}
duplicates = set()
for item in self:
key = getattr(item, fieldname)
if key is not None:
if key in lookup_dict:
# remove ambiguous key and first mapping
del lookup_dict[key]
duplicates.add(key)
else:
lookup_dict[key] = item
if duplicates:
LOG.warning('Duplicates excluded from {} lookup map {}'.format(fieldname, duplicates))
return lookup_dict
@chainable
def not_null(self, items, fieldname):
for item in items:
if getattr(item, fieldname) is not None:
yield item
@chainable
def is_null(self, items, fieldname):
for item in items:
if getattr(item, fieldname) is None:
yield item
@chainable
def is_empty(self, items, fieldname):
for item in items:
if len(getattr(item, fieldname)) == 0:
yield item
@chainable
def filter(self, items, item_expression):
for item in items:
if item_expression(item):
yield item
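# A short usage sketch with a stand-in item type; the attribute names are
# purely illustrative.
if __name__ == '__main__':
    from types import SimpleNamespace

    records = [SimpleNamespace(pk=1, owner='a'), SimpleNamespace(pk=2, owner=None)]
    collection = Collection(records)
    # filter calls chain lazily; items are only consumed when iterated
    for record in collection.not_null('owner').filter(lambda r: r.pk > 0):
        print(record.pk)                 # -> 1
    # after a full pass the surviving items are cached inside the collection
    print(collection.lookup_map('pk'))   # -> {1: namespace(pk=1, owner='a')}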
|
StarcoderdataPython
|
6545228
|
<filename>hylfm/metrics/psnr.py
from math import log10
import numpy
import torch.nn.functional
from hylfm.metrics import SimpleSingleValueMetric
# class PSNR_SkImage(Metric):
# sum_: float
# num_examples: int
#
# def __init__(self, *, data_range=None, **super_kwargs):
# super().__init__(**super_kwargs)
# self.data_range = data_range
#
# def reset(self):
# self.sum_ = 0.0
# self.num_examples = 0
#
# def update_with_batch(self, *, prediction, target) -> None:
# n = prediction.shape[0]
# self.sum_ += sum(
# compare_psnr(im_test=p, im_true=t, data_range=self.data_range)
# for p, t in zip(prediction.cpu().numpy(), target.cpu().numpy())
# )
# self.num_examples += n
#
# update_with_sample = update_with_batch
#
# def compute(self):
# if self.num_examples == 0:
# raise RuntimeError("PSNR_SKImage must have at least one example before it can be computed.")
#
# return {self.name: self.sum_ / self.num_examples}
class PSNR(SimpleSingleValueMetric):
def __init__(self, *super_args, data_range: float, **super_kwargs):
super().__init__(*super_args, **super_kwargs)
self.log10dr20 = log10(data_range) * 20
@torch.no_grad()
def __call__(self, prediction, target):
n = prediction.shape[0]
prediction = torch.from_numpy(prediction) if isinstance(prediction, numpy.ndarray) else prediction
target = torch.from_numpy(target) if isinstance(target, numpy.ndarray) else target
return (
sum(
self.log10dr20 - 10 * torch.log10(torch.nn.functional.mse_loss(p, t)).item()
for p, t in zip(prediction, target)
)
/ n
)
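# A short usage sketch, assuming SimpleSingleValueMetric can be constructed
# without additional arguments (an assumption about the hylfm base class).
if __name__ == "__main__":
    prediction = numpy.random.rand(2, 1, 16, 16).astype("float32")
    target = numpy.random.rand(2, 1, 16, 16).astype("float32")
    metric = PSNR(data_range=1.0)
    print(metric(prediction, target))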
|
StarcoderdataPython
|
8116878
|
<reponame>Satwaj-Dhavale/Sentiment-Analysis-of-Multimedia
from face_detection import create_video_output
from face_detection import create_webcam_output
from face_detection import create_image_output
from tkinter import *
from tkinter import filedialog
from PIL import Image, ImageTk
import os
import shutil
root = Tk()
root.title('Sentiment Analysis')
root.geometry('500x400+500+500')
root.resizable(0, 0)
def open_file(label=0):
if label == 1:
create_webcam_output()
else:
file_paths = filedialog.askopenfilenames(parent=root, initialdir=os.getcwd(), title='Please Select a file', filetypes=[('All files', '.*')])
print(file_paths)
if label == 2:
if len(file_paths) == 0:
create_image_output()
else:
create_image_output(file_paths)
elif label == 3:
dirpath = './video_data/frames/'
if len(file_paths) == 0:
create_video_output()
else:
for filename in os.listdir(dirpath):
frame_path = os.path.join(dirpath, filename)
try:
shutil.rmtree(frame_path)
except OSError:
os.remove(frame_path)
create_video_output(file_paths)
background_img = Image.open('Sentiment_background.jpg')
background_tk = ImageTk.PhotoImage(background_img)
label_back = Label(root, image=background_tk)
label_back.pack()
photo = PhotoImage(file='Danger_Noodle.jpg')
root.iconphoto(False, photo)
btnImage = Button(root, text="Image", width=10, command=lambda: open_file(2))
btnImage.place(x=100, y=300)
btnVideo = Button(root, text="Video", width=10, command=lambda: open_file(3))
btnVideo.place(x=200, y=300)
btnWebcam = Button(root, text="Webcam", width=10, command=lambda: open_file(1))
btnWebcam.place(x=300, y=300)
btnExit = Button(root, text="Exit", width=10, command=root.destroy)
btnExit.place(x=200, y=350)
team_name = "Danger Noodles"
label1 = Label(root, text=team_name)
label1.config(width=200, font=('Courier', 20))
label1.pack()
root.mainloop()
|
StarcoderdataPython
|
162012
|
<reponame>mfarthin/PyDMD
# Tutorial 3: Multiresolution DMD: different time scales
# In this tutorial we will show the possibilities of the multiresolution dynamic modes decomposition (mrDMD) with respect to the classical DMD. We follow a wonderful blog post written by <NAME> [available here](http://www.pyrunner.com/weblog/2016/08/05/mrdmd-python/). We did not use his implementation of the mrDMD but only the sample data and the structure of the tutorial. You can find a mathematical reference for the mrDMD by Kutz et al. [here](http://epubs.siam.org/doi/pdf/10.1137/15M1023543).
# For the advanced settings of the DMD base class please refer to [this tutorial](https://github.com/mathLab/PyDMD/blob/master/tutorials/tutorial-2-adv-dmd.ipynb).
# First of all we just import the MrDMD and DMD classes from the pydmd package, we set matplotlib for the notebook and we import numpy.
import matplotlib.pyplot as plt
from pydmd import MrDMD
from pydmd import DMD
import numpy as np
# The code below generates a spatio-temporal example dataset. The data can be thought of as 80 locations or signals (the x-axis) being sampled 1600 times at a constant rate in time (the t-axis). It contains many features at varying time scales, like oscillating sines and cosines, one-time events, and random noise.
def create_sample_data():
x = np.linspace(-10, 10, 80)
t = np.linspace(0, 20, 1600)
Xm, Tm = np.meshgrid(x, t)
D = np.exp(-np.power(Xm/2, 2)) * np.exp(0.8j * Tm)
D += np.sin(0.9 * Xm) * np.exp(1j * Tm)
D += np.cos(1.1 * Xm) * np.exp(2j * Tm)
D += 0.6 * np.sin(1.2 * Xm) * np.exp(3j * Tm)
D += 0.6 * np.cos(1.3 * Xm) * np.exp(4j * Tm)
D += 0.2 * np.sin(2.0 * Xm) * np.exp(6j * Tm)
D += 0.2 * np.cos(2.1 * Xm) * np.exp(8j * Tm)
D += 0.1 * np.sin(5.7 * Xm) * np.exp(10j * Tm)
D += 0.1 * np.cos(5.9 * Xm) * np.exp(12j * Tm)
D += 0.1 * np.random.randn(*Xm.shape)
D += 0.03 * np.random.randn(*Xm.shape)
D += 5 * np.exp(-np.power((Xm+5)/5, 2)) * np.exp(-np.power((Tm-5)/5, 2))
D[:800,40:] += 2
D[200:600,50:70] -= 3
D[800:,:40] -= 2
D[1000:1400,10:30] += 3
D[1000:1080,50:70] += 2
D[1160:1240,50:70] += 2
D[1320:1400,50:70] += 2
return D.T
# Here we have an auxiliary function that we will use to plot the data.
def make_plot(X, x=None, y=None, figsize=(12, 8), title=''):
"""
Plot of the data X
"""
plt.figure(figsize=figsize)
plt.title(title)
X = np.real(X)
CS = plt.pcolor(x, y, X)
cbar = plt.colorbar(CS)
plt.xlabel('Space')
plt.ylabel('Time')
plt.show()
# Let us start by creating the dataset and plot the data in order to have a first idea of the problem.
sample_data = create_sample_data()
x = np.linspace(-10, 10, 80)
t = np.linspace(0, 20, 1600)
make_plot(sample_data.T, x=x, y=t)
# First we apply the classical DMD without the svd rank truncation, and then we try to reconstruct the data. You can clearly see that all the transient time events are missing.
first_dmd = DMD(svd_rank=-1)
first_dmd.fit(X=sample_data)
make_plot(first_dmd.reconstructed_data.T, x=x, y=t)
# Now we do the same but using the mrDMD instead. The result is remarkable even with the svd rank truncation (experiment changing the input parameters).
sub_dmd = DMD(svd_rank=-1)
dmd = MrDMD(sub_dmd, max_level=7, max_cycles=1)
dmd.fit(X=sample_data)
make_plot(dmd.reconstructed_data.T, x=x, y=t)
# We now plot the eigenvalues in order to better understand the mrDMD. Without truncation we have 80 eigenvalues.
print('The number of eigenvalues is {}'.format(dmd.eigs.shape[0]))
dmd.plot_eigs(show_axes=True, show_unit_circle=True, figsize=(8, 8))
# It is also possible to plot only specific eigenvalues, given the level and the node. If the node is not provided all the eigenvalues of that level will be plotted.
dmd.plot_eigs(show_axes=True, show_unit_circle=True, figsize=(8, 8), level=3, node=0)
# The idea is to extract the slow modes at each iteration, where a slow mode is a mode with a relative low frequency. This just means that the mode changes somewhat slowly as the system evolves in time. Thus the mrDMD is able to catch different time events.
#
# The general mrDMD algorithm is as follows:
#
# 1. Compute DMD for available data.
# 2. Determine fast and slow modes.
# 3. Find the best DMD approximation to the available data constructed from the slow modes only.
# 4. Subtract off the slow-mode approximation from the available data.
# 5. Split the available data in half.
# 6. Repeat the procedure for the first half of data (including this step).
# 7. Repeat the procedure for the second half of data (including this step).
# Let us have a look at the modes for the first two levels and the corresponding time evolution. At the first level we have two very slow modes, while at the second one there are 5 modes.
pmodes = dmd.partial_modes(level=0)
fig = plt.plot(x, pmodes.real)
pdyna = dmd.partial_dynamics(level=0)
fig = plt.plot(t, pdyna.real.T)
# Notice the discontinuities in the time evolution where the data were split.
pdyna = dmd.partial_dynamics(level=1)
print('The number of modes in the level number 1 is {}'.format(pdyna.shape[0]))
fig = plt.plot(t, pdyna.real.T)
# Now we recreate the original data by adding levels together. For each level, starting with the first (note that the starting index is 0), we construct an approximation of the data.
pdata = dmd.partial_reconstructed_data(level=0)
make_plot(pdata.T, x=x, y=t, title='level 0', figsize=(7.5, 5))
# Then, we sequentially add them all together, one on top of another. It is interesting to see how the original data has been broken into features of finer and finer resolution.
for i in range(1, 7):
pdata += dmd.partial_reconstructed_data(level=i)
make_plot(pdata.T, x=x, y=t, title='levels 0-' + str(i), figsize=(7.5, 5))
# The multiresolution DMD has been employed in many different fields of study due to its versatility. Feel free to share with us your applications!
|
StarcoderdataPython
|
3312953
|
<gh_stars>1-10
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Type, TypeVar, cast, overload
from asyncpg import Record
from asyncpg.pool import PoolConnectionProxy
from attr import dataclass
from discord.ext import typed_commands
from ..compat import (
AbstractAsyncContextManager,
Awaitable,
Callable,
Coroutine,
Generator,
Sequence,
dict,
list,
tuple,
)
from .util import delete_from, insert_into, search, select_all, select_one, update
if TYPE_CHECKING:
from .bot import Bot
_Record = TypeVar('_Record', bound=Record)
@dataclass(slots=True)
class AquireContextManager(
AbstractAsyncContextManager['PoolConnectionProxy[Record]'],
Awaitable['PoolConnectionProxy[Record]'],
):
ctx: Context
timeout: float | None = None
def __await__(self, /) -> Generator[Any, None, PoolConnectionProxy[Record]]:
return self.ctx._acquire(self.timeout).__await__()
async def __aenter__(self, /) -> PoolConnectionProxy[Record]:
return await self.ctx._acquire(self.timeout)
async def __aexit__(self, /, *args: Any) -> None:
await self.ctx.release()
FunctionType = Callable[..., Coroutine[Any, Any, Any]]
F = TypeVar('F', bound=FunctionType)
def ensure_db(func: F, /) -> F:
def wrapper(self: Any, /, *args: Any, **kwargs: Any) -> Any:
if not hasattr(self, 'db'):
raise RuntimeError(
'No database object available; ensure acquire() was called'
)
return func(self, *args, **kwargs)
return cast(F, wrapper)
class Context(typed_commands.Context):
bot: Bot[Any]
db: PoolConnectionProxy[Record]
async def _acquire(self, timeout: float | None, /) -> PoolConnectionProxy[Record]:
if not hasattr(self, 'db'):
self.db = await self.bot.pool.acquire(timeout=timeout)
return self.db
def acquire(self, /, *, timeout: float | None = None) -> AquireContextManager:
return AquireContextManager(self, timeout)
async def release(self, /) -> None:
if hasattr(self, 'db'):
await self.bot.pool.release(self.db)
del self.db
@overload
async def select_all(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = ...,
group_by: Sequence[str] | None = ...,
order_by: str | None = ...,
joins: Sequence[tuple[str, str]] | None = ...,
record_class: None = ...,
) -> list[Record]:
...
@overload
async def select_all(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = ...,
group_by: Sequence[str] | None = ...,
order_by: str | None = ...,
joins: Sequence[tuple[str, str]] | None = ...,
record_class: Type[_Record],
) -> list[_Record]:
...
@ensure_db
async def select_all(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = None,
group_by: Sequence[str] | None = None,
order_by: str | None = None,
joins: Sequence[tuple[str, str]] | None = None,
record_class: Any | None = None,
) -> list[Any]:
return await select_all(
self.db,
*args,
columns=columns,
table=table,
order_by=order_by,
where=where,
group_by=group_by,
joins=joins,
record_class=record_class,
)
@overload
async def select_one(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = ...,
group_by: Sequence[str] | None = ...,
joins: Sequence[tuple[str, str]] | None = ...,
record_class: None = ...,
) -> Record | None:
...
@overload
async def select_one(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = ...,
group_by: Sequence[str] | None = ...,
joins: Sequence[tuple[str, str]] | None = ...,
record_class: Type[_Record],
) -> _Record | None:
...
@ensure_db
async def select_one(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
where: Sequence[str] | None = None,
group_by: Sequence[str] | None = None,
joins: Sequence[tuple[str, str]] | None = None,
record_class: Any | None = None,
) -> Any | None:
return await select_one(
self.db,
*args,
columns=columns,
table=table,
where=where,
group_by=group_by,
joins=joins,
record_class=record_class,
)
@overload
async def search(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
search_columns: Sequence[str],
terms: Sequence[str],
where: Sequence[str] | None = None,
group_by: Sequence[str] | None = None,
order_by: str | None = None,
joins: Sequence[tuple[str, str]] | None = None,
record_class: None = ...,
) -> list[Record]:
...
@overload
async def search(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
search_columns: Sequence[str],
terms: Sequence[str],
where: Sequence[str] | None = None,
group_by: Sequence[str] | None = None,
order_by: str | None = None,
joins: Sequence[tuple[str, str]] | None = None,
record_class: Type[_Record],
) -> list[_Record]:
...
@ensure_db
async def search(
self,
/,
*args: Any,
table: str,
columns: Sequence[str],
search_columns: Sequence[str],
terms: Sequence[str],
where: Sequence[str] | None = None,
group_by: Sequence[str] | None = None,
order_by: str | None = None,
joins: Sequence[tuple[str, str]] | None = None,
record_class: Any | None = None,
) -> list[Any]:
return await search(
self.db,
*args,
columns=columns,
table=table,
search_columns=search_columns,
terms=terms,
where=where,
group_by=group_by,
order_by=order_by,
joins=joins,
record_class=record_class,
)
@ensure_db
async def update(
self,
/,
*args: Any,
table: str,
values: dict[str, Any],
where: Sequence[str] | None = None,
) -> None:
return await update(self.db, *args, table=table, values=values, where=where)
@ensure_db
async def insert_into(
self, /, *, table: str, values: dict[str, Any], extra: str = ''
) -> None:
return await insert_into(self.db, table=table, values=values, extra=extra)
@ensure_db
async def delete_from(
self, /, *args: Any, table: str, where: Sequence[str]
) -> None:
return await delete_from(self.db, *args, table=table, where=where)
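# A hypothetical usage sketch; it assumes `ctx` is an instance of the Context
# class above and that a `profiles` table with these columns exists (both are
# assumptions, not part of this module).
#
# async def show_profile(ctx: Context, user_id: int) -> None:
#     async with ctx.acquire():
#         record = await ctx.select_one(
#             user_id,
#             table='profiles',
#             columns=['user_id', 'bio'],
#             where=['user_id = $1'],
#         )
#     await ctx.send(str(record))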
|
StarcoderdataPython
|
3512162
|
<reponame>gabrielkotev/HumanVoiceRecognition
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 4 22:40:25 2017
@author: Gabriel
"""
from keras.models import Sequential
from keras.layers import Conv1D, MaxPool1D, Flatten
import numpy as np
input_data = np.ones(161 * 5).reshape(1, 161, 5)
model = Sequential()
model.add(Conv1D(16, 2, input_shape=(161, 5)))
model.add(MaxPool1D())
model.add(Conv1D(16, 2))
model.add(MaxPool1D())
model.add(Flatten())
model.compile('adam', 'mean_squared_error')
model.summary()
prediction = model.predict(input_data, batch_size=1)
print(prediction.shape)
|
StarcoderdataPython
|
5199399
|
<gh_stars>10-100
class Animal:
    def __init__(self, nombre: str):
        self.nombre = nombre
    def get_nombre(self) -> str:
        return self.nombre
    def sonido(self) -> str:
        pass
# classes are created for the different animals
class Perro(Animal):
    def sonido(self):
        return 'guau'
class Gato(Animal):
    def sonido(self):
        return 'miaun'
def sonido(animales: list):
    for animal in animales:
        print(animal.sonido())
animales = [Perro('Perro'), Gato('Gato')]
sonido(animales)
|
StarcoderdataPython
|
9768589
|
<filename>exercises/gradient_descent_investigation.py
import numpy as np
import pandas as pd
from typing import Tuple, List, Callable, Type
from IMLearn import BaseModule
from IMLearn.desent_methods import GradientDescent, FixedLR, ExponentialLR
from IMLearn.desent_methods.modules import L1, L2
from IMLearn.learners.classifiers.logistic_regression import LogisticRegression
from IMLearn.utils import split_train_test
import plotly.graph_objects as go
ITERATIONS = 1000
def plot_descent_path(module: Type[BaseModule],
descent_path: np.ndarray,
title: str = "",
xrange=(-1.5, 1.5),
yrange=(-1.5, 1.5)) -> go.Figure:
"""
Plot the descent path of the gradient descent algorithm
Parameters:
-----------
module: Type[BaseModule]
Module type for which descent path is plotted
descent_path: np.ndarray of shape (n_iterations, 2)
Set of locations if 2D parameter space being the regularization path
title: str, default=""
Setting details to add to plot title
xrange: Tuple[float, float], default=(-1.5, 1.5)
Plot's x-axis range
yrange: Tuple[float, float], default=(-1.5, 1.5)
Plot's x-axis range
Return:
-------
fig: go.Figure
Plotly figure showing module's value in a grid of [xrange]x[yrange] over which regularization path is shown
Example:
--------
fig = plot_descent_path(IMLearn.desent_methods.modules.L1, np.ndarray([[1,1],[0,0]]))
fig.show()
"""
def predict_(w):
return np.array([module(weights=wi).compute_output() for wi in w])
from utils import decision_surface
return go.Figure([decision_surface(predict_, xrange=xrange, yrange=yrange,
density=70, showscale=False),
go.Scatter(x=descent_path[:, 0], y=descent_path[:, 1],
mode="markers+lines", marker_color="black")],
layout=go.Layout(xaxis=dict(range=xrange),
yaxis=dict(range=yrange),
title=f"GD Descent Path {title}"))
def plot_convergence_rate(values: List,
title: str = "",
color: str = "blue",
iterations=1000) -> go.Figure:
"""
Plot the convergence rate of the gradient descent algorithm
Parameters:
-----------
values: np.ndarray
Loss values of the currently learned function.
title: str, default=""
Setting details to add to plot title
iter_range: int, default=1000
Plot's x-axis range (number of iterations)
Return:
-------
fig: go.Figure
Plotly figure showing module's convergence rate.
"""
return go.Figure(
[go.Scatter(x=list(range(iterations)), y=values,
mode="markers", marker_color=color)],
layout=go.Layout(title=f"GD Convergence Rate {title}"))
def get_gd_state_recorder_callback() -> Tuple[
Callable[[], None], List[np.ndarray], List[np.ndarray]]:
"""
Callback generator for the GradientDescent class, recording the objective's value and parameters at each iteration
Return:
-------
callback: Callable[[], None]
Callback function to be passed to the GradientDescent class, recoding the objective's value and parameters
at each iteration of the algorithm
values: List[np.ndarray]
Recorded objective values
weights: List[np.ndarray]
Recorded parameters
"""
values, weights_lst = [], []
def callback(solver, weights, val, grad, t, eta, delta, **kwargs):
weights_lst.append(weights)
values.append(val)
return callback, values, weights_lst
def compare_fixed_learning_rates(
init: np.ndarray = np.array([np.sqrt(2), np.e / 3]),
etas: Tuple[float] = (1, .1, .01, .001)):
for eta in etas:
learning_rate = FixedLR(base_lr=eta)
l1 = L1(init.copy())
l2 = L2(init.copy())
callback_l1, vals_l1, weights_l1 = get_gd_state_recorder_callback()
callback_l2, vals_l2, weights_l2 = get_gd_state_recorder_callback()
weights_l1.insert(0, init)
weights_l2.insert(0, init)
vals_l1.insert(0, l1.compute_output())
vals_l2.insert(0, l2.compute_output())
GradientDescent(learning_rate=learning_rate, callback=callback_l1,
max_iter=ITERATIONS).fit(
f=l1, X=None, y=None)
GradientDescent(learning_rate=learning_rate, callback=callback_l2,
max_iter=ITERATIONS).fit(
f=l2, X=None, y=None)
fig_l1 = plot_descent_path(module=L1,
descent_path=np.array(weights_l1),
title=f"L1, \u03B7 = {eta}")
fig_l2 = plot_descent_path(module=L2,
descent_path=np.array(weights_l2),
title=f"L2, \u03B7 = {eta}")
fig_l1.show(renderer="browser")
fig_l2.show(renderer="browser")
fig_l1 = plot_convergence_rate(values=vals_l1,
title=f"L1, \u03B7 = {eta}",
color="blue")
fig_l2 = plot_convergence_rate(values=vals_l2,
title=f"L2, \u03B7 = {eta}",
color="orange")
fig_l1.show(renderer="browser")
fig_l2.show(renderer="browser")
def compare_exponential_decay_rates(
init: np.ndarray = np.array([np.sqrt(2), np.e / 3]),
eta: float = .1,
gammas: Tuple[float] = (.9, .95, .99, 1)):
# Optimize the L1 objective using different decay-rate values of the
# exponentially decaying learning rate
for gamma in gammas:
learning_rate = ExponentialLR(base_lr=eta, decay_rate=gamma)
l1 = L1(init.copy())
callback_l1, vals_l1, weights_l1 = get_gd_state_recorder_callback()
weights_l1.insert(0, init)
vals_l1.insert(0, l1.compute_output())
GradientDescent(learning_rate=learning_rate, callback=callback_l1,
max_iter=ITERATIONS).fit(
f=l1, X=None, y=None)
# Plot algorithm's convergence for the different values of gamma
fig_l1 = plot_convergence_rate(values=vals_l1,
title=f"L1, Exponential Learning Rate: "
f"\u03B7={eta}, \u03B3={gamma}",
color="blue")
fig_l1.show(renderer="browser")
# Plot descent path for gamma=0.95
if gamma == 0.95:
fig = plot_descent_path(module=L1,
descent_path=np.array(weights_l1),
title=f"GD Descent Path L1, Exponential "
f"Learning Rate: \u03B7={eta}, "
f"\u03B3={gamma}")
fig.show(renderer="browser")
def load_data(path: str = "../datasets/SAheart.data",
train_portion: float = .8) -> \
Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
"""
Load South-Africa Heart Disease dataset and randomly split into a train- and test portion
Parameters:
-----------
path: str, default= "../datasets/SAheart.data"
Path to dataset
train_portion: float, default=0.8
Portion of dataset to use as a training set
Return:
-------
train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features)
Design matrix of train set
train_y : Series of shape (ceil(train_proportion * n_samples), )
Responses of training samples
test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features)
Design matrix of test set
test_y : Series of shape (floor((1-train_proportion) * n_samples), )
Responses of test samples
"""
df = pd.read_csv(path)
df.famhist = (df.famhist == 'Present').astype(int)
return split_train_test(df.drop(['chd', 'row.names'], axis=1), df.chd,
train_portion)
def plot_roc_curve(y, y_prob):
from sklearn.metrics import roc_curve, auc
from utils import custom
c = [custom[0], custom[-1]]
fpr, tpr, thresholds = roc_curve(y, y_prob)
return go.Figure(
data=[go.Scatter(x=[0, 1], y=[0, 1], mode="lines",
line=dict(color="black", dash='dash'),
name="Random Class Assignment"),
go.Scatter(x=fpr, y=tpr, mode='markers+lines', text=thresholds,
name="", showlegend=False, marker_size=5,
marker_color=c[1][1],
hovertemplate="<b>Threshold:</b>%{text:.3f}<br>FPR: %{x:.3f}<br>TPR: %{y:.3f}")],
layout=go.Layout(
title=rf"$\text{{ROC Curve Of Fitted Model - AUC}}={auc(fpr, tpr):.6f}$",
xaxis=dict(title=r"$\text{False Positive Rate (FPR)}$"),
yaxis=dict(title=r"$\text{True Positive Rate (TPR)}$")))
def fit_logistic_regression():
# Load and split SA Heard Disease dataset
X_train, y_train, X_test, y_test = load_data()
# # Plotting convergence rate of logistic regression over SA heart disease data
# raise NotImplementedError()
#
# # Fitting l1- and l2-regularized logistic regression models, using cross-validation to specify values
# # of regularization parameter
# raise NotImplementedError()
module = LogisticRegression(include_intercept=True,
solver=GradientDescent(),
penalty="l1")
module.fit(X_train.to_numpy(), y_train.to_numpy())
    y_prob = module.predict_proba(X_train.to_numpy())
    # the ROC curve is computed against the true labels, not the predictions
    plot_roc_curve(y_train.to_numpy(), y_prob).show()
if __name__ == '__main__':
np.random.seed(0)
# compare_fixed_learning_rates()
# compare_exponential_decay_rates()
fit_logistic_regression()
|
StarcoderdataPython
|
329717
|
import smbus
ADC_ADDR = 0x48
CHN_ADDR = {
'AIN0' : 0x40,
'AIN1' : 0x41,
'AIN2' : 0xA2,
'AIN3' : 0xA3
}
class Signal(object):
def __init__(self, channel):
self.channel_address = CHN_ADDR[channel]
self.bus = smbus.SMBus(1)
def measure(self):
self.bus.write_byte(ADC_ADDR, self.channel_address)
raw = self.bus.read_byte(ADC_ADDR)
return raw * 3.3 / 255
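# Minimal usage sketch; it assumes an ADC is actually present at address 0x48
# on I2C bus 1, otherwise the smbus calls will raise an IOError.
if __name__ == '__main__':
    signal = Signal('AIN0')
    print('Voltage on AIN0: {:.2f} V'.format(signal.measure()))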
|
StarcoderdataPython
|
5177402
|
# Generated by Django 3.0.6 on 2020-05-20 11:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_sources_oauth", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="oauthsource",
name="access_token_url",
field=models.CharField(
help_text="URL used by authentik to retrieve tokens.",
max_length=255,
verbose_name="Access Token URL",
),
),
migrations.AlterField(
model_name="oauthsource",
name="request_token_url",
field=models.CharField(
blank=True,
help_text="URL used to request the initial token. This URL is only required for OAuth 1.",
max_length=255,
verbose_name="Request Token URL",
),
),
migrations.AlterField(
model_name="oauthsource",
name="authorization_url",
field=models.CharField(
help_text="URL the user is redirect to to conest the flow.",
max_length=255,
verbose_name="Authorization URL",
),
),
migrations.AlterField(
model_name="oauthsource",
name="profile_url",
field=models.CharField(
help_text="URL used by authentik to get user information.",
max_length=255,
verbose_name="Profile URL",
),
),
migrations.AlterModelOptions(
name="oauthsource",
options={
"verbose_name": "OAuth Source",
"verbose_name_plural": "OAuth Sources",
},
),
]
|
StarcoderdataPython
|
3494703
|
<reponame>mrgiser/helloworld-python<gh_stars>0
number = 23
guess = int(input('Enter an integer : '))
if guess == number:
    # a new block starts here
print('Congratulations, you guessed it.')
print('(but you do not win any prizes!)')
    # the new block ends here
elif guess < number:
    # another code block
print('No, it is a little higher than that')
    # you can do anything you want inside this block
else:
print('No, it is a little lower than that')
    # you must have guessed a number greater than (>) the set number to reach here.
print('Done')
# this last statement will be executed
# after the if statement has finished executing.
|
StarcoderdataPython
|
1882841
|
<reponame>yabirgb/simobility<gh_stars>0
import logging
import pandas as pd
import numpy as np
from datetime import datetime
import uuid
import random
from .itinerary import Itinerary
from .vehicle import Vehicle
from .booking import Booking
from .position import Position
def basic_booking_itinerary(
current_time: int,
vehicle: Vehicle,
booking: Booking,
pickup_eta: int = None,
dropoff_eta: int = None,
) -> Itinerary:
"""
Create a simple Itinerary: one vehicle picks up and drops off
one customer
"""
itinerary = Itinerary(current_time, vehicle)
itinerary.move_to(booking.pickup, pickup_eta)
itinerary.pickup(booking, pickup_eta)
itinerary.move_to(booking.dropoff, dropoff_eta)
itinerary.dropoff(booking, dropoff_eta)
return itinerary
class ReplayDemand:
def __init__(
self,
clock,
file_name: str,
from_datetime: datetime,
to_datetime: datetime,
round_to: str,
sample_size: int = None,
map_matcher=None,
seed=None,
):
"""
Expected columns:
        - pickup_datetime
- pickup_lon
- pickup_lat
- dropoff_lon
- dropoff_lat
"""
self.clock = clock
self.data = pd.read_feather(file_name)
logging.debug(f"Total number of trips: {self.data.shape[0]}")
# # TODO: is it really needed???
# time_jitter = np.array([
# pd.to_timedelta("{} sec".format(np.round(i)))
# for i in np.random.normal(0, 120, self.data.datetime.shape[0])
# ])
# self.data.datetime = self.data.datetime.dt.to_pydatetime() + time_jitter
idx = (self.data.pickup_datetime >= from_datetime) & (self.data.pickup_datetime < to_datetime)
self.data = self.data[idx]
logging.debug(f"Time filtered number of trips: {self.data.shape[0]}")
# "local" randomizer, independent from the "global", simulation level
state = np.random.RandomState(seed)
if sample_size is not None:
replace = self.data.index.shape[0] < sample_size
index = state.choice(self.data.index, sample_size, replace=replace)
self.data = self.data.loc[index]
logging.debug(f"Sample size: {self.data.shape[0]}")
self.data.pickup_datetime = self.data.pickup_datetime.dt.round(round_to)
self.demand = {g: item for g, item in self.data.groupby(self.data.pickup_datetime)}
self.map_matcher = map_matcher
self._seq_id = 0
def next(self, key=None):
if key is None:
key = pd.to_datetime(self.clock.to_datetime())
bookings = []
seats = 1
if key in self.demand:
for b in self.demand[key].itertuples():
pu = Position(b.pickup_lon, b.pickup_lat)
do = Position(b.dropoff_lon, b.dropoff_lat)
# TODO: if booking map matched too far from the original point????
if self.map_matcher:
pu = self.map_matcher.map_match(pu)
do = self.map_matcher.map_match(do)
id_ = self._seq_id
bookings.append(Booking(self.clock, pu, do, seats, booking_id=id_))
self._seq_id += 1
return bookings
|
StarcoderdataPython
|
12818179
|
#!/usr/bin/env python
# encoding: utf-8
"""
File: userprofile_userid_paidinfo.py
Date: 2018/10/01
submit command:
spark-submit --master yarn --deploy-mode client --driver-memory 1g --executor-memory 2g
--executor-cores 2 --num-executors 30 userprofile_userid_paidinfo.py start-date
A220U083_001 cumulative purchase amount
A220U084_001 days since the most recent purchase
A220U087_001 cumulative number of purchases
A220U088_001 registered but never purchased
"""
from pyspark import SparkContext,SparkConf
from pyspark.sql import SparkSession
import sys
import datetime
def main():
start_date = sys.argv[1]
start_date_str = str(start_date)
format_1 = "%Y%m%d"
format_2 = "%Y-%m-%d"
strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime
old_date_partition = strftime(strptime(start_date_str, format_1), format_2)
target_table = 'dw.profile_tag_user'
    # cumulative purchase amount
insert_all_paid_money = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_all_paid_money') \
select 'A220U083_001' as tagid, \
user_id as userid, \
sum(order_total_amount) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dw.dw_order_fact \
where pay_status in (1,3) \
group by 'A220U083_001',user_id "
    # cumulative number of purchases
insert_all_paid_times = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_all_paid_times') \
select 'A220U087_001' as tagid, \
user_id as userid, \
count(distinct order_id) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dw.dw_order_fact \
where pay_status in (1,3) \
group by 'A220U087_001',user_id "
    # days since the most recent purchase
insert_last_paid_days = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_last_paid') \
select 'A220U084_001' as tagid, \
t.user_id as userid, \
datediff(to_date("+"'"+old_date_partition+"'"+"),concat(substr(t.result_pay_time,1,10))) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from ( \
select user_id, \
result_pay_time, \
row_number() over(partition by user_id order by result_pay_time desc) as rank \
from dw.dw_order_fact \
where pay_status in (1,3) \
) t \
where t.rank =1 \
group by 'A220U084_001',t.user_id, \
datediff(to_date("+"'"+old_date_partition+"'"+"),concat(substr(t.result_pay_time,1,10)))"
    # registered but never purchased
regist_notpaid = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_regist_notpaid') \
select 'A220U088_001' as tagid, \
user_id as userid, \
'' as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dim.dim_user_info \
where data_date = "+"'"+start_date_str+"'"+" \
and (paid_order_amount = 0 or paid_order_amount is null ) \
group by 'A220U088_001', user_id "
spark = SparkSession.builder.appName("userid_paidinfo").enableHiveSupport().getOrCreate()
spark.sql(insert_all_paid_money)
spark.sql(insert_all_paid_times)
spark.sql(insert_last_paid_days)
spark.sql(regist_notpaid)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11307202
|
<reponame>aananditadhawan/bcc
#!/usr/bin/python
# This is an example of a hardware breakpoint on a kernel address.
# run in project examples directory with:
# sudo ./breakpoint.py
# then provide <0xaddress> <pid> <breakpoint_type> on standard input when prompted
# HW_BREAKPOINT_W = 2
# HW_BREAKPOINT_RW = 3
# You may need to clear the old tracepipe inputs before running the script :
# echo > /sys/kernel/debug/tracing/trace
# 10-Jul-2019 <NAME> Created this.
from __future__ import print_function
from bcc import BPF
from bcc.utils import printb
prog = """
#include <uapi/linux/ptrace.h>
int func(struct pt_regs *ctx) {
bpf_trace_printk("Hello World, Here I accessed the address, Instr. ptr = 0x%p\\n", ctx->ip);
return 0;
}
"""
b = BPF(text=prog)
symbol_addr = input()
pid = input()
bp_type = input()
b.attach_breakpoint(symbol_addr, pid, "func", bp_type)
# header
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "MESSAGE"))
# format output
while 1:
try:
(task, pid, cpu, flags, ts, msg) = b.trace_fields()
except ValueError:
continue
except KeyboardInterrupt:
break
printb(b"%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
|
StarcoderdataPython
|
107724
|
import os
import sys
def main():
# no need for int_* in the variable name
start = int(input("Enter the start digit: "))
stop = int(input("Enter the stop number: "))
step = int(input("Enter the step number: "))
print(f"Generated integers: {list(range(start, stop, step))}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
3413774
|
import pygame
from .viewport import Viewport
class Renderer(object):
def __init__(self, viewport: Viewport):
self.renderers = []
self.window = pygame.display.set_mode((viewport.width, viewport.height), pygame.HWSURFACE, 32)
self.window.fill((0, 0, 0))
self.viewport = viewport
# pygame.mouse.set_visible(0)
pygame.display.set_caption("ForcePush")
def render(self, surface: pygame.Surface):
self.viewport.update()
for renderer in self.renderers:
renderer.render(surface)
pygame.display.update()
def _render(self):
self.render(self.window)
def add_renderer(self, renderer):
# Todo: double check no duplicate
self.renderers.append(renderer)
def remove_renderer(self, renderer):
self.renderers.remove(renderer)
|
StarcoderdataPython
|
4867317
|
<reponame>entelecheia/eKorpKit
import logging
import pandas as pd
from .base import BaseSentimentAnalyser
log = logging.getLogger(__name__)
class HIV4SA(BaseSentimentAnalyser):
"""
A class for sentiment analysis using the HIV4 lexicon.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _get_score(self, tokens, lexicon_features, feature="polarity"):
"""Get score for features.
        :returns: dict
"""
lxfeat_names = self._features.get(feature).get("lexicon_features")
lxfeat = pd.DataFrame.from_dict(lexicon_features, orient="index")
score = {}
if feature == "polarity":
lxfeat["pos"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Positiv"] else 0, axis=1
)
lxfeat["neg"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Negativ"] else 0, axis=1
)
lxfeat_agg = lxfeat.agg({"pos": "sum", "neg": "sum"})
polarity = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
lxfeat_agg["pos"] + lxfeat_agg["neg"] + self.EPSILON
)
polarity2 = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
subjectivity = (lxfeat_agg["pos"] + lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
score["positive"] = lxfeat_agg["pos"] / (len(tokens) + self.EPSILON)
score["negative"] = lxfeat_agg["neg"] / (len(tokens) + self.EPSILON)
score["polarity"] = polarity
score["polarity2"] = polarity2
score["subjectivity"] = subjectivity
elif isinstance(lxfeat_names, str):
lxfeat[feature] = lxfeat.apply(
lambda x: 1 * x["count"] if x[lxfeat_names] else 0, axis=1
)
lxfeat_agg = lxfeat.agg({feature: "sum"})
feat_score = lxfeat_agg[feature] / (len(tokens) + self.EPSILON)
score[feature] = feat_score
return score
def _assign_class(self, score, feature="polarity"):
"""Assign class to a score.
        :returns: dict
"""
labels = self._features.get(feature).get("labels")
if labels:
score["label"] = ""
for label, thresh in labels.items():
if isinstance(thresh, str):
thresh = eval(thresh)
if score[feature] >= thresh[0] and score[feature] <= thresh[1]:
score["label"] = label
return score
class LMSA(BaseSentimentAnalyser):
"""
A class for sentiment analysis using the LM lexicon.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _get_score(self, tokens, lexicon_features, feature="polarity"):
"""Get score for features.
        :returns: dict
"""
lxfeat_names = self._features.get(feature).get("lexicon_features")
lxfeat = pd.DataFrame.from_dict(lexicon_features, orient="index")
score = {}
if lxfeat.empty:
return score
if feature == "polarity":
lxfeat["pos"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Positive"] > 0 else 0, axis=1
)
lxfeat["neg"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Negative"] > 0 else 0, axis=1
)
lxfeat_agg = lxfeat.agg({"pos": "sum", "neg": "sum"})
polarity = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
lxfeat_agg["pos"] + lxfeat_agg["neg"] + self.EPSILON
)
polarity2 = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
subjectivity = (lxfeat_agg["pos"] + lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
score["positive"] = lxfeat_agg["pos"] / (len(tokens) + self.EPSILON)
score["negative"] = lxfeat_agg["neg"] / (len(tokens) + self.EPSILON)
score["num_tokens"] = len(tokens)
score[feature] = polarity
score['polarity2'] = polarity2
score["subjectivity"] = subjectivity
elif isinstance(lxfeat_names, str):
lxfeat[feature] = lxfeat.apply(
lambda x: 1 * x["count"] if x[lxfeat_names] > 0 else 0, axis=1
)
lxfeat_agg = lxfeat.agg({feature: "sum"})
feat_score = lxfeat_agg[feature] / (len(tokens) + self.EPSILON)
score[feature] = feat_score
return score
def _assign_class(self, score, feature="polarity"):
"""Assign class to a score.
        :returns: dict
"""
label_key = feature + "_label"
labels = self._features.get(feature).get("labels")
if labels:
score[label_key] = ""
for label, thresh in labels.items():
if isinstance(thresh, str):
thresh = eval(thresh)
if score[feature] >= thresh[0] and score[feature] <= thresh[1]:
score[label_key] = label
return score
|
StarcoderdataPython
|
3397068
|
<filename>resources/python/KemendagriKTP/options.py
paths = {
"xls": "./src/xls",
"csv": "./src/csv"
}
database = {
"host":"localhost",
"user":"kosan",
"pwd":"<PASSWORD>!",
"database":"kosan_system",
"table":"regions"
}
|
StarcoderdataPython
|
9715910
|
<reponame>JinyuanSun/SeqDDG
#!/usr/bin/python
#By <NAME>, 2021
# use HHblist to search sequences and build a3m file
from os import popen
import subprocess
import time
#subprocess.call('a.exe')
def hhsearch(seqfilename, iter_num, path_to_database, num_threads):
searchcmd = "hhblits -i " + seqfilename + " -o " + seqfilename + ".hhr -oa3m " + seqfilename + ".a3m -n " + str(
iter_num) + " -d " + path_to_database + " -cpu " + str(num_threads)
# print("grep \">\" " + seqfilename + ".a3m|wc -l")
    search = subprocess.Popen(searchcmd, shell=True)
    # Wait for hhblits to finish; poll() returns None while the process is still running.
    while search.poll() is None:
        time.sleep(1)
    hits_num = popen("grep \">\" " + seqfilename + ".a3m|wc -l").read().strip()
    print("Found " + hits_num + " hits!")
    a3mfilename = seqfilename + ".a3m"
    return a3mfilename
if __name__ == '__main__':
# print_hi('PyCharm')
seqfilename = "g.fasta"
iter_num = "3"
path_to_database = "/ydata/jsun/database/UniRef30_2020_03"
num_threads = 8
hhsearch(seqfilename, iter_num, path_to_database, num_threads)
|
StarcoderdataPython
|
9799114
|
<gh_stars>0
'''STUDENT: <NAME>
EXERCISE 06: REVIEW OF TIME COMPLEXITY'''
'''---------------------------------------------------'''
# Q6: What is the time complexity of
def sumOfNumbers(n):
    # Prints the running sum of the numbers from 1 up to i
p = 0 # O(1)
i = 1 # O(1)
while(p <= n):
p = p + i
i = i + 1
print("suma desde 1 a",i,"= ", p + i)
# Tracing the values of the variables
# i p
# ------------------------
# 1 0 + 1
# 2 1 + 2
# 3 1 + 2 + 3
# 4 1 + 2 + 3 + 4
# .
# .
# .
# k 1 + 2 + 3 + 4 + ... + k
# Assume k > n
# p = k * (k + 1) / 2
# p > n
# k * (k + 1) / 2 > n
# k^2 > n
# k > sqrt(n)
'''---------------------------------------------------'''
# TIME COMPLEXITY: O(n^(1/2))
# The loop keeps running while p is less than or equal to n, and on each iteration
# the current value of i is added to p, so the loop body executes roughly sqrt(n) times.
def main():
sumOfNumbers(10)
main()
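
# --- Editorial sketch (not part of the original exercise) ---
# A quick empirical check of the O(n^(1/2)) bound derived above: count how many
# times the loop body runs and compare its growth against sqrt(n). The helper name
# count_iterations is an illustrative assumption, not taken from the source.
import math

def count_iterations(n):
    p, i, iterations = 0, 1, 0
    while p <= n:
        p = p + i
        i = i + 1
        iterations += 1
    return iterations

for n in (10, 1000, 100000):
    # iteration count grows like sqrt(n) (up to a constant factor)
    print(n, count_iterations(n), round(math.sqrt(n), 1))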
|
StarcoderdataPython
|
3218958
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_tflite."""
import itertools
import os
from absl.testing import parameterized
import tensorflow as tf
from official.core import exp_factory
from official.core import task_factory
from official.projects.edgetpu.vision.serving import export_util
def _build_experiment_model(experiment_type):
"""Builds model from experiment type configuration w/o loading checkpoint.
  To reduce test latency and avoid unexpected errors (e.g. checkpoint files not
  existing in the dedicated path), we skip checkpoint loading for the tests.
Args:
experiment_type: model type for the experiment.
Returns:
TF/Keras model for the task.
"""
params = exp_factory.get_exp_config(experiment_type)
if 'deeplabv3plus_mobilenet_edgetpuv2' in experiment_type:
params.task.model.backbone.mobilenet_edgetpu.pretrained_checkpoint_path = None
if 'autoseg_edgetpu' in experiment_type:
params.task.model.model_params.model_weights_path = None
params.validate()
params.lock()
task = task_factory.get_task(params.task)
return task.build_model()
def _build_model(config):
model = _build_experiment_model(config.model_name)
model_input = tf.keras.Input(
shape=(config.image_size, config.image_size, 3), batch_size=1)
model_output = export_util.finalize_serving(model(model_input), config)
model_for_inference = tf.keras.Model(model_input, model_output)
return model_for_inference
def _dump_tflite(model, config):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
export_util.configure_tflite_converter(config, converter)
tflite_buffer = converter.convert()
tf.io.gfile.makedirs(os.path.dirname(config.output_dir))
tflite_path = os.path.join(config.output_dir, f'{config.model_name}.tflite')
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_buffer)
return tflite_path
SEG_MODELS = [
'autoseg_edgetpu_xs',
]
FINALIZE_METHODS = [
'resize512,argmax,squeeze', 'resize256,argmax,resize512,squeeze',
'resize128,argmax,resize512,squeeze'
]
class ExportTfliteTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_tflite(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
config.quantization_config.quantize = False
model = _build_model(config)
tflite_path = _dump_tflite(model, config)
self.assertTrue(tf.io.gfile.exists(tflite_path))
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_saved_model(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
model = _build_model(config)
saved_model_path = os.path.join(config.output_dir, config.model_name)
model.save(saved_model_path)
self.assertTrue(tf.saved_model.contains_saved_model(saved_model_path))
@parameterized.parameters(itertools.product(SEG_MODELS, FINALIZE_METHODS))
def test_segmentation_finalize_methods(self, model_name, finalize_method):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name,
image_size=512,
output_dir=tmp_dir,
finalize_method=finalize_method.split(','))
config.quantization_config.quantize = False
model = _build_model(config)
model_input = tf.random.normal([1, config.image_size, config.image_size, 3])
self.assertEqual(
model(model_input).get_shape().as_list(),
[1, config.image_size, config.image_size])
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
8174264
|
<gh_stars>1-10
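# NOTE (editorial assumption): this excerpt omits the module's import block. The class
# below relies on the `requests` package plus the request/endpoint helper classes
# (SignInRequest, AuthEndpoint, SiteEndpoint, WorkbookEndpoint, etc.) and the
# verify_signed_in decorator, all assumed to be imported elsewhere in the package.
import requests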
class TableauServerConnection:
def __init__(self,
config_json,
env='tableau_prod'):
"""
Initialize the TableauServer object.
The config_json parameter requires a valid config file.
The env parameter is a string that indicates which environment to reference from the config file.
:param config_json: The configuration object. This should be a dict / JSON object that defines the
Tableau Server configuration.
:type config_json: JSON or dict
:param env: The environment from the configuration file to use.
:type env: string
"""
self._config = config_json
self._env = env
self.__auth_token = None
self.__site_id = None
self.__user_id = None
self.active_endpoint = None
self.active_request = None
self.active_headers = None
@property
def server(self):
return self._config[self._env]['server']
@property
def api_version(self):
return self._config[self._env]['api_version']
@property
def username(self):
return self._config[self._env]['username']
@property
def password(self):
return self._config[self._env]['password']
@property
def site_name(self):
return self._config[self._env]['site_name']
@property
def site_url(self):
return self._config[self._env]['site_url']
@property
def sign_in_headers(self):
return {
"Content-Type": "application/json",
"Accept": "application/json"
}
@property
def x_auth_header(self):
return {
"X-Tableau-Auth": self.auth_token
}
@property
def default_headers(self):
headers = self.sign_in_headers.copy()
headers.update({"X-Tableau-Auth": self.auth_token})
return headers
@property
def auth_token(self):
return self.__auth_token
@auth_token.setter
def auth_token(self, token_value):
if token_value != self.__auth_token or token_value is None:
self.__auth_token = token_value
else:
raise Exception('You are already signed in with a valid auth token.')
@property
def site_id(self):
return self.__site_id
@site_id.setter
def site_id(self, site_id_value):
if self.site_id != site_id_value:
self.__site_id = site_id_value
else:
            raise Exception('This Tableau Server connection is already connected to the specified site.')
@property
def user_id(self):
return self.__user_id
@user_id.setter
def user_id(self, user_id_value):
self.__user_id = user_id_value
# authentication
def sign_in(self):
request = SignInRequest(ts_connection=self, username=self.username, password=<PASSWORD>).get_request()
endpoint = AuthEndpoint(ts_connection=self, sign_in=True).get_endpoint()
response = requests.post(url=endpoint, json=request, headers=self.sign_in_headers)
if response.status_code == 200:
self.auth_token = response.json()['credentials']['token']
self.site_id = response.json()['credentials']['site']['id']
self.user_id = response.json()['credentials']['user']['id']
@verify_signed_in
def sign_out(self):
endpoint = AuthEndpoint(ts_connection=self, sign_out=True).get_endpoint()
response = requests.post(url=endpoint, headers=self.x_auth_header)
if response.status_code == 204:
self.auth_token = None
self.site_id = None
self.user_id = None
return response
@verify_signed_in
def switch_site(self, site_name):
self.active_request = SwitchSiteRequest(ts_connection=self, site_name=site_name).get_request()
self.active_endpoint = AuthEndpoint(ts_connection=self, switch_site=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
if response.status_code == 200:
self.auth_token = response.json()['credentials']['token']
self.site_id = response.json()['credentials']['site']['id']
self.user_id = response.json()['credentials']['user']['id']
return response
def server_info(self):
self.active_endpoint = AuthEndpoint(ts_connection=self, get_server_info=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
# sites
def create_site(self):
# This method can only be called by server administrators.
print("This method can only be called by server administrators.")
pass
def query_site(self, parameter_dict=None):
self.active_endpoint = SiteEndpoint(ts_connection=self,
query_site=True,
site_id=self.site_id,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_sites(self, parameter_dict=None):
self.active_endpoint = SiteEndpoint(ts_connection=self,
query_sites=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_views_for_site(self, parameter_dict=None):
self.active_endpoint = SiteEndpoint(ts_connection=self,
query_views=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_site(self):
# This method can only be called by server administrators.
print("This method can only be called by server administrators.")
pass
def delete_site(self):
# This method can only be called by server administrators.
print("This method can only be called by server administrators.")
pass
def delete_data_driven_alert(self, data_alert_id):
self.active_endpoint = DataAlertEndpoint(ts_connection=self,
data_alert_id=data_alert_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def query_data_driven_alert_details(self, data_alert_id):
self.active_endpoint = DataAlertEndpoint(ts_connection=self,
query_data_alert=True,
data_alert_id=data_alert_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_data_driven_alerts(self, parameter_dict=None):
self.active_endpoint = DataAlertEndpoint(ts_connection=self,
query_data_alerts=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def add_user_to_data_driven_alert(self, user_id, data_alert_id):
# this appears to be broken on Tableau's side, always returning an internal server error
self.active_request = AddUserToAlertRequest(ts_connection=self, user_id=user_id).get_request()
self.active_endpoint = DataAlertEndpoint(ts_connection=self, add_user=True, user_id=user_id,
data_alert_id=data_alert_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_user_from_data_driven_alert(self, user_id, data_alert_id):
self.active_endpoint = DataAlertEndpoint(ts_connection=self, remove_user=True, user_id=user_id,
data_alert_id=data_alert_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def update_data_driven_alert(self, data_alert_id, subject=None, frequency=None, alert_owner_id=None,
is_public_flag=None):
self.active_request = UpdateDataAlertRequest(ts_connection=self, subject=subject, frequency=frequency,
alert_owner_id=alert_owner_id,
is_public_flag=is_public_flag).get_request()
self.active_endpoint = DataAlertEndpoint(ts_connection=self, data_alert_id=data_alert_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
# flows
def query_flow(self, flow_id):
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id, query_flow=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_flow(self, flow_id):
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id, delete_flow=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, headers=self.active_headers)
return response
def download_flow(self, flow_id):
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id, download_flow=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_flow_connections(self, flow_id):
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id,
query_flow_connections=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_flows_for_site(self):
self.active_endpoint = FlowEndpoint(ts_connection=self, query_flows_for_site=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_flows_for_user(self, user_id, parameter_dict=None):
self.active_endpoint = FlowEndpoint(ts_connection=self, user_id=user_id, query_flows_for_user=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_flow(self, flow_id, new_project_id=None, new_owner_id=None):
self.active_request = UpdateFlowRequest(ts_connection=self, new_project_id=new_project_id,
new_owner_id=new_owner_id).get_request()
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id, update_flow=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_flow_connection(self, flow_id, connection_id, server_address=None, port=None, connection_username=None,
connection_password=<PASSWORD>, embed_password_flag=None):
"""Note that you must set the connection_password='' if changing the embed_password_flag from True to False"""
self.active_request = UpdateFlowConnectionRequest(ts_connection=self, server_address=server_address, port=port,
connection_username=connection_username,
connection_password=<PASSWORD>,
embed_password_flag=embed_password_flag).get_request()
self.active_endpoint = FlowEndpoint(ts_connection=self, flow_id=flow_id, connection_id=connection_id,
update_flow_connection=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
# projects
def create_project(self, project_name, project_description=None, content_permissions='ManagedByOwner',
parent_project_id=None, parameter_dict=None):
self.active_request = CreateProjectRequest(ts_connection=self, project_name=project_name,
project_description=project_description,
content_permissions=content_permissions,
parent_project_id=parent_project_id).get_request()
self.active_endpoint = ProjectEndpoint(ts_connection=self, create_project=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def query_projects(self, parameter_dict=None):
self.active_endpoint = ProjectEndpoint(ts_connection=self, query_projects=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_project(self, project_id, project_name=None, project_description=None, content_permissions=None,
parent_project_id=None):
self.active_request = UpdateProjectRequest(ts_connection=self, project_name=project_name,
project_description=project_description,
content_permissions=content_permissions,
parent_project_id=parent_project_id).get_request()
self.active_endpoint = ProjectEndpoint(ts_connection=self, update_project=True,
project_id=project_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_project(self, project_id):
self.active_endpoint = ProjectEndpoint(ts_connection=self, project_id=project_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# workbooks and views
def add_tags_to_view(self, view_id, tags):
self.active_request = AddTagsRequest(ts_connection=self, tags=tags).get_request()
self.active_endpoint = ViewEndpoint(ts_connection=self, view_id=view_id, add_tags=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_tags_to_workbook(self, workbook_id, tags):
self.active_request = AddTagsRequest(ts_connection=self, tags=tags).get_request()
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
add_tags=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
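    # NOTE: the following definition of query_views_for_site shadows the SiteEndpoint-based
    # version declared earlier in the "sites" section; only this ViewEndpoint-based variant
    # takes effect at runtime.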
def query_views_for_site(self, parameter_dict=None):
self.active_endpoint = ViewEndpoint(ts_connection=self, query_views=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_views_for_workbook(self, workbook_id, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, query_views=True, workbook_id=workbook_id,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_view_data(self, view_id, parameter_dict=None):
# the CSV returned is in the response body as response.content
self.active_endpoint = ViewEndpoint(ts_connection=self, view_id=view_id, query_view_data=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_view_image(self, view_id, parameter_dict=None):
# the image returned is in the response body as response.content
self.active_endpoint = ViewEndpoint(ts_connection=self, view_id=view_id, query_view_image=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_view_pdf(self, view_id, parameter_dict=None):
# the PDF returned is in the response body as response.content
self.active_endpoint = ViewEndpoint(ts_connection=self, view_id=view_id, query_view_pdf=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_view_preview_image(self, workbook_id, view_id, parameter_dict=None):
# the preview thumbnail image returned is in the response body as response.content
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id, view_id=view_id,
query_workbook_view_preview_img=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbook(self, workbook_id, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id, query_workbook=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbook_connections(self, workbook_id, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id, query_connections=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_workbook_revisions(self, workbook_id, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
get_workbook_revisions=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def remove_workbook_revision(self, workbook_id, revision_number):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
revision_number=revision_number,
remove_workbook_revision=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbook_preview_image(self, workbook_id, parameter_dict=None):
# the preview image returned is in the response body as response.content
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
query_workbook_preview_img=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbooks_for_site(self, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, query_workbooks=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbooks_for_user(self, user_id, parameter_dict=None):
self.active_endpoint = UserEndpoint(ts_connection=self, user_id=user_id, query_workbooks_for_user=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def download_workbook(self, workbook_id, parameter_dict=None):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id, download_workbook=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def download_workbook_revision(self, workbook_id, revision_number, parameter_dict=None):
# this method only works for workbook versions that are NOT the current version
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
revision_number=revision_number,
download_workbook_revision=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_workbook(self, workbook_id, show_tabs_flag=None, project_id=None, owner_id=None):
self.active_request = UpdateWorkbookRequest(ts_connection=self, show_tabs_flag=show_tabs_flag,
project_id=project_id, owner_id=owner_id).get_request()
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
update_workbook=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_workbook_connection(self, workbook_id, connection_id, server_address=None, port=None,
connection_username=None,
connection_password=<PASSWORD>, embed_password_flag=None, parameter_dict=None):
# fails to execute correctly on Tableau Server's side
self.active_request = UpdateWorkbookConnectionRequest(ts_connection=self, server_address=server_address,
port=port,
connection_username=connection_username,
connection_password=<PASSWORD>,
embed_password_flag=embed_password_flag).get_request()
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
connection_id=connection_id,
update_workbook_connection=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
    def update_workbook_now(self, workbook_id):
self.active_request = EmptyRequest(ts_connection=self).get_request()
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
refresh_workbook=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_workbook(self, workbook_id):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id,
delete_workbook=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_tag_from_view(self, view_id, tag_name):
self.active_endpoint = ViewEndpoint(ts_connection=self, view_id=view_id, tag_name=tag_name,
delete_tag=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_tag_from_workbook(self, workbook_id, tag_name):
self.active_endpoint = WorkbookEndpoint(ts_connection=self, workbook_id=workbook_id, tag_name=tag_name,
delete_tag=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# data sources
def add_tags_to_data_source(self, datasource_id, tags):
self.active_request = AddTagsRequest(ts_connection=self, tags=tags).get_request()
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
add_tags=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_tag_from_data_source(self, datasource_id, tag_name):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id, tag_name=tag_name,
delete_tag=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def query_data_source(self, datasource_id):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
query_datasource=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_data_sources(self, parameter_dict=None):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, query_datasources=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_data_source_connections(self, datasource_id):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
query_datasource_connections=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_data_source_revisions(self, datasource_id, parameter_dict=None):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
get_datasource_revisions=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def download_data_source_revision(self, datasource_id, revision_number, parameter_dict=None):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
revision_number=revision_number,
download_datasource_revision=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_data_source(self, datasource_id, new_project_id=None, new_owner_id=None, is_certified_flag=None,
certification_note=None):
"""Note that assigning an embedded extract will remain in the same project as its workbook, even if the response indicates it has moved"""
self.active_request = UpdateDatasourceRequest(ts_connection=self, new_project_id=new_project_id,
new_owner_id=new_owner_id,
is_certified_flag=is_certified_flag,
certification_note=certification_note).get_request()
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
update_datasource=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_data_source_connection(self, datasource_id, connection_id, server_address=None, port=None,
connection_username=None,
connection_password=<PASSWORD>, embed_password_flag=None):
"""Note that you must set the connection_password='' if changing the embed_password_flag from True to False"""
self.active_request = UpdateDatasourceConnectionRequest(ts_connection=self, server_address=server_address,
port=port,
connection_username=connection_username,
connection_password=<PASSWORD>,
embed_password_flag=embed_password_flag).get_request()
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
connection_id=connection_id,
update_datasource_connection=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_data_source_now(self, datasource_id):
self.active_request = EmptyRequest(ts_connection=self).get_request()
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
refresh_datasource=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_data_source(self, datasource_id):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
delete_datasource=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def remove_data_source_revision(self, datasource_id, revision_number):
self.active_endpoint = DatasourceEndpoint(ts_connection=self, datasource_id=datasource_id,
revision_number=revision_number,
remove_datasource_revision=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# users and groups
def create_group(self, new_group_name, active_directory_group_name=None, active_directory_domain_name=None,
default_site_role=None, parameter_dict=None):
self.active_request = CreateGroupRequest(ts_connection=self, new_group_name=new_group_name,
active_directory_group_name=active_directory_group_name,
active_directory_domain_name=active_directory_domain_name,
default_site_role=default_site_role).get_request()
self.active_endpoint = GroupEndpoint(ts_connection=self, create_group=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_user_to_group(self, group_id, user_id):
self.active_request = AddUserToGroupRequest(ts_connection=self, user_id=user_id).get_request()
self.active_endpoint = GroupEndpoint(ts_connection=self, group_id=group_id, add_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_user_to_site(self, user_name, site_role, auth_setting=None):
self.active_request = AddUserToSiteRequest(ts_connection=self, user_name=user_name,
site_role=site_role, auth_setting=auth_setting).get_request()
self.active_endpoint = UserEndpoint(ts_connection=self, add_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def get_users_in_group(self, group_id, parameter_dict=None):
self.active_endpoint = GroupEndpoint(ts_connection=self, group_id=group_id, get_users=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_users_on_site(self, parameter_dict=None):
self.active_endpoint = UserEndpoint(ts_connection=self, query_users=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_groups(self, parameter_dict=None):
self.active_endpoint = GroupEndpoint(ts_connection=self, query_groups=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_user_on_site(self, user_id):
self.active_endpoint = UserEndpoint(ts_connection=self, user_id=user_id, query_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_group(self, group_id, new_group_name=None, active_directory_group_name=None,
active_directory_domain_name=None,
default_site_role=None, parameter_dict=None):
self.active_request = UpdateGroupRequest(ts_connection=self, new_group_name=new_group_name,
active_directory_group_name=active_directory_group_name,
active_directory_domain_name=active_directory_domain_name,
default_site_role=default_site_role).get_request()
self.active_endpoint = GroupEndpoint(ts_connection=self, group_id=group_id, update_group=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_user(self, user_id, new_full_name=None, new_email=None, new_password=<PASSWORD>, new_site_role=None,
new_auth_setting=None):
self.active_request = UpdateUserRequest(ts_connection=self, new_full_name=new_full_name, new_email=new_email,
new_password=<PASSWORD>, new_site_role=new_site_role,
new_auth_setting=new_auth_setting).get_request()
self.active_endpoint = UserEndpoint(ts_connection=self, user_id=user_id, update_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.default_headers)
return response
def remove_user_from_group(self, group_id, user_id):
self.active_endpoint = GroupEndpoint(ts_connection=self, group_id=group_id, user_id=user_id,
remove_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def remove_user_from_site(self, user_id):
self.active_endpoint = UserEndpoint(ts_connection=self, user_id=user_id, remove_user=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_group(self, group_id):
self.active_endpoint = GroupEndpoint(ts_connection=self, group_id=group_id, delete_group=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# permissions
def add_data_source_permissions(self, datasource_id, user_capability_dict=None, group_capability_dict=None,
user_id=None, group_id=None):
self.active_request = AddDatasourcePermissionsRequest(ts_connection=self, datasource_id=datasource_id,
user_id=user_id, group_id=group_id,
user_capability_dict=user_capability_dict,
group_capability_dict=group_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='datasource',
object_id=datasource_id,
add_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_flow_permissions(self, flow_id, user_capability_dict=None, group_capability_dict=None, user_id=None,
group_id=None):
self.active_request = AddFlowPermissionsRequest(ts_connection=self, user_id=user_id, group_id=group_id,
user_capability_dict=user_capability_dict,
group_capability_dict=group_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='flow', object_id=flow_id,
add_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_project_permissions(self, project_id, user_capability_dict=None, group_capability_dict=None, user_id=None,
group_id=None):
self.active_request = AddProjectPermissionsRequest(ts_connection=self, user_id=user_id, group_id=group_id,
user_capability_dict=user_capability_dict,
group_capability_dict=group_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='project', object_id=project_id,
add_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_default_permissions(self, project_id, project_permissions_object, group_id=None,
user_id=None, user_capability_dict=None, group_capability_dict=None):
self.active_request = AddDefaultPermissionsRequest(ts_connection=self,
group_id=group_id,
user_id=user_id,
group_capability_dict=group_capability_dict,
user_capability_dict=user_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self,
project_id=project_id,
project_permissions_object=project_permissions_object,
add_default_project_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_view_permissions(self, view_id, user_capability_dict=None, group_capability_dict=None, user_id=None,
group_id=None):
self.active_request = AddViewPermissionsRequest(ts_connection=self, view_id=view_id, user_id=user_id,
group_id=group_id,
user_capability_dict=user_capability_dict,
group_capability_dict=group_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='view', object_id=view_id,
add_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_workbook_permissions(self, workbook_id, user_capability_dict=None, group_capability_dict=None, user_id=None,
group_id=None):
self.active_request = AddWorkbookPermissionsRequest(ts_connection=self, workbook_id=workbook_id,
user_id=user_id, group_id=group_id,
user_capability_dict=user_capability_dict,
group_capability_dict=group_capability_dict).get_request()
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='workbook', object_id=workbook_id,
add_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers.copy()
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def query_data_source_permissions(self, datasource_id):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='datasource',
object_id=datasource_id,
query_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_flow_permissions(self, flow_id):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='flow', object_id=flow_id,
query_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_project_permissions(self, project_id):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='project', object_id=project_id,
query_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_default_permissions(self, project_id, project_permissions_object):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, project_id=project_id,
project_permissions_object=project_permissions_object,
query_default_project_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_view_permissions(self, view_id):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='view', object_id=view_id,
query_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_workbook_permissions(self, workbook_id):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='workbook', object_id=workbook_id,
query_object_permissions=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_data_source_permission(self, datasource_id, delete_permissions_object, delete_permissions_object_id,
capability_name, capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='datasource',
object_id=datasource_id, delete_object_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_flow_permission(self, flow_id, delete_permissions_object, delete_permissions_object_id, capability_name,
capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='flow', object_id=flow_id,
delete_object_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_project_permission(self, project_id, delete_permissions_object, delete_permissions_object_id,
capability_name, capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='project', object_id=project_id,
delete_object_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_default_permission(self, project_id, project_permissions_object, delete_permissions_object,
delete_permissions_object_id,
capability_name, capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self,
project_id=project_id,
project_permissions_object=project_permissions_object,
delete_default_project_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_view_permission(self, view_id, delete_permissions_object, delete_permissions_object_id,
capability_name, capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='view', object_id=view_id,
delete_object_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_workbook_permission(self, workbook_id, delete_permissions_object, delete_permissions_object_id,
capability_name, capability_mode):
self.active_endpoint = PermissionsEndpoint(ts_connection=self, object_type='workbook', object_id=workbook_id,
delete_object_permissions=True,
delete_permissions_object=delete_permissions_object,
delete_permissions_object_id=delete_permissions_object_id,
capability_name=capability_name,
capability_mode=capability_mode).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# jobs, tasks, and schedules
def add_data_source_to_schedule(self, datasource_id, schedule_id):
self.active_request = AddDatasourceToScheduleRequest(ts_connection=self,
datasource_id=datasource_id).get_request()
self.active_endpoint = SchedulesEndpoint(ts_connection=self, schedule_id=schedule_id,
add_datasource=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_flow_task_to_schedule(self, flow_id, schedule_id):
self.active_request = AddFlowToScheduleRequest(ts_connection=self, flow_id=flow_id).get_request()
self.active_endpoint = SchedulesEndpoint(ts_connection=self, schedule_id=schedule_id,
add_flow=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_workbook_to_schedule(self, workbook_id, schedule_id):
self.active_request = AddWorkbookToScheduleRequest(ts_connection=self, workbook_id=workbook_id).get_request()
self.active_endpoint = SchedulesEndpoint(ts_connection=self, schedule_id=schedule_id,
add_workbook=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def cancel_job(self, job_id):
self.active_endpoint = JobsEndpoint(ts_connection=self, job_id=job_id, cancel_job=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, headers=self.active_headers)
return response
def query_job(self, job_id):
self.active_endpoint = JobsEndpoint(ts_connection=self, job_id=job_id, query_job=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_jobs(self, parameter_dict=None):
self.active_endpoint = JobsEndpoint(ts_connection=self, query_jobs=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_extract_refresh_task(self, task_id):
self.active_endpoint = TasksEndpoint(ts_connection=self, task_id=task_id, get_refresh_task=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_extract_refresh_tasks(self):
self.active_endpoint = TasksEndpoint(ts_connection=self, get_refresh_tasks=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_flow_run_task(self, task_id):
self.active_endpoint = TasksEndpoint(ts_connection=self, task_id=task_id, get_flow_run_task=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def get_flow_run_tasks(self):
self.active_endpoint = TasksEndpoint(ts_connection=self, get_flow_run_tasks=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def create_schedule(self, schedule_name, schedule_priority=50, schedule_type='Extract',
schedule_execution_order='Parallel', schedule_frequency='Weekly',
start_time='07:00:00', end_time='23:00:00', interval_expression_dict={'weekDay': 'Monday'}):
self.active_request = CreateScheduleRequest(ts_connection=self, schedule_name=schedule_name,
schedule_priority=schedule_priority, schedule_type=schedule_type,
schedule_execution_order=schedule_execution_order,
schedule_frequency=schedule_frequency,
start_time=start_time, end_time=end_time,
interval_expression_dict=interval_expression_dict).get_request()
self.active_endpoint = SchedulesEndpoint(ts_connection=self, create_schedule=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def query_extract_refresh_tasks(self, schedule_id, parameter_dict=None):
self.active_endpoint = TasksEndpoint(ts_connection=self, query_schedule_refresh_tasks=True,
schedule_id=schedule_id,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_schedules(self, parameter_dict=None):
self.active_endpoint = SchedulesEndpoint(ts_connection=self, query_schedules=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def run_extract_refresh_task(self, task_id):
self.active_request = EmptyRequest(ts_connection=self).get_request()
self.active_endpoint = TasksEndpoint(ts_connection=self, task_id=task_id, run_refresh_task=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def run_flow_task(self, task_id):
self.active_request = EmptyRequest(ts_connection=self).get_request()
self.active_endpoint = TasksEndpoint(ts_connection=self, task_id=task_id, run_flow_task=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def update_schedule(self, schedule_id, schedule_name=None, schedule_priority=None, schedule_type=None,
schedule_execution_order=None,
schedule_frequency=None, start_time=None, end_time=None, interval_expression_dict=None):
self.active_request = UpdateScheduleRequest(ts_connection=self, schedule_name=schedule_name,
schedule_priority=schedule_priority, schedule_type=schedule_type,
schedule_execution_order=schedule_execution_order,
schedule_frequency=schedule_frequency,
start_time=start_time, end_time=end_time,
interval_expression_dict=interval_expression_dict).get_request()
self.active_endpoint = SchedulesEndpoint(ts_connection=self, schedule_id=schedule_id,
update_schedule=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_schedule(self, schedule_id):
self.active_endpoint = SchedulesEndpoint(ts_connection=self, schedule_id=schedule_id,
delete_schedule=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# subscriptions
def create_subscription(self, subscription_subject, content_type, content_id, schedule_id, user_id):
self.active_request = CreateSubscriptionRequest(ts_connection=self, subscription_subject=subscription_subject,
content_type=content_type,
content_id=content_id, schedule_id=schedule_id,
user_id=user_id).get_request()
self.active_endpoint = SubscriptionsEndpoint(ts_connection=self, create_subscription=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def query_subscription(self, subscription_id):
self.active_endpoint = SubscriptionsEndpoint(ts_connection=self, subscription_id=subscription_id,
query_subscription=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def query_subscriptions(self, parameter_dict=None):
self.active_endpoint = SubscriptionsEndpoint(ts_connection=self, query_subscriptions=True,
parameter_dict=parameter_dict).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
def update_subscription(self, subscription_id, new_subscription_subject=None, new_schedule_id=None):
self.active_request = UpdateSubscriptionRequest(ts_connection=self, new_schedule_id=new_schedule_id,
new_subscription_subject=new_subscription_subject).get_request()
self.active_endpoint = SubscriptionsEndpoint(ts_connection=self, subscription_id=subscription_id,
update_subscription=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_subscription(self, subscription_id):
self.active_endpoint = SubscriptionsEndpoint(ts_connection=self, subscription_id=subscription_id,
delete_subscription=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
# favorites
def add_data_source_to_favorites(self, datasource_id, user_id, favorite_label):
self.active_request = AddDatasourceToFavoritesRequest(ts_connection=self, datasource_id=datasource_id,
favorite_label=favorite_label).get_request()
self.active_endpoint = FavoritesEndpoint(ts_connection=self, add_to_favorites=True,
user_id=user_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_project_to_favorites(self, project_id, user_id, favorite_label):
self.active_request = AddProjectToFavoritesRequest(ts_connection=self, project_id=project_id,
favorite_label=favorite_label).get_request()
self.active_endpoint = FavoritesEndpoint(ts_connection=self, add_to_favorites=True,
user_id=user_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_view_to_favorites(self, view_id, user_id, favorite_label):
self.active_request = AddViewToFavoritesRequest(ts_connection=self, view_id=view_id,
favorite_label=favorite_label).get_request()
self.active_endpoint = FavoritesEndpoint(ts_connection=self, add_to_favorites=True,
user_id=user_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def add_workbook_to_favorites(self, workbook_id, user_id, favorite_label):
self.active_request = AddWorkbookToFavoritesRequest(ts_connection=self, workbook_id=workbook_id,
favorite_label=favorite_label).get_request()
self.active_endpoint = FavoritesEndpoint(ts_connection=self, add_to_favorites=True,
user_id=user_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.put(url=self.active_endpoint, json=self.active_request, headers=self.active_headers)
return response
def delete_data_source_from_favorites(self, datasource_id, user_id):
self.active_endpoint = FavoritesEndpoint(ts_connection=self, object_type='datasource', object_id=datasource_id,
user_id=user_id, delete_from_favorites=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_project_from_favorites(self, project_id, user_id):
self.active_endpoint = FavoritesEndpoint(ts_connection=self, object_type='project', object_id=project_id,
user_id=user_id, delete_from_favorites=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_view_from_favorites(self, view_id, user_id):
self.active_endpoint = FavoritesEndpoint(ts_connection=self, object_type='view', object_id=view_id,
user_id=user_id, delete_from_favorites=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def delete_workbook_from_favorites(self, workbook_id, user_id):
self.active_endpoint = FavoritesEndpoint(ts_connection=self, object_type='workbook', object_id=workbook_id,
user_id=user_id, delete_from_favorites=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.delete(url=self.active_endpoint, headers=self.active_headers)
return response
def get_favorites_for_user(self, user_id):
self.active_endpoint = FavoritesEndpoint(ts_connection=self, get_user_favorites=True,
user_id=user_id).get_endpoint()
self.active_headers = self.default_headers
response = requests.get(url=self.active_endpoint, headers=self.active_headers)
return response
# publishing
def initiate_file_upload(self):
self.active_endpoint = FileUploadEndpoint(ts_connection=self, initiate_file_upload=True).get_endpoint()
self.active_headers = self.default_headers
response = requests.post(url=self.active_endpoint, headers=self.active_headers)
return response
def append_to_file_upload(self, upload_session_id, payload, content_type):
self.active_endpoint = FileUploadEndpoint(ts_connection=self, append_to_file_upload=True,
upload_session_id=upload_session_id).get_endpoint()
self.active_headers = self.default_headers.copy()
self.active_headers.update({'content-type': content_type})
response = requests.put(url=self.active_endpoint, data=payload, headers=self.active_headers)
return response
    def publish_data_source(self, datasource_file_path, datasource_name, project_id, connection_username=None,
                            connection_password=None,
                            embed_credentials_flag=False, oauth_flag=False, parameter_dict={}):
publish_request = PublishDatasourceRequest(ts_connection=self,
datasource_name=datasource_name,
datasource_file_path=datasource_file_path,
project_id=project_id,
connection_username=connection_username,
                                                   connection_password=connection_password,
embed_credentials_flag=embed_credentials_flag,
oauth_flag=oauth_flag)
self.active_request, content_type = publish_request.get_request()
self.active_headers, parameter_dict = publish_request.publish_prep(content_type, parameter_dict=parameter_dict)
self.active_endpoint = DatasourceEndpoint(ts_connection=self, publish_datasource=True,
parameter_dict=parameter_dict).get_endpoint()
response = requests.post(url=self.active_endpoint, data=self.active_request, headers=self.active_headers)
return response
    def publish_workbook(self, workbook_file_path, workbook_name, project_id, show_tabs_flag=False,
                         user_id=None, server_address=None, port_number=None, connection_username=None,
                         connection_password=None,
                         embed_credentials_flag=False, oauth_flag=False, workbook_views_to_hide=None,
                         hide_view_flag=False, parameter_dict={}):
publish_request = PublishWorkbookRequest(ts_connection=self,
workbook_name=workbook_name,
workbook_file_path=workbook_file_path,
project_id=project_id,
show_tabs_flag=show_tabs_flag,
user_id=user_id,
server_address=server_address,
port_number=port_number,
connection_username=connection_username,
                                                 connection_password=connection_password,
embed_credentials_flag=embed_credentials_flag,
oauth_flag=oauth_flag,
workbook_views_to_hide=workbook_views_to_hide,
hide_view_flag=hide_view_flag)
self.active_request, content_type = publish_request.get_request()
self.active_headers, parameter_dict = publish_request.publish_prep(content_type, parameter_dict=parameter_dict)
self.active_endpoint = WorkbookEndpoint(ts_connection=self, publish_workbook=True,
parameter_dict=parameter_dict).get_endpoint()
response = requests.post(url=self.active_endpoint, data=self.active_request, headers=self.active_headers)
return response
    def publish_flow(self, flow_file_path, flow_name, project_id, flow_description=None, server_address=None,
                     port_number=None,
                     connection_username=None, connection_password=None, embed_credentials_flag=False, oauth_flag=False,
                     parameter_dict={}):
publish_request = PublishFlowRequest(ts_connection=self,
flow_file_path=flow_file_path,
flow_name=flow_name,
project_id=project_id,
flow_description=flow_description,
server_address=server_address,
port_number=port_number,
connection_username=connection_username,
                                             connection_password=connection_password,
embed_credentials_flag=embed_credentials_flag,
oauth_flag=oauth_flag)
self.active_request, content_type = publish_request.get_request()
self.active_headers, parameter_dict = publish_request.publish_prep(content_type, parameter_dict=parameter_dict)
self.active_endpoint = FlowEndpoint(ts_connection=self, publish_flow=True,
parameter_dict=parameter_dict).get_endpoint()
response = requests.post(url=self.active_endpoint, data=self.active_request, headers=self.active_headers)
return response
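    # Hypothetical usage sketch (not part of the original class): create a weekly
    # extract schedule and attach a workbook to it using the methods defined above.
    # The schedule name, the JSON response parsing ('schedule' -> 'id'), and the
    # assumption that the connection is already signed in are illustrative only.
    def schedule_weekly_workbook_refresh(self, workbook_id, schedule_name='Weekly refresh'):
        create_response = self.create_schedule(schedule_name=schedule_name,
                                               schedule_type='Extract',
                                               schedule_frequency='Weekly',
                                               interval_expression_dict={'weekDay': 'Monday'})
        schedule_id = create_response.json()['schedule']['id']
        return self.add_workbook_to_schedule(workbook_id=workbook_id, schedule_id=schedule_id)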
|
StarcoderdataPython
|
3540533
|
<gh_stars>1-10
# This file is part of beets.
# Copyright 2013, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Get a random song or album from the library.
"""
from __future__ import absolute_import
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs, print_obj
from beets.util.functemplate import Template
import random
def random_item(lib, opts, args):
query = decargs(args)
if opts.path:
fmt = '$path'
else:
fmt = opts.format
template = Template(fmt) if fmt else None
if opts.album:
objs = list(lib.albums(query=query))
else:
objs = list(lib.items(query=query))
number = min(len(objs), opts.number)
objs = random.sample(objs, number)
for item in objs:
print_obj(item, lib, template)
random_cmd = Subcommand('random',
                        help='choose a random track or album')
random_cmd.parser.add_option('-a', '--album', action='store_true',
help='choose an album instead of track')
random_cmd.parser.add_option('-p', '--path', action='store_true',
help='print the path of the matched item')
random_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
random_cmd.parser.add_option('-n', '--number', action='store', type="int",
help='number of objects to choose', default=1)
random_cmd.func = random_item
class Random(BeetsPlugin):
def commands(self):
return [random_cmd]
|
StarcoderdataPython
|
3516119
|
<filename>beartype_test/a00_unit/a00_util/cache/pool/test_utilcachepoolobjecttyped.py
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype utility typed object pool unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.cache.pool.utilcachepoolobjecttyped` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from io import StringIO
from pytest import raises
# ....................{ TESTS ~ pool }....................
def test_objecttyped_pool_pass() -> None:
'''
Test successful usage of the
:mod:`beartype._util.cache.pool.utilcachepoolobjecttyped` submodule.
'''
# Defer heavyweight imports.
from beartype._util.cache.pool.utilcachepoolobjecttyped import (
acquire_object_typed, release_object_typed)
# Culturally relevant Clash lyrics to be tested below.
PUBLIC_SERVICE_ANNOUNCEMENT = '\n'.join((
'You have the right not to be killed.',
'Murder is a crime,',
'Unless it was done',
'By a policeman',
'Or an aristocrat.',
))
KNOW_YOUR_RIGHTS = '\n'.join((
'You have the right to food money --',
'Providing, of course, you',
"Don't mind a little",
'Investigation, humiliation,',
'And (if you cross your fingers)',
'Rehabilitation.',
))
# Acquire an arbitrary string buffer.
public_service_announcement = acquire_object_typed(cls=StringIO)
# Clear this buffer and reset its position to the start.
public_service_announcement.truncate(0)
public_service_announcement.seek(0)
# Write a series of culturally relevant Clash lyrics to this buffer.
public_service_announcement.write('You have the right not to be killed.\n')
public_service_announcement.write('Murder is a crime,\n')
public_service_announcement.write('Unless it was done\n')
public_service_announcement.write('By a policeman\n')
public_service_announcement.write('Or an aristocrat.')
# Acquire another arbitrary string buffer.
know_your_rights = acquire_object_typed(cls=StringIO)
# Clear this buffer and reset its position to the start.
know_your_rights.truncate(0)
know_your_rights.seek(0)
# Write another series of culturally relevant Clash lyrics to this buffer.
know_your_rights.write('You have the right to food money --\n')
know_your_rights.write('Providing, of course, you\n')
know_your_rights.write("Don't mind a little\n")
know_your_rights.write('Investigation, humiliation,\n')
know_your_rights.write('And (if you cross your fingers)\n')
know_your_rights.write('Rehabilitation.')
# Assert the contents of these buffers to still be as expected.
assert (
public_service_announcement.getvalue() == PUBLIC_SERVICE_ANNOUNCEMENT)
assert know_your_rights.getvalue() == KNOW_YOUR_RIGHTS
# Release the first buffer back to its parent pool.
release_object_typed(public_service_announcement)
# Reacquire the same buffer again.
public_service_announcement_too = acquire_object_typed(cls=StringIO)
# Assert this to be the same buffer.
assert public_service_announcement is public_service_announcement_too
# Assert the second buffer to *NOT* be the same buffer.
assert public_service_announcement is not know_your_rights
# Release these buffers back to their parent pools (in acquisition order).
release_object_typed(public_service_announcement)
release_object_typed(know_your_rights)
def test_objecttyped_pool_fail() -> None:
'''
Test unsuccessful usage of the
:mod:`beartype._util.cache.pool.utilcachepoolobjecttyped` submodule.
'''
# Defer heavyweight imports.
from beartype._util.cache.pool.utilcachepoolobjecttyped import (
acquire_object_typed)
from beartype.roar._roarexc import _BeartypeUtilCachedObjectTypedException
# Assert that typed objects may only be acquired with types.
with raises(_BeartypeUtilCachedObjectTypedException):
acquire_object_typed((
'You have the right to free speech',
'As long as',
"You're not dumb enough to actually try it.",
))
with raises(_BeartypeUtilCachedObjectTypedException):
acquire_object_typed(1977)
|
StarcoderdataPython
|
5096626
|
from ..utils.importing import import_file
class Regressor(object):
def __init__(self, workflow_element_names=['regressor']):
self.element_names = workflow_element_names
# self.name = 'regressor_workflow' # temporary
def train_submission(self, module_path, X_array, y_array, train_is=None):
if train_is is None:
train_is = slice(None, None, None)
regressor = import_file(module_path, self.element_names[0])
reg = regressor.Regressor()
reg.fit(X_array[train_is], y_array[train_is])
return reg
def test_submission(self, trained_model, X_array):
reg = trained_model
y_pred = reg.predict(X_array)
return y_pred
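# Hypothetical usage sketch (not part of the original module): train and score a
# submission with the workflow above. The submission path is a placeholder and is
# assumed to contain a `regressor.py` file defining a `Regressor` class with
# fit/predict methods.
def _example_run(module_path='submissions/starting_kit'):
    import numpy as np
    workflow = Regressor()
    X_array = np.random.rand(100, 5)
    y_array = np.random.rand(100)
    trained_model = workflow.train_submission(module_path, X_array, y_array, train_is=range(80))
    return workflow.test_submission(trained_model, X_array[80:])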
|
StarcoderdataPython
|
12840755
|
<reponame>TLasguignes/signal_scope
'''
A working example for signals from Anymal
Plots x,y,z in position and the yaw angle
'''
import numpy
import sys
sys.argv = ['test']
import tf
def getYawDegrees(msg):
'''yaw degrees'''
quaternion = (
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
return msg.header.stamp, euler[2]*180.0/numpy.pi
def getPositionNorm(msg):
'''position magnitude'''
x = [msg.pose.pose.position.x, msg.pose.pose.position.y, msg.pose.pose.position.z]
return msg.header.stamp, numpy.linalg.norm(x)
def getVelocity(msg):
'''velocity in m/sec'''
vel = [msg.twist.twist.linear.x, msg.twist.twist.linear.y, msg.twist.twist.linear.z]
return msg.header.stamp, numpy.linalg.norm(vel)
addPlot(timeWindow=20, yLimits=[-10, 10])
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.x)
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.y)
addSignal('/state_estimator/anymal_state', msg.header.stamp, msg.pose.pose.position.z)
addSignalFunction('/state_estimator/anymal_state', getPositionNorm)
addPlot(timeWindow=20, yLimits=[-180, 180])
addSignalFunction('/state_estimator/anymal_state', getYawDegrees)
addPlot(timeWindow=20, yLimits=[-2, 2])
addSignalFunction('/state_estimator/anymal_state', getVelocity)
|
StarcoderdataPython
|
4929638
|
<gh_stars>1-10
import numpy as np
import torch
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as to_pytorch
except ImportError:
dali = None
if not torch.cuda.is_available():
raise RuntimeError("DALI requires CUDA support.")
seed = 1549361629
class _DaliImageDecoderPipeline(dali.pipeline.Pipeline):
def __init__(self, batch_size: int, num_threads: int, device_id: int):
super(_DaliImageDecoderPipeline, self).__init__(
batch_size, num_threads, device_id, seed = seed
)
self.input = dali.ops.ExternalSource()
#self.decode = dali.ops.ImageDecoder(
# device='mixed', output_type=dali.types.RGB
#)
self.pos_rng_x = dali.ops.Uniform(range = (0.0, 1.0))
self.pos_rng_y = dali.ops.Uniform(range = (0.0, 1.0))
self.decode = dali.ops.ImageDecoderCrop(
device='mixed', output_type=dali.types.RGB, crop=(64, 64))
@property
def data(self):
return self._data
def set_data(self, data):
self._data = data
def define_graph(self):
self.jpegs = self.input()
#images = self.decode(self.jpegs)
pos_x = self.pos_rng_x()
pos_y = self.pos_rng_y()
images = self.decode(self.jpegs, crop_pos_x=pos_x, crop_pos_y=pos_y)
return images
def iter_setup(self):
images = self.data
self.feed_input(self.jpegs, images, layout="HWC")
class _DaliImageDecoder:
def __init__(self, batch_size: int, num_workers: int, device: torch.device) -> None:
self._pipe = _DaliImageDecoderPipeline(batch_size, num_workers, device)
self._pipe.build()
self._device = device
def __call__(self, input):
# set data and run the pipeline
self._pipe.set_data(input)
out_pipe = self._pipe.run()
# retrieve dali tensor
d_images: nvidia.dali.backend_impl.TensorGPU = out_pipe[0].as_tensor()
# create torch tensor header with expected size
t_images = torch.empty(
d_images.shape(), dtype=torch.uint8, device=self._device)
# populate torch tensor with dali tensor
to_pytorch.feed_ndarray(d_images, t_images)
t_images = t_images.permute([0, 3, 1, 2])
return t_images
class DaliImageCollateWrapper:
def __init__(self, batch_size: int, device: torch.device):
self._decoder = _DaliImageDecoder(batch_size, 8, device.index)
self._device = torch.device("cuda:0")
def __call__(self, input):
images = [data[0] for data in input]
labels = [data[1] for data in input]
t_images = self._decoder(images)
t_labels = torch.tensor(labels, device=t_images.device)
return t_images, t_labels
class DaliImageReader:
    def __init__(self, device: torch.device, decode: bool = False) -> None:
        self._decoder = _DaliImageDecoder(1, 8, device.index)
        self._decode = decode
    def __call__(self, image_file: str) -> torch.Tensor:
        # Read the raw encoded bytes; decode on the GPU only when requested.
        with open(image_file, 'rb') as f:
            np_array = np.frombuffer(f.read(), dtype=np.uint8)
        if self._decode:
            return self._decoder([np_array])
        return np_array
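# Hypothetical usage sketch (not part of the original module): decode a single JPEG
# on the GPU with the reader above. The file path is a placeholder and a
# CUDA-capable device with DALI installed is assumed.
if __name__ == '__main__':
    reader = DaliImageReader(torch.device('cuda:0'), decode=True)
    decoded_image = reader('example.jpg')
    print(decoded_image.shape)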
|
StarcoderdataPython
|
8189924
|
<gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
from datetime import datetime
from dateutil.tz import tzlocal
try:
from django.utils.timezone import now as dj_now # pylint:disable=import-error
except ImportError:
dj_now = None
def get_timezone(tz=None):
from polyaxon import settings
tz = tz or settings.CLIENT_CONFIG.timezone
if tz:
return pytz.timezone(tz)
return tzlocal()
def now(tzinfo=True, no_micor=False):
"""
Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
value = None
if dj_now:
try:
value = dj_now()
except Exception: # Improper configuration
pass
if not value:
if tzinfo:
value = datetime.utcnow().replace(tzinfo=pytz.utc)
else:
value = datetime.now()
if no_micor:
return value.replace(microsecond=0)
return value
def local_datetime(datetime_value, tz=None):
return datetime_value.astimezone(get_timezone(tz))
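# Hypothetical usage sketch (not part of the original module): the timezone name is
# a placeholder, and the surrounding polyaxon package is assumed to be importable so
# that get_timezone() can fall back to the client configuration.
if __name__ == '__main__':
    utc_now = now(tzinfo=True, no_micor=True)
    print(utc_now)
    print(local_datetime(utc_now, tz='Europe/Berlin'))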
|
StarcoderdataPython
|
187253
|
<reponame>franziskabraendle/alchemy_empowerment<filename>empowermentexploration/resources/littlealchemy/gametrees.py<gh_stars>0
import json
import empowermentexploration.utils.data_handle as data_handle
class Gametrees():
"""Class functions generate Little Alchemy game trees.
"""
def __init__(self):
"""Initializes game tree class.
"""
def get_alchemy1_gametree(self):
"""Gets game tree of Little Alchemy 1.
"""
# print info for user
print('\nGet alchemy1 game tree.')
# load raw gametree
with open('empowermentexploration/resources/littlealchemy/data/raw/elements.json', encoding='utf8') as infile:
old_gametree = json.load(infile)
# initialize element storage for alchemy 1 elements
elements = set()
# get all elements from little alchemy 1
for key, value in old_gametree.items():
parents = key.split(',')
results = value
elements.update(parents, results)
elements.difference_update({'water', 'fire', 'earth', 'air'})
elements = ['water', 'fire', 'earth', 'air'] + list(elements)
# initialize game tree
gametree = dict()
for element_id, element in enumerate(elements):
gametree[element_id] = {'name': element, 'parents': []}
# fill game tree
for key, value in old_gametree.items():
parents = key.split(',')
parents = sorted([elements.index(parents[0]), elements.index(parents[1])])
results = value
for result in results:
gametree[elements.index(result)]['parents'].append(parents)
# write edited library to JSON file
with open('empowermentexploration/resources/littlealchemy/data/alchemy1Gametree.json', 'w') as filehandle:
json.dump(gametree, filehandle, indent=4, sort_keys=True)
# write elements to JSON file
with open('empowermentexploration/resources/littlealchemy/data/alchemy1Elements.json', 'w') as filehandle:
json.dump(elements, filehandle, indent=4, sort_keys=True)
def get_alchemy2_gametree(self):
"""Gets game tree of Little Alchemy 2.
"""
# print info for user
print('\nGet alchemy2 game tree.')
# load raw game tree
with open('empowermentexploration/resources/littlealchemy/data/raw/all.json', encoding='utf8') as infile:
old_gametree = json.load(infile)
# initialize element storage for alchemy 2 elements and myths and monsters elements
elements = list()
myths_and_monsters = list()
# set for hidden elements that won't be included in game tree
hidden_elements = {'tardis', 'the doctor', 'blaze', 'conflagration', 'inferno', 'terrain', 'ground', 'supervolcano', 'keyboard cat', 'batman'}
# store little alchemy 2 elements and myths and monsters elements
for element in old_gametree['elements']:
if 'dlc' in old_gametree['elements'][element]:
myths_and_monsters.append(element)
elif old_gametree['elements'][element]['name'] not in hidden_elements:
elements.append(old_gametree['elements'][element]['name'])
# get assignment of old ID to new ID
id_assignments = dict()
new_ID = 0
for old_ID in old_gametree['elements']:
if 'dlc' not in old_gametree['elements'][old_ID] and old_gametree['elements'][old_ID]['name'] not in hidden_elements:
id_assignments[old_ID] = new_ID
new_ID += 1
# initialize new game tree
gametree = dict()
for element in old_gametree['elements']:
if "dlc" not in old_gametree['elements'][element] and old_gametree['elements'][element]['name'] not in hidden_elements:
# store element info with new id
gametree[id_assignments[element]] = old_gametree['elements'][element]
# get parents that are not part of the myths and monsters pack or hidden elements
old_parents = old_gametree['elements'][element]['parents']
parents = list()
for parent in old_parents:
if parent[0] not in myths_and_monsters and parent[1] not in myths_and_monsters and parent[0] not in hidden_elements and parent[1] not in hidden_elements:
parents.append(parent)
# edit parent IDs
new_parents = list()
for parent in parents:
new_parents.append([id_assignments[parent[0]], id_assignments[parent[1]]])
gametree[id_assignments[element]]['parents'] = new_parents
# edit conditions
                if 'condition' in old_gametree['elements'][element] and 'elements' in old_gametree['elements'][element]['condition']:
old_condition = old_gametree['elements'][element]['condition']['elements']
new_condition = list()
for condition_element in old_condition:
if condition_element not in myths_and_monsters and condition_element not in hidden_elements:
new_condition.append(id_assignments[condition_element])
gametree[id_assignments[element]]['condition']['elements'] = new_condition
# write edited library to JSON file
with open('empowermentexploration/resources/littlealchemy/data/alchemy2Gametree.json', 'w') as filehandle:
json.dump(gametree, filehandle, indent=4, sort_keys=True)
# write elements to JSON file
with open('empowermentexploration/resources/littlealchemy/data/alchemy2Elements.json', 'w') as filehandle:
json.dump(elements, filehandle, indent=4, sort_keys=True)
def get_joined_gametree(self):
"""Gets game tree of joined version Little Alchemy 1 + Little Alchemy 2.
"""
# print info for user
print('\nGet joined game tree.')
# load raw game trees
alchemy1_gametree = data_handle.get_gametree('alchemy1')
alchemy2_gametree = data_handle.get_gametree('alchemy2')
# load elements
alchemy1_elements = data_handle.get_elements('alchemy1')
alchemy2_elements = data_handle.get_elements('alchemy2')
# initialize element storage for joined elements
elements = set()
elements.update(alchemy1_elements, alchemy2_elements)
elements.difference_update({'water', 'fire', 'earth', 'air'})
elements = ['water', 'fire', 'earth', 'air'] + list(elements)
# initialize game tree
gametree = dict()
for element_id, element in enumerate(elements):
gametree[element_id] = {'name': element, 'parents': []}
for element in alchemy1_gametree:
# get parents
alchemy1_parents = alchemy1_gametree[element]['parents']
parents = list()
for parent in alchemy1_parents:
parents.append(sorted([elements.index(alchemy1_elements[parent[0]]), elements.index(alchemy1_elements[parent[1]])]))
if parents:
gametree[elements.index(alchemy1_elements[element])]['parents'].extend(parents)
for element in alchemy2_gametree:
# get parents
alchemy2_parents = alchemy2_gametree[element]['parents']
parents = list()
for parent in alchemy2_parents:
parents.append(sorted([elements.index(alchemy2_elements[parent[0]]), elements.index(alchemy2_elements[parent[1]])]))
if parents:
gametree[elements.index(alchemy2_elements[element])]['parents'].extend(parents)
# edit conditions
if 'condition' in alchemy2_gametree[element]:
if 'elements' in alchemy2_gametree[element]['condition']:
old_condition = alchemy2_gametree[element]['condition']['elements']
new_condition = list()
for condition_element in old_condition:
new_condition.append(elements.index(alchemy2_elements[condition_element]))
gametree[elements.index(alchemy2_elements[element])]['condition'] = {}
gametree[elements.index(alchemy2_elements[element])]['condition']['elements'] = new_condition
else:
gametree[elements.index(alchemy2_elements[element])]['condition'] = alchemy2_gametree[element]['condition']
# write edited library to JSON file
with open('empowermentexploration/resources/littlealchemy/data/joinedGametree.json', 'w') as filehandle:
json.dump(gametree, filehandle, indent=4, sort_keys=True)
# write elements to JSON file
with open('empowermentexploration/resources/littlealchemy/data/joinedElements.json', 'w') as filehandle:
json.dump(elements, filehandle, indent=4, sort_keys=True)
def get_tiny_gametree(self, version='alchemy'):
"""Gets game tree of Little Alchemy 1.
Args:
version (str, optional): Defines version more precisely. Can either be 'alchemy' or 'pixels' Defaults to 'alchemy'.
"""
# print info for user
print('\nGet tiny{} game tree.'.format(version))
# load raw gametree
with open('empowermentexploration/resources/littlealchemy/data/raw/rawTinyGametree.json', encoding='utf8') as infile:
old_gametree = json.load(infile)
# load elements
with open('empowermentexploration/resources/littlealchemy/data/raw/tiny{}Elements.json'.format(version), encoding='utf8') as infile:
elements = json.load(infile)
# initialize game tree
gametree = dict()
for element_id, element in enumerate(elements):
gametree[element_id] = {'name': element, 'parents': []}
# initialize memory storing previous combinations
memory = list()
# fill game tree
for element_combinations in old_gametree:
for combination in element_combinations[1]:
# when combinations yield more than one element, keep only the first
if sorted(combination) not in memory:
gametree[element_combinations[0]]['parents'].append(sorted(combination))
memory.append(sorted(combination))
# write edited library to JSON file
with open('empowermentexploration/resources/littlealchemy/data/tiny{}Gametree.json'.format(version), 'w') as filehandle:
json.dump(gametree, filehandle, indent=4, sort_keys=True)
# write elements to JSON file
with open('empowermentexploration/resources/littlealchemy/data/tiny{}Elements.json'.format(version), 'w') as filehandle:
json.dump(elements, filehandle, indent=4, sort_keys=True)
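# Hypothetical usage sketch (not part of the original module): regenerate every game
# tree in one go. The raw JSON inputs referenced above are assumed to exist under
# empowermentexploration/resources/littlealchemy/data/raw/.
if __name__ == '__main__':
    gametrees = Gametrees()
    gametrees.get_alchemy1_gametree()
    gametrees.get_alchemy2_gametree()
    gametrees.get_joined_gametree()
    gametrees.get_tiny_gametree(version='alchemy')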
|
StarcoderdataPython
|
8031600
|
<gh_stars>1-10
"""
Contains functions related to the sparsifying front end.
Images are assumed to be in the range [0, 1].
"""
import numpy as np
import pywt
def sp_frontend(images, rho=0.03, wavelet='bior4.4', mode='periodization', max_lev=1):
"""
Sparsifies input in the wavelet basis (using the PyWavelets package) and returns reconstruction.
:param images: Should be in the range [0, 1] and of shape [num_samples, num_features, num_features, 1].
:param rho: Sparsity level, in the range [0, 1].
:param wavelet: Wavelet to use in the transform. See https://pywavelets.readthedocs.io/ for more details.
:param mode: Signal extension mode. See https://pywavelets.readthedocs.io/ for more details.
:param max_lev: Maximum allowed level of decomposition.
"""
num_samples = images.shape[0]
num_features = images.shape[1]
images_sp = images.copy()
for i in range(num_samples):
image = images[i].reshape(num_features,num_features)
wp = pywt.WaveletPacket2D(image, wavelet, mode, max_lev)
paths = [node.path for node in wp.get_level(max_lev)]
m = wp[paths[0]].data.shape[0]
l = (4**max_lev)*m*m
k = np.floor(rho*l).astype('int')
n = l-k
coeffs = np.zeros(l)
for j in range(4**max_lev):
coeffs[j*m*m:(j+1)*m*m] = wp[paths[j]].data.flatten()
indices = np.argpartition(np.abs(coeffs), n)[:n]
coeffs[indices] = 0
for j in range(4**max_lev):
wp[paths[j]].data = coeffs[j*m*m:(j+1)*m*m].reshape([m,m])
image_r = wp.reconstruct(update=False).astype('float32')
image_r = np.clip(image_r, 0.0, 1.0)
images_sp[i, :, :, 0] = image_r
return images_sp
def sp_project(image, weights, wavelet='bior4.4', mode='periodization', max_lev=1, rho=0.03):
"""
Projects weights onto top rho% of the support of image (in the wavelet basis).
:param image: Should be in the range [0, 1], and resizable to shape [num_features, num_features]
:param weights: Should be resizable to shape [num_features, num_features].
:param rho: Sparsity level, in the range [0, 1].
:param wavelet: Wavelet to use in the transform. See https://pywavelets.readthedocs.io/ for more details.
:param mode: Signal extension mode. See https://pywavelets.readthedocs.io/ for more details.
:param max_lev: Maximum allowed level of decomposition.
"""
num_features = image.shape[1]
weights_proj = np.array(weights).reshape([1, num_features, num_features, 1])
wp = pywt.WaveletPacket2D(image.reshape(num_features,num_features), wavelet, mode, max_lev)
paths = [node.path for node in wp.get_level(max_lev)]
m = wp[paths[0]].data.shape[0]
l = (4**max_lev)*m*m
k = np.floor(rho*l).astype('int')
n = l-k
coeffs = np.zeros(l)
for j in range(4**max_lev):
coeffs[j*m*m:(j+1)*m*m] = wp[paths[j]].data.flatten()
indices = np.argpartition(np.abs(coeffs), n)[:n]
weight = weights_proj.reshape(num_features,num_features)
wp_w = pywt.WaveletPacket2D(weight.reshape(num_features,num_features), wavelet, mode, max_lev)
paths_w = [node.path for node in wp_w.get_level(max_lev)]
coeffs_w = np.zeros(l)
for j in range(4**max_lev):
coeffs_w[j*m*m:(j+1)*m*m] = wp_w[paths_w[j]].data.flatten()
coeffs_w[indices] = 0
for j in range(4**max_lev):
wp_w[paths_w[j]].data = coeffs_w[j*m*m:(j+1)*m*m].reshape([m,m])
weights_proj[0, :, :, 0] = wp_w.reconstruct(update=False).astype('float32')
return weights_proj
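# Hypothetical usage sketch (not part of the original module): sparsify a batch of
# random grayscale images and project a random perturbation onto the retained
# wavelet support. The image size and sparsity level are placeholders.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    images = rng.rand(4, 28, 28, 1).astype('float32')
    images_sp = sp_frontend(images, rho=0.05)
    weights = rng.randn(28, 28).astype('float32')
    weights_proj = sp_project(images[0], weights, rho=0.05)
    print(images_sp.shape, weights_proj.shape)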
|
StarcoderdataPython
|
3412616
|
# Copyright (C) 2020 Amazon.com, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This lambda focuses on data enrichment before passing along to the importer.
import json
import boto3
import os
def process(payload):
# queue url is
queueUrl = os.environ.get('QUEUE_TARGET')
# Policy is required.
payload['policy_name'] = os.environ.get('POLICY_NAME')
# Thing group is desired, but optional.
    # 'None' is used as a sentinel value because a Lambda environment
    # variable cannot be set to an empty string.
if (os.environ.get('THING_GROUP_NAME') == "None"):
payload['thing_group_name'] = ""
else:
payload['thing_group_name'] = os.environ.get('THING_GROUP_NAME')
    # Thing type is desired, but optional.
if (os.environ.get('THING_TYPE_NAME') == "None"):
payload['thing_type_name'] = ""
else:
payload['thing_type_name'] = os.environ.get('THING_TYPE_NAME')
# Pass on to the queue for target processing.
print(json.dumps(payload))
client = boto3.client("sqs")
client.send_message( QueueUrl=queueUrl,
MessageBody=json.dumps(payload))
def lambda_handler(event, context):
# Get the payload coming in and process it. There might be more than one.
for record in event['Records']:
process(json.loads(record["body"]))
|
StarcoderdataPython
|
5061407
|
<reponame>IKrukov-HORIS/lets-plot<gh_stars>100-1000
# Copyright (c) 2020. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
import pytest
import shapely
from shapely.geometry import Point
import lets_plot.geo_data as geodata
from lets_plot.geo_data import DF_COLUMN_FOUND_NAME
from .geo_data import run_intergration_tests, assert_row, assert_error, get_request_column_name, \
assert_request_and_found_name_are_equal
ShapelyPoint = shapely.geometry.Point
BOSTON_ID = '4631409'
NYC_ID = '351811'
TURN_OFF_INTERACTION_TEST = not run_intergration_tests()
MOSCOW_LON = 37.620393
MOSCOW_LAT = 55.753960
@pytest.mark.parametrize('level,expected_name', [
pytest.param('city', 'Москва', id='city-Moscow'),
pytest.param('county', 'Центральный административный округ', id='county-Central administrative district'),
pytest.param('country', 'Россия', id='Russian Federeation')
])
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_reverse_moscow(level, expected_name):
r = geodata.reverse_geocode(lon=MOSCOW_LON, lat=MOSCOW_LAT, level=level)
assert_row(r.get_geocodes(), found_name=expected_name)
@pytest.mark.parametrize('geometry_getter', [
pytest.param(lambda regions_obj: regions_obj.get_centroids(), id='centroids()'),
pytest.param(lambda regions_obj: regions_obj.get_limits(), id='limits()'),
pytest.param(lambda regions_obj: regions_obj.get_boundaries(5), id='boundaries(5)'),
pytest.param(lambda regions_obj: regions_obj.get_boundaries(), id='boundaries()')
])
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_name_columns(geometry_getter):
request = 'boston'
found_name = 'Boston'
boston = geodata.geocode_cities(request)
assert_row(boston.get_geocodes(), names=request, found_name=found_name)
assert_row(geometry_getter(boston), names=request, found_name=found_name)
@pytest.mark.parametrize('geometry_getter', [
pytest.param(lambda regions_obj: regions_obj.get_centroids(), id='centroids()'),
pytest.param(lambda regions_obj: regions_obj.get_limits(), id='limits()'),
pytest.param(lambda regions_obj: regions_obj.get_boundaries(5), id='boundaries(5)'),
pytest.param(lambda regions_obj: regions_obj.get_boundaries(), id='boundaries()')
])
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_empty_request_name_columns(geometry_getter):
request = 'Vermont'
found_name = 'Vermont'
states = geodata.geocode_states('us-48')
assert_row(states.get_geocodes(), names=request, found_name=found_name)
assert_row(geometry_getter(states), names=request, found_name=found_name)
BOSTON_LON = -71.057083
BOSTON_LAT = 42.361145
NYC_LON = -73.935242
NYC_LAT = 40.730610
@pytest.mark.parametrize('lons, lats', [
pytest.param(geodata.Series([BOSTON_LON, NYC_LON]), geodata.Series([BOSTON_LAT, NYC_LAT])),
pytest.param([BOSTON_LON, NYC_LON], [BOSTON_LAT, NYC_LAT])
])
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_reverse_geocoding_of_list_(lons, lats):
r = geodata.reverse_geocode(lons, lats, 'city')
assert_row(r.get_geocodes(), index=0, names='[-71.057083, 42.361145]', found_name='Boston')
assert_row(r.get_geocodes(), index=1, names='[-73.935242, 40.73061]', found_name='New York')
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_reverse_geocoding_of_nyc():
r = geodata.reverse_geocode(NYC_LON, NYC_LAT, 'city')
assert_row(r.get_geocodes(), found_name='New York')
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_reverse_geocoding_of_nothing():
try:
geodata.reverse_geocode(-30.0, -30.0, 'city').get_geocodes()
except ValueError as e:
assert str(e).startswith('No objects were found for [-30.000000, -30.000000].\n')
return
    assert False, 'Should fail with nothing found exception'
SEVASTOPOL_LON = 33.5224
SEVASTOPOL_LAT = 44.58883
SEVASTOPOL_ID = '6061953'
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_only_one_sevastopol():
sevastopol = geodata.reverse_geocode(SEVASTOPOL_LON, SEVASTOPOL_LAT, 'city')
assert_row(sevastopol.get_geocodes(), id=SEVASTOPOL_ID)
WARWICK_LON = -71.4332938210472
WARWICK_LAT = 41.715542525053
WARWICK_ID = '785807'
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_closest_to_boston_by_name():
r = geodata.geocode(
level='city',
names='Warwick'
) \
.where('Warwick', closest_to=geodata.geocode_cities('boston'))
assert_row(r.get_geocodes(), id=WARWICK_ID, found_name='Warwick')
assert_row(r.get_centroids(), lon=WARWICK_LON, lat=WARWICK_LAT)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_closest_to_boston_by_coord():
r = geodata.geocode(
level='city',
names='Warwick'
) \
.where('Warwick', closest_to=ShapelyPoint(BOSTON_LON, BOSTON_LAT))
assert_row(r.get_geocodes(), id=WARWICK_ID, found_name='Warwick')
assert_row(r.get_centroids(), lon=WARWICK_LON, lat=WARWICK_LAT)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_scope_boston_by_box():
boston = geodata.geocode_cities('boston').get_centroids().iloc[[0]]
buffer = 0.6
boston_centroid = ShapelyPoint(boston.geometry.x, boston.geometry.y)
r = geodata.geocode(
level='city',
names='Warwick'
) \
.where('Warwick',
scope=shapely.geometry.box(
boston_centroid.x - buffer,
boston_centroid.y - buffer,
boston_centroid.x + buffer,
boston_centroid.y + buffer
))
assert_row(r.get_geocodes(), id=WARWICK_ID, found_name='Warwick')
assert_row(r.get_centroids(), lon=WARWICK_LON, lat=WARWICK_LAT)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_allow_ambiguous():
r = geodata.geocode_cities(['gotham', 'new york', 'manchester']) \
.allow_ambiguous() \
.get_geocodes()
actual = r[DF_COLUMN_FOUND_NAME].tolist()
assert 29 == len(actual) # 1 New York + 27 Manchester
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_drop_not_matched():
r = geodata.geocode_cities(['gotham', 'new york', 'manchester']) \
.ignore_all_errors() \
.get_geocodes()
actual = r[DF_COLUMN_FOUND_NAME].tolist()
assert actual == ['New York']
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguity_drop_not_found():
try:
r = geodata.geocode_cities(['gotham', 'new york', 'manchester']) \
.ignore_not_found() \
.get_geocodes()
except ValueError as ex:
        assert str(ex).startswith('Multiple objects (27) were found for manchester')
return
assert False, 'Should throw exception'
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_single_request_level_detection():
r = geodata.geocode(names=['new york', 'boston']).scope('usa').get_geocodes()
assert r.id.tolist() == [NYC_ID, BOSTON_ID]
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_where_request_level_detection():
"""
where('new york', region=geodata.geocode_states('new york')) gives county as first detected level
where('boston', region=geodata.geocode_countries('usa')) gives city as first detected level
But 'new york' also matches a city name so common level should be a city
"""
r = geodata.geocode(names=['new york', 'boston']) \
.where('new york', scope=geodata.geocode_states('new york')) \
.where('boston', scope=geodata.geocode_countries('usa')) \
.get_geocodes()
assert [NYC_ID, BOSTON_ID] == r.id.tolist()
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_positional_regions():
df = geodata.geocode_cities(['york', 'york']).states(['New York', 'Illinois']).get_geocodes()
assert ['New York', 'Little York'] == df['found name'].tolist()
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_duplications():
r1 = geodata.geocode(names=['Virginia', 'West Virginia'], scope='USA')
r1.get_centroids()
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_limits_request():
print(geodata.geocode(names='texas').get_limits())
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_centroids_request():
print(geodata.geocode(names='texas').get_centroids())
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_polygon_boundaries_request():
print(geodata.geocode(names='colorado').get_boundaries(14))
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_multipolygon_boundaries_request():
assert geodata.geocode(names='USA').get_boundaries(1) is not None
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_regions():
countries_geocoder = geodata.geocode(level='country', names=['Russia', 'USA'])
countries_geocoder.get_boundaries()
assert countries_geocoder is not None
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_mapregion():
usa = geodata.geocode_countries(names='USA')
print(usa.get_centroids())
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_geocoderegion_as_region():
usa = geodata.geocode_countries(names=['usa'])
states_list = ['NY', 'TX', 'NV']
geodata.geocode_states(names=states_list).scope(usa).get_geocodes()
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_error_response():
with pytest.raises(ValueError) as exception:
geodata.geocode_countries(names='blablabla').get_centroids()
assert 'No objects were found for blablabla.\n' == exception.value.args[0]
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_rows_order():
city_names = ['Boston', 'Phoenix', 'Tucson', 'Salt Lake City', 'Los Angeles', 'San Francisco']
city_regions = geodata.geocode_cities(city_names).scope('US')
# create path preserving the order
df = city_regions.get_centroids()
df = df.set_index(get_request_column_name(df))
df = df.reindex(city_names)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_new_server():
c = geodata.geocode_countries(names='USA')
print(c.get_centroids())
print(c)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_case():
usa = geodata.geocode_countries(names=['usa'])
states_48 = geodata.geocode_states(['us-48'])
states_list = ['NY', 'TX', 'louisiana']
states = geodata.geocode_states(names=states_list).scope(usa)
cities_list = ['New york', 'boston', 'la']
t_cities = geodata.geocode_cities(names=cities_list).scope(usa)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_ambiguous_not_found_with_level():
with pytest.raises(ValueError) as exception:
r = geodata.geocode(names=['zimbabwe', 'moscow'], level='country').get_geocodes()
assert 'No objects were found for moscow.\n' == exception.value.args[0]
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_order():
bound = geodata.geocode(names=['Russia', 'USA', 'France', 'Japan'])
assert_row(bound.get_geocodes(), names=['Russia', 'USA', 'France', 'Japan'])
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_resolution():
r = geodata.geocode(names=['monaco', ], level='country')
sizes = []
for res in range(1, 16):
b = r.get_boundaries(res)
sizes.append(len(b))
assert 15 == len(sizes)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_should_copy_found_name_to_request_for_us48():
df = geodata.geocode_states('us-48').get_geocodes()
assert len(df) == 49
assert_request_and_found_name_are_equal(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_us48_in_scope():
df = geodata.geocode_states().scope('us-48').get_geocodes()
assert 49 == len(df)
assert_request_and_found_name_are_equal(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_us48_in_name_without_level():
df = geodata.geocode(names='us-48').get_geocodes()
assert 49 == len(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_duplication_with_us48():
df = geodata.geocode_states(names=['tx', 'us-48', 'tx']).get_geocodes()
assert 51 == len(df)
assert_row(df, names='tx', found_name='Texas', index=0)
assert_row(df, names='Vermont', found_name='Vermont', index=1)
assert_row(df, names='tx', found_name='Texas', index=50)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_empty_request_get_geocodes():
orange_county = geodata.geocode_counties('orange county').scope('north carolina')
r = geodata.geocode_cities().scope(orange_county)
df = r.get_geocodes()
assert_request_and_found_name_are_equal(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_empty_request_centroid():
orange_county = geodata.geocode_counties('orange county').scope('north carolina')
r = geodata.geocode_cities().scope(orange_county)
df = r.get_centroids()
assert_request_and_found_name_are_equal(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_highlights():
r = geodata.geocode(level='city', names='NYC').highlights(True)
df = r.get_geocodes()
assert_row(df, found_name='New York')
assert df['highlights'].tolist() == [['NYC']]
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_countries():
df = geodata.geocode_countries().get_centroids()
assert 217 == len(df)
@pytest.mark.skipif(TURN_OFF_INTERACTION_TEST, reason='Need proper server ip')
def test_not_found_scope():
assert_error(
"Region is not found: blablabla",
lambda: geodata.geocode(names=['texas'], scope='blablabla').get_geocodes()
)
|
StarcoderdataPython
|
3595395
|
#!/usr/local/bin/python
# coding=utf-8
from django.conf.urls import url
from docs import views
urlpatterns = [
url(r'^(?P<doc_name>[A-Za-z0-9\-]+)/$', views.docs_view, name='docs_view'),
]
|
StarcoderdataPython
|
11270285
|
<gh_stars>0
import telegram
class BaseTrigger:
"""
    Base trigger
"""
def __init__(self, client: telegram.Bot, user_id, messenger, text, message: telegram.update.Message, user_state):
"""
        Class initialization
        :param client: API client used to send messages
        :param user_id: id of the user to send messages to
        :param messenger: messenger the message came from
        :param text: text sent by the user
        :param message: original telegram message object
        :param user_state: current state of the user
"""
self.client = client
self.text = text
self.message = message
self.messenger = messenger
self.user_id = user_id
self.state = user_state
def send_keyboard(self, message, buttons, whom=None):
pass
def send_message(self, message, whom=None):
pass
def get_user(self, whom=None):
pass
def create_user(self):
pass
def send_photo(self, image_path):
pass
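# Hypothetical usage sketch (not part of the original module): a minimal concrete
# trigger that sends plain text back through the telegram client; the default
# recipient is the user the trigger was created for.
class EchoTrigger(BaseTrigger):
    def send_message(self, message, whom=None):
        # Send the text to the given chat, defaulting to the originating user.
        chat_id = whom if whom is not None else self.user_id
        self.client.send_message(chat_id=chat_id, text=message)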
|
StarcoderdataPython
|
1824112
|
<reponame>ivaleriano/ProtoPNet
base_architecture = 'densenet169'
img_size = 139
prototype_shape = (30, 128, 1, 1) #(2000,128,1,1)
num_classes = 2
prototype_activation_function = 'log'
add_on_layers_type = 'regular'
experiment_run = '001'
data_path = '/mnt/nas/Users/Sebastian/adni-mri-pet/classification-nomci/mri-pet'
#train_dir = data_path + '/0-train.h5'
#train_dir = [data_path + '/0-train.h5', data_path + '/1-train.h5', data_path + '/2-train.h5', data_path + '/3-train.h5', data_path + '/4-train.h5']
train_dir = [data_path + '/0-train.h5', data_path + '/1-train.h5']
#test_dir = data_path + '/0-test.h5'
test_dir = [data_path + '/0-test.h5', data_path + '/1-test.h5', data_path + '/2-test.h5', data_path + '/3-test.h5', data_path + '/4-test.h5']
#test_dir = [data_path + '/0-test.h5', data_path + '/1-test.h5', data_path + '/2-test.h5', data_path + '/3-test.h5',]
train_push_dir = data_path + '/0-train.h5'
train_batch_size = 100
test_batch_size = 50
train_push_batch_size = 100
joint_optimizer_lrs = {'features': 1e-4, # changed by Icxel 1e-4
'add_on_layers': 3e-4, #3e-3
'prototype_vectors': 3e-4} #3e-3
joint_lr_step_size = 5
warm_optimizer_lrs = {'add_on_layers': 3e-4, #3e-3
'prototype_vectors': 3e-4} #3e-3
last_layer_optimizer_lr = 1e-4 # originally 1e-4 changed by Icxel
coefs = {
'crs_ent': 1,
'clst': 0.8, # originally 0.8
'sep': -0.08, # originally -0.08
'l1': 1e-4, #1e-4
}
num_train_epochs = 200
num_warm_epochs = 5
push_start = 10
push_epochs = [i for i in range(num_train_epochs) if i % 10 == 0]
|
StarcoderdataPython
|
6645160
|
import numpy as np
import pandas as pd
genetox = {'Ames': 'bacterial reverse mutation test',
'Ames study' :'bacterial reverse mutation test',
'Ames II' : 'bacterial reverse mutation test',
'bacterial reverse mutation assay (e.g. Ames test)' : 'bacterial reverse mutation test',
'Bacterial Mutagenesis' : 'bacterial reverse mutation test (single strain)',
'Histidine reverse gene mutation, Ames assay' : 'bacterial reverse mutation test',
'according to B Ames et al. (1975) Mutat Res 31:347-364': 'bacterial reverse mutation test',
'Cell Transformation' : 'cell transformation',
'Cell transformation' : 'in vitro cell transformation assay',
'Chromosome aberrations, in vivo' : 'in vivo chromosome aberrations',
'Chromosome aberrations in vivo' : 'in vivo chromosome aberrations',
'Chromosome aberrations in vitro' : 'in vitro mammalian chromosome aberration test',
'chromosomal aberration test in Chinese hamster lung cells (CHL/IU)' : 'in vitro mammalian chromosome aberration test',
'In Vitro Chromosome Aberration' : 'in vitro mammalian chromosome aberration test',
'Chromosomal aberation' : 'in vitro mammalian chromosome aberration test',
'Chromosomal aberration assay' : 'in vitro mammalian chromosome aberration test',
'Micronucleus test in vitro, chromosome aberrations' : 'in vitro mammalian chromosome aberration test',
'Chromosome aberrations' : 'chromosome aberrations (plant)',
'in vitro mammalian cytogenicity (B10)' :'in vitro mammalian chromosome aberration test',
'in vitro mammalian cell transformation assay' : 'in vitro cell transformation assay',
'Chinese hamster ovary cell/hypoxanthine-guanine-phosphoribosyl transferase (CHO/HGPRT) forward gene mutation assay': 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'in vitro mammalian cell gene mutation': 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'mammalian cell gene mutation assay' : 'in vitro mammalian cell gene mutation test',
'in vitro mammalian cell gene mutation (B.17)' : 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'in vitro mammalian cell gene mutation (B17)' : 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'in vitro mammalian cytogenecity (B10)' : 'in vitro mammalian chromosome aberration test',
'in vitro mammalian cytogenicity' : 'in vitro mammalian chromosome aberration test',
'in vitro mammalian cytogenicity (B10)' : 'in vitro mammalian chromosome aberration test',
'in vitro mammallian cytogenicity (B10)' : 'in vitro mammalian chromosome aberration test',
'in vitro mammalian chromosome aberration test' : 'in vitro mammalian chromosome aberration test',
'in vitro mammalian chromosome aberration test, human lymphocytes from healthy, non smoking donors' : 'in vitro mammalian chromosome aberration test',
'Mouse lymphoma assay' : 'in vitro mammalian cell gene mutation test using the thymidine kinase gene',
'In vitro L5178Y TK+/- Mouse Lymphoma Cell Assay': 'in vitro mammalian cell gene mutation test using the thymidine kinase gene',
'DNA Damage/Repair' : 'DNA damage/repair',
'DNA repair' : 'DNA damage/repair',
'DNA damage' : 'DNA damage/repair',
'unscheduled DNA synthesis' : 'unscheduled DNA synthesis',
'DNA damage/gene conversion' : 'DNA damage/repair',
'Transgenic' : 'transgenic rodent somatic and germ cell gene mutation assays (TGR)',
'GENE MUTATION ASSAY IN CHINESE HAMSTER V79 CELLS IN VITRO' :'in vitro mammalian cell gene mutation test',
'bacterial gene mutation assay' : 'bacterial reverse mutation test',
'Forward gene mutation at the HPRT or ouabain locus' : 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'Forward gene mutation at the HPRT locus': 'in vitro mammalian cell gene mutation test using the Hprt and xprt genes',
'Forward gene mutation at the thymidine kinase (TK) locus; chromosome aberrations': 'in vitro mammalian cell gene mutation test using the thymidine kinase gene',
'Gene mutation' : 'gene mutation (plant)',
'In Vivo Micronucleus' : 'in vivo micronucleus test',
'In Vitro Micronucleus' : 'in vitro mammalian cell micronucleus test',
'micronucleus assay' : 'in vitro mammalian chromosome aberration test',
'in vitro micronucleus assay' : 'in vitro mammalian cell micronucleus test',
'In vitro micronucleus test in mouse lymphoma L5178Y cells' : 'in vitro mammalian cell gene mutation test using the thymidine kinase gene',
'In-vitro micronucleus test in cultured human lymphocytes' : 'in vitro mammalian chromosome aberration test',
'In Vitro Mammalian Cell Micronucleus Test (MNvit)' : 'in vitro mammalian chromosome aberration test',
'Unscheduled DNA synthesis (UDS) in vitro, DNA effects' : 'unscheduled DNA synthesis (UDS) in vitro',
'unscheduled DNA synthesis in mammalian cells in vitro' : 'unscheduled DNA synthesis (UDS) in vitro',
'Unscheduled DNA synthesis (UDS) in vivo' : 'unscheduled DNA synthesis (UDS) in vivo',
'Unscheduled DNA synthesis' : 'unscheduled DNA synthesis (UDS) in vivo',
'DNA damage and repair assay, unscheduled DNA synthesis in mammalian cells in vitro' :'unscheduled DNA synthesis (UDS) in vitro',
'In Vivo Chromosome Aberration' : 'in vivo chromosome aberrations',
'Micronucleus' : 'in vivo micronucleus test',
'Micronucleus test, chromosome aberrations': 'mammalian erythrocyte micronucleus test',
'Unscheduled DNA synthesis (UDS) in vivo, DNA effects' :'unscheduled DNA synthesis (UDS) in vivo',
'Unscheduled DNA synthesis (UDS) in vivo; DNA effects' :'unscheduled DNA synthesis (UDS) in vivo',
'in vitro mammalian cell gene mutation assay' : 'in vitro mammalian cell gene mutation test',
'In vitro mammalian cell gene mutation test' : 'in vitro mammalian cell gene mutation test',
}
assay_result_std = {'equivocal' : 'inconclusive', 'inconsistent (cancer in vivo)' : 'inconclusive', 'ambiguous' :'inconclusive', 'negative (cancer in vivo)' : 'negative', 'positive (cancer in vivo)' : 'positive', 'positive (weak)' : 'positive', 'no data' : 'not determined', 'technically compromised' : 'inadequate', 'uninterpretable' : 'inadequate'}
outcome = {'inadequate' : 2, 'inconclusive' : 2, 'not determined' : 2, 'positive' : 1, 'negative' : 0}
def clean_up(df):
df['standard_assay_type'] = df['assay_type']
df['standard_assay_type'].replace(genetox, inplace = True)
df['assay_result_std'] = df['assay_result']
df['assay_result_std'].replace(assay_result_std, inplace = True)
df['assay_outcome'] = df['assay_result_std']
df['assay_outcome'].replace(outcome, inplace = True)
return df
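# Editor's usage sketch (not part of the original file): clean_up expects a DataFrame
# with 'assay_type' and 'assay_result' columns; the sample rows below are invented
# purely for illustration.
if __name__ == '__main__':
    sample = pd.DataFrame({
        'assay_type': ['Ames', 'Mouse lymphoma assay'],
        'assay_result': ['positive (weak)', 'equivocal'],
    })
    cleaned = clean_up(sample)
    # 'positive (weak)' -> 'positive' -> outcome 1; 'equivocal' -> 'inconclusive' -> outcome 2
    print(cleaned[['standard_assay_type', 'assay_result_std', 'assay_outcome']])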
|
StarcoderdataPython
|
6617878
|
from setuptools import *
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'pydep-cli',
version = '0.1.1',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/Devansh3712/PyDep',
description = 'Create pyproject.toml & poetry.lock dependency files from requirements.txt',
long_description = long_description,
long_description_content_type = "text/markdown",
license = 'MIT',
packages = find_packages(),
include_package_data = True,
entry_points = {
"console_scripts": [
"pydep=pydep.__main__:pydep",
]
},
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires = ["click==7.1.2"],
)
|
StarcoderdataPython
|
164415
|
<gh_stars>0
from dal.test import case, stories
from dal_select2.test import Select2Story
from .models import TestModel
class AdminGenericForeignKeyTestCase(Select2Story, case.AdminMixin,
case.ContentTypeOptionMixin,
case.AutocompleteTestCase):
field_name = 'test'
inline_related_name = 'inline_test_models'
model = TestModel
def setUp(self):
super(AdminGenericForeignKeyTestCase, self).setUp()
self.get(url=self.get_modeladmin_url('add'))
def test_can_select_option(self):
option, ctype = self.create_option()
story = stories.SelectOption(self)
story.select_option(option.name)
story.assert_value('%s-%s' % (ctype.pk, option.pk))
def test_can_select_option_in_first_inline(self):
option, ctype = self.create_option()
story = stories.InlineSelectOption(self, inline_number=0)
story.select_option(option.name)
story.assert_value('%s-%s' % (ctype.pk, option.pk))
def test_can_select_option_in_first_extra_inline(self):
option, ctype = self.create_option()
story = stories.InlineSelectOption(self, inline_number=3)
story.select_option(option.name)
story.assert_value('%s-%s' % (ctype.pk, option.pk))
|
StarcoderdataPython
|
4913298
|
<filename>rubik/application/help_functions/help_output.py
#!/usr/bin/env python3
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "<NAME>"
__all__ = [
'HelpOutput',
'help_output',
]
from .functor_help_example import HelpExample
def help_output(test=None, interactive=None, writer=None):
HelpOutput(test=test, interactive=interactive, writer=writer)()
class HelpOutput(HelpExample):
TEXT = """\
# Output modes
These are the available output modes:
* print the result to a file (--print/-P, or function 'print_cube()')
* print statistics about the resulting cube (--stats/-S, or function
'print_stats()')
* compare statistics about many resulting cubes (--compare-stats/-C, or
function 'compare_stats()'); this is similar to --stats/-S, but all
the stats are shown side by side
* show statistics about differences between two cubes (--diff/-D, or
function 'diff()')
* print an histogram of the resulting cube to a file (--histogram/-H, or
function 'print_histogram(...)')
* write the output cube to file (--output-filename/-o, or function
'write_cube(label=...)')
* visualization of the resulting cube (--view/-V, or function 'view()')
If the --dry-run/-d option is set, the previous mode are ignored, and no command
is executed.
<<<BREAK>>>
It is possible to have a report of the rubik configuration, using the flag
--report/-R; it can be repeated to increase the report level.
<<<BREAK>>>
Each output mode applies to the most recent result in the stack; so, for
instance, the following command will show only the linear cube:
$ rubik -e 'cb.linear_cube("5x3")' --print \\
-e 'cb.const_cube("4x4", 2)'
[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 12. 13. 14.]]
rubik: WARNING: pointless expression 'cb.const_cube("4x4", 2)'
$
The second expression is pointless, as shown by the warning message.
<<<BREAK>>>
The following command
$ rubik -e 'cb.linear_cube("5x3")' --print \\
-o 'l.raw' \\
-e 'cb.const_cube("4x4", 2)' \\
-o 'r.raw'
[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 12. 13. 14.]]
$
writes the linear cube to 'l.raw', the random cube to 'r.raw'.
It is possible to have many output modes for the same cube:
$ rubik -e 'cb.linear_cube("5x3")' --print --stats \\
-e 'cb.const_cube("4x4", 2)' --print
[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 12. 13. 14.]]
shape = 5x3
#elements = 15
%elements = 100.00%
min = 0.0
min_index = (0, 0)
max = 14.0
max_index = (4, 2)
sum = 105
ave = 7
#zero = 1
%zero = 6.67%
#nonzero = 14
%nonzero = 93.33%
#nan = 0
%nan = 0.00%
#inf = 0
%inf = 0.00%
[[ 2. 2. 2. 2.]
[ 2. 2. 2. 2.]
[ 2. 2. 2. 2.]
[ 2. 2. 2. 2.]]
$
It is possible to use expressions to call specific output modes:
$ rubik -e 'cb.linear_cube("5x3")' 'print_stats()'
shape = 5x3
#elements = 15
%elements = 100.00%
min = 0.0
min_index = (0, 0)
max = 14.0
max_index = (4, 2)
sum = 105
ave = 7
#zero = 1
%zero = 6.67%
#nonzero = 14
%nonzero = 93.33%
#nan = 0
%nan = 0.00%
#inf = 0
%inf = 0.00%
$
Visualization can be performed on multiple cubes, only if they all have the same
shape.
"""
|
StarcoderdataPython
|
3515809
|
#!/usr/bin/python
import sys, os, subprocess, re
import argparse
usage = """
JOTTER Jadas Output Tif daTa ExporteR
viruszoo.py -s VH-HCF_20kx -f 1 -l 50
eman2 must be in user's path.
"""
parser = argparse.ArgumentParser(description=usage)
# example command
#> viruszoo.py Tomogram OR coordmode Inputfile (type?) Boxsize outputfile [binning]
parser.add_argument("-s", "--session", "--ses", "-S", action="store", default="none", dest="session",
help="Required. Input file name. First input file name in the session you're processing.")
parser.add_argument("-f", "--first", "--fi", "-F", action="store", dest="first", type=int,
help="Optional. If present will start processing from this image, useful to continue interrupted session")
parser.add_argument("-l", "--last", "--la", "-L", action="store", dest="last", type=int,
help="Optional. If present will start processing from this image, useful to continue interrupted session")
args = parser.parse_args()
def inputChecker(args):
"""check sensible input has been given"""
# Check session has been correctly given, files exist and are correct type
print args
if args.session == "none":
response = "No input files given. Please check input."
print response
sys.exit()
else: session = args.session
min_image = int(args.first)
max_image = int(args.last)
return session, min_image, max_image
def countImages(session):
"""Counts number of images and number of frames for a session"""
# loop over images from 1 to check what the highest existing number is. NEEDS SEQUENTIAL NUMBERS
max_image = 1
image_exists = os.path.exists(str(session)+'_'+str(max_image).zfill(4)+'_frameImage1.mrc')
while image_exists == True:
max_image += 1
image_exists = os.path.exists(str(session)+'_'+str(max_image).zfill(4)+'_frameImage1.mrc')
else:
max_image -= 1
max_frame = 0
frame_exists = os.path.exists(str(session)+'_0001_'+str(max_frame)+'.mrc')
while frame_exists == True:
max_frame += 1
frame_exists = os.path.exists(str(session)+'_0001_'+str(max_frame)+'.mrc')
else:
max_frame -= 1
return max_image, max_frame
def environCheck():
"""Checks required programs are in PATH"""
run = subprocess.Popen(['newstack', "--help"])
success = run.wait()
if success != 0:
response = "Cannot find Imod!"
print response
sys.exit()
return success
def virusChopper(session, image):
"""Takes OneView image and chops it into smaller bits """
infile = str(session)+'_'+str(image).zfill(4)+'.dm3'
if os.path.isfile(infile) == True:
for xcoord in 700,1600,2500,3396:
for ycoord in 700,1600,2500,3396:
outfile = str(session)+'_'+str(image).zfill(4)+'_'+str(xcoord)+'_'+str(ycoord)+'.png'
clip = '--clip=1400,1400,'+str(xcoord)+','+str(ycoord)
command = ['e2proc2d.py', infile, 'temp.png', clip]
print command
subprocess.call(command)#
command2 = ['e2proc2d.py', 'temp.png', outfile, '--meanshrink=2']
subprocess.call(command2)
# print command
else:
print "file " + infile + " does not exist. Skipping."
return infile
#check = environCheck()
#args = parser.parse_args()
session, min_image, max_image = inputChecker(args)
print session, min_image, max_image
#pseudocode:
# determine number of images in session
# check options here
# if last_frame given
# if last_frame > max_frame:
# print: last_frame + " does not exist, using " +max_frame+ as last frame
#print "Found " +str(max_image)+ " images, with "+str(max_frame)+ " frames each."
#loop over images to chop them
#image = 155
for image in range(min_image,max_image+1):
outfile = virusChopper(session, image)
|
StarcoderdataPython
|
6474938
|
<gh_stars>0
from twilio.rest import TwilioRestClient
#Don't share your secrets!
account_sid = "{{ account_sid }}" # Your Account SID from www.twilio.com/console
auth_token = "{{ auth_token }}" # Your Auth Token from www.twilio.com/console
client = TwilioRestClient(account_sid, auth_token)
#Note: The number +12345678901 is unverified. Trial accounts cannot send messages to unverified numbers;
message = client.messages.create(body="Hello from Python",
to="+12345678901", # Replace with your phone number
from_="+12345678901") # Replace with your Twilio number
print(message.sid)
|
StarcoderdataPython
|
11246384
|
"""Unit tests for Reach Helper."""
import logging
import time
from typing import Union
import numpy as np
from robogym.robot.utils import reach_helper
from robogym.robot.utils.measurement_units import MeasurementUnit
from robogym.robot.utils.reach_helper import ReachHelperDebugRecorder
logger = logging.getLogger(__name__)
def assert_speed_is_ok(
_debug_recorder: ReachHelperDebugRecorder,
_expected_speed: Union[float, np.ndarray],
_speed_limit_threshold: Union[float, np.ndarray],
) -> None:
"""This function inspects the speed samples from the given recorder (for all controls), and asserts whether
all are within the desired speed limit.
:param _debug_recorder: Recorder to check velocity samples.
:param _expected_speed: Speed limit that we set in the reach helper for the command generation.
:param _speed_limit_threshold: Small threshold that the robot would potentially pass above the commanded
expected speed, since reaction time will have a catch-up effect on the robot, which may cause speed to
increase over the commanded speed briefly.
"""
# prepare speed limit
actuator_count = len(_debug_recorder.robot.actuators())
if np.isscalar(_expected_speed):
_expected_speed = np.full(actuator_count, _expected_speed)
if np.isscalar(_speed_limit_threshold):
_speed_limit_threshold = np.full(actuator_count, _speed_limit_threshold)
speed_limit = _expected_speed + _speed_limit_threshold
# compare observed vs limit
max_obs_speed_per_control = np.max(np.abs(_debug_recorder.obs_vel), axis=0)
limit_ok_per_control = max_obs_speed_per_control < speed_limit
was_speed_ok = np.alltrue(limit_ok_per_control)
# assert/print relevant info
random_id = str(time.time())
if not was_speed_ok:
logger.info(
"Speed limit violation, will dump plots of the samples for debugging:"
)
for act_idx in range(len(_debug_recorder.obs_pos[0])):
_debug_recorder.plot_pos_and_vel_for_actuator(
act_idx,
reach_helper.PlotOutput.FILE,
MeasurementUnit.RADIANS,
MeasurementUnit.DEGREES,
f"test_reach_helper_{random_id}",
)
assert (
was_speed_ok
), f"Speed limit violation: \n{max_obs_speed_per_control} \nvs \n{speed_limit}"
def _build_reach_helper_test_robot(max_position_change=0.020):
from gym.envs.robotics import utils
from robogym.envs.rearrange.simulation.blocks import (
BlockRearrangeSim,
BlockRearrangeSimParameters,
)
from robogym.robot.robot_interface import ControlMode, RobotControlParameters
sim = BlockRearrangeSim.build(
n_substeps=20,
robot_control_params=RobotControlParameters(
control_mode=ControlMode.TCP_WRIST.value,
max_position_change=max_position_change,
),
simulation_params=BlockRearrangeSimParameters(),
)
# reset mocap welds if any. This is actually needed for TCP arms to move
utils.reset_mocap_welds(sim.mj_sim)
# extract arm since CompositeRobots are not fully supported by reach_helper
composite_robot = sim.robot
arm = composite_robot.robots[0]
arm.autostep = True
return arm
def test_curve_generation_two_steps() -> None:
"""This test is used to verify a bugfix. The bug was that if a target's distance is too close to the current
position (closer than the max speed), the curve would only generate one step for the actuator, and the step
would be for the current position, not for the target position. Bugfix: reach helper should generate at least
two steps.
"""
robot = _build_reach_helper_test_robot()
cur_pos = robot.observe().joint_positions()
# calculate the small step that was bugged
control_delta = robot.get_control_time_delta()
max_speed = np.deg2rad(60)
max_change_per_step = max_speed * control_delta
offset_that_was_bugged = max_change_per_step - np.deg2rad(
0.01
) # offset needs to be below max_change_per_step
position_threshold = offset_that_was_bugged - np.deg2rad(
0.01
) # threshold needs to be below the offset
assert position_threshold < offset_that_was_bugged
target_pos = cur_pos.copy()
target_pos[0] += offset_that_was_bugged
ret_i = reach_helper.reach_position(
robot,
target_pos,
speed_units_per_sec=max_speed,
position_threshold=position_threshold,
)
assert ret_i.reached
|
StarcoderdataPython
|
1724427
|
#!/usr/bin/env python
#import modules
import csv
import string
import time
from neutronclient.v2_0 import client as neutronclient
from novaclient import client as novaclient
#write a new rule with a Cidr block reference
def elb_create(credentials, my_csv, external_pool, archi, tags=None):
#open csv file and read each row as dictionary
elb_file = open(my_csv, 'rb')
elb_reader = csv.DictReader(elb_file)
print "########################## Starting elbs creation ###############################"
#iterate through rows
if 'elb' not in archi:
archi['elb'] = {}
if 'public_ip' not in archi:
archi['public_ip'] = {}
for elb_dict in elb_reader:
neutron = neutronclient.Client(username=credentials['username'],password=<PASSWORD>['password'],tenant_name=archi['vpc'][elb_dict['vpc']],auth_url=credentials['auth_url'])
nova = novaclient.Client(2,credentials['username'],credentials['password'],archi['vpc'][elb_dict['vpc']],credentials['auth_url'])
subnets = []
for subnet in string.split(elb_dict['subnets'],sep='&'):
subnets.append(tuple([subnet,archi['subnet'][subnet]]))
ports = []
tmp = string.split(elb_dict['listeners'],sep='&')
for port in tmp:
ports.append(tuple(string.split(port,sep='$')))
instance_ips = []
for instance in string.split(elb_dict['servers'],sep='&'):
ip = nova.servers.ips(archi['instance'][instance]).itervalues().next()[0]['addr']
instance_ips.append(ip)
for subnet in subnets:
for port in ports:
pool_name = elb_dict['elb']+ '_' + port[0]
pool = neutron.create_pool({'pool': {'name':pool_name,'description':pool_name,'provider':'haproxy','subnet_id':subnet[1],'protocol':port[2].upper(),'lb_method':'ROUND_ROBIN','admin_state_up':True}})
archi['elb'] [elb_dict['elb']] = pool['pool']['id']
print ">> >> pool " + pool_name + " created"
health = neutron.create_health_monitor({'health_monitor': {'admin_state_up': True, 'delay': 10, 'max_retries': 3, 'timeout': 10, 'type': port[2].upper()}})
neutron.associate_health_monitor(pool['pool']['id'], {'health_monitor': {'id': health['health_monitor']['id']}})
print ">> >> health monitor associated to pool : " + pool_name
vip = neutron.create_vip({'vip':{'protocol': port[2].upper(), 'name': pool_name+'_vip', 'description': pool_name+'_vip', 'admin_state_up': True, 'subnet_id': subnet[1], 'pool_id': pool['pool']['id'], 'session_persistence': {'type': 'SOURCE_IP'}, 'protocol_port': port[1]}})
status = 'creating'
while(status != 'ACTIVE'):
print ">> >> VIP status : " + status
time.sleep(1)
status = neutron.show_vip(vip['vip']['id'])['vip']['status']
fip = neutron.create_floatingip({'floatingip': {'floating_network_id': archi['network'][external_pool],'fixed_ip_address': vip['vip']['address'], 'port_id':vip['vip']['port_id']}})
archi['public_ip'] [pool_name] = fip['floatingip']['floating_ip_address']
print ">> >> Floating IP associated to : " + pool_name
for ip in instance_ips:
status = 'creating'
while(status != 'ACTIVE'):
print ">> >> " + pool_name + " status : " + status
time.sleep(1)
status = neutron.show_pool(pool['pool']['id'])['pool']['status']
neutron.create_member({'member': {'protocol_port': port[1], 'weight': 1, 'admin_state_up': True,'pool_id': pool['pool']['id'], 'address': ip}})
print ">> >> member with ip : " + ip + " added to pool : " + pool_name
print "done creating elbs :) "
return archi
|
StarcoderdataPython
|
4812061
|
<gh_stars>0
__author__ = 'pulphix'
|
StarcoderdataPython
|
4810235
|
<reponame>tatevm/supermariopy
import pytest
import numpy as np
from supermariopy import plotting
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
class Test_Plotting:
@pytest.mark.mpl_image_compare
def test_add_colorbars_to_axes(self):
from supermariopy.plotting import add_colorbars_to_axes
from matplotlib import pyplot as plt
plt.subplot(121)
plt.imshow(np.arange(100).reshape((10, 10)))
plt.subplot(122)
plt.imshow(np.arange(100).reshape((10, 10)))
add_colorbars_to_axes()
plt.show()
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_set_all_axis_off(self):
from supermariopy.plotting import set_all_axis_off
from matplotlib import pyplot as plt
plt.subplot(121)
plt.imshow(np.arange(100).reshape((10, 10)))
plt.subplot(122)
plt.imshow(np.arange(100).reshape((10, 10)))
set_all_axis_off()
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_imageStack_2_subplots(self):
from supermariopy.plotting import imageStack_2_subplots
from matplotlib import pyplot as plt
images = np.stack([np.arange(100).reshape((10, 10))] * 3)
fig, axes = imageStack_2_subplots(images, axis=0)
return fig
@pytest.mark.mpl_image_compare
def test_change_linewidth(self):
from supermariopy.plotting import change_linewidth
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
x = np.arange(10)
y = np.arange(10)
ax.plot(x, y, x + 1, y, x - 1, y)
change_linewidth(ax, 3)
return plt.gcf()
@pytest.mark.mpl_image_compare
def test_change_fontsize(self):
from supermariopy.plotting import change_fontsize
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(np.arange(10), np.arange(10))
change_fontsize(ax, 5)
return plt.gcf()
def test_colorpalettes(self):
from supermariopy import plotting
name = "msoffice"
palette = plotting.get_palette(name, bytes=False)
assert all((palette >= 0.0).ravel()) and all((palette <= 1.0).ravel())
palette = plotting.get_palette(name, bytes=True)
assert all((palette >= 0.0).ravel()) and all((palette <= 255.0).ravel())
name = "navy"
palette = plotting.get_palette(name, bytes=False)
assert all((palette >= 0.0).ravel()) and all((palette <= 1.0).ravel())
palette = plotting.get_palette(name, bytes=True)
assert all((palette >= 0.0).ravel()) and all((palette <= 255.0).ravel())
def test_plot_canvas(self):
image = np.ones((128, 128, 3), dtype=np.uint8)
image_stack = np.stack([image * i for i in range(25)], axis=0)
from supermariopy import imageutils
canvas = imageutils.batch_to_canvas(image_stack)
from supermariopy import plotting
fig, ax = plotting.plot_canvas(canvas, 128, 128)
def test_plot_to_image(self):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.plot(np.arange(10), np.arange(10))
image = plotting.figure_to_image(fig)
assert image.shape == (1, 500, 500, 3)
@pytest.mark.mpl_image_compare
def test_plot_bars(self):
m = np.arange(20)
fig, ax = plotting.plot_bars(m)
return fig
@pytest.mark.mpl_image_compare
def test_overlay_boxes_without_labels(self):
fig, ax = plt.subplots(1, 1)
import skimage
image = skimage.data.astronaut()
bboxes = [np.array([0, 0, 50, 50])]
from supermariopy import plotting
overlay = plotting.overlay_boxes_without_labels(image, bboxes)
ax.imshow(overlay)
return fig
|
StarcoderdataPython
|
6445948
|
<reponame>ElHombreMorado8/sodp
# Generated by Django 3.1.12 on 2021-07-20 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reports', '0005_report_status'),
]
operations = [
migrations.AddField(
model_name='report',
name='thresholds',
field=models.JSONField(default='{}'),
),
]
|
StarcoderdataPython
|
3346668
|
<gh_stars>0
"""
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.1.98.dev'
|
StarcoderdataPython
|
9752495
|
import csv
import datetime
import gc
import os
import numpy as np
from benchmark.experiments.credit_scoring_experiment import run_credit_scoring_problem
from core.composer.optimisers.crossover import CrossoverTypesEnum
from core.composer.optimisers.gp_optimiser import GPChainOptimiserParameters
from core.composer.optimisers.mutation import MutationTypesEnum
from core.composer.optimisers.regularization import RegularizationTypesEnum
from core.composer.optimisers.selection import SelectionTypesEnum
from core.composer.optimisers.gp_optimiser import GeneticSchemeTypesEnum
from core.utils import project_root
from benchmark.experiments.viz import show_history_optimization_comparison
def write_header_to_csv(f):
f = f'../../../tmp/{f}'
if not os.path.isdir('../../../tmp'):
os.mkdir('../../../tmp')
with open(f, 'w', newline='') as file:
writer = csv.writer(file, quoting=csv.QUOTE_ALL)
writer.writerow(['t_opt', 'regular', 'AUC', 'n_models', 'n_layers'])
def add_result_to_csv(f, t_opt, regular, auc, n_models, n_layers):
f = f'../../../tmp/{f}'
with open(f, 'a', newline='') as file:
writer = csv.writer(file, quoting=csv.QUOTE_ALL)
writer.writerow([t_opt, regular, auc, n_models, n_layers])
def _reduced_history_best(history, generations, pop_size):
reduced = []
for gen in range(generations):
fitness_values = [abs(individ[1]) for individ in history[gen * pop_size: (gen + 1) * pop_size]]
best = max(fitness_values)
        print(f'Best in generation #{gen}: {best}')
reduced.append(best)
return reduced
if __name__ == '__main__':
max_amount_of_time = 400
step = 400
file_path_train = 'cases/data/scoring/scoring_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
file_path_test = 'cases/data/scoring/scoring_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
file_path_result = 'regular_exp.csv'
history_file = 'history.csv'
write_header_to_csv(file_path_result)
time_amount = step
crossover_types_set = [[CrossoverTypesEnum.subtree], [CrossoverTypesEnum.onepoint],
[CrossoverTypesEnum.subtree, CrossoverTypesEnum.onepoint], [CrossoverTypesEnum.none]]
history_gp = [[] for _ in range(len(crossover_types_set))]
pop_size = 20
iterations = 20
runs = 8
while time_amount <= max_amount_of_time:
for type_num, crossover_type in enumerate(crossover_types_set):
for run in range(runs):
gc.collect()
selection_types = [SelectionTypesEnum.tournament]
crossover_types = crossover_type
mutation_types = [MutationTypesEnum.simple, MutationTypesEnum.growth, MutationTypesEnum.reduce]
regular_type = RegularizationTypesEnum.decremental
genetic_scheme_type = GeneticSchemeTypesEnum.steady_state
optimiser_parameters = GPChainOptimiserParameters(selection_types=selection_types,
crossover_types=crossover_types,
mutation_types=mutation_types,
regularization_type=regular_type,
genetic_scheme_type=genetic_scheme_type)
roc_auc, chain, composer = run_credit_scoring_problem(full_path_train, full_path_test,
max_lead_time=datetime.timedelta(
minutes=time_amount),
gp_optimiser_params=optimiser_parameters,
pop_size=pop_size, generations=iterations)
is_regular = regular_type == RegularizationTypesEnum.decremental
add_result_to_csv(file_path_result, time_amount, is_regular, round(roc_auc, 4), len(chain.nodes),
chain.depth)
history_gp[type_num].append(composer.history)
time_amount += step
reduced_fitness_gp = [[] for _ in range(len(history_gp))]
for launch_num in range(len(history_gp)):
for history in history_gp[launch_num]:
fitness = _reduced_history_best(history, iterations, pop_size)
reduced_fitness_gp[launch_num].append(fitness)
np.save('reduced_fitness_gp', reduced_fitness_gp)
print(reduced_fitness_gp)
m = [_ * pop_size for _ in range(iterations)]
show_history_optimization_comparison(first=reduced_fitness_gp[0], second=reduced_fitness_gp[1],
third=reduced_fitness_gp[2], fourth = reduced_fitness_gp[3],
iterations=[_ for _ in range(iterations)],
label_first='Subtree crossover',
label_second='One-point crossover',
label_third='All crossover types', label_fourth = 'Without crossover')
|
StarcoderdataPython
|
3463622
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Copied and adapted from http://www.eurion.net/python-snippets/snippet/Threaded%20Server.html
# GPL license
import sys
import os
import socket
from threading import Thread
import time
import datetime
from server import *
application = tornado.web.Application([
(r'/ws', WSHandler),
])
PORT = 8888
MAX_PLANE_NUMBER = 20
def launch_server():
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(PORT)
WSHandler.subscribe(ClientThread.notifyRouting)
tornado.ioloop.IOLoop.instance().start()
client_socket = None
class ClientThread( Thread ):
"""Thread responsible for communicating with a plane """
@staticmethod
def notifyRouting(message):
"""Relay a routing message from the websocket to the plane"""
global client_socket
#print "PASSED FROM WSOCKET : " + message
client_socket.send(message)
def __init__( self, server_sock, client_sock, onLocalized, onPositionUpdated, onBeacon):
Thread.__init__( self )
self.client = client_sock
global client_socket
client_socket = client_sock
self.userPosition = {}
self.onLocalized = onLocalized
self.onPositionUpdated = onPositionUpdated
self.onBeacon = onBeacon
self.interrupt = False
self.last_keepalive = datetime.datetime.now()
self.check_client_connection()
def exit(self):
self.interrupt = True
def check_client_connection(self):
"""If no KEEPALIVE message are received from the last 7 seconds
we suppose that the connection between the plane and the server
is lost"""
#Check if client is still up (if he has sent a keepalive msg
#in the last 6 seconds)
diff = (datetime.datetime.now() - self.last_keepalive).seconds
if diff > 6:
print "Assuming that the client is disconnected... %d" % diff
wsSend("disconnected")
else:
threading.Timer(1, self.check_client_connection).start()
def run( self ):
"""This method reads the message sent by the plane
and triggers the corresponding action"""
strBuffer = ""
while True:
try:
strLine = (strBuffer + self.readline()).split('\n')
strBuffer = ""
for line in strLine:
line = line.lower()
#Plane position received
if line.startswith("[plane]"):
line = line[7:]
(planeID,lat,lon, angle) = line.split('\t');
lat = float(lat)
lon = float(lon)
self.onPositionUpdated(planeID,lat,lon, angle)
#Beacon received (Probe Request)
elif line.startswith("[beacon]"):
line = line[8:]
(user,lat,lon, pwr) = line.split('\t');
lat = float(lat)
lon = float(lon)
pwr = float(pwr)
#print "Threaded server received beacon !"
self.onBeacon(user,lat,lon,pwr)
#Plane has localized a user
elif line.startswith("[user]"):
line = line[6:]
(user,lat,lon) = line.split('\t');
lat = float(lat)
lon = float(lon)
if user in self.userPosition:
(oldLat, oldLon) = self.userPosition[user]
#We only update if this is a new guessed position
if oldLat == lat and oldLon == lon:
continue
self.userPosition[user] = (lat, lon)
self.onLocalized(user,lat,lon)
print "User %s localized at %.8f, %.8f" % (user,lat,lon)
elif line.startswith("KEEPALIVE"):
self.last_keepalive = datetime.datetime.now()
elif line.startswith("[routing]"):
(neLat, neLng, swLat, swLng) = line.split("\t")
else:
continue
except ValueError, err:
strBuffer += line
continue
self.client.close()
return
def readline( self ):
result = self.client.recv( 256 )
if( None != result ):
result = result
return result
class Server():
""" This is the server part of the TCP connection.
It is responsible for listening to new incoming connections
and communicate with the websocket to update the live GUI
"""
def __init__( self ):
os.system("fuser -k -n tcp " + str(PORT))
self.sock = None
self.thread_list = []
t = threading.Thread(target=launch_server)
t.start()
def addGuess(self,user,lat,lon):
wsSend("[u]%r\t%.8f\t%.8f" % (user,lat,lon))
def addBeacon(self,user,lat,lon,pwr):
print "Beacon received! : %s" % user
wsSend("[b]%r\t%.8f\t%.8f\t%f" % (user,lat,lon, pwr))
def addPlanePosition(self,planeID,lat,lon, angle):
print "Position received : %.8f %.8f %.8f" % (lat,lon,int(angle))
wsSend("[p]%r\t%.8f\t%.8f\t%d" % (planeID,lat,lon, int(angle)))
def run( self ):
connected = False
while not connected:
try:
self.sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
#self.sock.bind( ( '127.0.0.1', 8080 ) )
self.sock.bind( ( '192.168.100.92', 8080 ) )
self.sock.listen( MAX_PLANE_NUMBER )
print "Listening for plane communications"
connected = True
except socket.error, err:
print err
time.sleep(5)
connected = False
interrupted = False
while not interrupted:
try:
client = self.sock.accept()[0]
new_thread = ClientThread(self.sock, client, self.addGuess, self.addPlanePosition, self.addBeacon)
print 'Incoming plane connection'
self.thread_list.append( new_thread )
new_thread.start()
except KeyboardInterrupt:
print 'Ctrl+C pressed... Shutting Down'
for thread in self.thread_list:
thread.exit()
interrupted = True
except Exception, err:
print 'Exception caught: %s\nClosing...' % err
print 'Restarting server in 10 seconds...'
self.run()
self.sock.close()
def readline( self ):
return self.client.recv( 256 )
if "__main__" == __name__:
server = Server()
server.run()
|
StarcoderdataPython
|
1687869
|
<reponame>AamirAnwar/PythonLab
# For n disks, total 2^n – 1 moves are required.
def towersOfHanoi(n,from_stack,to_stack,aux_stack):
if n == 1:
print("Moved disc {} from {} to {}".format(n,from_stack, to_stack))
else:
towersOfHanoi(n-1, from_stack, aux_stack, to_stack)
print("Moved disc {} from {} to {}".format(n,from_stack, to_stack))
towersOfHanoi(n - 1, aux_stack, to_stack, from_stack)
def test():
n = 4
towersOfHanoi(n,'A', 'B', 'C')
test()
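# Editor's note (not in the original file): the 2^n - 1 bound quoted above can be
# checked by counting moves with the same recursion; count_moves is a helper added
# here only for illustration.
def count_moves(n):
    if n == 0:
        return 0
    # move n-1 discs aside, move the largest disc, then move the n-1 discs back on top
    return 2 * count_moves(n - 1) + 1
assert count_moves(4) == 2 ** 4 - 1  # 15 moves for the n=4 case exercised by test()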
|
StarcoderdataPython
|
4823421
|
"""
Uqbar Sphinx API generation extension.
Install by adding ``'uqbar.sphinx.api'`` to the ``extensions`` list in your
Sphinx configuration.
This extension provides the following configuration values which correspond to
the initialization arguments to the :py:class:`uqbar.apis.APIBuilder` class.
- ``uqbar_api_directory_name`` (default: ``api``)
- ``uqbar_api_document_empty_modules`` (default: ``False``)
- ``uqbar_api_document_private_members`` (default: ``False``)
- ``uqbar_api_document_private_modules`` (default: ``False``)
- ``uqbar_api_member_documenter_classes`` (default: ``None``)
- ``uqbar_api_module_documenter_class`` (default: ``None``)
- ``uqbar_api_root_documenter_class`` (default: ``None``)
- ``uqbar_api_source_paths`` (default: ``[]``)
- ``uqbar_api_title`` (default: ``API``)
reStructuredText source files will be generated for the modules given by
``uqbar_api_source_paths`` in the directory ``uqbar_api_directory_name``
relative to your Sphinx source directory.
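For example, a minimal ``conf.py`` might enable the extension like this (the
``my_package`` module name below is only a placeholder)::

    extensions = ["uqbar.sphinx.api"]
    uqbar_api_source_paths = ["my_package"]
    uqbar_api_title = "My Package API"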
"""
import importlib
import pathlib
import types
from typing import Any, Dict, List
from sphinx.util import logging
from sphinx.util.console import bold, darkgreen, darkred, purple # type: ignore
import uqbar.apis
logger = logging.getLogger(__name__)
def logger_func(string):
if string.startswith("preserved"):
return
elif string.startswith("rewrote"):
string = purple(string)
elif string.startswith("pruned"):
string = darkred(string)
elif string.startswith("wrote"):
string = darkgreen(string)
logger.info("{} {}".format(bold("[uqbar-api]"), string))
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
Builds out the ReST API source.
"""
config = app.builder.config
target_directory = (
pathlib.Path(app.builder.env.srcdir) / config.uqbar_api_directory_name
)
initial_source_paths: List[str] = []
source_paths = config.uqbar_api_source_paths
for source_path in source_paths:
if isinstance(source_path, types.ModuleType):
if hasattr(source_path, "__path__"):
initial_source_paths.extend(getattr(source_path, "__path__"))
else:
                initial_source_paths.append(source_path.__file__)
continue
try:
module = importlib.import_module(source_path)
if hasattr(module, "__path__"):
initial_source_paths.extend(getattr(module, "__path__"))
else:
initial_source_paths.append(module.__file__)
except ImportError:
initial_source_paths.append(source_path)
root_documenter_class = config.uqbar_api_root_documenter_class
if isinstance(root_documenter_class, str):
module_name, _, class_name = root_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
root_documenter_class = getattr(module, class_name)
module_documenter_class = config.uqbar_api_module_documenter_class
if isinstance(module_documenter_class, str):
module_name, _, class_name = module_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
module_documenter_class = getattr(module, class_name)
# Don't modify the list in Sphinx's config. Sphinx won't pickle class
# references, and strips them from the saved config. That leads to Sphinx
# believing that the config has changed on every run.
member_documenter_classes = list(config.uqbar_api_member_documenter_classes or [])
for i, member_documenter_class in enumerate(member_documenter_classes):
if isinstance(member_documenter_class, str):
module_name, _, class_name = member_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
member_documenter_classes[i] = getattr(module, class_name)
api_builder = uqbar.apis.APIBuilder(
initial_source_paths=initial_source_paths,
target_directory=target_directory,
document_empty_modules=config.uqbar_api_document_empty_modules,
document_private_members=config.uqbar_api_document_private_members,
document_private_modules=config.uqbar_api_document_private_modules,
member_documenter_classes=member_documenter_classes or None,
module_documenter_class=module_documenter_class,
omit_root=config.uqbar_api_omit_root,
root_documenter_class=root_documenter_class,
title=config.uqbar_api_title,
logger_func=logger_func,
)
api_builder()
def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.add_config_value("uqbar_api_directory_name", "api", "env")
app.add_config_value("uqbar_api_document_empty_modules", False, "env")
app.add_config_value("uqbar_api_document_private_members", False, "env")
app.add_config_value("uqbar_api_document_private_modules", False, "env")
app.add_config_value("uqbar_api_member_documenter_classes", None, "env")
app.add_config_value("uqbar_api_module_documenter_class", None, "env")
app.add_config_value("uqbar_api_omit_root", False, "env")
app.add_config_value("uqbar_api_root_documenter_class", None, "env")
app.add_config_value("uqbar_api_source_paths", None, "env")
app.add_config_value("uqbar_api_title", "API", "html")
app.connect("builder-inited", on_builder_inited)
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
StarcoderdataPython
|
6680375
|
# Generated by Django 2.1.10 on 2019-07-17 15:04
import SiteSpace.models
from django.db import migrations, models
import djongo.models.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('categories', djongo.models.fields.ArrayModelField(model_container=SiteSpace.models.Category)),
('description', models.TextField()),
('price', models.FloatField(default=0)),
('count', models.IntegerField(default=0)),
('variants', djongo.models.fields.ArrayModelField(model_container=SiteSpace.models.Variant)),
],
),
migrations.CreateModel(
name='Variant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variant_name', models.CharField(max_length=255)),
('variant_type', models.CharField(max_length=255)),
],
),
]
|
StarcoderdataPython
|
3295279
|
from typing import Sequence
from snuba.clickhouse.columns import Column, DateTime, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations, table_engines
from snuba.migrations.columns import MigrationModifiers as Modifiers
columns = [
# Kafka topic offset
Column("offset", UInt(64)),
Column("record_deleted", UInt(8)),
# PG columns
Column("project_id", UInt(64)),
Column("group_id", UInt(64)),
Column("date_added", DateTime(Modifiers(nullable=True))),
Column("user_id", UInt(64, Modifiers(nullable=True))),
Column("team_id", UInt(64, Modifiers(nullable=True))),
]
class Migration(migration.MultiStepMigration):
blocking = False
def forwards_local(self) -> Sequence[operations.Operation]:
return [
operations.CreateTable(
storage_set=StorageSetKey.EVENTS,
table_name="groupassignee_local",
columns=columns,
engine=table_engines.ReplacingMergeTree(
storage_set=StorageSetKey.EVENTS,
version_column="offset",
order_by="(project_id, group_id)",
unsharded=True,
),
)
]
def backwards_local(self) -> Sequence[operations.Operation]:
return [
operations.DropTable(
storage_set=StorageSetKey.EVENTS, table_name="groupassignee_local",
)
]
def forwards_dist(self) -> Sequence[operations.Operation]:
return [
operations.CreateTable(
storage_set=StorageSetKey.EVENTS,
table_name="groupassignee_dist",
columns=columns,
engine=table_engines.Distributed(
local_table_name="groupassignee_local", sharding_key=None,
),
)
]
def backwards_dist(self) -> Sequence[operations.Operation]:
return [
operations.DropTable(
storage_set=StorageSetKey.EVENTS, table_name="groupassignee_dist",
)
]
|
StarcoderdataPython
|
3203747
|
# blueberry - Yet another Python web framework.
#
# http://code.google.com/p/blueberrypy
#
# Copyright 2009 <NAME>
#
# Use and distribution licensed under the BSD license. See
# the LICENSE file for full text.
import sys
from webob.exc import HTTPNotFound
import blueberry
from blueberry import config
from blueberry.controllers.util import Request, Response
class WSGIApplication(object):
def __init__(self, debug=False):
self.debug = debug
self.controller_classes = {}
self.app_globals = config.get('blueberry.app_globals')
def resolve(self, environ, start_response):
match = environ['wsgiorg.routing_args'][1]
if not match:
return
environ['blueberry.routes_dict'] = match
controller = match.get('controller')
if not controller:
return
return self.find_controller(controller)
def find_controller(self, controller):
        if controller in self.controller_classes:
return self.controller_classes[controller]
classname = controller.split('/')[-1].capitalize() + 'Controller'
dir = config['routes.map'].directory
# controller.replace in case the controller is nested in multiple directories
full_mod_name = dir.replace('/', '.') + '.' + controller.replace('/', '.')
__import__(full_mod_name)
mycontroller = getattr(sys.modules[full_mod_name], classname)
self.controller_classes[controller] = mycontroller
return mycontroller
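    # Editor's worked example (assuming config['routes.map'].directory == 'myapp/controllers'):
    # a controller string 'admin/users' is imported as module 'myapp.controllers.admin.users',
    # the class 'UsersController' is looked up inside it, and the result is cached in
    # self.controller_classes for subsequent requests.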
def __call__(self, environ, start_response):
request = Request(environ)
response = Response()
# set the thread-local request/response objects
blueberry.request.set(request)
blueberry.response.set(response)
blueberry.app_globals.set(self.app_globals)
controller = self.resolve(environ, start_response)
if not controller:
return HTTPNotFound()(environ, start_response)
# instantiate if class
if hasattr(controller, '__bases__'):
controller = controller()
return controller(environ, start_response)
|
StarcoderdataPython
|
11369415
|
<reponame>Ravan339/LeetCode<gh_stars>1-10
# https://leetcode.com/problems/reverse-only-letters/
class Solution:
def reverseOnlyLetters(self, S):
"""
:type S: str
:rtype: str
"""
l, r = 0, len(S) - 1
S = list(S)
while l < r:
while l < r and not S[l].isalpha(): l += 1
while r > l and not S[r].isalpha(): r -= 1
S[l], S[r] = S[r], S[l]
l += 1
r -= 1
return ''.join(S)
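# Editor's usage sketch (not part of the original solution); the inputs below match the
# two-pointer behaviour described by the problem linked at the top of the file.
if __name__ == '__main__':
    s = Solution()
    assert s.reverseOnlyLetters("ab-cd") == "dc-ba"
    assert s.reverseOnlyLetters("a-bC-dEf-ghIj") == "j-Ih-gfE-dCba"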
|
StarcoderdataPython
|
98168
|
<reponame>bramvonk/blind-dialer<filename>src/sound.py
import sys
import pygame.mixer
from pygame.mixer import Sound
if sys.platform == "win32":
# workaround in windows: windows won't play sounds if pygame.init() has been called (which we need for joystick to
# work), but you can work around this bug by opening a window...
# see http://stackoverflow.com/questions/2936914/pygame-sounds-dont-play
pygame.init()
screen = pygame.display.set_mode((40, 40), 0, 32)
class SoundPlayer:
def __init__(self):
pygame.init()
pygame.mixer.init(44100, -16, 2)
self.currently_playing_sound = None
def play_sound_blocking(self, filename):
self.play_sound(filename)
self.wait_for_sound_playing_done()
def play_sound(self, filename):
print "playing %s" % filename
sound = Sound(filename)
channel = sound.play()
self.currently_playing_sound = {'sound': sound, 'channel': channel}
def is_sound_done_playing(self):
if self.currently_playing_sound is None:
print "have not played anything yet!"
return True
return not self.currently_playing_sound['channel'].get_busy()
def wait_for_sound_playing_done(self):
while not self.is_sound_done_playing():
pygame.time.delay(50)
def stop_playing(self):
if self.currently_playing_sound is None:
print "stop playing? We have not played anything yet!"
return
self.currently_playing_sound['sound'].stop()
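# Editor's usage sketch (not part of the original module): 'beep.wav' is a placeholder
# path; any short sound file pygame.mixer can decode would work here.
if __name__ == "__main__":
    player = SoundPlayer()
    player.play_sound_blocking("beep.wav")  # returns once playback has finished
    player.play_sound("beep.wav")           # starts playback without blocking
    player.wait_for_sound_playing_done()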
|
StarcoderdataPython
|
1796565
|
# encoding: utf-8
# ____ _ __ ___ _
# / ___|___ _ __ ___ _ __ _ _| |_ ___ _ __ \ \ / (_)___(_) ___ _ __
# | | / _ \| '_ ` _ \| '_ \| | | | __/ _ \ '__| \ \ / /| / __| |/ _ \| '_ \
# | |__| (_) | | | | | | |_) | |_| | || __/ | \ V / | \__ \ | (_) | | | |
# \____\___/|_| |_| |_| .__/ \__,_|\__\___|_| \_/ |_|___/_|\___/|_| |_|
# |_|
# _ ____ ____ _ _____ _
# / \ | _ \| _ \ __ __ / ||___ | |_
# / _ \ | |_) | |_) | \ \ / / | | / /| __|
# / ___ \| __/| __/ \ V / | |_ / / | |_
# /_/ \_\_| |_| \_/ |_(_)_/ \__|
#
# Computer Vision APP
# Created by @flowese version: 1.7 for terminal.
# Real-time object recognition for video sources.
# File: main.py
# ---> TODO: implement gender and face detection in #MEDIA MANAGEMENT*
# LIBRARIES
from imutils.video import FileVideoStream # Video stream handling
import numpy as np # Vector and matrix support
import imutils
import cv2
import time
import os, random
from cvlib.object_detection import draw_bbox # Object detection
import cvlib as cv # Object detection
from inputimeout import inputimeout, TimeoutOccurred
# Style functions in the estilos.py file
from config.estilos import convertoasci, mostrar_cargando, imprtextos, limpiarterminal
# General functions in the funciones.py file
from config.funciones import importarjson, impdiccionario, pathrevision
# STARTUP ANIMATIONS
# - Clear the terminal before running the app
limpiarterminal()
# - Startup messages
titulo_inicio = convertoasci("Computer Vision APP v 1.7t")
print(titulo_inicio)
# Function that prints the intro text
imprtextos('inicio')
# - Loading animation bar
mostrar_cargando()
# Waits 0.5 and clears the screen after the intro.
print("\nInicio completado.")
limpiarterminal()
# - Print the sources title as ASCII art
titulo_origenes = convertoasci("ORIGENES")
print(titulo_origenes)
print('~ Las fuentes son de internet y no está garantizado que funcionen siempre. \n')
# - Function that prints every name in the origins dictionary
source_origenes = 'config\\origenes.json'
# - Function that resolves the file path on Windows, Mac or Linux
tratamientopath = pathrevision(source_origenes)
origenes = importarjson(tratamientopath)
impdiccionario(origenes)
# - Terminal input asking the user to pick the source (with timeout)
# - If no input is given, a random one is selected by default.
try:
origen_def = inputimeout(
prompt='\nEscribe el NOMBRE del orígen: ', timeout=10)
while origen_def != origenes:
if origen_def in origenes:
limpiarterminal()
print('\n\nOrígen seleccionado: ', origen_def, '\n\n')
time.sleep(2)
            # - Look up the selected entry in the origins dictionary
origen_in = origenes[origen_def]
time.sleep(1)
limpiarterminal()
break
else:
limpiarterminal()
print(titulo_origenes)
print('~ Las fuentes son de internet y no está garantizado que funcionen siempre. \n')
impdiccionario(origenes)
print ('\nERROR: El nombre que has introducido no existe en la lista de orígenes.')
origen_def = inputimeout(
prompt='\nEscribe el NOMBRE del orígen: ', timeout=10)
except TimeoutOccurred:
    origen_def = random.choice(list(origenes.keys())) # Pick a random key from the origins dictionary
origen_in = origenes[origen_def]
print('\n\n---> AL no intriducir ningún valor se ha seleccionado automáticamente', origen_def+'.')
time.sleep(3)
limpiarterminal()
# - Print the model title as ASCII art
titulo_modelo = convertoasci("MODELO DE I.A.")
print(titulo_modelo)
# Function that prints the model text
imprtextos('modelo')
list(origenes)
# - Print all the names in the models dictionary
# - Function that prints every name in the models dictionary
source_modelos = 'config\\modelos.json'
# - Function that resolves the file path on Windows, Mac or Linux
tratamientopath = pathrevision(source_modelos)
modelos = importarjson(tratamientopath)
impdiccionario(modelos)
# - Terminal input asking the user to pick the yolo model (with timeout)
try:
modelo_def = inputimeout(
prompt='\nEscribe el NOMBRE del modelo: ', timeout=3)
if not modelo_def:
modelo_def = 'Preciso'
except TimeoutOccurred:
modelo_def = 'Preciso'
print('\n--> No se ha introducido un valor. Selección automática activada.')
time.sleep(1)
# - Read the data origin defined by the user's previous input
modelo_in = modelos[modelo_def]
# - Check the selected model and print the notice describing it
print('\n·Modelo de computación seleccionado:', modelo_def)
if modelo_def == 'Rapido':
print('~ Recuerda que este modelo es más rápido pero menos preciso.')
else:
print('~ Recuerda que este modelo es más lento pero más preciso.')
# ON-SCREEN STYLE SETTINGS
tipofuente = cv2.FONT_HERSHEY_SIMPLEX
tamanofuente = 0.8
grosorfuente = 1
# - Cars
colorfuente_coches = 0, 0, 255 # BRG
postexto_coches = 40, 50
colorfuente_camiones = 0, 0, 255 # BRG
postexto_camiones = 40, 80
# - Humans
colorfuente_personas = 255, 0, 0 # BRG
postexto_personas = 40, 120
## Function not implemented yet.
colorfuente_hombres = 255, 0, 0 # BRG
postexto_hombres = 40, 160
## Function not implemented yet.
colorfuente_mujeres = 255, 0, 0 # BRG
postexto_mujeres = 40, 200
# MEDIA MANAGEMENT
# Starting the video source
fvs = FileVideoStream(origen_in).start()
print('\nProcesando fuente multimedia...')
time.sleep(1)
print('\nMostrando visualizador...\n')
# - Handling the video frame by frame
while fvs.more():
    # - Read the video source
    videoproceso = fvs.read()
    # - Resize
    videoproceso = imutils.resize(videoproceso, width=1280)
    # - Convert the colour to black and white
    videoproceso = cv2.cvtColor(videoproceso, cv2.COLOR_BGR2GRAY)
    # - Matrix
    videoproceso = np.dstack([videoproceso, videoproceso, videoproceso])
    # COMPUTER VISION DETECTORS
    # - Object detector
    bbox, label, conf = cv.detect_common_objects(videoproceso, model=modelo_in)
    # - Face detector
    #faces, confidences = cv.detect_face(videoproceso)
    # - Gender detector
    #label, confidence = cv.detect_gender(videoproceso)
    # - Clear the terminal screen on every frame
    os.system('cls' if os.name == 'nt' else "printf '\033c'")
    # - Console messages while capturing
titulo_capturando = convertoasci("Computer Vision APP v 1.7t")
print(titulo_capturando)
    # Function that prints the console text
imprtextos('consola')
print('·Modelo:', modelo_def, ' ·Fuente:', origen_def, ' \n')
print('Para cerrar la ventana de visualización pulsando la tecla "Q" o con "Control+C en el terminal."')
    # Processing the display layout
    out = draw_bbox(videoproceso, bbox, label, conf)
    # ON-SCREEN COUNTERS FOR THE STREAM
    # - Car counter
out = cv2.putText(videoproceso, 'Coches: '+str(label.count('car')), (postexto_coches),
tipofuente, tamanofuente, (colorfuente_coches), grosorfuente, cv2.LINE_AA)
    # - Truck counter
out = cv2.putText(videoproceso, 'Camiones: '+str(label.count('truck')), (postexto_camiones),
tipofuente, tamanofuente, (colorfuente_camiones), grosorfuente, cv2.LINE_AA)
    # - People counter
out = cv2.putText(videoproceso, 'Personas: '+str(label.count('person')), (postexto_personas),
tipofuente, tamanofuente, (colorfuente_personas), grosorfuente, cv2.LINE_AA)
    # Pending implementation - Gender detection
#out = cv2.putText(videoproceso,'Hombres: '+str(label.count('male')),(postexto_hombres),tipofuente,tamanofuente,(colorfuente_hombres),grosorfuente,cv2.LINE_AA)
#out = cv2.putText(videoproceso,'Mujeres: '+str(label.count('female')),(postexto_mujeres),tipofuente,tamanofuente,(colorfuente_mujeres),grosorfuente,cv2.LINE_AA)
    cv2.imshow(f'(CVAPP) Computer Vision APP {origen_def} - Powered by @flowese',
               out)  # Window title
    if cv2.waitKey(10) & 0xFF == ord('q'):  # Press the Q key to quit
break
# CLOSE WINDOWS
imprtextos('final')
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1612317
|
<reponame>rg3915/orcamentos
# Generated by Django 2.1.3 on 2018-12-14 23:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0008_companycontact'),
]
operations = [
migrations.RemoveField(
model_name='employee',
name='birthday',
),
migrations.RemoveField(
model_name='person',
name='birthday',
),
]
|
StarcoderdataPython
|
3481249
|
<reponame>yfsong0709/RA-GCNv1<filename>src/mask.py
import torch
from torch import nn
from torch.nn import functional as F
class Mask(nn.Module):
def __init__(self, model_stream, module):
super(Mask, self).__init__()
self.model_stream = model_stream
self.module = module
    def forward(self, weight, feature):
        # Compute one class activation map (CAM) per stream.
        result = []
        for i in range(self.model_stream):
            temp_result = self.CAM(weight[i], feature[i])
            result.append(temp_result)
        # Every stream after the first is masked by the product of the CAMs of
        # all preceding streams, pushing it to attend to different positions.
        for i in range(1, self.model_stream):
            for j in range(i):
                if j == 0:
                    mask = result[j]
                else:
                    mask *= result[j]
            mask = torch.cat([mask.unsqueeze(1)] * 4, dim=1)
            self.module.mask_stream[i].data = mask.view(-1).detach()
    def CAM(self, weight, feature):
        # Class Activation Map: weight each feature channel by the classifier
        # weights, sum over channels and average over the batch, then invert the
        # activations (1 - softmax) so strongly activated positions get low mask
        # values, zeroing out everything below the 0.1 threshold.
        N, C = weight.shape
        weight = weight.view(N, C, 1, 1, 1).expand_as(feature)
        result = (weight * feature).sum(dim=1)
        result = result.mean(dim=0)
        T, V, M = result.shape
        result = result.view(-1)
        result = 1 - F.softmax(result, dim=0)
        result = F.threshold(result, 0.1, 0)
        result = result.view(T, V, M)
        return result
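# Minimal shape sketch for CAM (the dimensions below are assumed for illustration only;
# in RA-GCN the weights and features come from the trained per-stream classifiers):
if __name__ == '__main__':
    N, C, T, V, M = 2, 64, 30, 25, 2           # batch, channels, frames, joints, persons (assumed)
    dummy = Mask(model_stream=1, module=None)  # the module argument is not used by CAM itself
    cam = dummy.CAM(torch.randn(N, C), torch.randn(N, C, T, V, M))
    print(cam.shape)                           # expected: torch.Size([30, 25, 2])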
|
StarcoderdataPython
|
1956396
|
<filename>Data Structures and Algorithms/LeetCode Algo Solutions/EASY DIFFICULTY PROBLEMS/DetermineColourOfChessSquare.py
# DETERMINE COLOR OF A CHESSBOARD SQUARE LEETCODE SOLUTION:
class Solution(object):
def squareIsWhite(self, coordinates):
# creating a dictionary with the correct amount of letters associated with their respective numbers.
my_dictionary = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8}
# creating variables and tying them to the two parts present in coordinates.
letter, num = coordinates
# creating an if-statement to check if the sum of the letter and number is even.
if (my_dictionary[letter] + int(num)) % 2 == 0:
# returning 'False' if the sum of the letter and number is even.
return False
# returning 'True' if the sum is odd and not even.
return True
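# Quick sanity check (hypothetical coordinates, not part of the LeetCode template):
if __name__ == '__main__':
    solution = Solution()
    print(solution.squareIsWhite('a1'))  # 1 + 1 = 2 is even -> False (black square)
    print(solution.squareIsWhite('h3'))  # 8 + 3 = 11 is odd -> True (white square)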
|
StarcoderdataPython
|
6624257
|
"""
Maximum Sum BST in Binary Tree
Given a binary tree root, return the maximum sum of all keys of any sub-tree which is also a Binary Search Tree (BST).
Assume a BST is defined as follows:
The left subtree of a node contains only nodes with keys less than the node's key.
The right subtree of a node contains only nodes with keys greater than the node's key.
Both the left and right subtrees must also be binary search trees.
Example 1:
Input: root = [1,4,3,2,4,2,5,null,null,null,null,null,null,4,6]
Output: 20
Explanation: Maximum sum in a valid Binary search tree is obtained in root node with key equal to 3.
Example 2:
Input: root = [4,3,null,1,2]
Output: 2
Explanation: Maximum sum in a valid Binary search tree is obtained in a single root node with key equal to 2.
Example 3:
Input: root = [-4,-2,-5]
Output: 0
Explanation: All values are negatives. Return an empty BST.
Example 4:
Input: root = [2,1,3]
Output: 6
Example 5:
Input: root = [5,4,8,3,null,6,3]
Output: 7
Constraints:
The number of nodes in the tree is in the range [1, 4 * 10^4].
-4 * 10^4 <= Node.val <= 4 * 10^4
"""
"""
Most solutions discussed here solve this using post-order traversal.
I first tried a pre-order traversal (using the floor-and-ceiling method for checking BST validity)
and got confused.
For this problem we need to build the solution bottom-up, i.e. from the leaf nodes towards the root:
only then can we check whether the current sub-tree is a valid BST and update the maximum sum.
That makes post-order the natural way to traverse the tree.
Here's a solution using this idea:
"""
class Solution:
def __init__(self):
self.maxSum = 0
def maxSumBST(self, root):
def postOrderTraverse(node):
"""
Perform post order traversal of tree
to determine sub trees which are BSTs
and calculate maximum sum of its elements.
Returns:
isValidBST: True if valid BST else False
currentSum: sum of current sub tree. None
if not a valid BST.
currentMin: minimum value of current sub tree
currentMax: maximum value of current sub tree
"""
if not node:
return True, 0, float('inf'), float('-inf') # Empty sub tree
lValidBST, lSum, lMin, lMax = postOrderTraverse(node.left)
rValidBST, rSum, rMin, rMax = postOrderTraverse(node.right)
# Check if current subtree is a valid BST
if lValidBST and rValidBST and lMax < node.val < rMin:
currSum = lSum + rSum + node.val
currMin = lMin if lMin != float('inf') else node.val
currMax = rMax if rMax != float('-inf') else node.val
self.maxSum = max(self.maxSum, currSum) # update max sum
return True, currSum, currMin, currMax
return False, None, None, None
postOrderTraverse(root)
return self.maxSum
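# Quick check against Example 4 from the problem statement ([2,1,3] -> 6).
# A minimal TreeNode is defined here only for this sketch; LeetCode supplies its own.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = TreeNode(2, TreeNode(1), TreeNode(3))
    print(Solution().maxSumBST(root))  # expected: 6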
|
StarcoderdataPython
|
3548889
|
<reponame>sandernaert/brat
#---------------------------------------------------------------
# PyNLPl - FoLiA Format Module
# by <NAME>, ILK, Universiteit van Tilburg
# http://ilk.uvt.nl/~mvgompel
# proycon AT anaproy DOT nl
#
# Module for reading, editing and writing FoLiA XML
#
# Licensed under GPLv3
#
#----------------------------------------------------------------
from lxml import etree as ElementTree
LXE=True
#import xml.etree.cElementTree as ElementTree
#LXE = False
from lxml.builder import E, ElementMaker
from sys import stderr
from StringIO import StringIO
from copy import copy, deepcopy
from pynlpl.formats.imdi import RELAXNG_IMDI
from datetime import datetime
#from dateutil.parser import parse as parse_datetime
import pynlpl.algorithms
import inspect
import glob
import os
import re
import urllib
import multiprocessing
import threading
FOLIAVERSION = '0.9.0' #0.9 pre
LIBVERSION = '0.9.0.29' #== FoLiA version + library revision
NSFOLIA = "http://ilk.uvt.nl/folia"
NSDCOI = "http://lands.let.ru.nl/projects/d-coi/ns/1.0"
ILLEGAL_UNICODE_CONTROL_CHARACTERS = {} #XML does not like unicode control characters
for ordinal in range(0x20):
if chr(ordinal) not in '\t\r\n':
ILLEGAL_UNICODE_CONTROL_CHARACTERS[ordinal] = None
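# Illustrative use of the table above (unicode.translate maps the listed ordinals to None):
#   u"foo\x01bar".translate(ILLEGAL_UNICODE_CONTROL_CHARACTERS) -> u"foobar"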
class Mode:
MEMORY = 0 #The entire FoLiA structure will be loaded into memory. This is the default and is required for any kind of document manipulation.
XPATH = 1 #The full XML structure will be loaded into memory, but conversion to FoLiA objects occurs only upon querying. The full power of XPath is available.
    ITERATIVE = 2 #XML elements are loaded and converted to FoLiA objects iteratively on an as-needed basis. A subset of XPath is supported. (not implemented, obsolete)
class AnnotatorType:
UNSET = 0
AUTO = 1
MANUAL = 2
class Attrib:
ID, CLASS, ANNOTATOR, CONFIDENCE, N, DATETIME, SETONLY = range(7) #BEGINTIME, ENDTIME, SRC, SRCOFFSET, SPEAKER = range(12) #for later
Attrib.ALL = (Attrib.ID,Attrib.CLASS,Attrib.ANNOTATOR, Attrib.N, Attrib.CONFIDENCE, Attrib.DATETIME)
class AnnotationType:
TEXT, TOKEN, DIVISION, PARAGRAPH, LIST, FIGURE, WHITESPACE, LINEBREAK, SENTENCE, POS, LEMMA, DOMAIN, SENSE, SYNTAX, CHUNKING, ENTITY, CORRECTION, SUGGESTION, ERRORDETECTION, ALTERNATIVE, PHON, SUBJECTIVITY, MORPHOLOGICAL, EVENT, DEPENDENCY, TIMESEGMENT, GAP, ALIGNMENT, COMPLEXALIGNMENT, COREFERENCE, SEMROLE, METRIC, LANG = range(33)
#Alternative is a special one, not declared and not used except for ID generation
class TextCorrectionLevel: #THIS IS NOW COMPLETELY OBSOLETE AND ONLY HERE FOR BACKWARD COMPATIBILITY!
CORRECTED, UNCORRECTED, ORIGINAL, INLINE = range(4)
class MetaDataType:
NATIVE, CMDI, IMDI = range(3)
class NoSuchAnnotation(Exception):
"""Exception raised when the requested type of annotation does not exist for the selected element"""
pass
class NoSuchText(Exception):
"""Exception raised when the requestion type of text content does not exist for the selected element"""
pass
class DuplicateAnnotationError(Exception):
pass
class DuplicateIDError(Exception):
"""Exception raised when an identifier that is already in use is assigned again to another element"""
pass
class NoDefaultError(Exception):
pass
class NoDescription(Exception):
pass
class UnresolvableTextContent(Exception):
pass
class MalformedXMLError(Exception):
pass
class DeepValidationError(Exception):
pass
class SetDefinitionError(DeepValidationError):
pass
class ModeError(Exception):
pass
def parsecommonarguments(object, doc, annotationtype, required, allowed, **kwargs):
"""Internal function, parses common FoLiA attributes and sets up the instance accordingly"""
object.doc = doc #The FoLiA root document
supported = required + allowed
if 'generate_id_in' in kwargs:
kwargs['id'] = kwargs['generate_id_in'].generate_id(object.__class__)
del kwargs['generate_id_in']
if 'id' in kwargs:
if not Attrib.ID in supported:
raise ValueError("ID is not supported")
isncname(kwargs['id'])
object.id = kwargs['id']
del kwargs['id']
elif Attrib.ID in required:
raise ValueError("ID is required for " + object.__class__.__name__)
else:
object.id = None
if 'set' in kwargs:
if not Attrib.CLASS in supported and not Attrib.SETONLY in supported:
raise ValueError("Set is not supported")
object.set = kwargs['set']
del kwargs['set']
if doc and (not (annotationtype in doc.annotationdefaults) or not (object.set in doc.annotationdefaults[annotationtype])):
if doc.autodeclare:
doc.annotations.append( (annotationtype, object.set ) )
doc.annotationdefaults[annotationtype] = {object.set: {} }
else:
raise ValueError("Set '" + object.set + "' is used for " + object.__class__.__name__ + ", but has no declaration!")
elif annotationtype in doc.annotationdefaults and len(doc.annotationdefaults[annotationtype]) == 1:
object.set = doc.annotationdefaults[annotationtype].keys()[0]
elif Attrib.CLASS in required or Attrib.SETONLY in required:
raise ValueError("Set is required for " + object.__class__.__name__)
else:
object.set = None
if 'class' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported for " + object.__class__.__name__)
object.cls = kwargs['class']
del kwargs['class']
elif 'cls' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported on " + object.__class__.__name__)
object.cls = kwargs['cls']
del kwargs['cls']
elif Attrib.CLASS in required:
raise ValueError("Class is required for " + object.__class__.__name__)
else:
object.cls = None
if object.cls and not object.set:
if doc and doc.autodeclare:
if not (annotationtype, 'undefined') in doc.annotations:
doc.annotations.append( (annotationtype, 'undefined') )
doc.annotationdefaults[annotationtype] = {'undefined': {} }
object.set = 'undefined'
else:
raise ValueError("Set is required for " + object.__class__.__name__ + ". Class '" + object.cls + "' assigned without set.")
if 'annotator' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotator is not supported for " + object.__class__.__name__)
object.annotator = kwargs['annotator']
del kwargs['annotator']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotator' in doc.annotationdefaults[annotationtype][object.set]:
object.annotator = doc.annotationdefaults[annotationtype][object.set]['annotator']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotator is required for " + object.__class__.__name__)
else:
object.annotator = None
if 'annotatortype' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotatortype is not supported for " + object.__class__.__name__)
if kwargs['annotatortype'] == 'auto' or kwargs['annotatortype'] == AnnotatorType.AUTO:
object.annotatortype = AnnotatorType.AUTO
elif kwargs['annotatortype'] == 'manual' or kwargs['annotatortype'] == AnnotatorType.MANUAL:
object.annotatortype = AnnotatorType.MANUAL
else:
raise ValueError("annotatortype must be 'auto' or 'manual', got " + repr(kwargs['annotatortype']))
del kwargs['annotatortype']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotatortype' in doc.annotationdefaults[annotationtype][object.set]:
object.annotatortype = doc.annotationdefaults[annotationtype][object.set]['annotatortype']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotatortype is required for " + object.__class__.__name__)
else:
object.annotatortype = None
if 'confidence' in kwargs:
if not Attrib.CONFIDENCE in supported:
raise ValueError("Confidence is not supported")
try:
object.confidence = float(kwargs['confidence'])
assert (object.confidence >= 0.0 and object.confidence <= 1.0)
except:
raise ValueError("Confidence must be a floating point number between 0 and 1, got " + repr(kwargs['confidence']) )
del kwargs['confidence']
elif Attrib.CONFIDENCE in required:
raise ValueError("Confidence is required for " + object.__class__.__name__)
else:
object.confidence = None
if 'n' in kwargs:
if not Attrib.N in supported:
raise ValueError("N is not supported")
object.n = kwargs['n']
del kwargs['n']
elif Attrib.N in required:
raise ValueError("N is required")
else:
object.n = None
if 'datetime' in kwargs:
if not Attrib.DATETIME in supported:
raise ValueError("Datetime is not supported")
if isinstance(kwargs['datetime'], datetime):
object.datetime = kwargs['datetime']
else:
#try:
object.datetime = parse_datetime(kwargs['datetime'])
#except:
# raise ValueError("Unable to parse datetime: " + str(repr(kwargs['datetime'])))
del kwargs['datetime']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'datetime' in doc.annotationdefaults[annotationtype][object.set]:
object.datetime = doc.annotationdefaults[annotationtype][object.set]['datetime']
elif Attrib.DATETIME in required:
raise ValueError("Datetime is required")
else:
object.datetime = None
if 'auth' in kwargs:
object.auth = bool(kwargs['auth'])
del kwargs['auth']
else:
object.auth = True
if 'text' in kwargs:
object.settext(kwargs['text'])
del kwargs['text']
if 'processedtext' in kwargs:
object.settext(kwargs['text'], TextCorrectionLevel.PROCESSED)
del kwargs['processedtext']
elif 'correctedtext' in kwargs: #backwards compatible alias
object.settext(kwargs['text'], TextCorrectionLevel.PROCESSED)
del kwargs['correctedtext']
if 'originaltext' in kwargs:
object.settext(kwargs['text'], TextCorrectionLevel.ORIGINAL)
del kwargs['originaltext']
if 'uncorrectedtext' in kwargs: #backwards compatible alias
object.settext(kwargs['text'], TextCorrectionLevel.ORIGINAL)
del kwargs['uncorrectedtext']
if doc and doc.debug >= 2:
print >>stderr, " @id = ", repr(object.id)
print >>stderr, " @set = ", repr(object.set)
print >>stderr, " @class = ", repr(object.cls)
print >>stderr, " @annotator = ", repr(object.annotator)
print >>stderr, " @annotatortype= ", repr(object.annotatortype)
print >>stderr, " @confidence = ", repr(object.confidence)
print >>stderr, " @n = ", repr(object.n)
print >>stderr, " @datetime = ", repr(object.datetime)
#set index
if object.id and doc:
if object.id in doc.index:
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:" + object.id
raise DuplicateIDError("Duplicate ID not permitted: " + object.id)
else:
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Adding to index: " + object.id
doc.index[object.id] = object
#Parse feature attributes (shortcut for feature specification for some elements)
for c in object.ACCEPTED_DATA:
if issubclass(c, Feature):
if c.SUBSET in kwargs and kwargs[c.SUBSET]:
object.append(c,cls=kwargs[c.SUBSET])
del kwargs[c.SUBSET]
return kwargs
def parse_datetime(s): #source: http://stackoverflow.com/questions/2211362/how-to-parse-xsddatetime-format
"""Returns (datetime, tz offset in minutes) or (None, None)."""
m = re.match(""" ^
(?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2})
T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2})
(?P<microsecond>\.[0-9]{1,6})?
(?P<tz>
Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2})
)?
$ """, s, re.X)
if m is not None:
values = m.groupdict()
if values["tz"] in ("Z", None):
tz = 0
else:
tz = int(values["tz_hr"]) * 60 + int(values["tz_min"])
if values["microsecond"] is None:
values["microsecond"] = 0
else:
values["microsecond"] = values["microsecond"][1:]
values["microsecond"] += "0" * (6 - len(values["microsecond"]))
values = dict((k, int(v)) for k, v in values.iteritems()
if not k.startswith("tz"))
try:
return datetime(**values) # , tz
except ValueError:
pass
return None
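# Illustrative example (assumed input string):
#   parse_datetime("2012-03-01T14:30:00Z") -> datetime.datetime(2012, 3, 1, 14, 30)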
class AbstractElement(object):
"""This is the abstract base class from which all FoLiA elements are derived. This class should not be instantiated directly, but can useful if you want to check if a variable is an instance of any FoLiA element: isinstance(x, AbstractElement). It contains methods and variables also commonly inherited."""
REQUIRED_ATTRIBS = () #List of required attributes (Members from the Attrib class)
OPTIONAL_ATTRIBS = () #List of optional attributes (Members from the Attrib class)
ACCEPTED_DATA = () #List of accepted data, classes inherited from AbstractElement
ANNOTATIONTYPE = None #Annotation type (Member of AnnotationType class)
XMLTAG = None #XML-tag associated with this element
OCCURRENCES = 0 #Number of times this element may occur in its parent (0=unlimited, default=0)
OCCURRENCESPERSET = 1 #Number of times this element may occur per set (0=unlimited, default=1)
TEXTDELIMITER = None #Delimiter to use when dynamically gathering text from child elements
PRINTABLE = False #Is this element printable (aka, can its text method be called?)
AUTH = True #Authoritative by default. Elements the parser should skip on normal queries are non-authoritative (such as original, alternative)
def __init__(self, doc, *args, **kwargs):
if not isinstance(doc, Document) and not doc is None:
raise Exception("Expected first parameter to be instance of Document, got " + str(type(doc)))
self.doc = doc
self.parent = None
self.data = []
kwargs = parsecommonarguments(self, doc, self.ANNOTATIONTYPE, self.REQUIRED_ATTRIBS, self.OPTIONAL_ATTRIBS,**kwargs)
for child in args:
self.append(child)
if 'contents' in kwargs:
if isinstance(kwargs['contents'], list):
for child in kwargs['contents']:
self.append(child)
else:
self.append(kwargs['contents'])
del kwargs['contents']
for key in kwargs:
raise ValueError("Parameter '" + key + "' not supported by " + self.__class__.__name__)
#def __del__(self):
# if self.doc and self.doc.debug:
# print >>stderr, "[PyNLPl FoLiA DEBUG] Removing " + repr(self)
# for child in self.data:
# del child
# self.doc = None
# self.parent = None
# del self.data
def description(self):
"""Obtain the description associated with the element, will raise NoDescription if there is none"""
for e in self:
if isinstance(e, Description):
return e.value
raise NoDescription
def textcontent(self, cls='current'):
"""Get the text explicitly associated with this element (of the specified class).
Returns the TextContent instance rather than the actual text. Raises NoSuchText exception if
not found.
Unlike text(), this method does not recurse into child elements (with the sole exception of the Correction/New element), and it returns the TextContent instance rather than the actual text!
"""
if not self.PRINTABLE: #only printable elements can hold text
raise NoSuchText
#Find explicit text content (same class)
for e in self:
if isinstance(e, TextContent):
if e.cls == cls:
return e
elif isinstance(e, Correction):
try:
return e.textcontent(cls)
except NoSuchText:
pass
raise NoSuchText
def stricttext(self, cls='current'):
"""Get the text strictly associated with this element (of the specified class). Does not recurse into children, with the sole exception of Corection/New"""
return self.textcontent(cls).value
def toktext(self,cls='current'):
"""Alias for text with retaintokenisation=True"""
return self.text(cls,True)
def text(self, cls='current', retaintokenisation=False, previousdelimiter=""):
"""Get the text associated with this element (of the specified class), will always be a unicode instance.
If no text is directly associated with the element, it will be obtained from the children. If that doesn't result
in any text either, a NoSuchText exception will be raised.
If retaintokenisation is True, the space attribute on words will be ignored, otherwise it will be adhered to and text will be detokenised as much as possible.
"""
if not self.PRINTABLE: #only printable elements can hold text
raise NoSuchText
#print >>stderr, repr(self) + '.text()'
if self.hastext(cls):
s = self.textcontent(cls).value
#print >>stderr, "text content: " + s
else:
#Not found, descend into children
delimiter = ""
s = ""
for e in self:
if e.PRINTABLE and not isinstance(e, TextContent):
try:
s += e.text(cls,retaintokenisation, delimiter)
delimiter = e.gettextdelimiter(retaintokenisation)
                        #delimiter will be buffered and only printed upon the next iteration; this prevents the delimiter being output at the end of a sequence
#print >>stderr, "Delimiter for " + repr(e) + ": " + repr(delimiter)
except NoSuchText:
continue
s = s.strip(' \r\n\t')
if s and previousdelimiter:
#print >>stderr, "Outputting previous delimiter: " + repr(previousdelimiter)
return previousdelimiter + s
elif s:
return s
else:
#No text found at all :`(
raise NoSuchText
def originaltext(self):
"""Alias for retrieving the original uncorrect text"""
return self.text('original')
def gettextdelimiter(self, retaintokenisation=False):
"""May return a customised text delimiter instead of the default for this class."""
if self.TEXTDELIMITER is None:
delimiter = ""
            #no text delimiter of its own, recurse into children to inherit the delimiter
for child in reversed(self):
return child.gettextdelimiter(retaintokenisation)
return delimiter
else:
return self.TEXTDELIMITER
def feat(self,subset):
"""Obtain the feature value of the specific subset. If a feature occurs multiple times, the values will be returned in a list.
Example::
sense = word.annotation(folia.Sense)
synset = sense.feat('synset')
"""
r = None
for f in self:
if isinstance(f, Feature) and f.subset == subset:
if r: #support for multiclass features
if isinstance(r,list):
r.append(f.cls)
else:
r = [r, f.cls]
else:
r = f.cls
if r is None:
raise NoSuchAnnotation
else:
return r
def __ne__(self, other):
return not (self == other)
def __eq__(self, other):
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - " + repr(self) + " vs " + repr(other)
        #Check if we are of the same type
if type(self) != type(other):
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Type mismatch: " + str(type(self)) + " vs " + str(type(other))
return False
#Check FoLiA attributes
if self.id != other.id:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - ID mismatch: " + str(self.id) + " vs " + str(other.id)
return False
if self.set != other.set:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Set mismatch: " + str(self.set) + " vs " + str(other.set)
return False
if self.cls != other.cls:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Class mismatch: " + repr(self.cls) + " vs " + repr(other.cls)
return False
if self.annotator != other.annotator:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Annotator mismatch: " + repr(self.annotator) + " vs " + repr(other.annotator)
return False
if self.annotatortype != other.annotatortype:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Annotator mismatch: " + repr(self.annotatortype) + " vs " + repr(other.annotatortype)
return False
#Check if we have same amount of children:
mychildren = list(self)
yourchildren = list(other)
if len(mychildren) != len(yourchildren):
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Unequal amount of children"
return False
#Now check equality of children
for mychild, yourchild in zip(mychildren, yourchildren):
if mychild != yourchild:
if self.doc and self.doc.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] AbstractElement Equality Check - Child mismatch: " + repr(mychild) + " vs " + repr(yourchild) + " (in " + repr(self) + ", id: " + str(self.id) + ")"
return False
#looks like we made it! \o/
return True
def __len__(self):
"""Returns the number of child elements under the current element"""
return len(self.data)
def __nonzero__(self):
return True #any instance evaluates to True in boolean tests!! (important to distinguish from uninstantianted None values!)
def __iter__(self):
"""Iterate over all children of this element"""
return iter(self.data)
def __contains__(self, element):
"""Tests if the specified element is part of the children of the element"""
return element in self.data
def __getitem__(self, key):
try:
return self.data[key]
except KeyError:
raise
def __unicode__(self):
"""Alias for text()"""
return self.text()
def __str__(self):
return unicode(self).encode('utf-8')
def copy(self, newdoc=None):
"""Make a deep copy"""
c = deepcopy(self)
c.setparents()
c.setdoc(newdoc)
return c
def setparents(self):
"""Correct all parent relations for elements within the scope, usually no need to call this directly, invoked implicitly by copy()"""
for c in self:
if isinstance(c, AbstractElement):
c.parent = self
c.setparents()
def setdoc(self,newdoc):
"""Set a different document, usually no need to call this directly, invoked implicitly by copy()"""
self.doc = newdoc
for c in self:
if isinstance(c, AbstractElement):
c.setdoc(newdoc)
def hastext(self,cls='current'):
"""Does this element have text (of the specified class)"""
try:
r = self.textcontent(cls)
return True
except NoSuchText:
return False
def settext(self, text, cls='current'):
"""Set the text for this element (and class)"""
self.replace(TextContent, value=text, cls=cls)
def setdocument(self, doc):
"""Associate a document with this element"""
assert isinstance(doc, Document)
if not self.doc:
self.doc = doc
if self.id:
if self.id in doc:
raise DuplicateIDError(self.id)
else:
self.doc.index[id] = self
for e in self: #recursive for all children
e.setdocument(doc)
@classmethod
def addable(Class, parent, set=None, raiseexceptions=True):
"""Tests whether a new element of this class can be added to the parent. Returns a boolean or raises ValueError exceptions (unless set to ignore)!
        This will use ``OCCURRENCES``, but may be overridden for more customised behaviour.
This method is mostly for internal use.
"""
if not Class in parent.ACCEPTED_DATA:
#Class is not in accepted data, but perhaps any of its ancestors is?
found = False
c = Class
try:
while c.__base__:
if c.__base__ in parent.ACCEPTED_DATA:
found = True
break
c = c.__base__
except:
pass
if not found:
if raiseexceptions:
raise ValueError("Unable to add object of type " + Class.__name__ + " to " + parent.__class__.__name__ + ". Type not allowed as child.")
else:
return False
if Class.OCCURRENCES > 0:
#check if the parent doesn't have too many already
count = len(parent.select(Class,None,True,['Original','Suggestion','Alternative','AlternativeLayers']))
if count >= Class.OCCURRENCES:
if raiseexceptions:
raise DuplicateAnnotationError("Unable to add another object of type " + child.__class__.__name__ + " to " + __name__ + ". There are already " + str(count) + " instances of this class, which is the maximum.")
else:
return False
if Class.OCCURRENCESPERSET > 0 and set and Attrib.CLASS in Class.REQUIRED_ATTRIBS:
count = len(parent.select(Class,set,True, ['Original','Suggestion','Alternative','AlternativeLayers']))
if count >= Class.OCCURRENCESPERSET:
if raiseexceptions:
if parent.id:
extra = ' (id=' + parent.id + ')'
else:
extra = ''
raise DuplicateAnnotationError("Unable to add another object of set " + set + " and type " + Class.__name__ + " to " + parent.__class__.__name__ + " " + extra + ". There are already " + str(count) + " instances of this class, which is the maximum for the set.")
else:
return False
return True
def postappend(self):
"""This method will be called after an element is added to another. It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
#If the element was not associated with a document yet, do so now (and for all unassociated children:
if not self.doc and self.parent.doc:
self.setdocument(self.parent.doc)
if self.doc and self.doc.deepvalidation:
self.deepvalidation()
def deepvalidation(self):
try:
if self.doc and self.doc.deepvalidation and self.set and self.set[0] != '_' and self.set != "undefined":
try:
self.doc.setdefinitions[self.set].testclass(self.cls,self.doc)
except KeyError:
raise DeepValidationError("Set definition for " + self.set + " not loaded!")
except AttributeError:
pass
def append(self, child, *args, **kwargs):
"""Append a child element. Returns the added element
Arguments:
* ``child`` - Instance or class
If an *instance* is passed as first argument, it will be appended
If a *class* derived from AbstractElement is passed as first argument, an instance will first be created and then appended.
Keyword arguments:
* ``alternative=`` - If set to True, the element will be made into an alternative.
Generic example, passing a pre-generated instance::
word.append( folia.LemmaAnnotation(doc, cls="house", annotator="proycon", annotatortype=folia.AnnotatorType.MANUAL ) )
Generic example, passing a class to be generated::
word.append( folia.LemmaAnnotation, cls="house", annotator="proycon", annotatortype=folia.AnnotatorType.MANUAL )
Generic example, setting text with a class:
word.append( "house", cls='original' )
"""
#obtain the set (if available, necessary for checking addability)
if 'set' in kwargs:
set = kwargs['set']
else:
try:
set = child.set
except:
set = None
#Check if a Class rather than an instance was passed
Class = None #do not set to child.__class__
if inspect.isclass(child):
Class = child
if Class.addable(self, set):
if not 'id' in kwargs and not 'generate_id_in' in kwargs and (Attrib.ID in Class.REQUIRED_ATTRIBS):
kwargs['generate_id_in'] = self
child = Class(self.doc, *args, **kwargs)
elif args:
raise Exception("Too many arguments specified. Only possible when first argument is a class and not an instance")
#Do the actual appending
if not Class and (isinstance(child,str) or isinstance(child,unicode)) and TextContent in self.ACCEPTED_DATA:
#you can pass strings directly (just for convenience), will be made into textcontent automatically.
child = TextContent(self.doc, child )
self.data.append(child)
child.parent = self
elif Class or (isinstance(child, AbstractElement) and child.__class__.addable(self, set)): #(prevents calling addable again if already done above)
if 'alternative' in kwargs and kwargs['alternative']:
child = Alternative(self.doc, child, generate_id_in=self)
self.data.append(child)
child.parent = self
else:
raise ValueError("Unable to append object of type " + child.__class__.__name__ + " to " + self.__class__.__name__ + ". Type not allowed as child.")
child.postappend()
return child
def insert(self, index, child, *args, **kwargs):
"""Insert a child element at specified index. Returns the added element
If an *instance* is passed as first argument, it will be appended
If a *class* derived from AbstractElement is passed as first argument, an instance will first be created and then appended.
Arguments:
* index
* ``child`` - Instance or class
Keyword arguments:
* ``alternative=`` - If set to True, the element will be made into an alternative.
* ``corrected=`` - Used only when passing strings to be made into TextContent elements.
Generic example, passing a pre-generated instance::
word.insert( 3, folia.LemmaAnnotation(doc, cls="house", annotator="proycon", annotatortype=folia.AnnotatorType.MANUAL ) )
Generic example, passing a class to be generated::
word.insert( 3, folia.LemmaAnnotation, cls="house", annotator="proycon", annotatortype=folia.AnnotatorType.MANUAL )
Generic example, setting text of a specific correctionlevel::
word.insert( 3, "house", corrected=folia.TextCorrectionLevel.CORRECTED )
"""
#obtain the set (if available, necessary for checking addability)
if 'set' in kwargs:
set = kwargs['set']
else:
try:
set = child.set
except:
set = None
#Check if a Class rather than an instance was passed
Class = None #do not set to child.__class__
if inspect.isclass(child):
Class = child
if Class.addable(self, set):
if not 'id' in kwargs and not 'generate_id_in' in kwargs and (Attrib.ID in Class.REQUIRED_ATTRIBS or Attrib.ID in Class.OPTIONAL_ATTRIBS):
kwargs['generate_id_in'] = self
child = Class(self.doc, *args, **kwargs)
elif args:
raise Exception("Too many arguments specified. Only possible when first argument is a class and not an instance")
#Do the actual appending
if not Class and (isinstance(child,str) or isinstance(child,unicode)) and TextContent in self.ACCEPTED_DATA:
#you can pass strings directly (just for convenience), will be made into textcontent automatically.
child = TextContent(self.doc, child )
self.data.insert(index, child)
child.parent = self
elif Class or (isinstance(child, AbstractElement) and child.__class__.addable(self, set)): #(prevents calling addable again if already done above)
if 'alternative' in kwargs and kwargs['alternative']:
child = Alternative(self.doc, child, generate_id_in=self)
self.data.insert(index, child)
child.parent = self
else:
raise ValueError("Unable to append object of type " + child.__class__.__name__ + " to " + self.__class__.__name__ + ". Type not allowed as child.")
child.postappend()
return child
@classmethod
def findreplacables(Class, parent, set=None,**kwargs):
"""Find replacable elements. Auxiliary function used by replace(). Can be overriden for more fine-grained control. Mostly for internal use."""
return parent.select(Class,set,False)
def replace(self, child, *args, **kwargs):
"""Appends a child element like ``append()``, but replaces any existing child element of the same type and set. If no such child element exists, this will act the same as append()
Keyword arguments:
* ``alternative`` - If set to True, the *replaced* element will be made into an alternative. Simply use ``append()`` if you want the added element
to be an alternative.
See ``append()`` for more information.
"""
if 'set' in kwargs:
set = kwargs['set']
else:
try:
set = child.set
except:
set = None
if inspect.isclass(child):
Class = child
replace = Class.findreplacables(self,set,**kwargs)
else:
Class = child.__class__
kwargs['instance'] = child
replace = Class.findreplacables(self,set,**kwargs)
del kwargs['instance']
if len(replace) == 0:
#nothing to replace, simply call append
if 'alternative' in kwargs:
del kwargs['alternative'] #has other meaning in append()
return self.append(child, *args, **kwargs)
elif len(replace) > 1:
raise Exception("Unable to replace. Multiple candidates found, unable to choose.")
elif len(replace) == 1:
if 'alternative' in kwargs and kwargs['alternative']:
#old version becomes alternative
if replace[0] in self.data:
self.data.remove(replace[0])
alt = self.append(Alternative)
alt.append(replace[0])
del kwargs['alternative'] #has other meaning in append()
else:
                #remove old version completely
self.remove(replace[0])
return self.append(child, *args, **kwargs)
def ancestors(self, Class=None):
"""Generator yielding all ancestors of this element, effectively back-tracing its path to the root element."""
e = self
while e:
if e.parent:
e = e.parent
if not Class or isinstance(e,Class):
yield e
else:
break
def xml(self, attribs = None,elements = None, skipchildren = False):
"""Serialises the FoLiA element to XML, by returning an XML Element (in lxml.etree) for this element and all its children. For string output, consider the xmlstring() method instead."""
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not attribs: attribs = {}
if not elements: elements = []
if self.id:
if self.doc and self.doc.bypassleak:
attribs['XMLid'] = self.id
else:
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
#Some attributes only need to be added if they are not the same as what's already set in the declaration
try:
if self.set:
if not self.ANNOTATIONTYPE in self.doc.annotationdefaults or len(self.doc.annotationdefaults[self.ANNOTATIONTYPE]) != 1 or self.doc.annotationdefaults[self.ANNOTATIONTYPE].keys()[0] != self.set:
if self.set != None:
attribs['{' + NSFOLIA + '}set'] = self.set
except AttributeError:
pass
try:
if self.cls:
attribs['{' + NSFOLIA + '}class'] = self.cls
except AttributeError:
pass
try:
if self.annotator and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ( 'annotator' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.annotator != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['annotator'])):
attribs['{' + NSFOLIA + '}annotator'] = self.annotator
if self.annotatortype and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ('annotatortype' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.annotatortype != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['annotatortype'])):
if self.annotatortype == AnnotatorType.AUTO:
attribs['{' + NSFOLIA + '}annotatortype'] = 'auto'
elif self.annotatortype == AnnotatorType.MANUAL:
attribs['{' + NSFOLIA + '}annotatortype'] = 'manual'
except AttributeError:
pass
try:
if self.confidence:
attribs['{' + NSFOLIA + '}confidence'] = str(self.confidence)
except AttributeError:
pass
try:
if self.n:
attribs['{' + NSFOLIA + '}n'] = str(self.n)
except AttributeError:
pass
try:
if not self.AUTH or not self.auth: #(former is static, latter isn't)
attribs['{' + NSFOLIA + '}auth'] = 'no'
except AttributeError:
pass
try:
if self.datetime and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ( 'datetime' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.datetime != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['datetime'])):
attribs['{' + NSFOLIA + '}datetime'] = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
except AttributeError:
pass
omitchildren = []
#Are there predetermined Features in ACCEPTED_DATA?
for c in self.ACCEPTED_DATA:
if issubclass(c, Feature) and c.SUBSET:
#Do we have any of those?
for c2 in self.data:
if c2.__class__ is c and c.SUBSET == c2.SUBSET and c2.cls:
#Yes, serialize them as attributes
attribs[c2.SUBSET] = c2.cls
omitchildren.append(c2) #and skip them as elements
break #only one
e = E._makeelement('{' + NSFOLIA + '}' + self.XMLTAG, **attribs)
if not skipchildren and self.data:
#append children,
            # we want to make sure that text elements are in the right order, 'current' class first
# so we first put them in a list
textelements = []
otherelements = []
for child in self:
if isinstance(child, TextContent):
if child.cls == 'current':
textelements.insert(0, child)
else:
textelements.append(child)
elif not (child in omitchildren):
otherelements.append(child)
for child in textelements+otherelements:
e.append(child.xml())
if elements: #extra elements
for e2 in elements:
e.append(e2)
return e
def xmlstring(self, pretty_print=False):
"""Serialises this FoLiA element to XML, returns a string with XML representation for this element and all its children."""
global LXE
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if self.doc and self.doc.bypassleak:
s = s.replace('XMLid=','xml:id=')
s = s.replace('ns0:','') #ugly patch to get rid of namespace prefix
s = s.replace(':ns0','')
return s
def select(self, Class, set=None, recursive=True, ignorelist=['Original','Suggestion','Alternative',], node=None):
"""Select child elements of the specified class.
A further restriction can be made based on set. Whether or not to apply recursively (by default enabled) can also be configured, optionally with a list of elements never to recurse into.
Arguments:
        * ``Class``: The class to select; any python class subclassed off ``AbstractElement``
* ``set``: The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
* ``recursive``: Select recursively? Descending into child elements? Boolean defaulting to True.
        * ``ignorelist``: A list of Classes (subclassed off ``AbstractElement``) not to recurse into. It is common not to want to recurse into the following elements: ``folia.Alternative``, ``folia.Suggestion``, and ``folia.Original``, as elements contained in these are never *authoritative*.
* ``node``: Reserved for internal usage, used in recursion.
Returns:
A list of elements (instances)
Example::
text.select(folia.Sense, 'cornetto', True, [folia.Original, folia.Suggestion, folia.Alternative] )
"""
l = []
if not node:
node = self
for e in self.data:
if ignorelist:
ignore = False
for c in ignorelist:
if not inspect.isclass(c):
c = globals()[c]
if c == e.__class__ or issubclass(e.__class__,c):
ignore = True
break
if ignore:
continue
if isinstance(e, Class):
if not set is None:
try:
if e.set != set:
continue
except:
continue
l.append(e)
if recursive:
for e2 in e.select(Class, set, recursive, ignorelist, e):
if not set is None:
try:
if e2.set != set:
continue
except:
continue
l.append(e2)
return l
    def xselect(self, Class, set=None, recursive=True, node=None):
        """Same as ``select()``, but this is a generator instead of returning a list"""
        if not node:
            node = self
        for e in self:
            if isinstance(e, Class):
                if not set is None:
                    try:
                        if e.set != set:
                            continue
                    except:
                        continue
                yield e
            elif recursive:
                for e2 in e.xselect(Class, set, recursive, e):
                    if not set is None:
                        try:
                            if e2.set != set:
                                continue
                        except:
                            continue
                    yield e2
def items(self, founditems=[]):
"""Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)"""
l = []
for e in self.data:
if not e in founditems: #prevent going in recursive loops
l.append(e)
if isinstance(e, AbstractElement):
l += e.items(l)
return l
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if origclass: cls = origclass
preamble = []
try:
if cls.__doc__:
E2 = ElementMaker(namespace="http://relaxng.org/ns/annotation/0.9", nsmap={'a':'http://relaxng.org/ns/annotation/0.9'} )
preamble.append(E2.documentation(cls.__doc__))
except AttributeError:
pass
attribs = []
if Attrib.ID in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(name='id', ns="http://www.w3.org/XML/1998/namespace") )
elif Attrib.ID in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='id', ns="http://www.w3.org/XML/1998/namespace") ) )
if Attrib.CLASS in cls.REQUIRED_ATTRIBS:
#Set is a tough one, we can't require it as it may be defined in the declaration: we make it optional and need schematron to resolve this later
attribs.append( E.attribute(name='class') )
attribs.append( E.optional( E.attribute( name='set' ) ) )
elif Attrib.CLASS in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='class') ) )
attribs.append( E.optional( E.attribute( name='set' ) ) )
if Attrib.ANNOTATOR in cls.REQUIRED_ATTRIBS or Attrib.ANNOTATOR in cls.OPTIONAL_ATTRIBS:
#Similarly tough
attribs.append( E.optional( E.attribute(name='annotator') ) )
attribs.append( E.optional( E.attribute(name='annotatortype') ) )
if Attrib.CONFIDENCE in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') )
elif Attrib.CONFIDENCE in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') ) )
if Attrib.N in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute( name='n') )
elif Attrib.N in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( name='n') ) )
if Attrib.DATETIME in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') )
elif Attrib.DATETIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') ) )
attribs.append( E.optional( E.attribute( name='auth' ) ) )
#if cls.ALLOWTEXT:
# attribs.append( E.optional( E.ref(name='t') ) ) #yes, not actually an attrib, I know, but should go here
if extraattribs:
for e in extraattribs:
attribs.append(e) #s
elements = [] #(including attributes)
done = {}
if includechildren:
for c in cls.ACCEPTED_DATA:
if c.__name__[:8] == 'Abstract' and inspect.isclass(c):
for c2 in globals().values():
try:
if inspect.isclass(c2) and issubclass(c2, c):
try:
if c2.XMLTAG and not (c2.XMLTAG in done):
if c2.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c2.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c2.XMLTAG) ) )
done[c2.XMLTAG] = True
except AttributeError:
continue
except TypeError:
pass
elif issubclass(c, Feature) and c.SUBSET:
attribs.append( E.optional( E.attribute(name=c.SUBSET))) #features as attributes
else:
try:
if c.XMLTAG and not (c.XMLTAG in done):
if c.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c.XMLTAG) ) )
done[c.XMLTAG] = True
except AttributeError:
continue
if extraelements:
for e in extraelements:
elements.append( e )
if elements:
if len(elements) > 1:
attribs.append( E.interleave(*elements) )
else:
attribs.append( *elements )
if not attribs:
attribs.append( E.empty() )
return E.define( E.element(*(preamble + attribs), **{'name': cls.XMLTAG}), name=cls.XMLTAG, ns=NSFOLIA)
@classmethod
def parsexml(Class, node, doc):
"""Internal class method used for turning an XML element into an instance of the Class.
Args:
        * ``node`` - XML Element
* ``doc`` - Document
Returns:
An instance of the current Class.
"""
assert issubclass(Class, AbstractElement)
global NSFOLIA, NSDCOI
nslen = len(NSFOLIA) + 2
nslendcoi = len(NSDCOI) + 2
dcoi = (node.tag[:nslendcoi] == '{' + NSDCOI + '}')
args = []
kwargs = {}
text = None
for subnode in node:
if subnode.tag[:nslen] == '{' + NSFOLIA + '}':
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Processing subnode " + subnode.tag[nslen:]
args.append(doc.parsexml(subnode, Class) )
elif subnode.tag[:nslendcoi] == '{' + NSDCOI + '}':
#Dcoi support
if Class is Text and subnode.tag[nslendcoi:] == 'body':
for subsubnode in subnode:
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Processing DCOI subnode " + subnode.tag[nslendcoi:]
args.append(doc.parsexml(subsubnode, Class) )
else:
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Processing DCOI subnode " + subnode.tag[nslendcoi:]
args.append(doc.parsexml(subnode, Class) )
elif doc.debug >= 1:
print >>stderr, "[PyNLPl FoLiA DEBUG] Ignoring subnode outside of FoLiA namespace: " + subnode.tag
id = dcoipos = dcoilemma = dcoicorrection = dcoicorrectionoriginal = None
for key, value in node.attrib.items():
if key == '{http://www.w3.org/XML/1998/namespace}id':
id = value
key = 'id'
elif key[:nslen] == '{' + NSFOLIA + '}':
key = key[nslen:]
elif key[:nslendcoi] == '{' + NSDCOI + '}':
key = key[nslendcoi:]
#D-Coi support:
if Class is Word and key == 'pos':
dcoipos = value
continue
elif Class is Word and key == 'lemma':
dcoilemma = value
continue
elif Class is Word and key == 'correction':
dcoicorrection = value #class
continue
elif Class is Word and key == 'original':
dcoicorrectionoriginal = value
continue
elif Class is Gap and key == 'reason':
key = 'class'
elif Class is Gap and key == 'hand':
key = 'annotator'
elif Class is Division and key == 'type':
key = 'cls'
kwargs[key] = value
#D-Coi support:
if dcoi and TextContent in Class.ACCEPTED_DATA and node.text:
text = node.text.strip()
if text:
kwargs['text'] = text
if not AnnotationType.TOKEN in doc.annotationdefaults:
doc.declare(AnnotationType.TOKEN, set='http://ilk.uvt.nl/folia/sets/ilktok.foliaset')
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found " + node.tag[nslen:]
instance = Class(doc, *args, **kwargs)
#if id:
# if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Adding to index: " + id
# doc.index[id] = instance
if dcoipos:
if not AnnotationType.POS in doc.annotationdefaults:
doc.declare(AnnotationType.POS, set='http://ilk.uvt.nl/folia/sets/cgn-legacy.foliaset')
instance.append( PosAnnotation(doc, cls=dcoipos) )
if dcoilemma:
if not AnnotationType.LEMMA in doc.annotationdefaults:
doc.declare(AnnotationType.LEMMA, set='http://ilk.uvt.nl/folia/sets/mblem-nl.foliaset')
instance.append( LemmaAnnotation(doc, cls=dcoilemma) )
if dcoicorrection and dcoicorrectionoriginal and text:
if not AnnotationType.CORRECTION in doc.annotationdefaults:
doc.declare(AnnotationType.CORRECTION, set='http://ilk.uvt.nl/folia/sets/dcoi-corrections.foliaset')
instance.correct(generate_id_in=instance, cls=dcoicorrection, original=dcoicorrectionoriginal, new=text)
return instance
def resolveword(self, id):
return None
def remove(self, child):
"""Removes the child element"""
if not isinstance(child, AbstractElement):
raise ValueError("Expected AbstractElement, got " + str(type(child)))
child.parent = None
self.data.remove(child)
#delete from index
if child.id and self.doc and child.id in self.doc.index:
del self.doc.index[child.id]
class Description(AbstractElement):
"""Description is an element that can be used to associate a description with almost any
other FoLiA element"""
XMLTAG = 'desc'
OCCURRENCES = 1
def __init__(self,doc, *args, **kwargs):
"""Required keyword arguments:
* ``value=``: The text content for the description (``str`` or ``unicode``)
"""
if 'value' in kwargs:
if isinstance(kwargs['value'], unicode):
self.value = kwargs['value']
elif isinstance(kwargs['value'], str):
self.value = unicode(kwargs['value'],'utf-8')
elif kwargs['value'] is None:
self.value = u""
else:
raise Exception("value= parameter must be unicode or str instance, got " + str(type(kwargs['value'])))
del kwargs['value']
else:
raise Exception("Description expects value= parameter")
super(Description,self).__init__(doc, *args, **kwargs)
def __nonzero__(self):
return bool(self.value)
def __unicode__(self):
return self.value
def __str__(self):
return self.value.encode('utf-8')
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not attribs:
attribs = {}
return E.desc(self.value, **attribs)
@classmethod
def parsexml(Class, node, doc):
global NSFOLIA
kwargs = {}
kwargs['value'] = node.text
return Description(doc, **kwargs)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.text(), name=cls.XMLTAG), name=cls.XMLTAG, ns=NSFOLIA)
class AllowCorrections(object):
def correct(self, **kwargs):
"""Apply a correction (TODO: documentation to be written still)"""
if 'reuse' in kwargs:
#reuse an existing correction instead of making a new one
if isinstance(kwargs['reuse'], Correction):
c = kwargs['reuse']
else: #assume it's an index
try:
c = self.doc.index[kwargs['reuse']]
assert isinstance(c, Correction)
except:
raise ValueError("reuse= must point to an existing correction (id or instance)!")
suggestionsonly = (not c.hasnew() and not c.hasoriginal() and c.hassuggestions())
if 'new' in kwargs and c.hascurrent():
#can't add new if there's current, so first set original to current, and then delete current
if 'current' in kwargs:
raise Exception("Can't set both new= and current= !")
if not 'original' in kwargs:
kwargs['original'] = c.current()
c.remove(c.current())
else:
if not 'id' in kwargs and not 'generate_id_in' in kwargs:
kwargs['generate_id_in'] = self
kwargs2 = copy(kwargs)
for x in ['new','original','suggestion', 'suggestions','current', 'insertindex']:
if x in kwargs2:
del kwargs2[x]
c = Correction(self.doc, **kwargs2)
addnew = False
if 'insertindex' in kwargs:
insertindex = int(kwargs['insertindex'])
del kwargs['insertindex']
else:
insertindex = -1 #append
if 'current' in kwargs:
if 'original' in kwargs or 'new' in kwargs: raise Exception("When setting current=, original= and new= can not be set!")
if not isinstance(kwargs['current'], list) and not isinstance(kwargs['current'], tuple): kwargs['current'] = [kwargs['current']] #support both lists (for multiple elements at once), as well as single element
c.replace(Current(self.doc, *kwargs['current']))
for o in kwargs['current']: #delete current from current element
if o in self and isinstance(o, AbstractElement):
if insertindex == -1: insertindex = self.data.index(o)
self.remove(o)
del kwargs['current']
if 'new' in kwargs:
if not isinstance(kwargs['new'], list) and not isinstance(kwargs['new'], tuple): kwargs['new'] = [kwargs['new']] #support both lists (for multiple elements at once), as well as single element
addnew = New(self.doc, *kwargs['new'])
c.replace(addnew)
for current in c.select(Current): #delete current if present
c.remove(current)
del kwargs['new']
if 'original' in kwargs:
if not isinstance(kwargs['original'], list) and not isinstance(kwargs['original'], tuple): kwargs['original'] = [kwargs['original']] #support both lists (for multiple elements at once), as well as single element
c.replace(Original(self.doc, *kwargs['original']))
for o in kwargs['original']: #delete original from current element
if o in self and isinstance(o, AbstractElement):
if insertindex == -1: insertindex = self.data.index(o)
self.remove(o)
for current in c.select(Current): #delete current if present
c.remove(current)
del kwargs['original']
elif addnew:
#original not specified, find automagically:
original = []
for new in addnew:
kwargs2 = {}
if isinstance(new, TextContent):
kwargs2['cls'] = new.cls
try:
set = new.set
except:
set = None
original += new.__class__.findreplacables(self, set, **kwargs2)
if not original:
raise Exception("No original= specified and unable to automatically infer")
else:
c.replace(Original(self.doc, *original))
for current in c.select(Current): #delete current if present
c.remove(current)
if addnew:
for original in c.original():
if original in self:
self.remove(original)
if 'suggestion' in kwargs:
kwargs['suggestions'] = [kwargs['suggestion']]
del kwargs['suggestion']
if 'suggestions' in kwargs:
for suggestion in kwargs['suggestions']:
if isinstance(suggestion, Suggestion):
c.append(suggestion)
elif isinstance(suggestion, list) or isinstance(suggestion, tuple):
c.append(Suggestion(self.doc, *suggestion))
else:
c.append(Suggestion(self.doc, suggestion))
del kwargs['suggestions']
if 'reuse' in kwargs:
if addnew and suggestionsonly:
#What was previously only a suggestion, now becomes a real correction
#If annotator, annotatortypes
#are associated with the correction as a whole, move it to the suggestions
#correction-wide annotator, annotatortypes might be overwritten
for suggestion in c.suggestions():
if c.annotator and not suggestion.annotator:
suggestion.annotator = c.annotator
if c.annotatortype and not suggestion.annotatortype:
suggestion.annotatortype = c.annotatortype
if 'annotator' in kwargs:
c.annotator = kwargs['annotator']
if 'annotatortype' in kwargs:
c.annotatortype = kwargs['annotatortype']
if 'confidence' in kwargs:
c.confidence = float(kwargs['confidence'])
del kwargs['reuse']
else:
if insertindex == -1:
self.append(c)
else:
self.insert(insertindex, c)
return c
class AllowTokenAnnotation(AllowCorrections):
"""Elements that allow token annotation (including extended annotation) must inherit from this class"""
def annotations(self,Class,set=None):
"""Obtain annotations. Very similar to ``select()`` but raises an error if the annotation was not found.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation)
        * ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
A list of elements
Raises:
``NoSuchAnnotation`` if the specified annotation does not exist.
"""
l = self.select(Class,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
if not l:
raise NoSuchAnnotation()
else:
return l
def hasannotation(self,Class,set=None):
"""Returns an integer indicating whether such as annotation exists, and if so, how many. See ``annotations()`` for a description of the parameters."""
l = self.select(Class,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
return len(l)
def annotation(self, type, set=None):
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
l = self.select(type,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
if len(l) >= 1:
return l[0]
else:
raise NoSuchAnnotation()
def alternatives(self, Class=None, set=None):
"""Obtain a list of alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
        * ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
List of Alternative elements
"""
l = []
for e in self.select(Alternative,None, True, ['Original','Suggestion']):
if Class is None:
l.append(e)
elif len(e) >= 1: #child elements?
for e2 in e:
try:
if isinstance(e2, Class):
try:
if set is None or e2.set == set:
found = True
l.append(e) #not e2
break #yield an alternative only once (in case there are multiple matches)
except AttributeError:
continue
except AttributeError:
continue
return l
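# Illustrative usage sketch for the annotation accessors above; `word` is assumed to be a Word
# from a parsed document carrying part-of-speech and lemma annotation:
#
#   try:
#       pos = word.annotation(PosAnnotation)             # a single annotation
#   except NoSuchAnnotation:
#       pos = None
#   if word.hasannotation(LemmaAnnotation):
#       for lemma in word.annotations(LemmaAnnotation):  # all matching annotations
#           print lemma.cls
#   for alt in word.alternatives(PosAnnotation):         # alternative readings, if any
#       print alt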
class AllowGenerateID(object):
"""Classes inherited from this class allow for automatic ID generation, using the convention of adding a period, the name of the element , another period, and a sequence number"""
def _getmaxid(self, xmltag):
try:
if xmltag in self.maxid:
return self.maxid[xmltag]
else:
return 0
except:
return 0
def _setmaxid(self, child):
#print "set maxid on " + repr(self) + " for " + repr(child)
try:
self.maxid
except AttributeError:
self.maxid = {}
try:
if child.id and child.XMLTAG:
fields = child.id.split(self.doc.IDSEPARATOR)
if len(fields) > 1 and fields[-1].isdigit():
if not child.XMLTAG in self.maxid:
self.maxid[child.XMLTAG] = int(fields[-1])
#print "set maxid on " + repr(self) + ", " + child.XMLTAG + " to " + fields[-1]
else:
if self.maxid[child.XMLTAG] < int(fields[-1]):
self.maxid[child.XMLTAG] = int(fields[-1])
#print "set maxid on " + repr(self) + ", " + child.XMLTAG + " to " + fields[-1]
except AttributeError:
pass
def generate_id(self, cls):
if isinstance(cls,str):
xmltag = cls
else:
try:
xmltag = cls.XMLTAG
except:
raise Exception("Expected a class such as Alternative, Correction, etc...")
maxid = self._getmaxid(xmltag)
id = None
if self.id:
id = self.id
else:
#this element has no ID, fall back to closest parent ID:
e = self
while e.parent:
if e.id:
id = e.id
break
e = e.parent
id = id + '.' + xmltag + '.' + str(maxid + 1)
try:
self.maxid
except AttributeError:
self.maxid = {}
self.maxid[xmltag] = maxid + 1 #Set MAX ID
return id
#i = 0
#while True:
# i += 1
# print i
# if self.id:
# id = self.id
# else:
# #this element has no ID, fall back to closest parent ID:
# e = self
# while e.parent:
# if e.id:
# id = e.id
# break
# e = e.parent
# id = id + '.' + xmltag + '.' + str(self._getmaxid(xmltag) + i)
# if not id in self.doc.index:
# return id
class AbstractStructureElement(AbstractElement, AllowTokenAnnotation, AllowGenerateID):
"""Abstract element, all structure elements inherit from this class. Never instantiated directly."""
PRINTABLE = True
TEXTDELIMITER = "\n\n" #bigger gap between structure elements
OCCURRENCESPERSET = 0 #Number of times this element may occur per set (0=unlimited, default=1)
REQUIRED_ATTRIBS = (Attrib.ID,)
OPTIONAL_ATTRIBS = Attrib.ALL
def __init__(self, doc, *args, **kwargs):
super(AbstractStructureElement,self).__init__(doc, *args, **kwargs)
def resolveword(self, id):
for child in self:
r = child.resolveword(id)
if r:
return r
return None
def append(self, child, *args, **kwargs):
"""See ``AbstractElement.append()``"""
e = super(AbstractStructureElement,self).append(child, *args, **kwargs)
self._setmaxid(e)
return e
def words(self, index = None):
"""Returns a list of Word elements found (recursively) under this element.
Arguments:
* ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
if index is None:
return self.select(Word,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer'])
else:
return self.select(Word,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer'])[index]
def paragraphs(self, index = None):
"""Returns a list of Paragraph elements found (recursively) under this element.
Arguments:
* ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
if index is None:
return self.select(Paragraph,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer'])
else:
return self.select(Paragraph,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer'])[index]
def sentences(self, index = None):
"""Returns a list of Sentence elements found (recursively) under this element
Arguments:
* ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
if index is None:
return self.select(Sentence,None,True,['Quote','Original','Suggestion','Alternative','AbstractAnnotationLayer'])
else:
return self.select(Sentence,None,True,['Quote','Original','Suggestion','Alternative','AbstractAnnotationLayer'])[index]
def layers(self, annotationtype=None,set=None):
"""Returns a list of annotation layers found *directly* under this element, does not include alternative layers"""
if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
return [ x for x in self.select(AbstractAnnotationLayer,set,False,['AlternativeLayers']) if annotationtype is None or x.ANNOTATIONTYPE == annotationtype ]
def hasannotationlayer(self, annotationtype=None,set=None):
"""Does the specified annotation layer exist?"""
l = self.layers(annotationtype, set)
return (len(l) > 0)
def __eq__(self, other):
return super(AbstractStructureElement, self).__eq__(other)
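# Illustrative usage sketch for the recursive structure queries above; `doc` is assumed to be a
# parsed Document (Document.words() is also relied upon by Word.leftcontext() further below):
#
#   for word in doc.words():
#       print word.id
#   firstword = sentence.words(0)          # index form returns the n'th Word under `sentence`
#   allsentences = paragraph.sentences()   # assuming `paragraph` is a Paragraph element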
class AbstractAnnotation(AbstractElement):
pass
class AbstractTokenAnnotation(AbstractAnnotation, AllowGenerateID):
"""Abstract element, all token annotation elements are derived from this class"""
OCCURRENCESPERSET = 1 #Do not allow duplicates within the same set
REQUIRED_ATTRIBS = (Attrib.CLASS,)
OPTIONAL_ATTRIBS = Attrib.ALL
def append(self, child, *args, **kwargs):
"""See ``AbstractElement.append()``"""
e = super(AbstractTokenAnnotation,self).append(child, *args, **kwargs)
self._setmaxid(e)
return e
class AbstractExtendedTokenAnnotation(AbstractTokenAnnotation):
pass
class TextContent(AbstractElement):
"""Text content element (``t``), holds text to be associated with whatever element the text content element is a child of.
    Text content elements have an associated correction level, indicating whether the text they hold is in a pre-corrected or post-corrected state. There can be only one of each level. Text content elements
    on structure elements like ``Paragraph`` and ``Sentence`` are by definition untokenised; only at ``Word`` level and deeper are they by definition tokenised.
    Text content elements can specify an offset that refers to text at a higher parent level. Use the following keyword arguments:
* ``ref=``: The instance to point to, this points to the element holding the text content element, not the text content element itself.
* ``offset=``: The offset where this text is found, offsets start at 0
"""
XMLTAG = 't'
OPTIONAL_ATTRIBS = (Attrib.CLASS,Attrib.ANNOTATOR,Attrib.CONFIDENCE, Attrib.DATETIME)
ANNOTATIONTYPE = AnnotationType.TEXT
OCCURRENCES = 0 #Number of times this element may occur in its parent (0=unlimited)
OCCURRENCESPERSET = 0 #Number of times this element may occur per set (0=unlimited)
def __init__(self, doc, *args, **kwargs):
        """Required keyword arguments:
            * ``value=``: Set to a unicode or str containing the text
        Example::
            text = folia.TextContent(doc, value='test')
            text = folia.TextContent(doc, value='test',cls='original')
        """
        global ILLEGAL_UNICODE_CONTROL_CHARACTERS
if not 'value' in kwargs:
if args and (isinstance(args[0], unicode) or isinstance(args[0], str)):
kwargs['value'] = args[0]
args = args[1:]
else:
raise Exception("TextContent expects value= parameter")
if isinstance(kwargs['value'], unicode):
self.value = kwargs['value']
del kwargs['value']
elif isinstance(kwargs['value'], str):
self.value = unicode(kwargs['value'],'utf-8')
del kwargs['value']
elif not kwargs['value']:
self.value = u""
del kwargs['value']
else:
raise Exception("Invalid value: " + repr(kwargs['value']))
if self.value and (self.value != self.value.translate(ILLEGAL_UNICODE_CONTROL_CHARACTERS)):
raise ValueError("There are illegal unicode control characters present in TextContent: " + repr(self.value))
if 'offset' in kwargs: #offset
self.offset = int(kwargs['offset'])
del kwargs['offset']
else:
self.offset = None
if 'ref' in kwargs: #reference to offset
            if isinstance(kwargs['ref'], AbstractElement):
self.ref = kwargs['ref']
else:
try:
self.ref = doc.index[kwargs['ref']]
except:
raise UnresolvableTextContent("Unable to resolve textcontent reference: " + kwargs['ref'] + " (class=" + self.cls+")")
del kwargs['ref']
else:
self.ref = None #will be set upon parent.append()
#If no class is specified, it defaults to 'current'. (FoLiA uncharacteristically predefines two classes for t: current and original)
if not ('cls' in kwargs) and not ('class' in kwargs):
kwargs['cls'] = 'current'
super(TextContent,self).__init__(doc, *args, **kwargs)
def text(self):
"""Obtain the text (unicode instance)"""
return self.value
def validateref(self):
"""Validates the Text Content's references. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return True #nothing to test
if self.ref:
ref = self.ref
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for textcontent not found!")
        elif not ref.hastext(self.cls):
            raise UnresolvableTextContent("Reference has no such text (class=" + self.cls+")")
        elif self.value != ref.textcontent(self.cls).value[self.offset:self.offset+len(self.value)]:
            raise UnresolvableTextContent("Reference found but text does not match!")
else:
#finally, we made it!
return True
def __unicode__(self):
return self.value
def __str__(self):
return self.value.encode('utf-8')
def __eq__(self, other):
if isinstance(other, TextContent):
return self.value == other.value
elif isinstance(other, unicode):
return self.value == other
elif isinstance(other, str):
return self.value == unicode(other,'utf-8')
else:
return False
def append(self, child, *args, **kwargs):
"""This method is not implemented on purpose"""
raise NotImplementedError #on purpose
def postappend(self):
"""(Method for internal usage, see ``AbstractElement.postappend()``)"""
if isinstance(self.parent, Original):
if self.cls == 'current': self.cls = 'original'
#assert (self.testreference() == True)
super(TextContent, self).postappend()
def finddefaultreference(self):
"""Find the default reference for text offsets:
The parent of the current textcontent's parent (counting only Structure Elements and Subtoken Annotation Elements)
Note: This returns not a TextContent element, but its parent. Whether the textcontent actually exists is checked later/elsewhere
"""
depth = 0
e = self
        while True:
            if e.parent:
                e = e.parent
            else:
                return False
if isinstance(e,AbstractStructureElement) or isinstance(e,AbstractSubtokenAnnotation):
depth += 1
if depth == 2:
return e
return False
def __iter__(self):
"""Iterate over the text string (character by character)"""
return iter(self.value)
def __len__(self):
"""Get the length of the text"""
return len(self.value)
@classmethod
def findreplacables(Class, parent, set, **kwargs):
"""(Method for internal usage, see AbstractElement)"""
#some extra behaviour for text content elements, replace also based on the 'corrected' attribute:
if not 'cls' in kwargs:
kwargs['cls'] = 'current'
replace = super(TextContent, Class).findreplacables(parent, set, **kwargs)
replace = [ x for x in replace if x.cls == kwargs['cls']]
del kwargs['cls'] #always delete what we processed
return replace
@classmethod
def parsexml(Class, node, doc):
"""(Method for internal usage, see AbstractElement)"""
global NSFOLIA
nslen = len(NSFOLIA) + 2
args = []
kwargs = {}
if 'class' in node.attrib:
kwargs['cls'] = node.attrib['class']
if 'offset' in node.attrib:
kwargs['offset'] = int(node.attrib['offset'])
elif 'ref' in node.attrib:
kwargs['ref'] = node.attrib['ref']
if node.text:
kwargs['value'] = node.text
else:
kwargs['value'] = ""
return TextContent(doc, **kwargs)
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
attribs = {}
if not self.offset is None:
attribs['{' + NSFOLIA + '}offset'] = str(self.offset)
if self.parent and self.ref:
attribs['{' + NSFOLIA + '}ref'] = self.ref.id
if self.cls != 'current' and not (self.cls == 'original' and any( isinstance(x, Original) for x in self.ancestors() ) ):
attribs['{' + NSFOLIA + '}class'] = self.cls
else:
if '{' + NSFOLIA + '}class' in attribs:
del attribs['{' + NSFOLIA + '}class']
return E.t(self.value, **attribs)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.text(), E.optional( E.attribute(name='offset')), E.optional( E.attribute(name='class')),name=cls.XMLTAG ), name=cls.XMLTAG, ns=NSFOLIA)
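# Illustrative usage sketch for TextContent offsets; the text and identifiers are placeholders.
# The word-level text points into the sentence-level text via offset=, with the reference resolved
# automatically to the nearest suitable ancestor (see finddefaultreference() above):
#
#   sentence.append(TextContent(doc, value='The quick fox.'))
#   word.append(TextContent(doc, value='quick', offset=4))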
class Linebreak(AbstractStructureElement):
"""Line break element, signals a line break"""
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = ()
XMLTAG = 'br'
ANNOTATIONTYPE = AnnotationType.LINEBREAK
TEXTDELIMITER = "\n"
class Whitespace(AbstractStructureElement):
"""Whitespace element, signals a vertical whitespace"""
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = ()
XMLTAG = 'whitespace'
ANNOTATIONTYPE = AnnotationType.WHITESPACE
TEXTDELIMITER = "\n\n"
class Word(AbstractStructureElement, AllowCorrections):
"""Word (aka token) element. Holds a word/token and all its related token annotations."""
XMLTAG = 'w'
ANNOTATIONTYPE = AnnotationType.TOKEN
#ACCEPTED_DATA DEFINED LATER (after Correction)
#will actually be determined by gettextdelimiter()
def __init__(self, doc, *args, **kwargs):
"""Keyword arguments:
* ``space=``: Boolean indicating whether this token is followed by a space (defaults to True)
Example::
sentence.append( folia.Word, 'This')
sentence.append( folia.Word, 'is')
sentence.append( folia.Word, 'a')
sentence.append( folia.Word, 'test', space=False)
sentence.append( folia.Word, '.')
"""
self.space = True
if 'space' in kwargs:
self.space = kwargs['space']
del kwargs['space']
super(Word,self).__init__(doc, *args, **kwargs)
def sentence(self):
"""Obtain the sentence this word is a part of, otherwise return None"""
        e = self
while e.parent:
if isinstance(e, Sentence):
return e
e = e.parent
return None
def paragraph(self):
"""Obtain the paragraph this word is a part of, otherwise return None"""
        e = self
while e.parent:
if isinstance(e, Paragraph):
return e
e = e.parent
return None
def division(self):
"""Obtain the deepest division this word is a part of, otherwise return None"""
        e = self
while e.parent:
if isinstance(e, Division):
return e
e = e.parent
return None
def incorrection(self):
"""Is this word part of a correction? If it is, it returns the Correction element (evaluating to True), otherwise it returns None"""
e = self
while not e.parent is None:
if isinstance(e, Correction):
return e
if isinstance(e, Sentence):
break
e = e.parent
return None
def pos(self,set=None):
"""Shortcut: returns the FoLiA class of the PoS annotation (will return only one if there are multiple!)"""
return self.annotation(PosAnnotation,set).cls
def lemma(self, set=None):
"""Shortcut: returns the FoLiA class of the lemma annotation (will return only one if there are multiple!)"""
return self.annotation(LemmaAnnotation,set).cls
def sense(self,set=None):
"""Shortcut: returns the FoLiA class of the sense annotation (will return only one if there are multiple!)"""
return self.annotation(SenseAnnotation,set).cls
def domain(self,set=None):
"""Shortcut: returns the FoLiA class of the domain annotation (will return only one if there are multiple!)"""
return self.annotation(DomainAnnotation,set).cls
def morphemes(self,set=None):
"""Generator yielding all morphemes (in a particular set if specified). For retrieving one specific morpheme by index, use morpheme() instead"""
for layer in self.select(MorphologyLayer):
for m in layer.select(Morpheme, set):
yield m
def morpheme(self,index, set=None):
"""Returns a specific morpheme, the n'th morpheme (given the particular set if specified)."""
for layer in self.select(MorphologyLayer):
for i, m in enumerate(layer.select(Morpheme, set)):
if index == i:
return m
raise NoSuchAnnotation
def gettextdelimiter(self, retaintokenisation=False):
"""Returns the text delimiter"""
if self.space or retaintokenisation:
return ' '
else:
return ''
def resolveword(self, id):
if id == self.id:
return self
else:
return None
def getcorrection(self,set=None,cls=None):
try:
return self.getcorrections(set,cls)[0]
except:
raise NoSuchAnnotation
def getcorrections(self, set=None,cls=None):
try:
l = []
for correction in self.annotations(Correction):
if ((not set or correction.set == set) and (not cls or correction.cls == cls)):
l.append(correction)
return l
except NoSuchAnnotation:
raise
@classmethod
def parsexml(Class, node, doc):
assert Class is Word
global NSFOLIA
nslen = len(NSFOLIA) + 2
instance = super(Word,Class).parsexml(node, doc)
if 'space' in node.attrib:
if node.attrib['space'] == 'no':
instance.space = False
return instance
def xml(self, attribs = None,elements = None, skipchildren = False):
if not attribs: attribs = {}
if not self.space:
attribs['space'] = 'no'
return super(Word,self).xml(attribs,elements, False)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not extraattribs:
extraattribs = [ E.optional(E.attribute(name='space')) ]
else:
extraattribs.append( E.optional(E.attribute(name='space')) )
return AbstractStructureElement.relaxng(includechildren, extraattribs, extraelements, cls)
def split(self, *newwords, **kwargs):
self.sentence().splitword(self, *newwords, **kwargs)
def next(self):
"""Returns the next word in the sentence, or None if no next word was found. This method does not cross sentence boundaries."""
words = self.sentence().words()
i = words.index(self) + 1
if i < len(words):
return words[i]
else:
return None
def previous(self):
"""Returns the previous word in the sentence, or None if no next word was found. This method does not cross sentence boundaries."""
words = self.sentence().words()
i = words.index(self) - 1
if i >= 0:
return words[i]
else:
return None
def leftcontext(self, size, placeholder=None):
"""Returns the left context for a word. This method crosses sentence/paragraph boundaries"""
if size == 0: return [] #for efficiency
words = self.doc.words()
i = words.index(self)
begin = i - size
if begin < 0:
return [placeholder] * (begin * -1) + words[0:i]
else:
return words[begin:i]
def rightcontext(self, size, placeholder=None):
"""Returns the right context for a word. This method crosses sentence/paragraph boundaries"""
if size == 0: return [] #for efficiency
words = self.doc.words()
i = words.index(self)
begin = i+1
end = begin + size
rightcontext = words[begin:end]
if len(rightcontext) < size:
rightcontext += (size - len(rightcontext)) * [placeholder]
return rightcontext
def context(self, size, placeholder=None):
"""Returns this word in context, {size} words to the left, the current word, and {size} words to the right"""
return self.leftcontext(size, placeholder) + [self] + self.rightcontext(size, placeholder)
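# Illustrative usage sketch for the Word helpers above; `word` is assumed to come from a document
# with part-of-speech and lemma annotation declared:
#
#   print word.pos(), word.lemma()                  # FoLiA classes of the PoS / lemma annotations
#   print word.sentence().id                        # enclosing sentence
#   neighbours = word.context(2, placeholder=None)  # two words left, the word itself, two words right
#   nextword = word.next()                          # next word in the same sentence, or None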
class Feature(AbstractElement):
"""Feature elements can be used to associate subsets and subclasses with almost any
annotation element"""
OCCURRENCESPERSET = 0 #unlimited
XMLTAG = 'feat'
SUBSET = None
def __init__(self,doc, *args, **kwargs):
"""Required keyword arguments:
* ``subset=``: the subset
* ``cls=``: the class
"""
self.id = None
self.set = None
self.data = []
self.annotator = None
self.annotatortype = None
self.confidence = None
self.n = None
self.datetime = None
if not isinstance(doc, Document) and not (doc is None):
raise Exception("First argument of Feature constructor must be a Document instance, not " + str(type(doc)))
self.doc = doc
if self.SUBSET:
self.subset = self.SUBSET
elif 'subset' in kwargs:
self.subset = kwargs['subset']
else:
raise Exception("No subset specified for " + + self.__class__.__name__)
if 'cls' in kwargs:
self.cls = kwargs['cls']
elif 'class' in kwargs:
self.cls = kwargs['class']
else:
raise Exception("No class specified for " + self.__class__.__name__)
if isinstance(self.cls, datetime):
self.cls = self.cls.strftime("%Y-%m-%dT%H:%M:%S")
def xml(self):
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
attribs = {}
if self.subset != self.SUBSET:
attribs['{' + NSFOLIA + '}subset'] = self.subset
attribs['{' + NSFOLIA + '}class'] = self.cls
return E._makeelement('{' + NSFOLIA + '}' + self.XMLTAG, **attribs)
@classmethod
def relaxng(cls, includechildren=True, extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.attribute(name='subset'), E.attribute(name='class'),name=cls.XMLTAG), name=cls.XMLTAG,ns=NSFOLIA)
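# Illustrative usage sketch for Feature; the subset/class names are placeholders. Features attach
# subset/class pairs to an annotation element (PosAnnotation accepts them, see further below):
#
#   pos = word.annotation(PosAnnotation)
#   pos.append(Feature(doc, subset='gender', cls='fem'))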
class ValueFeature(Feature):
"""Value feature, to be used within Metric"""
XMLTAG = None
SUBSET = 'value' #associated subset
class Metric(AbstractElement):
"""Metric elements allow the annotatation of any kind of metric with any kind of annotation element. Allowing for example statistical measures to be added to elements as annotation,"""
XMLTAG = 'metric'
ANNOTATIONTYPE = AnnotationType.METRIC
ACCEPTED_DATA = (Feature, ValueFeature, Description)
class AbstractSubtokenAnnotation(AbstractAnnotation, AllowGenerateID):
"""Abstract element, all subtoken annotation elements are derived from this class"""
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
OCCURRENCESPERSET = 0 #Allow duplicates within the same set
PRINTABLE = True
class AbstractSpanAnnotation(AbstractAnnotation, AllowGenerateID):
"""Abstract element, all span annotation elements are derived from this class"""
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
OCCURRENCESPERSET = 0 #Allow duplicates within the same set
PRINTABLE = True
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
if not attribs: attribs = {}
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
e = super(AbstractSpanAnnotation,self).xml(attribs, elements, True)
for child in self:
if isinstance(child, Word) or isinstance(child, Morpheme):
#Include REFERENCES to word items instead of word items themselves
attribs['{' + NSFOLIA + '}id'] = child.id
                if child.hastext(): #only include the text if the referenced word actually has text
attribs['{' + NSFOLIA + '}t'] = child.text()
e.append( E.wref(**attribs) )
elif not (isinstance(child, Feature) and child.SUBSET): #Don't add pre-defined features, they are already added as attributes
e.append( child.xml() )
return e
def append(self, child, *args, **kwargs):
if (isinstance(child, Word) or isinstance(child, Morpheme)) and WordReference in self.ACCEPTED_DATA:
#Accept Word instances instead of WordReference, references will be automagically used upon serialisation
self.data.append(child)
return child
else:
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs)
def _helper_wrefs(self, targets):
"""Internal helper function"""
for c in self:
if isinstance(c,Word) or isinstance(c,Morpheme): #TODO: add phoneme when it becomes available
targets.append(c)
elif isinstance(c, AbstractSpanAnnotation):
c._helper_wrefs(targets)
def wrefs(self, index = None):
"""Returns a list of word references, these can be Words but also Morphemes or Phonemes.
Arguments:
* ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
targets =[]
self._helper_wrefs(targets)
if index is None:
return targets
else:
return targets[index]
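# Illustrative usage sketch for span annotations; the set URL and class are placeholders.
# Word instances appended to a span element are serialised as <wref> references (see append()
# and xml() above):
#
#   layer = sentence.append(EntitiesLayer)
#   entity = layer.append(Entity(doc, *sentence.words(), set='https://example.org/sets/entities', cls='person'))
#   print entity.wrefs()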
class AbstractAnnotationLayer(AbstractElement, AllowGenerateID):
"""Annotation layers for Span Annotation are derived from this abstract base class"""
OPTIONAL_ATTRIBS = (Attrib.SETONLY,)
PRINTABLE = False
def __init__(self, doc, *args, **kwargs):
if 'set' in kwargs:
self.set = kwargs['set']
elif self.ANNOTATIONTYPE in doc.annotationdefaults and len(doc.annotationdefaults[self.ANNOTATIONTYPE]) == 1:
self.set = doc.annotationdefaults[self.ANNOTATIONTYPE].keys()[0]
else:
self.set = False
            # ok, let's not raise an error yet, we may still be able to derive a set from elements that are appended
super(AbstractAnnotationLayer,self).__init__(doc, *args, **kwargs)
def xml(self, attribs = None,elements = None, skipchildren = False):
if self.set is False or self.set is None:
if len(self.data) == 0: #just skip if there are no children
return ""
else:
raise ValueError("No set specified or derivable for annotation layer " + self.__class__.__name__)
return super(AbstractAnnotationLayer, self).xml(attribs, elements, skipchildren)
def append(self, child, *args, **kwargs):
if self.set is False or self.set is None:
if inspect.isclass(child):
if 'set' in kwargs:
self.set = kwargs['set']
elif isinstance(child, AbstractElement):
if child.set:
self.set = child.set
#print "DEBUG AFTER APPEND: set=", self.set
return super(AbstractAnnotationLayer, self).append(child, *args, **kwargs)
def annotations(self,Class,set=None):
"""Obtain annotations. Very similar to ``select()`` but raises an error if the annotation was not found.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation)
        * ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
A list of elements
Raises:
``NoSuchAnnotation`` if the specified annotation does not exist.
"""
l = self.select(Class,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
if not l:
raise NoSuchAnnotation()
else:
return l
def hasannotation(self,Class,set=None):
"""Returns an integer indicating whether such as annotation exists, and if so, how many. See ``annotations()`` for a description of the parameters."""
l = self.select(Class,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
return len(l)
def annotation(self, type, set=None):
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
l = self.select(type,set,True,['Original','Suggestion','Alternative','AlternativeLayers','MorphologyLayer'])
if len(l) >= 1:
return l[0]
else:
raise NoSuchAnnotation()
def alternatives(self, Class=None, set=None):
"""Obtain a list of alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
        * ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
List of Alternative elements
"""
l = []
for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']):
if Class is None:
l.append(e)
elif len(e) >= 1: #child elements?
for e2 in e:
try:
if isinstance(e2, Class):
try:
if set is None or e2.set == set:
found = True
l.append(e) #not e2
break #yield an alternative only once (in case there are multiple matches)
except AttributeError:
continue
except AttributeError:
continue
return l
def findspan(self, *words):
"""Returns the span element which spans over the specified words or morphemes"""
for span in self.select(AbstractSpanAnnotation,None,True):
if tuple(span.wrefs()) == words:
return span
raise NoSuchAnnotation
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if not extraattribs:
extraattribs = []
extraattribs.append(E.optional(E.attribute(E.text(), name='set')) )
return AbstractElement.relaxng(includechildren, extraattribs, extraelements, cls)
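# Illustrative usage sketch for findspan(); `layer` is assumed to be an annotation layer (e.g. an
# EntitiesLayer) under a sentence, and word1/word2 words that the span covers:
#
#   try:
#       span = layer.findspan(word1, word2)
#       print span.cls
#   except NoSuchAnnotation:
#       pass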
# class AbstractSubtokenAnnotationLayer(AbstractElement, AllowGenerateID):
# """Annotation layers for Subtoken Annotation are derived from this abstract base class"""
# OPTIONAL_ATTRIBS = ()
# PRINTABLE = False
# def __init__(self, doc, *args, **kwargs):
# if 'set' in kwargs:
# self.set = kwargs['set']
# del kwargs['set']
# super(AbstractSubtokenAnnotationLayer,self).__init__(doc, *args, **kwargs)
class AbstractCorrectionChild(AbstractElement):
OPTIONAL_ATTRIBS = (Attrib.ANNOTATOR,Attrib.CONFIDENCE,Attrib.DATETIME,Attrib.N)
ACCEPTED_DATA = (AbstractTokenAnnotation, Word, TextContent, Description, Metric)
TEXTDELIMITER = None
PRINTABLE = True
class AlignReference(AbstractElement):
REQUIRED_ATTRIBS = (Attrib.ID,)
XMLTAG = 'aref'
def __init__(self, doc, *args, **kwargs):
#Special constructor, not calling super constructor
if not 'id' in kwargs:
raise Exception("ID required for AlignReference")
if not 'type' in kwargs:
raise Exception("Type required for AlignReference")
elif not inspect.isclass(kwargs['type']):
raise Exception("Type must be a FoLiA element (python class)")
self.type = kwargs['type']
if 't' in kwargs:
self.t = kwargs['t']
else:
self.t = None
assert(isinstance(doc,Document))
self.doc = doc
self.id = kwargs['id']
self.annotator = None
self.annotatortype = None
self.confidence = None
self.n = None
self.datetime = None
self.auth = False
self.set = None
self.cls = None
self.data = []
if 'href' in kwargs:
self.href = kwargs['href']
else:
self.href = None
@classmethod
def parsexml(Class, node, doc):
global NSFOLIA
assert Class is AlignReference or issubclass(Class, AlignReference)
#special handling for word references
id = node.attrib['id']
if not 'type' in node.attrib:
raise ValueError("No type in alignment reference")
try:
type = XML2CLASS[node.attrib['type']]
except KeyError:
raise ValueError("No such type: " + node.attrib['type'])
return AlignReference(doc, id=id, type=type)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.attribute(E.text(), name='id'), E.optional(E.attribute(E.text(), name='t')), E.attribute(E.text(), name='type'), name=cls.XMLTAG), name=cls.XMLTAG, ns=NSFOLIA)
def resolve(self, alignmentcontext):
if not alignmentcontext.href:
#no target document, same document
return self.doc[self.id]
else:
raise NotImplementedError
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not attribs:
attribs = {}
attribs['id'] = self.id
attribs['type'] = self.type.XMLTAG
if self.t: attribs['t'] = self.t
return E.aref( **attribs)
class Alignment(AbstractElement):
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
OCCURRENCESPERSET = 0 #Allow duplicates within the same set (0= unlimited)
XMLTAG = 'alignment'
ANNOTATIONTYPE = AnnotationType.ALIGNMENT
ACCEPTED_DATA = (AlignReference, Description, Metric)
PRINTABLE = False
def __init__(self, doc, *args, **kwargs):
if 'href' in kwargs:
            self.href = kwargs['href']
del kwargs['href']
else:
self.href = None
super(Alignment,self).__init__(doc, *args, **kwargs)
@classmethod
def parsexml(Class, node, doc):
global NSFOLIA
assert Class is Alignment or issubclass(Class, Alignment)
instance = super(Alignment,Class).parsexml(node, doc)
if '{http://www.w3.org/1999/xlink}href' in node.attrib:
instance.href = node.attrib['{http://www.w3.org/1999/xlink}href']
else:
instance.href = None
return instance
def xml(self, attribs = None,elements = None, skipchildren = False):
if not attribs: attribs = {}
if self.href:
attribs['{http://www.w3.org/1999/xlink}href'] = self.href
attribs['{http://www.w3.org/1999/xlink}type'] = 'simple'
return super(Alignment,self).xml(attribs,elements, False)
def resolve(self):
l = []
for x in self.select(AlignReference):
l.append( x.resolve(self) )
return l
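# Illustrative usage sketch for alignments within one document; the id is a placeholder. Without
# an href on the Alignment, AlignReference.resolve() looks the id up in the same document (see
# resolve() above):
#
#   alignment = word.append(Alignment(doc))
#   alignment.append(AlignReference(doc, id='example.p.1.s.1.w.3', type=Word))
#   print alignment.resolve()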
class ErrorDetection(AbstractExtendedTokenAnnotation):
ANNOTATIONTYPE = AnnotationType.ERRORDETECTION
XMLTAG = 'errordetection'
OCCURRENCESPERSET = 0 #Allow duplicates within the same set (0= unlimited)
class Suggestion(AbstractCorrectionChild):
ANNOTATIONTYPE = AnnotationType.SUGGESTION
XMLTAG = 'suggestion'
OCCURRENCES = 0 #unlimited
OCCURRENCESPERSET = 0 #Allow duplicates within the same set (0= unlimited)
AUTH = False
class New(AbstractCorrectionChild):
    REQUIRED_ATTRIBS = ()
    OPTIONAL_ATTRIBS = ()
OCCURRENCES = 1
XMLTAG = 'new'
@classmethod
def addable(Class, parent, set=None, raiseexceptions=True):
if not super(New,Class).addable(parent,set,raiseexceptions): return False
if any( ( isinstance(c, Current) for c in parent ) ):
if raiseexceptions:
raise ValueError("Can't add New element to Correction if there is a Current item")
else:
return False
return True
class Original(AbstractCorrectionChild):
    REQUIRED_ATTRIBS = ()
    OPTIONAL_ATTRIBS = ()
OCCURRENCES = 1
XMLTAG = 'original'
AUTH = False
@classmethod
def addable(Class, parent, set=None, raiseexceptions=True):
if not super(Original,Class).addable(parent,set,raiseexceptions): return False
if any( ( isinstance(c, Current) for c in parent ) ):
if raiseexceptions:
raise Exception("Can't add Original item to Correction if there is a Current item")
else:
return False
return True
class Current(AbstractCorrectionChild):
    REQUIRED_ATTRIBS = ()
    OPTIONAL_ATTRIBS = ()
OCCURRENCES = 1
XMLTAG = 'current'
@classmethod
def addable(Class, parent, set=None, raiseexceptions=True):
if not super(Current,Class).addable(parent,set,raiseexceptions): return False
if any( ( isinstance(c, New) or isinstance(c, Original) for c in parent ) ):
if raiseexceptions:
raise Exception("Can't add Current element to Correction if there is a New or Original element")
else:
return False
return True
class Correction(AbstractExtendedTokenAnnotation):
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = (New,Original,Current, Suggestion, Description, Metric)
ANNOTATIONTYPE = AnnotationType.CORRECTION
XMLTAG = 'correction'
OCCURRENCESPERSET = 0 #Allow duplicates within the same set (0= unlimited)
TEXTDELIMITER = None
PRINTABLE = True
def hasnew(self):
return bool(self.select(New,None,False, False))
def hasoriginal(self):
return bool(self.select(Original,None,False, False))
def hascurrent(self):
return bool(self.select(Current,None,False, False))
def hassuggestions(self):
return bool(self.select(Suggestion,None,False, False))
def textcontent(self, cls='current'):
"""Get the text explicitly associated with this element (of the specified class).
Returns the TextContent instance rather than the actual text. Raises NoSuchText exception if
not found.
Unlike text(), this method does not recurse into child elements (with the sole exception of the Correction/New element), and it returns the TextContent instance rather than the actual text!
"""
if cls == 'current':
for e in self:
if isinstance(e, New) or isinstance(e, Current):
return e.textcontent(cls)
elif cls == 'original':
for e in self:
if isinstance(e, Original):
return e.textcontent(cls)
raise NoSuchText
def text(self, cls = 'current', retaintokenisation=False, previousdelimiter=""):
if cls == 'current':
for e in self:
if isinstance(e, New) or isinstance(e, Current):
return previousdelimiter + e.text(cls, retaintokenisation)
elif cls == 'original':
for e in self:
if isinstance(e, Original):
return previousdelimiter + e.text(cls, retaintokenisation)
raise NoSuchText
def gettextdelimiter(self, retaintokenisation=False):
"""May return a customised text delimiter instead of the default for this class."""
for e in self:
if isinstance(e, New) or isinstance(e, Current):
d = e.gettextdelimiter(retaintokenisation)
return d
return ""
def new(self,index = None):
if index is None:
try:
return self.select(New,None,False)[0]
except IndexError:
raise NoSuchAnnotation
else:
l = self.select(New,None,False)
if len(l) == 0:
raise NoSuchAnnotation
else:
return l[0][index]
def original(self,index=None):
if index is None:
try:
return self.select(Original,None,False, False)[0]
except IndexError:
raise NoSuchAnnotation
else:
l = self.select(Original,None,False, False)
if len(l) == 0:
raise NoSuchAnnotation
else:
return l[0][index]
def current(self,index=None):
if index is None:
try:
return self.select(Current,None,False)[0]
except IndexError:
raise NoSuchAnnotation
else:
l = self.select(Current,None,False)
if len(l) == 0:
raise NoSuchAnnotation
else:
return l[0][index]
def suggestions(self,index=None):
if index is None:
return self.select(Suggestion,None,False, False)
else:
return self.select(Suggestion,None,False, False)[index]
def __unicode__(self):
for e in self:
if isinstance(e, New) or isinstance(e, Current):
return unicode(e)
def select(self, cls, set=None, recursive=True, ignorelist=[], node=None):
"""Select on Correction only descends in either "NEW" or "CURRENT" branch"""
if ignorelist is False:
#to override and go into all branches, set ignorelist explictly to False
return super(Correction,self).select(cls,set,recursive, ignorelist, node)
else:
            ignorelist = copy(ignorelist) #we don't want to alter a passed ignorelist (by ref)
ignorelist.append(Original)
ignorelist.append(Suggestion)
return super(Correction,self).select(cls,set,recursive, ignorelist, node)
Original.ACCEPTED_DATA = (AbstractTokenAnnotation, Word, TextContent, Correction, Description, Metric)
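# Illustrative usage sketch for inspecting a Correction; `word` is assumed to be a Word that sits
# inside a correction (see Word.incorrection() above):
#
#   correction = word.incorrection()
#   if correction:
#       print correction.text('current')            # text of the New/Current branch
#       if correction.hasoriginal():
#           print correction.text('original')       # text before correction
#       for suggestion in correction.suggestions():
#           print suggestion.text()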
class Alternative(AbstractElement, AllowTokenAnnotation, AllowGenerateID):
"""Element grouping alternative token annotation(s). Multiple alternative elements may occur, each denoting a different alternative. Elements grouped inside an alternative block are considered dependent."""
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
ACCEPTED_DATA = [AbstractTokenAnnotation, Correction] #adding MorphlogyLayer later
ANNOTATIONTYPE = AnnotationType.ALTERNATIVE
XMLTAG = 'alt'
PRINTABLE = False
AUTH = False
class AlternativeLayers(AbstractElement):
"""Element grouping alternative subtoken annotation(s). Multiple altlayers elements may occur, each denoting a different alternative. Elements grouped inside an alternative block are considered dependent."""
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
ACCEPTED_DATA = (AbstractAnnotationLayer,)
XMLTAG = 'altlayers'
PRINTABLE = False
AUTH = False
Word.ACCEPTED_DATA = (AbstractTokenAnnotation, TextContent, Alternative, AlternativeLayers, Description, AbstractAnnotationLayer, Alignment, Metric)
class WordReference(AbstractElement):
"""Word reference. Use to refer to words or morphemes from span annotation elements. The Python class will only be used when word reference can not be resolved, if they can, Word or Morpheme objects will be used"""
REQUIRED_ATTRIBS = (Attrib.ID,)
XMLTAG = 'wref'
#ANNOTATIONTYPE = AnnotationType.TOKEN
def __init__(self, doc, *args, **kwargs):
#Special constructor, not calling super constructor
if not 'id' in kwargs:
raise Exception("ID required for WordReference")
assert(isinstance(doc,Document))
self.doc = doc
self.id = kwargs['id']
self.annotator = None
self.annotatortype = None
self.confidence = None
self.n = None
self.datetime = None
self.auth = False
self.data = []
@classmethod
def parsexml(Class, node, doc):
global NSFOLIA
assert Class is WordReference or issubclass(Class, WordReference)
#special handling for word references
id = node.attrib['id']
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found word reference"
try:
return doc[id]
except KeyError:
if doc.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] ...Unresolvable!"
return WordReference(doc, id=id)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.attribute(E.text(), name='id'), E.optional(E.attribute(E.text(), name='t')), name=cls.XMLTAG), name=cls.XMLTAG, ns=NSFOLIA)
class SyntacticUnit(AbstractSpanAnnotation):
"""Syntactic Unit, span annotation element to be used in SyntaxLayer"""
REQUIRED_ATTRIBS = ()
ANNOTATIONTYPE = AnnotationType.SYNTAX
XMLTAG = 'su'
SyntacticUnit.ACCEPTED_DATA = (SyntacticUnit,WordReference, Description, Feature, Metric)
class Chunk(AbstractSpanAnnotation):
"""Chunk element, span annotation element to be used in ChunkingLayer"""
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = (WordReference, Description, Feature, Metric)
ANNOTATIONTYPE = AnnotationType.CHUNKING
XMLTAG = 'chunk'
class Entity(AbstractSpanAnnotation):
"""Entity element, for named entities, span annotation element to be used in EntitiesLayer"""
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = (WordReference, Description, Feature, Metric)
ANNOTATIONTYPE = AnnotationType.ENTITY
XMLTAG = 'entity'
class Headspan(AbstractSpanAnnotation): #generic head element
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = ()
ACCEPTED_DATA = (WordReference,Description, Feature, Alignment, Metric)
#ANNOTATIONTYPE = AnnotationType.DEPENDENCY
XMLTAG = 'hd'
DependencyHead = Headspan #alias, backwards compatibility with FoLiA 0.8
class DependencyDependent(AbstractSpanAnnotation):
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = ()
ACCEPTED_DATA = (WordReference,Description, Feature, Alignment, Metric)
ANNOTATIONTYPE = AnnotationType.DEPENDENCY
XMLTAG = 'dep'
class Dependency(AbstractSpanAnnotation):
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = (Description, Feature,Headspan, DependencyDependent, Alignment, Metric)
ANNOTATIONTYPE = AnnotationType.DEPENDENCY
XMLTAG = 'dependency'
def head(self):
"""Returns the head of the dependency relation. Instance of DependencyHead"""
return self.select(DependencyHead)[0]
def dependent(self):
"""Returns the dependent of the dependency relation. Instance of DependencyDependent"""
return self.select(DependencyDependent)[0]
class ModalityFeature(Feature):
"""Modality feature, to be used with coreferences"""
SUBSET = 'modality' #associated subset
XMLTAG = None
class TimeFeature(Feature):
"""Time feature, to be used with coreferences"""
SUBSET = 'time' #associated subset
XMLTAG = None
class LevelFeature(Feature):
"""Level feature, to be used with coreferences"""
SUBSET = 'level' #associated subset
XMLTAG = None
class CoreferenceLink(AbstractSpanAnnotation):
"""Coreference link. Used in coreferencechain."""
REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = (Attrib.ANNOTATOR, Attrib.N, Attrib.DATETIME)
ACCEPTED_DATA = (WordReference, Description, Headspan, Alignment, ModalityFeature, TimeFeature,LevelFeature, Metric)
ANNOTATIONTYPE = AnnotationType.COREFERENCE
XMLTAG = 'coreferencelink'
class CoreferenceChain(AbstractSpanAnnotation):
"""Coreference chain. Consists of coreference links."""
REQUIRED_ATTRIBS = ()
ACCEPTED_DATA = (CoreferenceLink,Description, Metric)
ANNOTATIONTYPE = AnnotationType.COREFERENCE
XMLTAG = 'coreferencechain'
class SemanticRole(AbstractSpanAnnotation):
"""Semantic Role"""
REQUIRED_ATTRIBS = (Attrib.CLASS,)
ACCEPTED_DATA = (WordReference, Description, Headspan, Alignment, Metric)
ANNOTATIONTYPE = AnnotationType.SEMROLE
XMLTAG = 'semrole'
class FunctionFeature(Feature):
"""Function feature, to be used with morphemes"""
SUBSET = 'function' #associated subset
XMLTAG = None
class Morpheme(AbstractStructureElement):
"""Morpheme element, represents one morpheme in morphological analysis, subtoken annotation element to be used in MorphologyLayer"""
    REQUIRED_ATTRIBS = ()
OPTIONAL_ATTRIBS = Attrib.ALL
ACCEPTED_DATA = (FunctionFeature, Feature,TextContent, Metric, Alignment, AbstractTokenAnnotation, Description)
ANNOTATIONTYPE = AnnotationType.MORPHOLOGICAL
XMLTAG = 'morpheme'
#class Subentity(AbstractSubtokenAnnotation):
# """Subentity element, for named entities within a single token, subtoken annotation element to be used in SubentitiesLayer"""
# ACCEPTED_DATA = (Feature,TextContent, Metric)
# ANNOTATIONTYPE = AnnotationType.SUBENTITY
# XMLTAG = 'subentity'
class SyntaxLayer(AbstractAnnotationLayer):
"""Syntax Layer: Annotation layer for SyntacticUnit span annotation elements"""
ACCEPTED_DATA = (SyntacticUnit,Description)
XMLTAG = 'syntax'
ANNOTATIONTYPE = AnnotationType.SYNTAX
class ChunkingLayer(AbstractAnnotationLayer):
"""Chunking Layer: Annotation layer for Chunk span annotation elements"""
ACCEPTED_DATA = (Chunk,Description)
XMLTAG = 'chunking'
ANNOTATIONTYPE = AnnotationType.CHUNKING
class EntitiesLayer(AbstractAnnotationLayer):
"""Entities Layer: Annotation layer for Entity span annotation elements. For named entities."""
ACCEPTED_DATA = (Entity,Description)
XMLTAG = 'entities'
ANNOTATIONTYPE = AnnotationType.ENTITY
class DependenciesLayer(AbstractAnnotationLayer):
"""Dependencies Layer: Annotation layer for Dependency span annotation elements. For dependency entities."""
ACCEPTED_DATA = (Dependency,Description)
XMLTAG = 'dependencies'
ANNOTATIONTYPE = AnnotationType.DEPENDENCY
class MorphologyLayer(AbstractAnnotationLayer):
"""Morphology Layer: Annotation layer for Morpheme subtoken annotation elements. For morphological analysis."""
ACCEPTED_DATA = (Morpheme,)
XMLTAG = 'morphology'
ANNOTATIONTYPE = AnnotationType.MORPHOLOGICAL
Alternative.ACCEPTED_DATA.append( MorphologyLayer)
#class SubentitiesLayer(AbstractSubtokenAnnotationLayer):
# """Subentities Layer: Annotation layer for Subentity subtoken annotation elements. For named entities within a single token."""
# ACCEPTED_DATA = (Subentity,)
# XMLTAG = 'subentities'
class CoreferenceLayer(AbstractAnnotationLayer):
"""Syntax Layer: Annotation layer for SyntacticUnit span annotation elements"""
ACCEPTED_DATA = (CoreferenceChain,Description)
XMLTAG = 'coreferences'
ANNOTATIONTYPE = AnnotationType.COREFERENCE
class SemanticRolesLayer(AbstractAnnotationLayer):
"""Syntax Layer: Annotation layer for SemnaticRole span annotation elements"""
ACCEPTED_DATA = (SemanticRole,Description)
XMLTAG = 'semroles'
ANNOTATIONTYPE = AnnotationType.SEMROLE
class HeadFeature(Feature):
"""Synset feature, to be used within PosAnnotation"""
SUBSET = 'head' #associated subset
XMLTAG = None
class PosAnnotation(AbstractTokenAnnotation):
"""Part-of-Speech annotation: a token annotation element"""
ANNOTATIONTYPE = AnnotationType.POS
ACCEPTED_DATA = (Feature,HeadFeature,Description, Metric)
XMLTAG = 'pos'
class LemmaAnnotation(AbstractTokenAnnotation):
"""Lemma annotation: a token annotation element"""
ANNOTATIONTYPE = AnnotationType.LEMMA
ACCEPTED_DATA = (Feature,Description, Metric)
XMLTAG = 'lemma'
class LangAnnotation(AbstractExtendedTokenAnnotation):
"""Language annotation: an extended token annotation element"""
ANNOTATIONTYPE = AnnotationType.LANG
ACCEPTED_DATA = (Feature,Description, Metric)
XMLTAG = 'lang'
#class PhonAnnotation(AbstractTokenAnnotation): #DEPRECATED in v0.9
# """Phonetic annotation: a token annotation element"""
# ANNOTATIONTYPE = AnnotationType.PHON
# ACCEPTED_DATA = (Feature,Description, Metric)
# XMLTAG = 'phon'
class DomainAnnotation(AbstractExtendedTokenAnnotation):
"""Domain annotation: an extended token annotation element"""
ANNOTATIONTYPE = AnnotationType.DOMAIN
ACCEPTED_DATA = (Feature,Description, Metric)
XMLTAG = 'domain'
class SynsetFeature(Feature):
"""Synset feature, to be used within Sense"""
#XMLTAG = 'synset'
XMLTAG = None
SUBSET = 'synset' #associated subset
class ActorFeature(Feature):
"""Actor feature, to be used within Event"""
#XMLTAG = 'actor'
XMLTAG = None
SUBSET = 'actor' #associated subset
class BegindatetimeFeature(Feature):
"""Begindatetime feature, to be used within Event"""
#XMLTAG = 'begindatetime'
XMLTAG = None
SUBSET = 'begindatetime' #associated subset
class EnddatetimeFeature(Feature):
"""Enddatetime feature, to be used within Event"""
#XMLTAG = 'enddatetime'
XMLTAG = None
SUBSET = 'enddatetime' #associated subset
class StyleFeature(Feature):
XMLTAG = None
SUBSET = "style"
class Event(AbstractStructureElement):
ACCEPTED_DATA = (AbstractStructureElement,Feature, ActorFeature, BegindatetimeFeature, EnddatetimeFeature, TextContent, Metric,AbstractExtendedTokenAnnotation)
ANNOTATIONTYPE = AnnotationType.EVENT
XMLTAG = 'event'
OCCURRENCESPERSET = 0
class TimeSegment(AbstractSpanAnnotation):
ACCEPTED_DATA = (WordReference, Description, Feature, ActorFeature, BegindatetimeFeature, EnddatetimeFeature, Metric)
ANNOTATIONTYPE = AnnotationType.TIMESEGMENT
XMLTAG = 'timesegment'
OCCURRENCESPERSET = 0
TimedEvent = TimeSegment #alias for FoLiA 0.8 compatibility
class TimingLayer(AbstractAnnotationLayer):
"""Dependencies Layer: Annotation layer for Dependency span annotation elements. For dependency entities."""
ANNOTATIONTYPE = AnnotationType.TIMESEGMENT
ACCEPTED_DATA = (TimedEvent,Description)
XMLTAG = 'timing'
class SenseAnnotation(AbstractTokenAnnotation):
"""Sense annotation: a token annotation element"""
ANNOTATIONTYPE = AnnotationType.SENSE
ACCEPTED_DATA = (Feature,SynsetFeature, Description, Metric)
XMLTAG = 'sense'
class SubjectivityAnnotation(AbstractTokenAnnotation):
"""Subjectivity annotation: a token annotation element"""
ANNOTATIONTYPE = AnnotationType.SUBJECTIVITY
ACCEPTED_DATA = (Feature, Description, Metric)
XMLTAG = 'subjectivity'
class Quote(AbstractStructureElement):
"""Quote: a structure element. For quotes/citations. May hold words or sentences."""
REQUIRED_ATTRIBS = ()
XMLTAG = 'quote'
#ACCEPTED DATA defined later below
def __init__(self, doc, *args, **kwargs):
super(Quote,self).__init__(doc, *args, **kwargs)
def resolveword(self, id):
for child in self:
r = child.resolveword(id)
if r:
return r
return None
def append(self, child, *args, **kwargs):
if inspect.isclass(child):
if child is Sentence:
kwargs['auth'] = False
elif isinstance(child, Sentence):
child.auth = False #Sentences under quotes are non-authoritative
return super(Quote, self).append(child, *args, **kwargs)
def gettextdelimiter(self, retaintokenisation=False):
        #no text delimiter of itself, recurse into children to inherit delimiter
for child in reversed(self):
if isinstance(child, Sentence):
return "" #if a quote ends in a sentence, we don't want any delimiter
else:
return child.gettextdelimiter(retaintokenisation)
        return ""
class Sentence(AbstractStructureElement):
"""Sentence element. A structure element. Represents a sentence and holds all its words (and possibly other structure such as LineBreaks, Whitespace and Quotes)"""
ACCEPTED_DATA = (Word, Quote, AbstractExtendedTokenAnnotation, Correction, TextContent, Description, Linebreak, Whitespace, Event, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer)
XMLTAG = 's'
TEXTDELIMITER = ' '
ANNOTATIONTYPE = AnnotationType.SENTENCE
def __init__(self, doc, *args, **kwargs):
"""
Example 1::
sentence = paragraph.append( folia.Sentence)
sentence.append( folia.Word, 'This')
sentence.append( folia.Word, 'is')
sentence.append( folia.Word, 'a')
sentence.append( folia.Word, 'test', space=False)
sentence.append( folia.Word, '.')
Example 2::
sentence = folia.Sentence( doc, folia.Word(doc, 'This'), folia.Word(doc, 'is'), folia.Word(doc, 'a'), folia.Word(doc, 'test', space=False), folia.Word(doc, '.') )
paragraph.append(sentence)
"""
super(Sentence,self).__init__(doc, *args, **kwargs)
def resolveword(self, id):
for child in self:
r = child.resolveword(id)
if r:
return r
return None
def corrections(self):
"""Are there corrections in this sentence?"""
return bool(self.select(Correction))
def paragraph(self):
"""Obtain the paragraph this sentence is a part of (None otherwise)"""
        e = self
while e.parent:
if isinstance(e, Paragraph):
return e
e = e.parent
return None
def division(self):
"""Obtain the division this sentence is a part of (None otherwise)"""
        e = self
while e.parent:
if isinstance(e, Division):
return e
e = e.parent
return None
def correctwords(self, originalwords, newwords, **kwargs):
"""Generic correction method for words. You most likely want to use the helper functions
        splitword(), mergewords(), deleteword(), insertword() instead"""
for w in originalwords:
if not isinstance(w, Word):
raise Exception("Original word is not a Word instance: " + str(type(w)))
elif w.sentence() != self:
raise Exception("Original not found as member of sentence!")
for w in newwords:
if not isinstance(w, Word):
raise Exception("New word is not a Word instance: " + str(type(w)))
if 'suggest' in kwargs and kwargs['suggest']:
del kwargs['suggest']
return self.correct(suggestion=newwords,current=originalwords, **kwargs)
else:
return self.correct(original=originalwords, new=newwords, **kwargs)
def splitword(self, originalword, *newwords, **kwargs):
"""TODO: Write documentation"""
if isinstance(originalword, str) or isinstance(originalword, unicode):
originalword = self.doc[originalword]
return self.correctwords([originalword], newwords, **kwargs)
def mergewords(self, newword, *originalwords, **kwargs):
"""TODO: Write documentation"""
return self.correctwords(originalwords, [newword], **kwargs)
def deleteword(self, word, **kwargs):
"""TODO: Write documentation"""
if isinstance(word, str) or isinstance(word, unicode):
word = self.doc[word]
return self.correctwords([word], [], **kwargs)
def insertword(self, newword, prevword, **kwargs):
if prevword:
if isinstance(prevword, str) or isinstance(prevword, unicode):
prevword = self.doc[prevword]
if not prevword in self or not isinstance(prevword, Word):
raise Exception("Previous word not found or not instance of Word!")
if not isinstance(newword, Word):
raise Exception("New word no instance of Word!")
kwargs['insertindex'] = self.data.index(prevword) + 1
else:
kwargs['insertindex'] = 0
return self.correctwords([], [newword], **kwargs)
Quote.ACCEPTED_DATA = (Word, Sentence, Quote, TextContent, Description, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer)
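# Illustrative usage sketch for the sentence-level correction helpers above; the ids are placeholders
# and `doc`/`sentence` are assumed to exist. Each call records the change as a Correction element:
#
#   w1 = Word(doc, 'each',  id=sentence.id + '.w.4a')
#   w2 = Word(doc, 'other', id=sentence.id + '.w.4b')
#   sentence.splitword(doc[sentence.id + '.w.4'], w1, w2)          # one token becomes two
#   sentence.deleteword(doc[sentence.id + '.w.5'], suggest=True)   # only suggest the deletion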
class Caption(AbstractStructureElement):
"""Element used for captions for figures or tables, contains sentences"""
    ACCEPTED_DATA = (Sentence, Description, TextContent, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer)
OCCURRENCES = 1
XMLTAG = 'caption'
class Label(AbstractStructureElement):
"""Element used for labels. Mostly in within list item. Contains words."""
    ACCEPTED_DATA = (Word, Description, TextContent, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer, AbstractExtendedTokenAnnotation)
XMLTAG = 'label'
class ListItem(AbstractStructureElement):
"""Single element in a List. Structure element. Contained within List element."""
#ACCEPTED_DATA = (List, Sentence) #Defined below
XMLTAG = 'listitem'
ANNOTATIONTYPE = AnnotationType.LIST
class List(AbstractStructureElement):
"""Element for enumeration/itemisation. Structure element. Contains ListItem elements."""
    ACCEPTED_DATA = (ListItem, Description, Caption, Event, TextContent, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer, AbstractExtendedTokenAnnotation)
XMLTAG = 'list'
TEXTDELIMITER = '\n'
ANNOTATIONTYPE = AnnotationType.LIST
ListItem.ACCEPTED_DATA = (List, Sentence, Description, Label, Event, TextContent,Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer,AbstractExtendedTokenAnnotation)
class Figure(AbstractStructureElement):
"""Element for the representation of a graphical figure. Structure element."""
    ACCEPTED_DATA = (Sentence, Description, Caption, TextContent, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer)
XMLTAG = 'figure'
ANNOTATIONTYPE = AnnotationType.FIGURE
def __init__(self, doc, *args, **kwargs):
if 'src' in kwargs:
self.src = kwargs['src']
del kwargs['src']
else:
self.src = None
super(Figure, self).__init__(doc, *args, **kwargs)
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
if self.src:
if not attribs: attribs = {}
attribs['{' + NSFOLIA + '}src'] = self.src
return super(Figure, self).xml(attribs, elements, skipchildren)
def caption(self):
try:
caption = self.select(Caption)[0]
return caption.text()
        except IndexError:
raise NoSuchText
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not extraattribs:
extraattribs = [ E.optional(E.attribute(name='src')) ]
else:
extraattribs.append( E.optional(E.attribute(name='src')) )
return AbstractStructureElement.relaxng(includechildren, extraattribs, extraelements, cls)
class Paragraph(AbstractStructureElement):
"""Paragraph element. A structure element. Represents a paragraph and holds all its sentences (and possibly other structure Whitespace and Quotes)."""
ACCEPTED_DATA = (Sentence, AbstractExtendedTokenAnnotation, Correction, TextContent, Description, Linebreak, Whitespace, List, Figure, Event, Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer)
XMLTAG = 'p'
TEXTDELIMITER = "\n\n"
ANNOTATIONTYPE = AnnotationType.PARAGRAPH
class Head(AbstractStructureElement):
"""Head element. A structure element. Acts as the header/title of a division. There may be one per division. Contains sentences."""
ACCEPTED_DATA = (Sentence,Description, Event, TextContent,Alignment, Metric, Alternative, AlternativeLayers, AbstractAnnotationLayer, AbstractExtendedTokenAnnotation)
OCCURRENCES = 1
TEXTDELIMITER = ' '
XMLTAG = 'head'
class Query(object):
"""An XPath query on one or more FoLiA documents"""
def __init__(self, files, expression):
if isinstance(files, str) or isinstance(files, unicode):
self.files = [files]
else:
self.files = files
self.expression = expression
def __iter__(self):
for filename in self.files:
doc = Document(file=filename, mode=Mode.XPATH)
for result in doc.xpath(self.expression):
yield result
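# Minimal usage sketch of the Query class defined above: run an XPath expression
# over several documents and collect the text of every matching word. The
# filenames are hypothetical placeholders.
def _example_query():
    query = Query(['/path/to/doc1.folia.xml', '/path/to/doc2.folia.xml'], '//folia:w')
    return [ word.text() for word in query ]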
class RegExp(object):
def __init__(self, regexp):
self.regexp = re.compile(regexp)
def __eq__(self, value):
return self.regexp.match(value)
class Pattern(object):
def __init__(self, *args, **kwargs):
if not all( ( (x is True or isinstance(x,RegExp) or isinstance(x, str) or isinstance(x, unicode) or isinstance(x, list) or isinstance(x, tuple)) for x in args )):
raise TypeError
self.sequence = args
if 'matchannotation' in kwargs:
self.matchannotation = kwargs['matchannotation']
del kwargs['matchannotation']
else:
self.matchannotation = None
if 'matchannotationset' in kwargs:
self.matchannotationset = kwargs['matchannotationset']
del kwargs['matchannotationset']
else:
self.matchannotationset = None
if 'casesensitive' in kwargs:
self.casesensitive = bool(kwargs['casesensitive'])
del kwargs['casesensitive']
else:
self.casesensitive = False
for key in kwargs.keys():
raise Exception("Unknown keyword parameter: " + key)
if not self.casesensitive:
if all( ( isinstance(x, str) or isinstance(x, unicode) for x in self.sequence) ):
self.sequence = [ x.lower() for x in self.sequence ]
def __nonzero__(self):
return True
def __len__(self):
return len(self.sequence)
def __getitem__(self, index):
return self.sequence[index]
def __getslice__(self, begin,end):
return self.sequence[begin:end]
def variablesize(self):
return ('*' in self.sequence)
def variablewildcards(self):
wildcards = []
for i,x in enumerate(self.sequence):
if x == '*':
wildcards.append(i)
return wildcards
def __repr__(self):
return repr(self.sequence)
def resolve(self,size, distribution):
"""Resolve a variable sized pattern to all patterns of a certain fixed size"""
if not self.variablesize():
raise Exception("Can only resize patterns with * wildcards")
nrofwildcards = 0
for i,x in enumerate(self.sequence):
if x == '*':
nrofwildcards += 1
assert (len(distribution) == nrofwildcards)
wildcardnr = 0
newsequence = []
for i,x in enumerate(self.sequence):
if x == '*':
newsequence += [True] * distribution[wildcardnr]
wildcardnr += 1
else:
newsequence.append(x)
d = { 'matchannotation':self.matchannotation, 'matchannotationset':self.matchannotationset, 'casesensitive':self.casesensitive }
yield Pattern(*newsequence, **d )
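# Minimal sketch of Pattern construction for Document.findwords() (defined further
# below). The word forms and part-of-speech classes are hypothetical placeholders,
# and the last pattern assumes the PosAnnotation class defined elsewhere in this module.
def _example_patterns():
    literal = Pattern('de', True, 'man') #True matches any single token
    gapped = Pattern('de', '*', 'man') #'*' is a wildcard spanning a variable number of tokens
    posbased = Pattern('LID(bep)', 'N(soort)', matchannotation=PosAnnotation)
    return literal, gapped, posbased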
class NativeMetaData(object):
def __init__(self, *args, **kwargs):
self.data = {}
self.order = []
for key, value in kwargs.items():
self[key] = value
def __setitem__(self, key, value):
exists = key in self.data
self.data[key] = value
if not exists: self.order.append(key)
def __iter__(self):
for x in self.order:
yield x
def __contains__(self, x):
return x in self.data
def items(self):
for key in self.order:
yield key, self.data[key]
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
    def __delitem__(self, key):
        del self.data[key]
        self.order.remove(key)
class Document(object):
"""This is the FoLiA Document, all elements have to be associated with a FoLiA document. Besides holding elements, the document hold metadata including declaration, and an index of all IDs."""
IDSEPARATOR = '.'
def __init__(self, *args, **kwargs):
        """Start/load a FoLiA document:
There are four sources of input for loading a FoLiA document::
1) Create a new document by specifying an *ID*::
doc = folia.Document(id='test')
2) Load a document from FoLiA or D-Coi XML file::
doc = folia.Document(file='/path/to/doc.xml')
3) Load a document from an XML string::
doc = folia.Document(string='<FoLiA>....</FoLiA>')
        4) Load a document by passing a parsed xml tree (lxml.etree)::
doc = folia.Document(tree=xmltree)
Additionally, there are three modes that can be set with the mode= keyword argument:
* folia.Mode.MEMORY - The entire FoLiA Document will be loaded into memory. This is the default mode and the only mode in which documents can be manipulated and saved again.
* folia.Mode.XPATH - The full XML tree will still be loaded into memory, but conversion to FoLiA classes occurs only when queried. This mode can be used when the full power of XPath is required.
* folia.Mode.ITERATIVE - Not implemented, obsolete. Use Reader class instead
Optional keyword arguments:
``debug=``: Boolean to enable/disable debug
"""
self.version = FOLIAVERSION
self.data = [] #will hold all texts (usually only one)
self.annotationdefaults = {}
self.annotations = [] #Ordered list of incorporated annotations ['token','pos', etc..]
#Add implicit declaration for TextContent
self.annotations.append( (AnnotationType.TEXT,'undefined') )
self.annotationdefaults[AnnotationType.TEXT] = {'undefined': {} }
self.index = {} #all IDs go here
self.declareprocessed = False # Will be set to True when declarations have been processed
self.metadata = NativeMetaData() #will point to XML Element holding IMDI or CMDI metadata
self.metadatatype = MetaDataType.NATIVE
self.metadatafile = None #reference to external metadata file
self.autodeclare = False #Automatic declarations in case of undeclared elements (will be enabled for DCOI, since DCOI has no declarations)
self.setdefinitions = {} #key: set name, value: SetDefinition instance (only used when deepvalidation=True)
#The metadata fields FoLiA is directly aware of:
self._title = self._date = self._publisher = self._license = self._language = None
if 'debug' in kwargs:
self.debug = kwargs['debug']
else:
self.debug = False
if 'mode' in kwargs:
self.mode = int(kwargs['mode'])
else:
self.mode = Mode.MEMORY #Load all in memory
if 'deepvalidation' in kwargs:
self.deepvalidation = bool(kwargs['deepvalidation'])
else:
self.deepvalidation = False
if 'autodeclare' in kwargs:
self.autodeclare = True
if 'bypassleak' in kwargs:
self.bypassleak = bool(kwargs['bypassleak'])
else:
self.bypassleak = True
if 'id' in kwargs:
isncname(kwargs['id'])
self.id = kwargs['id']
elif 'file' in kwargs:
self.filename = kwargs['file']
if not self.bypassleak:
self.load(self.filename)
else:
f = open(self.filename)
contents = f.read()
f.close()
contents = contents.replace(' xml:id=', ' id=')
self.tree = ElementTree.parse(StringIO(contents))
self.parsexml(self.tree.getroot())
elif 'string' in kwargs:
s = kwargs['string']
if isinstance(s, unicode):
s = s.encode('utf-8')
if self.bypassleak:
s = s.replace(' xml:id=', ' id=')
self.tree = ElementTree.parse(StringIO(s))
self.parsexml(self.tree.getroot())
if self.mode != Mode.XPATH:
#XML Tree is now obsolete (only needed when partially loaded for xpath queries)
self.tree = None
elif 'tree' in kwargs:
self.parsexml(kwargs['tree'])
else:
raise Exception("No ID, filename or tree specified")
#def __del__(self):
# del self.index
# for child in self.data:
# del child
# del self.data
def load(self, filename):
"""Load a FoLiA or D-Coi XML file"""
global LXE
#if LXE and self.mode != Mode.XPATH:
# #workaround for xml:id problem (disabled)
# #f = open(filename)
# #s = f.read().replace(' xml:id=', ' id=')
# #f.close()
# self.tree = ElementTree.parse(filename)
#else:
self.tree = ElementTree.parse(filename)
self.parsexml(self.tree.getroot())
if self.mode != Mode.XPATH:
#XML Tree is now obsolete (only needed when partially loaded for xpath queries)
self.tree = None
def items(self):
"""Returns a depth-first flat list of all items in the document"""
l = []
for e in self.data:
l += e.items()
return l
def xpath(self, query):
"""Run Xpath expression and parse the resulting elements. Don't forget to use the FoLiA namesapace in your expressions, using folia: or the short form f: """
for result in self.tree.xpath(query,namespaces={'f': 'http://ilk.uvt.nl/folia','folia': 'http://ilk.uvt.nl/folia' }):
yield self.parsexml(result)
def findwords(self, *args, **kwargs):
if 'leftcontext' in kwargs:
leftcontext = kwargs['leftcontext']
del kwargs['leftcontext']
else:
leftcontext = 0
if 'rightcontext' in kwargs:
rightcontext = kwargs['rightcontext']
del kwargs['rightcontext']
else:
rightcontext = 0
if 'maxgapsize' in kwargs:
maxgapsize = kwargs['maxgapsize']
del kwargs['maxgapsize']
else:
maxgapsize = 10
for key in kwargs.keys():
raise Exception("Unknown keyword parameter: " + key)
matchcursor = 0
matched = []
#shortcut for when no Pattern is passed, make one on the fly
if len(args) == 1 and not isinstance(args[0], Pattern):
            if not isinstance(args[0], list) and not isinstance(args[0], tuple):
                args = ([args[0]],)
            args = (Pattern(*args[0]),)
unsetwildcards = False
variablewildcards = None
prevsize = -1
minsize = 99999
#sanity check
for i, pattern in enumerate(args):
if not isinstance(pattern, Pattern):
raise TypeError("You must pass instances of Sequence to findwords")
if prevsize > -1 and len(pattern) != prevsize:
raise Exception("If multiple patterns are provided, they must all have the same length!")
if pattern.variablesize():
if not variablewildcards and i > 0:
unsetwildcards = True
else:
if variablewildcards and pattern.variablewildcards() != variablewildcards:
raise Exception("If multiple patterns are provided with variable wildcards, then these wildcards must all be in the same positions!")
variablewildcards = pattern.variablewildcards()
elif variablewildcards:
unsetwildcards = True
prevsize = len(pattern)
if unsetwildcards:
#one pattern determines a fixed length whilst others are variable, rewrite all to fixed length
#converting multi-span * wildcards into single-span 'True' wildcards
for pattern in args:
if pattern.variablesize():
pattern.sequence = [ True if x == '*' else x for x in pattern.sequence ]
variablesize = False
if variablewildcards:
#one or more items have a * wildcard, which may span multiple tokens. Resolve this to a wider range of simpler patterns
            #we're not committed to a particular size, expand to various ones
for size in range(len(variablewildcards), maxgapsize+1):
for distribution in pynlpl.algorithms.sum_to_n(size, len(variablewildcards)): #gap distributions, (amount) of 'True' wildcards
patterns = []
for pattern in args:
if pattern.variablesize():
patterns += list(pattern.resolve(size,distribution))
else:
patterns.append( pattern )
for match in self.findwords(*patterns, **{'leftcontext':leftcontext,'rightcontext':rightcontext}):
yield match
else:
patterns = args
buffers = []
for word in self.words():
buffers.append( [] ) #Add a new empty buffer for every word
match = [None] * len(buffers)
for pattern in patterns:
#find value to match against
if not pattern.matchannotation:
value = word.text()
else:
if pattern.matchannotationset:
items = word.select(pattern.matchannotation, pattern.matchannotationset, True, [Original, Suggestion, Alternative])
else:
try:
set = self.defaultset(pattern.matchannotation.ANNOTATIONTYPE)
items = word.select(pattern.matchannotation, set, True, [Original, Suggestion, Alternative] )
except KeyError:
continue
if len(items) == 1:
value = items[0].cls
else:
continue
if not pattern.casesensitive:
value = value.lower()
for i, buffer in enumerate(buffers):
if match[i] is False:
continue
matchcursor = len(buffer)
if (value == pattern.sequence[matchcursor] or pattern.sequence[matchcursor] is True or (isinstance(pattern.sequence[matchcursor], tuple) and value in pattern.sequence[matchcursor])):
match[i] = True
else:
match[i] = False
for buffer, matches in zip(buffers, match):
if matches:
buffer.append(word) #add the word
if len(buffer) == len(pattern.sequence):
yield buffer[0].leftcontext(leftcontext) + buffer + buffer[-1].rightcontext(rightcontext)
buffers.remove(buffer)
else:
buffers.remove(buffer) #remove buffer
def save(self, filename=None):
"""Save the document to FoLiA XML.
Arguments:
* ``filename=``: The filename to save to. If not set (None), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename
if not filename:
raise Exception("No filename specified")
f = open(filename,'w')
f.write(self.xmlstring())
f.close()
def setcmdi(self,filename):
self.metadatatype = MetaDataType.CMDI
self.metadatafile = filename
self.metadata = {}
#TODO: Parse CMDI
def __len__(self):
return len(self.data)
def __nonzero__(self):
return True #documents always evaluate to True!
def __iter__(self):
for text in self.data:
yield text
def __getitem__(self, key):
"""Obtain an element by ID from the document index.
Example::
word = doc['example.p.4.s.10.w.3']
"""
try:
if isinstance(key, int):
return self.data[key]
else:
return self.index[key]
except KeyError:
raise
def append(self,text):
"""Add a text to the document:
Example 1::
doc.append(folia.Text)
Example 2::
doc.append( folia.Text(doc, id='example.text') )
"""
if text is Text:
text = Text(self, id=self.id + '.text.' + str(len(self.data)+1) )
else:
assert isinstance(text, Text)
self.data.append(text)
return text
def create(self, Class, *args, **kwargs):
"""Create an element associated with this Document. This method may be obsolete and removed later."""
return Class(self, *args, **kwargs)
def xmldeclarations(self):
l = []
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if annotationtype == AnnotationType.TEXT and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
attribs = {}
if set and set != 'undefined':
attribs['{' + NSFOLIA + '}set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
attribs['{' + NSFOLIA + '}' + key] = 'manual'
elif value == AnnotatorType.AUTO:
attribs['{' + NSFOLIA + '}' + key] = 'auto'
elif key == 'datetime':
attribs['{' + NSFOLIA + '}' + key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
attribs['{' + NSFOLIA + '}' + key] = value
if label:
l.append( E._makeelement('{' + NSFOLIA + '}' + label.lower() + '-annotation', **attribs) )
else:
raise Exception("Invalid annotation type")
return l
def xml(self):
global LIBVERSION, FOLIAVERSION
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"})
attribs = {}
if self.bypassleak:
attribs['XMLid'] = self.id
else:
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
if self.version:
attribs['version'] = self.version
else:
attribs['version'] = FOLIAVERSION
attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
metadataattribs = {}
if self.metadatatype == MetaDataType.NATIVE:
metadataattribs['{' + NSFOLIA + '}type'] = 'native'
elif self.metadatatype == MetaDataType.IMDI:
metadataattribs['{' + NSFOLIA + '}type'] = 'imdi'
if self.metadatafile:
metadataattribs['{' + NSFOLIA + '}src'] = self.metadatafile
elif self.metadatatype == MetaDataType.CMDI:
metadataattribs['{' + NSFOLIA + '}type'] = 'cmdi'
metadataattribs['{' + NSFOLIA + '}src'] = self.metadatafile
e = E.FoLiA(
E.metadata(
E.annotations(
*self.xmldeclarations()
),
*self.xmlmetadata(),
**metadataattribs
)
, **attribs)
for text in self.data:
e.append(text.xml())
return e
def xmlmetadata(self):
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
if self.metadatatype == MetaDataType.NATIVE:
e = []
if not self.metadatafile:
for key, value in self.metadata.items():
e.append(E.meta(value,id=key) )
return e
elif self.metadatatype == MetaDataType.IMDI:
if self.metadatafile:
return [] #external
elif self.metadata:
return [ElementTree.parse(StringIO(self.metadata)).getroot()] #inline
else:
return []
elif self.metadatatype == MetaDataType.CMDI: #CMDI, by definition external
return []
def parsexmldeclarations(self, node):
if self.debug >= 1:
print >>stderr, "[PyNLPl FoLiA DEBUG] Processing Annotation Declarations"
self.declareprocessed = True
for subnode in node:
if subnode.tag[:25] == '{' + NSFOLIA + '}' and subnode.tag[-11:] == '-annotation':
prefix = subnode.tag[25:][:-11]
type = None
if prefix.upper() in vars(AnnotationType):
type = vars(AnnotationType)[prefix.upper()]
else:
raise Exception("Unknown declaration: " + subnode.tag)
if 'set' in subnode.attrib and subnode.attrib['set']:
set = subnode.attrib['set']
else:
set = 'undefined'
if (type,set) in self.annotations:
if type == AnnotationType.TEXT:
#explicit Text declaration, remove the implicit declaration:
a = []
for t,s in self.annotations:
if not (t == AnnotationType.TEXT and s == 'undefined'):
a.append( (t,s) )
self.annotations = a
#raise ValueError("Double declaration of " + subnode.tag + ", set '" + set + "' + is already declared") //doubles are okay says Ko
else:
self.annotations.append( (type, set) )
#Load set definition
if set and self.deepvalidation and not set in self.setdefinitions:
if set != 'undefined' and set[0] != '_': #ignore sets starting with an underscore, they are ad-hoc sets by definition
self.setdefinitions[set] = loadsetdefinition(set) #will raise exception on error
#Set defaults
if type in self.annotationdefaults and set in self.annotationdefaults[type]:
#handle duplicate. If ambiguous: remove defaults
if 'annotator' in subnode.attrib:
if not ('annotator' in self.annotationdefaults[type][set]):
self.annotationdefaults[type][set]['annotator'] = subnode.attrib['annotator']
elif self.annotationdefaults[type][set]['annotator'] != subnode.attrib['annotator']:
del self.annotationdefaults[type][set]['annotator']
if 'annotatortype' in subnode.attrib:
if not ('annotatortype' in self.annotationdefaults[type][set]):
self.annotationdefaults[type][set]['annotatortype'] = subnode.attrib['annotatortype']
elif self.annotationdefaults[type][set]['annotatortype'] != subnode.attrib['annotatortype']:
del self.annotationdefaults[type][set]['annotatortype']
else:
defaults = {}
if 'annotator' in subnode.attrib:
defaults['annotator'] = subnode.attrib['annotator']
if 'annotatortype' in subnode.attrib:
if subnode.attrib['annotatortype'] == 'auto':
defaults['annotatortype'] = AnnotatorType.AUTO
else:
defaults['annotatortype'] = AnnotatorType.MANUAL
if 'datetime' in subnode.attrib:
if isinstance(subnode.attrib['datetime'], datetime):
defaults['datetime'] = subnode.attrib['datetime']
else:
defaults['datetime'] = parse_datetime(subnode.attrib['datetime'])
if not type in self.annotationdefaults:
self.annotationdefaults[type] = {}
self.annotationdefaults[type][set] = defaults
if self.debug >= 1:
print >>stderr, "[PyNLPl FoLiA DEBUG] Found declared annotation " + subnode.tag + ". Defaults: " + repr(defaults)
def setimdi(self, node):
global LXE
#TODO: node or filename
ns = {'imdi': 'http://www.mpi.nl/IMDI/Schema/IMDI'}
self.metadatatype = MetaDataType.IMDI
if LXE:
self.metadata = ElementTree.tostring(node, xml_declaration=False, pretty_print=True, encoding='utf-8')
else:
self.metadata = ElementTree.tostring(node, encoding='utf-8')
n = node.xpath('imdi:Session/imdi:Title', namespaces=ns)
if n and n[0].text: self._title = n[0].text
n = node.xpath('imdi:Session/imdi:Date', namespaces=ns)
if n and n[0].text: self._date = n[0].text
n = node.xpath('//imdi:Source/imdi:Access/imdi:Publisher', namespaces=ns)
if n and n[0].text: self._publisher = n[0].text
n = node.xpath('//imdi:Source/imdi:Access/imdi:Availability', namespaces=ns)
if n and n[0].text: self._license = n[0].text
n = node.xpath('//imdi:Languages/imdi:Language/imdi:ID', namespaces=ns)
if n and n[0].text: self._language = n[0].text
def declare(self, annotationtype, set, **kwargs):
if inspect.isclass(annotationtype):
annotationtype = annotationtype.ANNOTATIONTYPE
if not (annotationtype, set) in self.annotations:
self.annotations.append( (annotationtype,set) )
if not annotationtype in self.annotationdefaults:
self.annotationdefaults[annotationtype] = {}
self.annotationdefaults[annotationtype][set] = kwargs
def declared(self, annotationtype, set):
        if inspect.isclass(annotationtype) and issubclass(annotationtype, AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
return ( (annotationtype,set) in self.annotations)
def defaultset(self, annotationtype):
        if inspect.isclass(annotationtype) and issubclass(annotationtype, AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
try:
return self.annotationdefaults[annotationtype].keys()[0]
except IndexError:
raise NoDefaultError
def defaultannotator(self, annotationtype, set=None):
        if inspect.isclass(annotationtype) and issubclass(annotationtype, AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
if not set: set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['annotator']
except KeyError:
raise NoDefaultError
def defaultannotatortype(self, annotationtype,set=None):
        if inspect.isclass(annotationtype) and issubclass(annotationtype, AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
if not set: set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['annotatortype']
except KeyError:
raise NoDefaultError
def defaultdatetime(self, annotationtype,set=None):
        if inspect.isclass(annotationtype) and issubclass(annotationtype, AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
if not set: set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['datetime']
except KeyError:
raise NoDefaultError
def title(self, value=None):
"""No arguments: Get the document's title from metadata
Argument: Set the document's title in metadata
"""
if not (value is None):
if (self.metadatatype == MetaDataType.NATIVE):
self.metadata['title'] = value
else:
self._title = value
if (self.metadatatype == MetaDataType.NATIVE):
if 'title' in self.metadata:
return self.metadata['title']
else:
return None
else:
return self._title
def date(self, value=None):
"""No arguments: Get the document's date from metadata
Argument: Set the document's date in metadata
"""
if not (value is None):
if (self.metadatatype == MetaDataType.NATIVE):
self.metadata['date'] = value
else:
self._date = value
if (self.metadatatype == MetaDataType.NATIVE):
if 'date' in self.metadata:
return self.metadata['date']
else:
return None
else:
return self._date
def publisher(self, value=None):
"""No arguments: Get the document's publisher from metadata
Argument: Set the document's publisher in metadata
"""
if not (value is None):
if (self.metadatatype == MetaDataType.NATIVE):
self.metadata['publisher'] = value
else:
self._publisher = value
if (self.metadatatype == MetaDataType.NATIVE):
if 'publisher' in self.metadata:
return self.metadata['publisher']
else:
return None
else:
return self._publisher
def license(self, value=None):
"""No arguments: Get the document's license from metadata
Argument: Set the document's license in metadata
"""
if not (value is None):
if (self.metadatatype == MetaDataType.NATIVE):
self.metadata['license'] = value
else:
self._license = value
if (self.metadatatype == MetaDataType.NATIVE):
if 'license' in self.metadata:
return self.metadata['license']
else:
return None
else:
return self._license
def language(self, value=None):
"""No arguments: Get the document's language (ISO-639-3) from metadata
Argument: Set the document's language (ISO-639-3) in metadata
"""
if not (value is None):
if (self.metadatatype == MetaDataType.NATIVE):
self.metadata['language'] = value
else:
self._language = value
if (self.metadatatype == MetaDataType.NATIVE):
if 'language' in self.metadata:
return self.metadata['language']
else:
return None
else:
return self._language
def parsemetadata(self, node):
if self.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found Metadata"
if 'type' in node.attrib and node.attrib['type'] == 'imdi':
self.metadatatype = MetaDataType.IMDI
elif 'type' in node.attrib and node.attrib['type'] == 'cmdi':
self.metadatatype = MetaDataType.CMDI
elif 'type' in node.attrib and node.attrib['type'] == 'native':
self.metadatatype = MetaDataType.NATIVE
else:
#no type specified, default to native
self.metadatatype = MetaDataType.NATIVE
self.metadata = NativeMetaData()
self.metadatafile = None
if 'src' in node.attrib:
self.metadatafile = node.attrib['src']
for subnode in node:
if subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT':
self.metadatatype = MetaDataType.IMDI
self.setimdi(subnode)
if subnode.tag == '{' + NSFOLIA + '}annotations':
self.parsexmldeclarations(subnode)
if subnode.tag == '{' + NSFOLIA + '}meta':
if subnode.text:
self.metadata[subnode.attrib['id']] = subnode.text
def parsexml(self, node, ParentClass = None):
"""Main XML parser, will invoke class-specific XML parsers. For internal use."""
global XML2CLASS, NSFOLIA, NSDCOI, LXE
nslen = len(NSFOLIA) + 2
nslendcoi = len(NSDCOI) + 2
if (LXE and isinstance(node,ElementTree._ElementTree)) or (not LXE and isinstance(node, ElementTree.ElementTree)):
node = node.getroot()
elif isinstance(node, str) or isinstance(node, unicode):
node = ElementTree.parse(StringIO(node)).getroot()
if node.tag == '{' + NSFOLIA + '}FoLiA':
if self.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found FoLiA document"
try:
self.id = node.attrib['{http://www.w3.org/XML/1998/namespace}id']
except KeyError:
try:
self.id = node.attrib['id']
except KeyError:
raise Exception("FoLiA Document has no ID!")
if 'version' in node.attrib:
self.version = node.attrib['version']
else:
self.version = None
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}metadata':
self.parsemetadata(subnode)
elif subnode.tag == '{' + NSFOLIA + '}text' and self.mode == Mode.MEMORY:
if self.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found Text"
self.data.append( self.parsexml(subnode) )
elif node.tag == '{' + NSDCOI + '}DCOI':
if self.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found DCOI document"
self.autodeclare = True
try:
self.id = node.attrib['{http://www.w3.org/XML/1998/namespace}id']
except KeyError:
try:
self.id = node.attrib['id']
except KeyError:
raise Exception("D-Coi Document has no ID!")
for subnode in node:
if subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT':
self.metadatatype = MetaDataType.IMDI
self.setimdi(subnode)
elif subnode.tag == '{' + NSDCOI + '}text':
if self.debug >= 1: print >>stderr, "[PyNLPl FoLiA DEBUG] Found Text"
self.data.append( self.parsexml(subnode) )
elif node.tag[:nslen] == '{' + NSFOLIA + '}':
#generic handling (FoLiA)
if not node.tag[nslen:] in XML2CLASS:
raise Exception("Unknown FoLiA XML tag: " + node.tag)
Class = XML2CLASS[node.tag[nslen:]]
return Class.parsexml(node,self)
elif node.tag[:nslendcoi] == '{' + NSDCOI + '}':
#generic handling (D-Coi)
if node.tag[nslendcoi:] in XML2CLASS:
Class = XML2CLASS[node.tag[nslendcoi:]]
return Class.parsexml(node,self)
elif node.tag[nslendcoi:][0:3] == 'div': #support for div0, div1, etc:
Class = Division
return Class.parsexml(node,self)
elif node.tag[nslendcoi:] == 'item': #support for listitem
Class = ListItem
return Class.parsexml(node,self)
elif node.tag[nslendcoi:] == 'figDesc': #support for description in figures
Class = Description
return Class.parsexml(node,self)
else:
raise Exception("Unknown DCOI XML tag: " + node.tag)
else:
raise Exception("Unknown FoLiA XML tag: " + node.tag)
def select(self, Class, set=None):
if self.mode == Mode.MEMORY:
return sum([ t.select(Class,set,True ) for t in self.data ],[])
def paragraphs(self, index = None):
"""Return a list of all paragraphs found in the document.
If an index is specified, return the n'th paragraph only (starting at 0)"""
if index is None:
return sum([ t.select(Paragraph) for t in self.data ],[])
else:
return sum([ t.select(Paragraph) for t in self.data ],[])[index]
def sentences(self, index = None):
"""Return a list of all sentence found in the document. Except for sentences in quotes.
If an index is specified, return the n'th sentence only (starting at 0)"""
if index is None:
return sum([ t.select(Sentence,None,True,[Quote]) for t in self.data ],[])
else:
return sum([ t.select(Sentence,None,True,[Quote]) for t in self.data ],[])[index]
def words(self, index = None):
"""Return a list of all active words found in the document. Does not descend into annotation layers, alternatives, originals, suggestions.
If an index is specified, return the n'th word only (starting at 0)"""
if index is None:
return sum([ t.select(Word,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer']) for t in self.data ],[])
else:
return sum([ t.select(Word,None,True,['Original','Suggestion','Alternative','AbstractAnnotationLayer']) for t in self.data ],[])[index]
def text(self, retaintokenisation=False):
"""Returns the text of the entire document (returns a unicode instance)"""
s = u""
for c in self.data:
if s: s += "\n\n\n"
try:
s += c.text('current',retaintokenisation)
except NoSuchText:
continue
return s
def xmlstring(self):
s = ElementTree.tostring(self.xml(), xml_declaration=True, pretty_print=True, encoding='utf-8')
if self.bypassleak:
return s.replace('XMLid=','xml:id=')
else:
return s
def __unicode__(self):
"""Returns the text of the entire document"""
return self.text()
def __ne__(self, other):
return not (self == other)
def __eq__(self, other):
if len(self.data) != len(other.data):
if self.debug: print >>stderr, "[PyNLPl FoLiA DEBUG] Equality check - Documents have unequal amount of children"
return False
for e,e2 in zip(self.data,other.data):
if e != e2:
return False
return True
def __str__(self):
"""Returns the text of the entire document (UTF-8 encoded)"""
return unicode(self).encode('utf-8')
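# Minimal usage sketch of the Document class defined above: load a file, extract
# word texts, search with findwords() and save again. The filenames and the
# searched word sequence are hypothetical placeholders.
def _example_document():
    doc = Document(file='/path/to/example.folia.xml')
    wordtexts = [ word.text() for word in doc.words() ]
    #find "de" <any single token> "man", with one word of context on either side
    matches = list(doc.findwords(Pattern('de', True, 'man'), leftcontext=1, rightcontext=1))
    doc.save('/path/to/example.out.folia.xml')
    return wordtexts, matches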
class Content(AbstractElement): #used for raw content, subelement for Gap
OCCURRENCES = 1
XMLTAG = 'content'
def __init__(self,doc, *args, **kwargs):
if 'value' in kwargs:
if isinstance(kwargs['value'], unicode):
self.value = kwargs['value']
elif isinstance(kwargs['value'], str):
self.value = unicode(kwargs['value'],'utf-8')
elif kwargs['value'] is None:
self.value = u""
else:
raise Exception("value= parameter must be unicode or str instance")
del kwargs['value']
else:
raise Exception("Description expects value= parameter")
super(Content,self).__init__(doc, *args, **kwargs)
def __nonzero__(self):
return bool(self.value)
def __unicode__(self):
return self.value
def __str__(self):
return self.value.encode('utf-8')
def xml(self, attribs = None,elements = None, skipchildren = False):
global NSFOLIA
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
if not attribs:
attribs = {}
return E.content(self.value, **attribs)
@classmethod
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None):
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
return E.define( E.element(E.text(), name=cls.XMLTAG), name=cls.XMLTAG, ns=NSFOLIA)
@classmethod
def parsexml(Class, node, doc):
global NSFOLIA
kwargs = {}
kwargs['value'] = node.text
return Content(doc, **kwargs)
class Gap(AbstractElement):
"""Gap element. Represents skipped portions of the text. Contains Content and Desc elements"""
ACCEPTED_DATA = (Content, Description)
OPTIONAL_ATTRIBS = (Attrib.ID,Attrib.CLASS,Attrib.ANNOTATOR,Attrib.CONFIDENCE,Attrib.N,)
ANNOTATIONTYPE = AnnotationType.GAP
XMLTAG = 'gap'
def __init__(self, doc, *args, **kwargs):
if 'content' in kwargs:
self.content = kwargs['content']
del kwargs['content']
elif 'description' in kwargs:
self.description = kwargs['description']
del kwargs['description']
super(Gap,self).__init__(doc, *args, **kwargs)
def content(self):
for e in self:
if isinstance(e, Content):
return e.value
return ""
class Division(AbstractStructureElement):
"""Structure element representing some kind of division. Divisions may be nested at will, and may include almost all kinds of other structure elements."""
REQUIRED_ATTRIBS = (Attrib.ID,)
OPTIONAL_ATTRIBS = (Attrib.CLASS,Attrib.N)
XMLTAG = 'div'
ANNOTATIONTYPE = AnnotationType.DIVISION
TEXTDELIMITER = "\n\n\n"
def head(self):
for e in self.data:
if isinstance(e, Head):
return e
raise NoSuchAnnotation()
Division.ACCEPTED_DATA = (Division, Gap, Event, Head, Paragraph, Sentence, List, Figure, AbstractExtendedTokenAnnotation, Description, Linebreak, Whitespace, Alternative, AlternativeLayers, AbstractAnnotationLayer)
class Text(AbstractStructureElement):
"""A full text. This is a high-level element (not to be confused with TextContent!). This element may contain divisions, paragraphs, sentences, etc.."""
REQUIRED_ATTRIBS = (Attrib.ID,)
OPTIONAL_ATTRIBS = (Attrib.N,)
ACCEPTED_DATA = (Gap, Event, Division, Paragraph, Sentence, List, Figure, AbstractAnnotationLayer, AbstractExtendedTokenAnnotation, Description, TextContent)
XMLTAG = 'text'
TEXTDELIMITER = "\n\n\n"
class Corpus:
"""A corpus of various FoLiA documents. Yields a Document on each iteration. Suitable for sequential processing."""
def __init__(self,corpusdir, extension = 'xml', restrict_to_collection = "", conditionf=lambda x: True, ignoreerrors=False, **kwargs):
self.corpusdir = corpusdir
self.extension = extension
self.restrict_to_collection = restrict_to_collection
self.conditionf = conditionf
self.ignoreerrors = ignoreerrors
self.kwargs = kwargs
def __iter__(self):
if not self.restrict_to_collection:
for f in glob.glob(self.corpusdir+"/*." + self.extension):
if self.conditionf(f):
try:
yield Document(file=f, **self.kwargs )
except Exception as e:
print >>stderr, "Error, unable to parse " + f + ": " + e.__class__.__name__ + " - " + str(e)
if not self.ignoreerrors:
raise
for d in glob.glob(self.corpusdir+"/*"):
if (not self.restrict_to_collection or self.restrict_to_collection == os.path.basename(d)) and (os.path.isdir(d)):
for f in glob.glob(d+ "/*." + self.extension):
if self.conditionf(f):
try:
yield Document(file=f, **self.kwargs)
except Exception as e:
print >>stderr, "Error, unable to parse " + f + ": " + e.__class__.__name__ + " - " + str(e)
if not self.ignoreerrors:
raise
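# Minimal usage sketch of the Corpus class defined above: iterate over a directory
# of FoLiA documents, skipping unparsable files. The directory path is a
# hypothetical placeholder.
def _example_corpus():
    titles = []
    for doc in Corpus('/path/to/corpusdir', extension='xml', ignoreerrors=True):
        titles.append( (doc.id, doc.title()) )
    return titles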
class CorpusFiles(Corpus):
"""A corpus of various FoLiA documents. Yields the filenames on each iteration."""
def __iter__(self):
if not self.restrict_to_collection:
for f in glob.glob(self.corpusdir+"/*." + self.extension):
if self.conditionf(f):
try:
yield f
except Exception as e:
print >>stderr, "Error, unable to parse " + f+ ": " + e.__class__.__name__ + " - " + str(e)
if not self.ignoreerrors:
raise
for d in glob.glob(self.corpusdir+"/*"):
if (not self.restrict_to_collection or self.restrict_to_collection == os.path.basename(d)) and (os.path.isdir(d)):
for f in glob.glob(d+ "/*." + self.extension):
if self.conditionf(f):
try:
yield f
except Exception as e:
print >>stderr, "Error, unable to parse " + f+ ": " + e.__class__.__name__ + " - " + str(e)
if not self.ignoreerrors:
raise
class CorpusProcessor(object):
"""Processes a corpus of various FoLiA documents using a parallel processing. Calls a user-defined function with the three-tuple (filename, args, kwargs) for each file in the corpus. The user-defined function is itself responsible for instantiating a FoLiA document! args and kwargs, as received by the custom function, are set through the run() method, which yields the result of the custom function on each iteration."""
def __init__(self,corpusdir, function, threads = None, extension = 'xml', restrict_to_collection = "", conditionf=lambda x: True, maxtasksperchild=100, preindex = False, ordered=True, chunksize = 1):
self.function = function
self.threads = threads #If set to None, will use all available cores by default
self.corpusdir = corpusdir
self.extension = extension
self.restrict_to_collection = restrict_to_collection
self.conditionf = conditionf
self.ignoreerrors = True
self.maxtasksperchild = maxtasksperchild #This should never be set too high due to lxml leaking memory!!!
self.preindex = preindex
self.ordered = ordered
self.chunksize = chunksize
if preindex:
self.index = list(CorpusFiles(self.corpusdir, self.extension, self.restrict_to_collection, self.conditionf, True))
self.index.sort()
def __len__(self):
if self.preindex:
return len(self.index)
else:
            raise ValueError("Can only retrieve length if instantiated with preindex=True")
def execute(self):
for output in self.run():
pass
def run(self, *args, **kwargs):
if not self.preindex:
self.index = CorpusFiles(self.corpusdir, self.extension, self.restrict_to_collection, self.conditionf, True) #generator
pool = multiprocessing.Pool(self.threads,None,None, self.maxtasksperchild)
if self.ordered:
return pool.imap( self.function, ( (filename, args, kwargs) for filename in self.index), self.chunksize)
else:
return pool.imap_unordered( self.function, ( (filename, args, kwargs) for filename in self.index), self.chunksize)
#pool.close()
def __iter__(self):
return self.run()
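# Minimal usage sketch of CorpusProcessor: process a corpus in parallel. The
# callback receives a (filename, args, kwargs) tuple and must load the document
# itself; the corpus directory is a hypothetical placeholder.
def _example_countwords(data):
    filename, args, kwargs = data
    doc = Document(file=filename)
    return filename, len(doc.words())
def _example_corpusprocessor():
    processor = CorpusProcessor('/path/to/corpusdir', _example_countwords, threads=4)
    return list(processor.run())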
class SetType:
CLOSED, OPEN, MIXED = range(3)
class AbstractDefinition(object):
pass
class ConstraintDefinition(object):
def __init__(self, id, restrictions = {}, exceptions = {}):
self.id = id
self.restrictions = restrictions
self.exceptions = exceptions
@classmethod
def parsexml(Class, node, constraintindex):
global NSFOLIA
assert node.tag == '{' + NSFOLIA + '}constraint'
if 'ref' in node.attrib:
try:
return constraintindex[node.attrib['ref']]
except KeyError:
raise KeyError("Unresolvable constraint: " + node.attrib['ref'])
restrictions = []
exceptions = []
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}restrict':
if 'subset' in subnode.attrib:
restrictions.append( (subnode.attrib['subset'], subnode.attrib['class']) )
else:
restrictions.append( (None, subnode.attrib['class']) )
elif subnode.tag == '{' + NSFOLIA + '}except':
if 'subset' in subnode.attrib:
exceptions.append( (subnode.attrib['subset'], subnode.attrib['class']) )
else:
exceptions.append( (None, subnode.attrib['class']) )
if '{http://www.w3.org/XML/1998/namespace}id' in node.attrib:
id = node.attrib['{http://www.w3.org/XML/1998/namespace}id']
            instance = ConstraintDefinition(id, restrictions, exceptions)
constraintindex[id] = instance
else:
            instance = ConstraintDefinition(None, restrictions, exceptions)
return instance
class ClassDefinition(AbstractDefinition):
def __init__(self,id, type,label, constraints=[]):
self.id = id
self.type = type
self.label = label
self.constraints = constraints
@classmethod
    #TODO: is the mutable default argument constraintindex={} correct here?
def parsexml(Class, node, constraintindex= {}):
global NSFOLIA
assert node.tag == '{' + NSFOLIA + '}class'
if 'label' in node.attrib:
label = node.attrib['label']
else:
label = ""
if 'type' in node.attrib:
if node.attrib['type'] == 'open':
type = SetType.OPEN
elif node.attrib['type'] == 'closed':
type = SetType.CLOSED
elif node.attrib['type'] == 'mixed':
type = SetType.MIXED
else:
raise Exception("Invalid set type: ", type)
else:
type = SetType.MIXED
constraints = []
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}constraint':
                constraints.append( ConstraintDefinition.parsexml(subnode, constraintindex) )
elif subnode.tag[:len(NSFOLIA) +2] == '{' + NSFOLIA + '}':
raise Exception("Invalid tag in Class definition: " + subnode.tag)
return ClassDefinition(node.attrib['{http://www.w3.org/XML/1998/namespace}id'],type, label, constraints)
class SubsetDefinition(AbstractDefinition):
def __init__(self, id, type, classes = [], constraints = []):
self.id = id
self.type = type
self.classes = classes
self.constraints = constraints
@classmethod
def parsexml(Class, node, constraintindex= {}):
global NSFOLIA
assert node.tag == '{' + NSFOLIA + '}subset'
if 'type' in node.attrib:
if node.attrib['type'] == 'open':
type = SetType.OPEN
elif node.attrib['type'] == 'closed':
type = SetType.CLOSED
elif node.attrib['type'] == 'mixed':
type = SetType.MIXED
else:
raise Exception("Invalid set type: ", type)
else:
type = SetType.MIXED
classes = []
constraints = []
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}class':
classes.append( ClassDefinition.parsexml(subnode, constraintindex) )
elif subnode.tag == '{' + NSFOLIA + '}constraint':
                constraints.append( ConstraintDefinition.parsexml(subnode, constraintindex) )
elif subnode.tag[:len(NSFOLIA) +2] == '{' + NSFOLIA + '}':
raise Exception("Invalid tag in Set definition: " + subnode.tag)
return SubsetDefinition(node.attrib['{http://www.w3.org/XML/1998/namespace}id'],type,classes, constraints)
class SetDefinition(AbstractDefinition):
def __init__(self, id, classes = [], subsets = [], constraintindex = {}):
isncname(id)
self.id = id
self.classes = classes
self.subsets = subsets
self.constraintindex = constraintindex
@classmethod
def parsexml(Class, node):
global NSFOLIA
assert node.tag == '{' + NSFOLIA + '}set'
classes = []
subsets= []
        constraintindex = {}
        for subnode in node:
            if subnode.tag == '{' + NSFOLIA + '}class':
                classes.append( ClassDefinition.parsexml(subnode, constraintindex) )
            elif subnode.tag == '{' + NSFOLIA + '}subset':
                subsets.append( SubsetDefinition.parsexml(subnode, constraintindex) )
            elif subnode.tag[:len(NSFOLIA) +2] == '{' + NSFOLIA + '}':
                raise SetDefinitionError("Invalid tag in Set definition: " + subnode.tag)
        return SetDefinition(node.attrib['{http://www.w3.org/XML/1998/namespace}id'], classes, subsets, constraintindex)
def testclass(self,cls,doc):
        #TODO: the set must be given a type
if not cls in (c.id for c in self.classes) and self.classes and not str(cls) == "None":
#can still be an trigger for an event
#~ ent_name =""
#~ dep_name =""
#~ for ann_type,j in doc.annotations:
#~ if ann_type== AnnotationType.ENTITY:
#~ ent_name = j
#~ if ann_type== AnnotationType.DEPENDENCY:
#~ dep_name = j
#~ break
#~ print ent_name
#~ print dep_name
#~ print self.id
#~ if self.id != doc.setdefinitions[ent_name].id:
raise DeepValidationError("Set definition for " + self.id + " does not contain"+str(cls))
#~ else:
#~ doc.setdefinitions[dep_name].testclass(cls,doc)
#raise NotImplementedError #TODO, IMPLEMENT!
    def testsubclass(self, cls, subset, subclass):
raise NotImplementedError #TODO, IMPLEMENT!
def loadsetdefinition(filename):
global NSFOLIA
if filename[0:7] == 'http://':
f = urllib.urlopen(filename)
try:
tree = ElementTree.parse(StringIO("\n".join(f.readlines())))
except IOError:
raise DeepValidationError("Unable to download " + set)
f.close()
else:
tree = ElementTree.parse(filename)
root = tree.getroot()
if root.tag != '{' + NSFOLIA + '}set':
raise SetDefinitionError("Not a FoLiA Set Definition! Unexpected root tag:"+ root.tag)
return SetDefinition.parsexml(root)
def relaxng_declarations():
global NSFOLIA
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
for key, value in vars(AnnotationType).items():
if key[0] != '_':
yield E.element( E.optional( E.attribute(name='set')) , E.optional(E.attribute(name='annotator')) , E.optional( E.attribute(name='annotatortype') ) , E.optional( E.attribute(name='datetime') ) , name=key.lower() + '-annotation')
def relaxng(filename=None):
global NSFOLIA, LXE
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
grammar = E.grammar( E.start ( E.element( #FoLiA
E.attribute(name='id',ns="http://www.w3.org/XML/1998/namespace"),
E.optional( E.attribute(name='version') ),
E.optional( E.attribute(name='generator') ),
E.element( #metadata
E.optional(E.attribute(name='type')),
E.optional(E.attribute(name='src')),
E.element( E.zeroOrMore( E.choice( *relaxng_declarations() ) ) ,name='annotations'),
E.zeroOrMore(
E.element(E.attribute(name='id'), E.text(), name='meta'),
),
#E.optional(
# E.ref(name='METATRANSCRIPT')
#),
name='metadata',
#ns=NSFOLIA,
),
E.oneOrMore(
E.ref(name='text'),
),
name='FoLiA',
ns = NSFOLIA
) ),
)
done = {}
for c in globals().values():
if 'relaxng' in dir(c):
if c.relaxng and c.XMLTAG and not c.XMLTAG in done:
done[c.XMLTAG] = True
grammar.append( c.relaxng() )
#for e in relaxng_imdi():
# grammar.append(e)
if filename:
f = open(filename,'w')
if LXE:
f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace("</define>","</define>\n\n") )
else:
f.write( ElementTree.tostring(relaxng()).replace("</define>","</define>\n\n") )
f.close()
return grammar
class Reader(object):
"""Streaming FoLiA reader. The reader allows you to read a FoLiA Document without holding the whole tree structure in memory. The document will be read and the elements you seek returned as they are found. """
def __init__(self, filename, target, bypassleak=False):
"""Read a FoLiA document in a streaming fashion. You select a specific target element and all occurrence of this element, including all there contents (so all elements within), will be returned.
Arguments:
* ``filename``: The filename of the document to read
* ``target``: A FoLiA elements you want to read, passed as a class. For example: ``folia.Sentence``.
        * ``bypassleak``: Boolean indicating whether to bypass a memory leak in lxml. Set this to True if you are processing a large number of files sequentially! This comes at the cost of a higher memory footprint, as the raw contents of the file, as opposed to the tree structure, *will* be loaded in memory.
"""
self.filename = filename
self.target = target
if not issubclass(self.target, AbstractElement):
raise ValueError("Target must be subclass of FoLiA element")
self.bypassleak = bypassleak
def __iter__(self):
"""Iterating over a Reader instance will cause the FoLiA document to be read. This is a generator yielding instances of the object you specified"""
global NSFOLIA
f = open(self.filename,'r')
if self.bypassleak:
data = f.read()
data = data.replace(' xml:id="',' id="')
f.close()
f = StringIO(data)
doc = None
metadata = False
parser = ElementTree.iterparse(f, events=("start","end"))
for action, node in parser:
if action == "start" and node.tag == "{" + NSFOLIA + "}FoLiA":
doc = Document(id= node.attrib['{http://www.w3.org/XML/1998/namespace}id'])
if 'version' in node.attrib:
doc.version = node.attrib['version']
if action == "end" and node.tag == "{" + NSFOLIA + "}metadata":
if not doc:
raise MalformedXMLError("Metadata found, but no document? Impossible")
metadata = True
doc.parsemetadata(node)
break
if not doc:
raise MalformedXMLError("No FoLiA Document found!")
elif not metadata:
raise MalformedXMLError("No metadata found!")
f.seek(0) #reset
parser = ElementTree.iterparse(f, events=("end",), tag="{" + NSFOLIA + "}" + self.target.XMLTAG )
for action, node in parser:
element = self.target.parsexml(node, doc)
node.clear() #clean up children
while node.getprevious() is not None:
del node.getparent()[0] # clean up preceding siblings
yield element
f.close()
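# Minimal usage sketch of the Reader class defined above: stream all sentences of a
# (potentially large) document without keeping the full tree in memory. The filename
# is a hypothetical placeholder.
def _example_reader():
    texts = []
    for sentence in Reader('/path/to/big.folia.xml', Sentence, bypassleak=True):
        try:
            texts.append(sentence.text())
        except NoSuchText:
            continue
    return texts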
#class WordIndexer(object):
# def __init__(self, doc, *args, **kwargs)
# self.doc = doc
#
# def __iter__(self):
#
#
# def savecsv(self, filename):
#
#
# def savesql(self, filename):
# in-place prettyprint formatter
def isncname(name):
#not entirely according to specs http://www.w3.org/TR/REC-xml/#NT-Name , but simplified:
for i, c in enumerate(name):
if i == 0:
if not c.isalpha():
raise ValueError('Invalid XML NCName identifier: ' + name + ' (at position ' + str(i+1)+')')
else:
if not c.isalnum() and not (c in ['-','_','.']):
raise ValueError('Invalid XML NCName identifier: ' + name + ' (at position ' + str(i+1)+')')
return True
def validate(filename,schema=None,deep=False):
if not os.path.exists(filename):
raise IOError("No such file")
try:
doc = ElementTree.parse(filename)
except:
raise MalformedXMLError("Malformed XML!")
#See if there's inline IMDI and strip it off prior to validation (validator doesn't do IMDI)
m = doc.xpath('//folia:metadata', namespaces={'f': 'http://ilk.uvt.nl/folia','folia': 'http://ilk.uvt.nl/folia' })
if m:
metadata = m[0]
m = metadata.find('{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT')
if m is not None:
metadata.remove(m)
if not schema:
schema = ElementTree.RelaxNG(relaxng())
schema.assertValid(doc) #will raise exceptions
if deep:
doc = Document(tree=doc, deepvalidation=True)
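# Minimal usage sketch of validate(): check a document against the generated
# RelaxNG schema; an exception is raised when the document is invalid. The
# filename is a hypothetical placeholder.
def _example_validate():
    schema = ElementTree.RelaxNG(relaxng())
    validate('/path/to/example.folia.xml', schema=schema, deep=False)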
XML2CLASS = {}
for c in vars().values():
try:
if c.XMLTAG:
XML2CLASS[c.XMLTAG] = c
except:
continue
from subprocess import call
from . import Command
class RelinkCommand(Command):
"""%prog [options] path
Find and relink entities into the SGFS cache.
"""
def __init__(self):
super(RelinkCommand, self).__init__()
self.add_option('-C', '--cache-path')
self.add_option('-r', '--recurse', action="store_true", dest="recurse")
self.add_option('-u', '--update', action="store_true", dest="update")
self.add_option('-n', '--dry-run', action="store_true", dest="dry_run")
def run(self, sgfs, opts, args, recurse=False, **kwargs):
if len(args) != 1:
self.print_usage()
return 1
changed = sgfs.rebuild_cache(args[0],
cache_path=opts.cache_path,
recurse=recurse or opts.recurse,
dry_run=opts.dry_run,
verbose=True,
)
if opts.update and changed:
if opts.dry_run:
print 'Checking for tag updates (without applying)...'
else:
print 'Updating tags...'
cmd = ['sgfs-update']
if opts.dry_run:
cmd.append('--dry-run')
cmd.extend(new for old, new, tag in changed)
call(cmd)
main = RelinkCommand()
def main_rebuild(*args, **kwargs):
kwargs['recurse'] = True
return main(*args, **kwargs)
from .utils import get_request, authorized
class Hubs:
@authorized
def getHubs(self):
url = self.api_url + '/project/v1/hubs'
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
@authorized
def getHub(self, hub_id):
url = self.api_url + '/project/v1/hubs/%s' % hub_id
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
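# Minimal usage sketch (an assumption, not spelled out in this module): Hubs is
# written as a mixin and expects the composing client to provide api_url,
# token_type and access_token. The client class, base URL and token below are
# hypothetical placeholders.
class _ExampleClient(Hubs):
    def __init__(self, api_url, token_type, access_token):
        self.api_url = api_url
        self.token_type = token_type
        self.access_token = access_token
#client = _ExampleClient('https://api.example.com', 'Bearer', '<access token>')
#hubs = client.getHubs()
#hub = client.getHub('<hub id>')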
from gateware.encoder.core import EncoderDMAReader, EncoderBuffer, Encoder
__all__ = [
# env
'get_env_name',
# geometry
'CoordSystem',
# misc
'property_buffered',
'indicate_last',
'working_dir',
'measure_time',
# wrappers
'as_part',
]
from .env import get_env_name
from .geometry import CoordSystem
from .misc import property_buffered
from .misc import indicate_last
from .misc import working_dir
from .misc import measure_time
from .wrappers import as_part
#from . import test
# Nope!, test is only intended to be imported by testcases, so it's not
# imported automatically when cqparts.utils is referenced
# -*- encoding:utf-8 -*-
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "残りを読むのに必要な時間は%d分",
"(active)": "(有効)",
"Also available in:": "他の言語で読む:",
"Archive": "文書一覧",
"Atom feed": "Atomフィード",
"Authors": "著者一覧",
"Categories": "カテゴリ",
"Comments": "コメント",
"LANGUAGE": "日本語",
"Languages:": "言語:",
"More posts about %s": "%sに関する文書一覧",
"Newer posts": "新しい文書",
"Next post": "次の文書",
"Next": "次",
"No posts found.": "文書はありません。",
"Nothing found.": "なにも見つかりませんでした。",
"Older posts": "過去の文書",
"Original site": "翻訳元のサイト",
"Posted:": "公開日時:",
"Posts about %s": "%sについての文書",
"Posts by %s": "%sの文書一覧",
"Posts for year %s": "%s年の文書",
"Posts for {month} {day}, {year}": "{year}年{month}{day}日の文書",
"Posts for {month} {year}": "{year}年{month}の文書",
"Previous post": "一つ前の文書",
"Previous": "前",
"Publication date": "公開日",
"RSS feed": "RSSフィード",
"Read in English": "日本語で読む",
"Read more": "続きを読む",
"Skip to main content": "本文を読み飛ばす",
"Source": "ソース",
"Subcategories:": "サブカテゴリ",
"Tags and Categories": "カテゴリおよびタグ一覧",
"Tags": "タグ",
"Toggle navigation": "ナビゲーションを隠す",
"Uncategorized": "uncategorized",
"Up": "上",
"Updates": "フィード",
"Write your page here.": "ここに文書を記述してください。",
"Write your post here.": "ここに文書を記述してください。",
"old posts, page %d": "過去の文書 %dページ目",
"page %d": "ページ%d",
"{month} {day}, {year}": "{月} {日}, {年}",
"{month} {year}": "{月} {年}",
}
import json
import pytest
from django.contrib.admin.sites import AdminSite
from django.urls import reverse
from wazimap_ng.profile.models import ProfileHighlight
from wazimap_ng.profile.admin import ProfileHighlightAdmin
@pytest.mark.django_db
class TestProfileHighlightAdminHistory:
def test_change_reason_field_in_admin_form(self, mocked_request, dataset):
admin = ProfileHighlightAdmin(ProfileHighlight, AdminSite())
ProfileHighlightForm = admin.get_form(mocked_request)
fields = [f for f in ProfileHighlightForm.base_fields]
assert bool("change_reason" in fields) == True
def test_history_for_highlight_edit_from_admin(
self, client, superuser, profile_highlight
):
client.force_login(user=superuser)
url = reverse(
"admin:profile_profilehighlight_change", args=(
profile_highlight.id,
)
)
data = {
"profile": profile_highlight.profile_id,
"indicator": profile_highlight.indicator_id,
"subindicator": profile_highlight.subindicator,
"denominator": "absolute_value",
"label": "new label",
"change_reason": "Changed Label",
}
res = client.post(url, data, follow=True)
assert res.status_code == 200
assert profile_highlight.history.all().count() == 2
history = profile_highlight.history.first()
assert history.history_user_id == superuser.id
changed_data = json.loads(history.history_change_reason)
assert changed_data["reason"] == "Changed Label"
assert "label" in changed_data["changed_fields"]
assert "denominator" in changed_data["changed_fields"]
assert history.history_type == "~"
#!/usr/bin/env python
from __future__ import division, unicode_literals
import argparse
from onmt.translate.Translator import make_translator
import onmt.io
import onmt.translate
import onmt
import onmt.ModelConstructor
import onmt.modules
import onmt.opts
import timeit
def main(opt):
translator = make_translator(opt, report_score=True)
start = timeit.default_timer()
_, attns_info, oov_info, copy_info, context_attns_info = translator.translate(opt.src_dir, opt.src, opt.tgt,
opt.batch_size, opt.attn_debug)
end = timeit.default_timer()
print("Translation takes {}s".format(end-start))
# currently attns_info,oov_info only contain first index data of batch
if len(context_attns_info) == 0:
return attns_info, oov_info, copy_info
else:
return attns_info, oov_info, copy_info, context_attns_info
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
onmt.opts.add_md_help_argument(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
main(opt)
#
# Copyright 2015-2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..runenvtool import RunEnvTool
class tarTool(RunEnvTool):
"""The GNU version of the tar archiving utility.
Tool tune:
* .compressor=gzip ("bzip2", "gzip", "xz" or plain "tar")
"""
__slots__ = ()
def _installTool(self, env):
self._install.debrpm(['tar'])
self._install.emerge(['app-arch/tar'])
self._install.pacman(['tar'])
self._install.apk(['tar'])
self._install.brew('gnu-tar')
def initEnv(self, env, bin_name=None):
# Busybox's version is not enough for SDKMan
if self._detect.isAlpineLinux() and self._ospath.islink('/bin/tar'):
return
super(tarTool, self).initEnv(env, bin_name)
|
StarcoderdataPython
|
11201970
|
print('--- Functions ---')
def test():
print('oi')
def test2(param):
print(param)
def sum(param, paramb):
return param + paramb
test()
test2('sou um parametro')
print(sum(5,4))
print()
print('--- Lambda ---')
# a normal function, for comparison with the lambda below
def quadrado(value): return value**2
print(quadrado(2))
my_lambda = lambda param: param**2
print(my_lambda(5))
print()
print('--- Map ---')
s = [1,2,3,4,5]
ls1 = list(map(quadrado, s))
print(ls1)
ls2 = list(map(lambda v: v**3, s))
print(ls2)
print()
print('--- Filter ---')
print(list(filter(lambda v : v >= 3, s)))
ls3 = list(filter(lambda v : v % 2 == 0, s))
print(ls3)
|
StarcoderdataPython
|
190735
|
from client_lib.servercall import remote_call
_isInitialized = False
_productList = None
_productNameList = []
_productNumberList = []
def _prodSugInit():
global _isInitialized
global _productList
global _productNameList
global _productNumberList
_productList = remote_call('/product/all')
# Build product name and number lists
for product in _productList:
_productNameList.append((product['naam'].encode('utf-8'), product['id']))
_productNumberList.append((product['leverancier_id'].encode('utf-8'), product['id']))
_productNameList.sort()
_productNumberList.sort()
_isInitialized = True
def findSuggestion(curInput):
global _isInitialized
global _productNameList
global _productNumberList
if not _isInitialized:
_prodSugInit()
low = -1
high = len(_productNameList)-1
while high - low > 1:
        mid = (high+low)//2
if _productNameList[mid][0] >= curInput:
high = mid
else:
low = mid
if _productNameList[high][0].startswith(curInput):
return (_productNameList[high][1], _productNameList[high][0])
low = -1
high = len(_productNumberList)-1
while high - low > 1:
        mid = (high + low)//2
if _productNumberList[mid][0] >= curInput:
high = mid
else:
low = mid
if _productNumberList[high][0].startswith(curInput):
return (_productNumberList[high][1], _productNumberList[high][0])
return (None, '')
|
StarcoderdataPython
|
9651330
|
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from vitrage.entity_graph.graph_init import EventsCoordination
from vitrage.tests import base
class EventsCoordinationTest(base.BaseTest):
@classmethod
def setUpClass(cls):
super(EventsCoordinationTest, cls).setUpClass()
cls.calc_result = 0
def do_work(self, x):
if x:
self.calc_result = self.calc_result * 2
else:
self.calc_result = self.calc_result + 1
def test_queue_coordination(self):
explain = """
initially calc_result is 0.
each high priority call multiplies by *2
each low priority call adds +1
so, if all the high calls are performed first, and then all the low,
the result should be the number of low priority calls.
0*(2^n) + 1*n
"""
priority_listener = EventsCoordination(self.do_work)
def write_high():
for i in range(10000):
priority_listener._do_high_priority_work(True)
def write_low():
for i in range(10000):
priority_listener._do_low_priority_work(False)
self.calc_result = 0
t1 = threading.Thread(name='high_1', target=write_high)
t2 = threading.Thread(name='high_2', target=write_high)
t3 = threading.Thread(name='low_1', target=write_low)
t4 = threading.Thread(name='low_2', target=write_low)
self._start_and_join(t1, t2, t3, t4)
self.assertEqual(20000, self.calc_result, explain)
self.calc_result = 0
t1 = threading.Thread(name='high_1', target=write_high)
t2 = threading.Thread(name='low_1', target=write_low)
t3 = threading.Thread(name='low_2', target=write_low)
t4 = threading.Thread(name='high_2', target=write_high)
self._start_and_join(t1, t2, t3, t4)
self.assertEqual(20000, self.calc_result, explain)
def _start_and_join(self, *args):
for t in args:
t.start()
for t in args:
t.join()
|
StarcoderdataPython
|
6596158
|
# -*- coding: utf-8 -*-
"""
File Name: zigzag_conversion
Author : jing
Date: 2020/3/16
"""
class Solution:
def convert(self, s: str, numRows: int) -> str:
if s is None or len(s) == 0 or numRows < 1:
return s
else:
result = [""] * numRows # 类似一个for循环,创建了["", "", ""]
# for i in range(numRows):
# result.append("")
j = 0
for char in s:
if j == numRows + numRows - 2:
j = 0
if j >= numRows:
result[2*numRows - j - 2] = result[2*numRows - j - 2] + char
j = j + 1
continue
result[j] = result[j] + char
j += 1
# out = ""
# for i in range(len(result)):
# for j in range(len(result[i])):
# out = out + result[i][j]
            # join: str.join(seq) concatenates the items of seq using str as the separator, e.g. "-".join(["q", "e", "r"]) -> "q-e-r"
return "".join(result)
def convert2(self, s, numRows):
result = [''] * numRows
j = 0
for i in s:
result[j] = result[j] + i
if j == 0:
m = j
j += 1
elif (j == numRows - 1) | (j - m < 0):
m = j
j -= 1
else:
m = j
j += 1
return ("".join(result))
if __name__ == '__main__':
print(Solution().convert("PAYPALISHIRING", 4))
|
StarcoderdataPython
|
312218
|
'''
As Mayavi is a little outdated, and not support wxphoenix
So I wrote a simple one, and remove two a, M(a)y(a)vi.
'''
from .canvas3d import *
from .frame3d import *
from .manager import *
from .util import *
|
StarcoderdataPython
|
1602961
|
import importlib
from mu.protogen import stores_pb2
MODULE_NAME_FORMAT = 'mu.protogen.{}_pb2'
STORE_TYPE_NAME_FORMAT = 'Mu{}Store'
def store_from_name(type_name):
type_name = type_name.lower()
# get class from module via introspection
type_module = importlib.import_module(MODULE_NAME_FORMAT.format(type_name))
store_class = getattr(type_module, STORE_TYPE_NAME_FORMAT.format(type_name.capitalize()))
return store_class()
def store_from_enum(store_enum):
# grab enum name from enum value for introspection
type_name = stores_pb2.MuStoreType.Name(store_enum).lower()
return store_from_name(type_name)
def decode_store_info(msg):
store_info = stores_pb2.MuStoreInfo()
store_info.ParseFromString(msg)
return store_info
def decode_store(store_info):
store = store_from_enum(store_info.type)
store.ParseFromString(store_info.msg)
return store
def full_mask(store):
mask = store.__class__()
for descriptor, val in store.ListFields():
if hasattr(val, '__len__'):
getattr(mask, descriptor.name).extend([1] * len(val))
else:
setattr(mask, descriptor.name, 1)
return mask
|
StarcoderdataPython
|
1932501
|
import os
class RunlistValidationError(Exception):
pass
class RunlistParsingError(Exception):
pass
def parseSectionTag(in_tag):
isEnd = False
if in_tag[-1] != "]":
raise RunlistParsingError("Tag not ended with ]")
tag_content = in_tag.strip("][")
if tag_content[0] == "/":
isEnd = True
tag_content.strip("/")
contents = tag_content.split()
if (len(contents) < 3) or (contents[1] != "ID:"):
raise RunlistParsingError("Wrong tag format.")
key, id_str, gid = contents
try:
gid = int(gid)
except ValueError:
raise RunlistParsingError(f"Group ID {gid} is not an integer")
return isEnd, key, gid
def validateRunlist(r_dict):
must_have = ["EA", "RUNLIST"]
# Check if EA and RUNLIST are present
for k in must_have:
if k not in r_dict.keys():
raise RunlistValidationError("Missing {}".format(k))
    # Check if there's only one EA per group
for gid, item in r_dict["EA"].items():
if len(item) != 1:
exception_str = "Only one EA file can be used for each group.\n"
exception_str += "[EA ID: {:d}] have {:d} files.".format(gid, len(item))
raise RunlistValidationError(exception_str)
# Check if all files exists
for gid, item in r_dict["EA"].items():
if not os.path.exists(item[0]):
raise RunlistValidationError("{} does not exist.".format(item[0]))
for gid, item in r_dict["RUNLIST"].items():
for f in item:
if not os.path.exists(f):
raise RunlistValidationError("{} does not exist.".format(f))
# Check if all groups have corresponding EA
rl_minus_ea = list(set(r_dict["RUNLIST"].keys()) - set(r_dict["EA"].keys()))
if rl_minus_ea:
raise RunlistValidationError("ID: {} have no matching EA.".format(rl_minus_ea))
def parseRunlistStrs(lines):
not_used_str = []
lines_sans_whitespace = map(lambda x: x.strip(), lines)
in_tag_region = False
current_key_id = None
encounter_first_tag = False
parse_dict = {}
for line in lines_sans_whitespace:
if (len(line) == 0) or line[0] == "#":
# Skip empty lines
continue
elif line[0] == "[":
encounter_first_tag = True
isEnd, key, gid = parseSectionTag(line)
if (not in_tag_region) and isEnd:
raise RunlistParsingError(
"No start tag found for [{} ID: {:d}]".format(key, gid)
)
elif in_tag_region and (not isEnd):
raise RunlistParsingError(
"No ending tag found for previous block before starting [{} ID: {:d}].".format(
key, gid
)
)
elif not isEnd:
current_key_id = (key, gid)
try:
parse_dict[key][gid] = []
except KeyError:
parse_dict[key] = {gid: []}
in_tag_region = True
else:
in_tag_region = False
current_key_id = None
else:
# Fill group 0 files
if not (in_tag_region or encounter_first_tag):
not_used_str.append(line)
if in_tag_region:
key, gid = current_key_id
parse_dict[key][gid].append(line)
if in_tag_region:
raise RunlistParsingError("Missing closing tags.")
if not_used_str:
try:
parse_dict["RUNLIST"][0] = not_used_str
except KeyError:
parse_dict["RUNLIST"] = {0: not_used_str}
return parse_dict
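# A minimal usage sketch (illustrative only; the file names below are
# hypothetical, and validateRunlist would raise for paths that do not exist):
#
#   lines = [
#       "[EA ID: 1]",
#       "effective_area.root",
#       "[/EA ID: 1]",
#       "[RUNLIST ID: 1]",
#       "run_12345.root",
#       "[/RUNLIST ID: 1]",
#   ]
#   parsed = parseRunlistStrs(lines)   # {"EA": {1: [...]}, "RUNLIST": {1: [...]}}
#   validateRunlist(parsed)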
|
StarcoderdataPython
|
50066
|
import os
import platform
import sys
from os import listdir
from pathlib import Path
from src.create_dir import create_numbered_dirs, get_parent_dir
from src.validate_windows_file_name import is_valid_windows_file_name
def get_files_in(dir: str):
"""Returns a list of absolute paths to files sorted alphabetically in the specified folder"""
# TODO: ignore folders
# TODO: use OS independent path separator
    return sorted(dir + '\\' + file for file in listdir(dir) if os.path.isfile(os.path.join(dir, file)))
def create_new_dirs(parent_dir: str, number_of_dirs: int):
start = 1
return create_numbered_dirs(parent_dir, start, number_of_dirs)
def move_files(path_to_files: [str], destinations: [str], new_file_name: str):
"""
Assumes that every file in the parameter files is an absolute path to a file.
Also assumes that both parameters are non-empty lists
new_file_name does not change the file extension
"""
for index, source_path in enumerate(path_to_files):
original_file_name = os.path.basename(source_path)
file_extension = Path(original_file_name).suffix
# don't want to set this expression to new_file_name because it will overwrite the value
# and affect subsequent iterations of the loop
new_file_name2 = new_file_name + file_extension
dest = os.path.join(destinations[index], new_file_name2)
os.rename(source_path, dest)
print(f'Moved {original_file_name}...')
def ask_for_file_name():
    question = 'What do you want to use for the new file names?'
    if platform.system() == "Windows":
        return ask_for_file_name_on_windows(question)
print(question, sep='')
return input()
def ask_for_file_name_on_windows(question: str):
for i in range(3):
print(question, sep='')
file_name = input()
if is_valid_windows_file_name(file_name):
return file_name
print('Invalid file name. Try again.')
print('Error: you did not enter in a valid name after 3 attempts. Exiting...')
sys.exit()
def main():
parent_dir = get_parent_dir()
files = get_files_in(parent_dir)
if not files:
print(f'Cannot find any files in {parent_dir}')
sys.exit()
new_dirs = create_new_dirs(parent_dir, len(files))
if not new_dirs:
print(f'Could not create new directories. Perhaps those directories already exist in '
f'{parent_dir}?')
sys.exit()
move_files(files, new_dirs, ask_for_file_name())
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9671668
|
<reponame>ajensen1234/ShapeWorks
import os
import sys
import numpy as np
from shapeworks import *
success = True
# note: we just use numpy arrays for coordinates/indices, points, dimensions, vectors, and matrices
def coordTest():
c1 = np.array([1.0, 1.0, 1.0])
c2 = np.array([2.0, 2.0, 1.0])
c2[2] = 2
c3 = c1 + c2
c4 = c2 - c1
c5 = c1 * c2
c1 += c2
c1 -= c2
c6 = c2 * 10
c7 = c2 / 2
c6 *= 4
c6 /= 2
return c6[0] == 40 and c6[1] == 40 and c6[2] == 40
success &= utils.test(coordTest)
sys.exit(not success)
|
StarcoderdataPython
|
3333283
|
<reponame>Lucas-py/Python-Basico02
'''
Write a program that asks the user for the speed of their car.
If it exceeds 80 km/h, display a message saying the user has been fined.
In that case, show the fine amount, charging R$ 5 for every km/h above 80 km/h.
'''
velocidade = int(input('digite a velocidade do veiculo: '))
if velocidade > 80:
taxa = velocidade - 80
multa = 5 * taxa
    print(f'VOCÊ FOI MULTADO PAGUE: {multa} R$, SUA VELOCIDADE ERA DE: {velocidade}, o limite é de 80 km/h')
else:
    print(f'parabéns, você não foi multado, sua velocidade é de {velocidade}')
|
StarcoderdataPython
|
3509841
|
<gh_stars>0
import logging
import os
def get_logger(name='default', level='INFO', log_path=None, log_format = '%(asctime)s - %(levelname)s - %(pathname)s - Line: %(lineno)d - ', prefix=""):
if log_path is None:
log_path = os.getenv('LOG_PATH', '/tmp')
logger = logging.getLogger(name)
formatter = logging.Formatter(fmt=log_format+str(prefix)+" %(message)s")
file_handler = logging.FileHandler(log_path + '/' + name + ".log")
file_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(file_handler)
logger.setLevel(level)
logger.propagate = False
return logger
|
StarcoderdataPython
|
5098265
|
import json
import os
import threading
from stock.Stock import Stock
global config
global userAgents
def __init__():
path = os.path.dirname(__file__)
print(path)
with open("../config/config.json", "r+", encoding="utf-8") as f:
global config
config = json.loads(f.read())
with open("../config/user_agents.txt", "r+", encoding="utf-8") as f:
global userAgents
userAgents = f.read().split('\n')
run()
def run():
stock = Stock(config["stock"], userAgents)
stock.run()
if __name__ == '__main__':
__init__()
pass
|
StarcoderdataPython
|
11392825
|
import re
import os
from io import BytesIO, StringIO
from copy import deepcopy
import flametree
from snapgene_reader import snapgene_file_to_seqrecord
from Bio import SeqIO
try:
# Biopython <1.78
from Bio.Alphabet import DNAAlphabet
has_dna_alphabet = True
except ImportError:
# Biopython >=1.78
has_dna_alphabet = False
from .record_operations import (
set_record_topology,
sequence_to_biopython_record,
)
def string_to_records(string):
"""Convert a string of a fasta, genbank... into a simple ATGC string.
Can also be used to detect a format.
"""
matches = re.match("([ATGC][ATGC]*)", string)
if (matches is not None) and (matches.groups()[0] == string):
return [sequence_to_biopython_record(string)], "ATGC"
for fmt in ("fasta", "genbank"):
try:
stringio = StringIO(string)
records = list(SeqIO.parse(stringio, fmt))
if len(records) > 0:
return (records, fmt)
except Exception:
pass
try:
record = snapgene_file_to_seqrecord(filecontent=StringIO(string))
return [record]
except Exception:
pass
raise ValueError("Invalid sequence format")
def load_record(
filepath,
topology="default_to_linear",
id="auto",
upperize=True,
max_name_length=20,
):
"""Return a Biopython record read from a Fasta/Genbank/Snapgene file.
Parameters
----------
filepath
Path to a Genbank, Fasta, or Snapgene (.dna) file.
topology
Can be "circular", "linear", "default_to_circular" (will default
to circular if ``annotations['topology']`` is not already set) or
"default_to_linear".
id
Sets the record.id. If "auto", the original record.id is used, and if
none is set the name of the file (without extension) is used instead.
upperize
If true, the sequence will get upperized (recommended in this library,
as the mix of upper and lower case can cause problems in Biopython's
enzyme site search).
max_name_length
The name of the record will be truncated if too long to avoid Biopython
exceptions being raised.
"""
if filepath.lower().endswith(("gb", "gbk")):
record = SeqIO.read(filepath, "genbank")
elif filepath.lower().endswith(("fa", "fasta")):
record = SeqIO.read(filepath, "fasta")
elif filepath.lower().endswith(".dna"):
record = snapgene_file_to_seqrecord(filepath)
else:
raise ValueError("Unknown format for file: %s" % filepath)
if upperize:
record = record.upper()
set_record_topology(record, topology)
if id == "auto":
id = record.id
if id in [None, "", "<unknown id>", ".", " "]:
id = os.path.splitext(os.path.basename(filepath))[0]
id = id.replace(" ", "_")[:max_name_length]
record.id = id
elif id is not None:
record.id = id.replace(" ", "_")[:max_name_length]
return record
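# Hedged usage sketch (illustrative only; "plasmid.gb" is a hypothetical file):
#
#   record = load_record("plasmid.gb", topology="default_to_circular")
#   print(record.id, len(record.seq))
#
# With id="auto", a missing or placeholder record.id falls back to the file name
# without its extension, truncated to max_name_length characters.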
def _load_records_from_zip_file(zip_file, use_file_names_as_ids=False):
"""Return all fasta/genbank/snapgene in a zip as biopython records.
Each record gets a ``source_file`` attribute from the zip's file name
without the .zip extension.
Used via "load_records_from_files".
"""
zip_file = flametree.file_tree(zip_file)
records = []
for f in zip_file._all_files:
ext = f._extension.lower()
if ext in ["gb", "gbk", "fa", "dna"]:
try:
new_records, fmt = string_to_records(f.read())
if not isinstance(new_records, list):
new_records = [new_records]
except Exception:
content_stream = BytesIO(f.read("rb"))
try:
record = snapgene_file_to_seqrecord(fileobject=content_stream)
new_records, _ = [record], "snapgene"
except Exception:
raise ValueError("Format not recognized for file " + f._path)
single_record = len(new_records) == 1
for i, record in enumerate(new_records):
name = record.id
if name in [
None,
"",
"<unknown id>",
".",
" ",
"<unknown name>",
]:
number = "" if single_record else ("%04d" % i)
name = f._name_no_extension.replace(" ", "_") + number
                record.id = name
record.file_name = f._name_no_extension
                if use_file_names_as_ids and single_record:
                    # record.source_file is only assigned in the loop below, so
                    # use the zip entry's path directly here.
                    basename = os.path.basename(f._path)
                    basename_no_extension = os.path.splitext(basename)[0]
                    record.id = basename_no_extension
for record in new_records:
record.source_file = f._path
records += new_records
return records
def load_records_from_file(filepath):
"""Autodetect file format and load biopython records from it."""
with open(filepath, "rb") as f:
content = f.read()
try:
records, fmt = string_to_records(content.decode("utf-8"))
except Exception:
try:
record = snapgene_file_to_seqrecord(fileobject=BytesIO(content))
records, fmt = [record], "snapgene"
except Exception:
raise ValueError("Format not recognized for file " + filepath)
if not isinstance(records, list):
records = [records]
for record in records:
record.source_file = filepath
return records, fmt
def load_records_from_files(files=None, folder=None, use_file_names_as_ids=False):
"""Automatically convert files or a folder's content to biopython records.
Parameters
----------
files
A list of path to files. A ``folder`` can be provided instead.
folder
A path to a folder containing sequence files.
use_file_names_as_ids
If True, for every file containing a single record, the file name
(without extension) will be set as the record's ID.
"""
if files is not None:
for file in files:
if isinstance(file, str) and not os.path.exists(file):
raise IOError("File %s not found" % file)
if folder is not None:
files = [f._path for f in flametree.file_tree(folder)._all_files]
records = []
for filepath in files:
filename = os.path.basename(filepath)
if filename.lower().endswith("zip"):
records += _load_records_from_zip_file(
filepath, use_file_names_as_ids=use_file_names_as_ids
)
continue
recs, fmt = load_records_from_file(filepath)
single_record = len(recs) == 1
for i, record in enumerate(recs):
name_no_extension = "".join(filename.split(".")[:-1])
name = name_no_extension + ("" if single_record else ("%04d" % i))
name = name.replace(" ", "_")
UNKNOWN_IDS = [
"None",
"",
"<unknown id>",
".",
"EXPORTED",
"<unknown name>",
"Exported",
]
if has_dna_alphabet: # Biopython <1.78
record.seq.alphabet = DNAAlphabet()
record.annotations["molecule_type"] = "DNA"
            # Sorry for this part, it took a lot of "whatever works".
# keep your part names under 20c and pointless, and everything
# will be good
            if str(record.id).strip() in UNKNOWN_IDS:
                record.id = name
record.file_name = name_no_extension
if use_file_names_as_ids and single_record:
basename = os.path.basename(record.source_file)
basename_no_extension = os.path.splitext(basename)[0]
record.id = basename_no_extension
records += recs
return records
def write_record(record, target, fmt="genbank"):
"""Write a record as genbank, fasta, etc. via Biopython, with fixes."""
record = deepcopy(record)
record.id = record.id[:20]
if has_dna_alphabet: # Biopython <1.78
if str(record.seq.alphabet.__class__.__name__) != "DNAAlphabet":
record.seq.alphabet = DNAAlphabet()
record.annotations["molecule_type"] = "DNA"
if hasattr(target, "open"):
target = target.open("w")
SeqIO.write(record, target, fmt)
|
StarcoderdataPython
|
3566139
|
<reponame>febalci/DomoticzEarthquake
"""
<plugin key="SeismicPortal" name="Eartquake EMSC Data" author="febalci" version="1.0.1">
<params>
<param field="Mode2" label="Radius1 (km)" width="150px" required="true" default="250"/>
<param field="Mode3" label="Radius2 (km)" width="150px" required="true" default="500"/>
<param field="Mode4" label="Min Magnitude in Radius1" width="150px" required="true" default="3.5"/>
<param field="Mode5" label="Min Magnitude in Radius2" width="150px" required="true" default="5"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import json
from math import radians, cos, sin, asin, sqrt
import struct
import time
from datetime import datetime
class BasePlugin:
wsConn = None
wsping = bytes([0x89, 0x80, 0x5b, 0x63, 0x68, 0x84])
myHomelat = myHomelon = 0
nextConnect = 2
oustandingPings = 0
wsHeader = "GET /standing_order/websocket HTTP/1.1\r\n" \
"Host: www.seismicportal.eu\r\n" \
"User-Agent: Domoticz/1.0\r\n" \
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" \
"Accept-Language: en-US,en;q=0.5\r\n" \
"Accept-Encoding: gzip, deflate\r\n" \
"Sec-WebSocket-Version: 13\r\n" \
"Origin: http://www.seismicportal.eu\r\n" \
"Sec-WebSocket-Key: <KEY>" \
"DNT: 1\r\n" \
"Connection: keep-alive, Upgrade\r\n" \
"Pragma: no-cache\r\n" \
"Cache-Control: no-cache\r\n" \
"Upgrade: websocket\r\n\r\n"
def __init__(self):
#self.var = 123
return
def onStart(self):
global minRadius, maxRadius, minMagnitude, maxMagnitude
Domoticz.Log("onstart Called")
if (Parameters["Mode6"] == "Debug"):
Domoticz.Debugging(1)
# Get the location from the Settings
if not "Location" in Settings:
Domoticz.Log("Location not set in Preferences")
return False
# The location is stored in a string in the Settings
loc = Settings["Location"].split(";")
self.myHomelat = float(loc[0])
self.myHomelon = float(loc[1])
Domoticz.Debug("Coordinates from Domoticz: " + str(self.myHomelat) + ";" + str(self.myHomelon))
if self.myHomelat == None or self.myHomelon == None:
Domoticz.Log("Unable to parse coordinates")
return False
minRadius = float (Parameters["Mode2"])
maxRadius = float (Parameters["Mode3"])
minMagnitude = float(Parameters["Mode4"])
maxMagnitude = float(Parameters["Mode5"])
if (len(Devices) == 0):
Domoticz.Device(Name='Earthquake', Unit=1, TypeName="Alert", Used=1).Create()
Domoticz.Debug("Device created.")
self.wsConn = Domoticz.Connection(Name="EmscConn", Transport="TCP/IP", Protocol="None", Address="www.seismicportal.eu", Port="80")
self.wsConn.Connect()
DumpConfigToLog()
Domoticz.Heartbeat(40)
def onStop(self):
Domoticz.Log("onStop called")
def onConnect(self, Connection, Status, Description):
Domoticz.Log("onConnect called")
if (Status == 0):
self.isConnected = True
Connection.Send(self.wsHeader) #Upgrade Connection to WebSocket
Domoticz.Debug("Connected successfully to the server")
else:
self.isConnected = False
Domoticz.Debug("Failed to connect ("+str(Status)+") to server with error: "+Description)
return
def onMessage(self, Connection, Data):
Domoticz.Log("onMessage called")
HEADER, = struct.unpack("!H", Data[:2])
wsData = Data[2:]
FIN = (HEADER >> 15) & 0x01
RSV1 = (HEADER >> 14) & 0x01
RSV2 = (HEADER >> 13) & 0x01
RSV3 = (HEADER >> 12) & 0x01
OPCODE = (HEADER >> 8) & 0x0F
MASKED = (HEADER >> 7) & 0x01
LEN = (HEADER >> 0) & 0x7F
        if LEN == 126:
            # 16-bit extended payload length
            LEN, = struct.unpack("!H", wsData[:2])
            wsData = wsData[2:]
        elif LEN == 127:
            # 64-bit extended payload length (the original "!4H" would unpack
            # four values and fail the single-target assignment)
            LEN, = struct.unpack("!Q", wsData[:8])
            wsData = wsData[8:]
if Data[:2]==b'\x81\x7e': #Earthquake Message
eqjson = wsData.decode('utf8')
eqdata = json.loads(eqjson)
Domoticz.Debug(str(eqdata))
action = eqdata["action"]
mag = eqdata["data"]["properties"]["mag"]
lat = eqdata["data"]["properties"]["lat"]
lon = eqdata["data"]["properties"]["lon"]
time = eqdata["data"]["properties"]["time"]
Domoticz.Debug('UTC Time:'+str(time))
time = isoutc_to_local(time)
Domoticz.Debug('Local Time:'+str(time))
time_stripped = str(time).split('.', 1)[0]
Domoticz.Debug('Local Time Stripped:'+str(time_stripped))
location = eqdata["data"]["properties"]["flynn_region"]
distance = haversine(lat,lon,self.myHomelat,self.myHomelon)
Domoticz.Debug ("Magnitude = "+str(mag))
Domoticz.Debug ("Distance = "+str(int(distance))+" km")
eqshow = False
# If the earthquake is within the given parameters:
if action == "create": # New Earthquake - "update" is updating values of an earlier earthquake
Domoticz.Debug(location)
if distance < minRadius:
if float(mag) > minMagnitude:
eqshow = True
elif distance < maxRadius:
if float(mag) > maxMagnitude:
eqshow = True
elif float(mag) > 8:
eqshow = True
# Alertbox Color:
if mag < 3:
magnitude=0
elif mag < 4:
magnitude=1
elif mag < 5:
magnitude=2
elif mag < 6:
magnitude=3
else:
magnitude=4
if eqshow:
Domoticz.Log(str(mag)+' - '+location)
UpdateDevice(1,magnitude,str(mag)+' - '+str(int(distance))+' km - '+location+' - '+str(time_stripped))#0=gray, 1=green, 2=yellow, 3=orange, 4=red
elif Data[:2]==b'\x8a\x00':#PONG message from the server
self.oustandingPings = self.oustandingPings - 1
Domoticz.Debug('Pong received')
def onCommand(self, Unit, Command, Level, Hue):
Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
Domoticz.Log("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)
def onDisconnect(self, Connection):
Domoticz.Log("onDisconnect called")
self.isConnected = False
def onHeartbeat(self):
if (self.wsConn.Connected() == True):
if (self.oustandingPings > 3):
Domoticz.Debug("Ping Timeout, Disconnect")
self.wsConn.Disconnect()
self.nextConnect = 0
else:
self.wsConn.Send(self.wsping) #PING message to Server
Domoticz.Debug("Ping sent")
self.oustandingPings = self.oustandingPings + 1
else:
# if not connected try and reconnected every 2 heartbeats
self.oustandingPings = 0
self.nextConnect = self.nextConnect - 1
if (self.nextConnect <= 0):
self.nextConnect = 2
self.wsConn.Connect()
return
global _plugin
_plugin = BasePlugin()
def onStart():
global _plugin
_plugin.onStart()
def onStop():
global _plugin
_plugin.onStop()
def onConnect(Connection, Status, Description):
global _plugin
_plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
global _plugin
_plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
global _plugin
_plugin.onCommand(Unit, Command, Level, Hue)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
global _plugin
_plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
global _plugin
_plugin.onDisconnect(Connection)
def onHeartbeat():
global _plugin
_plugin.onHeartbeat()
# Generic helper functions
def UpdateDevice(Unit, nValue, sValue):
# Make sure that the Domoticz device still exists (they can be deleted) before updating it
if (Unit in Devices):
if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):
Devices[Unit].Update(nValue=nValue, sValue=str(sValue))
Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+")")
return
def haversine(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
# haversine formula
dlat = lat2 - lat1
dlon = lon2 - lon1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# 6367 km is the radius of the Earth
km = 6367 * c
return km
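# Hedged usage sketch (illustrative only, approximate sample coordinates):
#
#   haversine(48.8566, 2.3522, 52.5200, 13.4050)   # Paris -> Berlin, roughly 880 km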
def isoutc_to_local(utc_dt):
# time = 2018-02-26T18:44:41.0Z
ztime = datetime.strptime(utc_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
now_timestamp = time.time()
offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)
return ztime + offset
def DumpConfigToLog():
for x in Parameters:
if Parameters[x] != "":
Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
Domoticz.Debug("Device count: " + str(len(Devices)))
for x in Devices:
Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
return
|
StarcoderdataPython
|
1937881
|
import networkx as nx
import random
import time
import tree_traversals
import sys
# import json
sys.setrecursionlimit(10**6)
def sort_neighbors(d):
new_dict = {}
for e in d[1]:
sub_dict = d[1][e]
for d2 in sub_dict:
# print(sub_dict[d2])
new_dict.update({e: sub_dict[d2]})
new_dict = dict(sorted(new_dict.items(), key=lambda kv: kv[1]))
return d[0], new_dict
def count_set_bits(b):
count = 0
while b:
count += b & 1
b >>= 1
return count
def str_bitwise_xor(arr, t):
    # XOR t bit strings position by position; shorter strings are zero-padded
    # on their most-significant side so all inputs share the same length.
    result = ""
    max_len = -1
    for i in range(t):
        max_len = max(max_len, len(arr[i]))
        arr[i] = arr[i][::-1]
    for i in range(t):
        s = ""
        for j in range(max_len - len(arr[i])):
            s += "0"
        arr[i] = arr[i] + s
    for i in range(max_len):
        pres_bit = 0
        for j in range(t):
            pres_bit = pres_bit ^ (ord(arr[j][i]) - ord('0'))
        result += chr(pres_bit + ord('0'))
    result = result[::-1]
    return result
def rand_key(p):
p = p - 3
key1 = ""
for a in range(p):
temp = str(random.randint(0, 1))
key1 += temp
return '111'+key1
def ncd(t):
# network connectivity degree for every node of the network
count = 0
s = 0.0
for user in t[1]:
s += t[1][user]
count += 1
return s/count
def compute_acc(circle_, n_hoods):
# returns (community id, number of members, average community connectivity)
id_ = circle_[0]
# circle_.pop(0)
s = 0.0
count = 1
for j in range(1, len(circle_)):
for n_hood in n_hoods:
if circle_[j] == n_hood[1][0]:
s += n_hood[0]
count += 1
if count != 0:
return id_, count, s/count
def dia(circle_, graph):
circle_.pop(0)
q = nx.Graph()
for o in circle_:
q.add_node(o)
for (j, p, w) in graph.edges.data('weight'):
if j in q.nodes:
if p in q.nodes:
q.add_weighted_edges_from([(j, p, w)])
if len(circle_) == 0:
pass
else:
if nx.is_connected(q):
return nx.diameter(q)+5
else:
return 3
def declare_membership(path_sim_t, diam, acc):
# path_sim_t = (path_strength, updates)
# diam = diameter
# acc = average community connectivity
if path_sim_t[1] <= diam+5:
if path_sim_t[0] >= acc-1:
return 'the node can be considered a member of the target community'
else:
return
if __name__ == '__main__':
start = time.perf_counter()
with open('social_network_samples\\0.edges', 'r') as fh:
# with open('C:\\Users\\kots\\Desktop\\0.edges', 'r') as fh:
# new_file = fh.read()
# new_file = new_file.replace(',', ' ')
# print(new_file)
g = nx.read_edgelist(fh, delimiter=' ', create_using=nx.Graph(), nodetype=str)
# g = nx.read_edgelist('C:\\Users\\giorgos\\Desktop\\twitter_combined.txt', create_using=nx.Graph(), nodetype=str)
# print(nx.info(g))
print('Origin of the graph: Facebook')
print('Number of nodes:', g.number_of_nodes())
print('Density of the graph:', format(nx.density(g), '.4f'))
# print('Features in common: 3 (at least)')
circles = []
binary_values = {}
features = 10
with open('social_network_samples\\0.circles', 'r') as f:
# with open('C:\\Users\\kots\\Desktop\\0.circles', 'r') as f:
for line in f:
k = line.split()
circles.append(k)
        c = f.readline().split('\t')
# print(c)
c[-1] = c[-1].strip('\n')
circles.append(c)
# music communities
'''music1 = ['Folk']
music2 = ['Techno/House']
with open('C:\\Users\\giorgos\\Desktop\\HU_genres.json', 'r') as f:
data = json.load(f)
for key, value in data.items():
# print(key, '-->', value)
for kind in value:
if kind == 'Folk':
music1.append(key)
elif kind == 'Techno/House':
music2.append(key)
circles.append(music1)
circles.append(music2)'''
w = 0.66
for i in g.nodes:
binary_values[i] = rand_key(features)
# print(binary_values)
for (u, v) in g.edges():
arr = [binary_values[u], binary_values[v]]
XOR = str_bitwise_xor(arr, 2)
number_of_1s = count_set_bits(int(XOR, 2))
weigh = number_of_1s/features
g.add_weighted_edges_from([(u, v, weigh)])
# print(f"({u}, {v}, {weigh:.3})")
neighborhoods = []
for n in g.adjacency():
# print(sort_neighbors(n))
# print(ncd(sort_neighbors(n)))
element = (ncd(sort_neighbors(n)), sort_neighbors(n))
# print(element)
neighborhoods.append(element)
diameters = {}
# diameters = {id: diameter, ...}
comm = []
# comm = [(community id, number of members, average community connectivity), ...()]
for circle in circles:
comm.append(compute_acc(circle, neighborhoods))
diameters.update({circle[0]: dia(circle, g)})
for _ in comm:
pass
# print(_)
for _ in diameters.items():
pass
# print(_)
target_com = circles[11]
d = diameters['circle11']
a = comm[11][2]
trees = []
for node in neighborhoods:
r = tree_traversals.build_tree(node[1], w)
trees.append(r)
for tree in trees:
tree_traversals.build_communities(tree, trees)
stronger_paths = []
for tree in trees:
stronger_paths.append(tree_traversals.path_sim(tree, target_com))
s = 0
for path in stronger_paths:
if path is not None:
if declare_membership(path, d, a) is not None:
s = s + 1
# print(path)
# print(declare_membership(path, d, a))
print('Nodes that can be considered members of the target community:', s)
finish = time.perf_counter()
print(f'Finished in {round(finish - start, 2)} seconds')
# end
|
StarcoderdataPython
|
1687023
|
<gh_stars>0
# coding: utf-8
"""
Production (working-days) calendar.
"""
import json
import os
import datetime
import requests
WORKING_TYPE_WORK = 0
WORKING_TYPE_HOLIDAY = 2
WORKING_TYPE_SHORT = 3
DEFAULT_CACHE_PATH = '/tmp/basicdata_calend.json'
def is_working_time(date_time, use_cache=False, cache_path=DEFAULT_CACHE_PATH):
exceptions = _get_prod_exceptions(use_cache=use_cache, cache_path=cache_path)
work_first_hour = 10
work_last_hour = 17
is_work_day = date_time.weekday() < 5
year = str(date_time.year)
month = str(date_time.month)
day = str(date_time.day)
if exceptions.get(year) and exceptions[year].get(month) and exceptions[year][month].get(day):
working_type = exceptions[year][month][day]['isWorking']
if working_type == WORKING_TYPE_HOLIDAY:
is_work_day = False
elif working_type == WORKING_TYPE_SHORT:
work_last_hour = 16
elif working_type == WORKING_TYPE_WORK:
is_work_day = True
is_work_time = work_first_hour <= date_time.hour <= work_last_hour
return is_work_day and is_work_time
def _get_prod_exceptions(use_cache=False, cache_path=DEFAULT_CACHE_PATH):
if not use_cache:
return _load_prod_exceptions()
if _is_cache_available(cache_path=cache_path):
exceptions = _load_cache(cache_path)
else:
exceptions = _load_prod_exceptions()
_save_cache(exceptions, cache_path)
return exceptions
def _load_prod_exceptions():
"""
    Uses http://basicdata.ru/api/calend/
    As stated on the site:
    weekdays from Monday through Friday inclusive are assumed to be working days,
    while Saturday and Sunday are days off.
    This API returns all exceptions to that rule.
"""
url = 'http://basicdata.ru/api/json/calend/'
exceptions = requests.get(url).json()
return exceptions['data']
def _save_cache(data, cache_path):
with open(cache_path, 'w') as cache_file:
json.dump(data, cache_file)
def _load_cache(cache_path):
with open(cache_path) as cache_file:
return json.load(cache_file)
def _is_cache_available(cache_path, expiration_days=1):
if not os.path.isfile(cache_path):
return False
now = datetime.datetime.now()
cache_modify_dt = datetime.datetime.fromtimestamp(os.path.getmtime(cache_path))
delta = now - cache_modify_dt
if delta.days >= expiration_days:
return False
try:
with open(cache_path) as cache_file:
json.load(cache_file)
except Exception:
return False
return True
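# Hedged usage sketch (illustrative only):
#
#   if is_working_time(datetime.datetime(2020, 1, 9, 11, 30), use_cache=True):
#       print("within working hours")
#
# The helper treats Monday-Friday 10:00-17:00 as working time and applies the
# basicdata.ru exception list for holidays and shortened days.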
|
StarcoderdataPython
|
11254137
|
import logbook
def test_level_properties(logger):
assert logger.level == logbook.NOTSET
assert logger.level_name == 'NOTSET'
logger.level_name = 'WARNING'
assert logger.level == logbook.WARNING
logger.level = logbook.ERROR
assert logger.level_name == 'ERROR'
def test_reflected_properties(logger):
group = logbook.LoggerGroup()
group.add_logger(logger)
assert logger.group == group
group.level = logbook.ERROR
assert logger.level == logbook.ERROR
assert logger.level_name == 'ERROR'
group.level = logbook.WARNING
assert logger.level == logbook.WARNING
assert logger.level_name == 'WARNING'
logger.level = logbook.CRITICAL
group.level = logbook.DEBUG
assert logger.level == logbook.CRITICAL
assert logger.level_name == 'CRITICAL'
group.remove_logger(logger)
assert logger.group is None
|
StarcoderdataPython
|
9691135
|
from flask import Flask, jsonify, make_response, send_from_directory
import os
from os.path import exists, join
from constants import CONSTANTS
app = Flask(__name__, static_folder='build')
# Catching all routes
# This route is used to serve all the routes in the frontend application after deployment.
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
file_to_serve = path if path and exists(join(app.static_folder, path)) else 'index.html'
return send_from_directory(app.static_folder, file_to_serve)
# Error Handler
@app.errorhandler(404)
def page_not_found(error):
json_response = jsonify({'error': 'Page not found'})
return make_response(json_response, CONSTANTS['HTTP_STATUS']['404_NOT_FOUND'])
if __name__ == '__main__':
app.run(port=CONSTANTS['PORT'])
|
StarcoderdataPython
|
6483840
|
<gh_stars>1-10
# What happens when you instantiate a class (a fuller version with a
# metaclass).
class Meta(type):
def __new__(cls, name, bases, dict):
print('Meta.__new__()')
return super().__new__(cls, name, bases, dict)
def __init__(self, name, bases, dict):
print('Meta.__init__()')
return super().__init__(name, bases, dict)
def __call__(self, n):
print('Meta.__call__()')
return super().__call__(n)
# The following creation of A prints:
#
# Meta.__new__()
# Meta.__init__()
#
class A(metaclass=Meta):
def __new__(cls, n):
print('A.__new__()')
return super().__new__(cls)
def __init__(self, n):
print('A.__init__()')
self.n = n
print()
# The following instantiation prints:
#
# Meta.__call__()
# A.__new__()
# A.__init__()
#
a = A(1)
|
StarcoderdataPython
|
390374
|
<reponame>justinwp/rules_proto
load("//:compile.bzl", "ProtoCompileInfo")
RustProtoLibInfo = provider(fields = {
"name": "rule name",
"lib": "lib.rs file",
})
def _basename(f):
return f.basename[:-len(f.extension) - 1]
def _rust_proto_lib_impl(ctx):
"""Generate a lib.rs file for the crates."""
compilation = ctx.attr.compilation[ProtoCompileInfo]
deps = ctx.attr.deps
srcs = compilation.files
lib_rs = ctx.actions.declare_file("%s/lib.rs" % compilation.label.name)
# Search in the plugin list for 'protoc_gen_rust_grpc' or similar.
grpc = False
for plugin in compilation.plugins:
if plugin.executable.path.endswith("grpc"):
grpc = True
break
content = ["extern crate protobuf;"]
if grpc:
content.append("extern crate grpc;")
content.append("extern crate tls_api;")
# for dep in deps:
# content.append("extern crate %s;" % dep.label.name)
# content.append("pub use %s::*;" % dep.label.name)
for f in srcs:
content.append("pub mod %s;" % _basename(f))
content.append("pub use %s::*;" % _basename(f))
ctx.actions.write(
lib_rs,
"\n".join(content),
False,
)
return [RustProtoLibInfo(
name = ctx.label.name,
lib = lib_rs,
), DefaultInfo(
files = depset([lib_rs]),
)]
rust_proto_lib = rule(
implementation = _rust_proto_lib_impl,
attrs = {
"compilation": attr.label(
providers = [ProtoCompileInfo],
mandatory = True,
),
"deps": attr.label_list(
# providers = [""],
),
},
output_to_genfiles = True,
)
|
StarcoderdataPython
|
12847654
|
class HasIdent(object):
"""
    Base class used to avoid a circular reference from CfgGenerator.
"""
def __init__(self, ident: int = 0):
self.ident = ident
def increase_ident(self):
self.ident += 1
def decrease_ident(self):
self.ident -= 1
class CfgGeneratorIdent(object):
def __init__(self, gen: HasIdent):
self.gen = gen
def __enter__(self):
self.gen.increase_ident()
def __exit__(self, exc_type, exc_val, exc_tb):
self.gen.decrease_ident()
class CfgGenerator(HasIdent):
def __init__(self, step: int = 4):
super(CfgGenerator, self).__init__()
self.step = step
self.conf = []
def to_cfg_string(self):
"""
        Return the generated configuration as a single string.
:return:
"""
return "".join(self.conf)
def with_ident(self):
"""
        Return a context-manager object that holds the indentation state, for use in a with statement.
:return:
"""
return CfgGeneratorIdent(self)
def increase_ident(self, ident: int = 1):
"""
        Manually increase (or, with a negative value, decrease) the indentation level.
:param ident:
:return:
"""
self.ident += ident
def append_with(self, conf: str = "", new_line: bool = True, with_ident: bool = True):
"""
        Append a piece of configuration; the new_line and with_ident flags control whether a newline and the current indentation are added.
:param conf:
:param new_line:
:param with_ident:
:return:
"""
ident = "" if not with_ident else self.ident_str()
        if conf:
self.conf.append("%s%s" % (ident, conf))
if new_line:
self.conf.append("\n")
def ident_str(self):
return self.ident * self.step * " "
def str_with_ident(self, s) -> str:
"""
        Build a string prefixed with the current indentation.
"""
return "%s%s" % (self.ident_str(), s)
|
StarcoderdataPython
|
|