metadata (dict) | text (stringlengths 60–3.49M)
---|---
{
"source": "jluini/julo-doc",
"score": 2
} |
#### File: julopedia/julodoc/doctree.py
```python
import mistune
from mistune_contrib import math
class JuloParser(mistune.Markdown):
def __init__(self):
treeRenderer = JuloDocRenderer()
blockLexer = mistune.BlockLexer(mistune.BlockGrammar()) #JuloDocBlockLexer()
inlineLexer = JuloDocInlineLexer(treeRenderer)
super(JuloParser, self).__init__(renderer=treeRenderer, inline=inlineLexer, block=blockLexer)
def output_block_math(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'block_math_end':
body += self.tok()
return self.renderer.block_math(body)
"""
class JuloDocBlockLexer(mistune.BlockLexer, math.MathBlockMixin):
def __init__(self, *args, **kwargs):
super(JuloDocBlockLexer, self).__init__(*args, **kwargs)
self.enable_math()
"""
class JuloDocInlineLexer(mistune.InlineLexer, math.MathInlineMixin):
def __init__(self, renderer, *args, **kwargs):
super(JuloDocInlineLexer, self).__init__(renderer, *args, **kwargs)
self.enable_math()
"""
class DocTree(object):
def __init__(self):
self.root = ParentNode()
def __iadd__(self, other):
self.root.children.append(other)
"""
#class DocTreeRenderer(math.MathRendererMixin):
#class DocTreeRenderer(mistune_contrib.Renderer):
class JuloDocRenderer(object):
def __init__(self, **kwargs):
self.options = kwargs
def placeholder(self):
return ParentNode()
def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
return BlockCode(code, lang)
def block_quote(self, text):
return BlockQuote(text)
def block_html(self, html):
return BlockHtml(html)
def header(self, text, level, raw=None):
return Header(text, level, raw)
def hrule(self):
return HRule()
def list(self, body, ordered=True):
return List(body, ordered)
def list_item(self, text):
return ListItem(text)
def paragraph(self, text):
"""Rendering paragraph tags. Like ``<p>``."""
return Paragraph(text)
# table funcs
def double_emphasis(self, content):
return SimpleContainer("strong", content)
def emphasis(self, content):
return SimpleContainer("em", content)
def codespan(self, content):
return SimpleContainer("code", content)
def linebreak(self):
return Fixed('<br />\n')
def strikethrough(self, content):
return SimpleContainer("del", content)
def text(self, content):
return Text(content)
def escape(self, content):
return Escape(content)
def autolink(self, link, is_email=False):
return AutoLink(link, is_email)
def link(self, link, title, text):
return Link(link, title, text)
def image(self, src, title, text):
raise Exception("Not implemented")
def inline_html(self, html):
return InlineHtml(html)
def newline(self):
return NewLine()
#footnotes
# Math
def block_math(self, text):
return BlockMath(text)
def block_latex(self, name, text):
return BlockLatex(name, text)
def math(self, text):
return InlineMath(text)
class DocNode(object):
pass
class Fixed(DocNode):
def __init__(self, text):
self.text = text
def __str__(self):
return self.text
def toHtml(self):
return self.text
class SimpleContainer(DocNode):
def __init__(self, name, content):
self.name = name
self.content = content
print("Creating SimpleContainer '" + name + "'")
def __str__(self):
return self.name + "(" + self.content.__str__() + ")"
def toHtml(self):
if(isinstance(self.content, DocNode)):
contentText = self.content.toHtml()
elif(isinstance(self.content, str)):
contentText = self.content # TODO escape html
else:
return "[unknown content type for container '%s']" % self.name
return "<%s>%s</%s>" % (self.name, contentText, self.name)
class InlineMath(DocNode):
def __init__(self, text):
self.text = text
def __str__(self):
return "InlineMath(%s)" % self.text
def toHtml(self):
return "\\( %s \\)" % self.text
class BlockMath(DocNode):
def __init__(self, text):
self.text = text
def __str__(self):
return "BlockMath(%s)" % self.text
def toHtml(self):
return "$$ %s $$" % self.text
class BlockLatex(DocNode):
def __init__(self, name, text):
self.name = name
self.text = text
def __str__(self):
return "BlockLatex(%s)" % self.text
def toHtml(self):
return r'\begin{%s}%s\end{%s}' % (self.name, self.text, self.name)
class Text(DocNode):
def __init__(self, text):
self.text = text
def __str__(self):
return self.text
def toHtml(self):
# TODO escape
return self.text
class Link(DocNode):
def __init__(self, link, title, text):
self.link = link
self.title = title
self.text = text
class AutoLink(DocNode):
def __init__(self, link, is_email):
self.link = link
self.is_email = is_email
class List(DocNode):
def __init__(self, content, ordered):
self.content = content
self.ordered = ordered
def __str__(self):
if self.ordered:
return "OrderedList(" + self.content.__str__() + ")"
else:
return "List(" + self.content.__str__() + ")"
def toHtml(self):
if(self.ordered):
tag = "ol"
else:
tag = "ul"
return "<%s>%s</%s>" % (tag, self.content.toHtml(), tag)
class ListItem(DocNode):
def __init__(self, content):
self.content = content
def __str__(self):
return "ListItem(" + self.content.__str__() + ")"
def toHtml(self):
return "<li>%s</li>" % (self.content.toHtml())
class Escape(DocNode):
def __init__(self, text):
self.text = text
def __str__(self):
return "Escape(%s)" % self.text
def toHtml(self):
return self.text # TODO !!!
class NewLine(DocNode):
def __init__(self):
pass
class Paragraph(DocNode):
def __init__(self, content):
self.content = content
def __str__(self):
return "Paragraph(" + self.content.__str__() + ")"
def toHtml(self):
return "<p>" + self.content.toHtml() + "</p>"
class Header(DocNode):
def __init__(self, content, level, raw):
self.content = content
self.level = level
self.raw = raw
def __str__(self):
return "Header (" + self.content.__str__() + ")"
def toHtml(self):
return "<h%s>%s</h%s>" % (self.level, self.content.toHtml(), self.level)
class HRule(DocNode):
pass
class BlockCode(DocNode):
def __init__(self, code, lang):
self.code = code
self.lang = lang
class BlockHtml(DocNode):
def __init__(self, html):
self.html = html
class InlineHtml(DocNode):
def __init__(self, html):
self.html = html
class BlockQuote(DocNode):
def __init__(self, content):
self.content = content
class ParentNode(DocNode):
def __init__(self):
#self.type =
self.children = []
def __iadd__(self, other):
self.children.append(other)
return self
def toHtml(self):
ret = ''
for child in self.children:
if isinstance(child, DocNode):
childContent = child.toHtml()
elif isinstance(child, str):
childContent = child # escape
else:
childContent = "[Unknown node type]"
ret += childContent
return ret
def __str__(self):
ret = "{"
first = True
for child in self.children:
if not first:
ret += ", "
ret += child.__str__()
first = False
ret += "}"
return ret
#class JuloDocRenderer(TreeRenderer, math.MathRendererMixin):
# def __init__(self, *args, **kwargs):
# super(JuloDocRenderer, self).__init__(*args, **kwargs)
# #self.enable_math()
```
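A minimal usage sketch of the parser above (hedged: it assumes mistune 0.x and mistune_contrib are installed and that this module is importable as `julodoc.doctree`); `parse()` returns the `ParentNode` built by the renderer, which can then be serialized with `toHtml()`:
```python
from julodoc.doctree import JuloParser

parser = JuloParser()
tree = parser.parse("# Title\n\nSome *emphasized* text.")
print(tree)           # nested repr of the DocNode tree, e.g. {Header (...), Paragraph(...)}
print(tree.toHtml())  # e.g. "<h1>Title</h1><p>Some <em>emphasized</em> text.</p>"
```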
#### File: julo-doc/julopedia/models.py
```python
from django.db import models
from django.urls import reverse
from django.template.defaultfilters import title
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from julodoc.tree import julotree
class NodeManager(models.Manager):
def get_children(self, parent):
if parent:
parent_id = parent.id
else:
parent_id = None
return self.filter(parent=parent_id)
def get_by_path(self, path):
if not isinstance(path, (list, tuple)):
raise Node.DoesNotExist("Invalid path (expected list or tuple): %s" % (path,))
elif len(path) == 0:
raise Node.DoesNotExist("Empty path")
node = None
for token in path:
try:
if(node == None):
node = self.get(parent=None, node_key=token)
else:
node = self.get(parent=node.id, node_key=token)
except ObjectDoesNotExist:
raise Node.DoesNotExist("child with key '%s'" % token)
if not node:
raise Exception("Node expected")
return node
class Author(models.Model):
author_name = models.CharField(max_length = 200)
def __str__(self):
return self.author_name
class Meta:
verbose_name = _('author')
verbose_name_plural = _('authors')
valid_key = RegexValidator(julotree.key_regex)
class Node(models.Model):
# Manager
objects = NodeManager()
# Fields
node_type = models.IntegerField(default = 0, verbose_name=_('type'), choices = (
(0, _("section")),
(1, _("theory")),
(2, _("exercise")),
))
node_key = models.CharField(max_length = 200, verbose_name=_('key'), validators=[valid_key])
title = models.CharField(max_length = 200, verbose_name=_('title'))
content = models.CharField(max_length = 15000, blank=True, default='', verbose_name=_('content'))
author = models.ForeignKey(Author, on_delete=models.SET_NULL, null = True, blank = True, verbose_name=_('author'))
numbering = models.BooleanField(default=True,verbose_name=_('numbering'))
created_time = models.DateTimeField(editable=False)
modified_time = models.DateTimeField(editable=False)
parent = models.ForeignKey('self', on_delete=models.SET_NULL, null = True, blank = True) #, editable=False)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created_time = timezone.now()
self.modified_time = timezone.now()
return super(Node, self).save(*args, **kwargs)
class Meta:
order_with_respect_to = 'parent'
verbose_name = _('node')
verbose_name_plural = _('nodes')
def __str__(self):
return '(' + str(self.id) + ')-' + self.title[:3]
``` |
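A short sketch of how the custom manager might be used (hedged: assumes a configured Django project with migrations applied; the path keys below are hypothetical):
```python
from julopedia.models import Node

top_level = Node.objects.get_children(None)              # nodes with no parent
node = Node.objects.get_by_path(["algebra", "groups"])   # hypothetical node_key path
children = Node.objects.get_children(node)
```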
{
"source": "j-luis996/crud_python-dic",
"score": 3
} |
#### File: j-luis996/crud_python-dic/crud.py
```python
from ej import search_client
import const
class crud:
def __get_client_field(self, field_name, message = "What's the client {}? "):
field = None
while not field:
field = input(message.format(field_name))
return field
def __get_client_from_user(self):
client = {
'name': self.__get_client_field(field_name = 'name'),
'company': self.__get_client_field(field_name = 'company'),
'email': self.__get_client_field(field_name = 'email'),
'position': self.__get_client_field(field_name = 'position'),
}
return client
def __add_client(self, client):
if client in const.clients:
print("Client alredy in client's list")
else:
const.clients.append(client)
def create_client(self):
client = self.__get_client_from_user()
self.__add_client(client)
print('Client added successfully')
def read_clients(self):
print('uid | name | company | email | position')
print('*' * 50)
for idx, client in enumerate(const.clients):
print(f'{idx} | {client["name"]} | {client["company"]} | {client["email"]} | {client["position"]}')
def update_client(self):
id_client = int(self.__get_client_field(field_name="id"))
if id_client < len(const.clients):
client_update = self.__get_client_from_user()
const.clients[id_client] = client_update
print("Client updated in client's list")
else:
print('id invalid')
def delete_client(self):
id_client = int(self.__get_client_field(field_name="id"))
if id_client < len(const.clients):
for idx, client in enumerate(const.clients):
if idx == id_client:
del const.clients[idx]
break
print("Client deleted in client's list")
else:
print('id invalid')
def search_client(self, data, key = "name"):
client_exist = False
for client in const.clients:
if client[key] == data:
client_exist = True
break
else:
continue
return client_exist
``` |
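A minimal interactive sketch of the class above (hedged: assumes this module is saved as crud.py and that the ej and const modules it imports are available, with const.py exposing a module-level clients list as the code implies):
```python
import const
from crud import crud

const.clients = []
app = crud()
app.create_client()   # prompts for name, company, email and position
app.read_clients()    # prints the stored clients as a small table
```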
{
"source": "j-luis996/python_intermedio",
"score": 4
} |
#### File: j-luis996/python_intermedio/cuadrado_naturales.py
```python
def main():
natula_list = []
for i in range(1,101):
#store the squared natural numbers
#natula_list.append(i**2)
#store the square of i in aux
aux = i**2
#if aux is not divisible by 3, append aux to natula_list
if aux%3 != 0:
natula_list.append(aux)
print(f"list of squared natural numbers: {natula_list}")
#with a list comprehension
natural_list2 = [i**2 for i in range(1,101) if i%3 != 0]
print(f"list built with a list comprehension: {natural_list2}")
def reto():
narutal_list = [i for i in range(1,1000) if i%4 == 0 and i%6 == 0 and i%9 == 0]
print(narutal_list)
if __name__ == "__main__":
#main()
reto()
```
#### File: j-luis996/python_intermedio/listas_y_diccionarios.py
```python
def main():
my_list = [1,"hello",True,4.5]
my_dict = {'fistname' : 'luis', 'lastname':'martinez'}
super_list = [
{'firstname' : 'luis', 'lastname':'martinez'},
{'firstname' : 'lizette', 'lastname':'martinez'},
{'firstname' : 'dalia', 'lastname':'martinez'}
]
for i in super_list:
for key, values in i.items():
print(f'{key}: {values}')
print('##########')
if __name__ == '__main__':
main()
``` |
{
"source": "JLuisRojas/reconocimiento-de-voz",
"score": 2
} |
#### File: src/dataset/procesar-common-voice.py
```python
from preparacion import *
distribuciones = ["train"]
def main():
for distribucion in distribuciones:
common_proc_distrib("common-voice/es/", distribucion+".tsv", "clips/")
if __name__ == "__main__":
main()
```
#### File: src/model/layers.py
```python
import tensorflow as tf
from tensorflow.keras import layers
"""
TODO:
- Crear layer para obtener mask
- Crear conv con mask
"""
"""
Capa que calcula el mask del input, esto solo funciona con el
siguiente shape: [batch, frames, rows, cols].
Y regresa: [batch, frames] de tipo booleano con el mask a los
frames
"""
class ObtenerMask(layers.Layer):
def call(self, inputs):
return inputs
def compute_mask(self, inputs, mask=None):
shape = tf.shape(inputs)
reshaped_input = tf.reshape(inputs, [shape[0], shape[1], -1])
max_input = tf.math.reduce_max(reshaped_input, 2)
mask = tf.not_equal(max_input, tf.zeros([shape[0], shape[1]],
dtype=tf.float32))
return mask
class MaskWrapper(layers.Wrapper):
def __init__(self, layer, **kwargs):
super(MaskWrapper, self).__init__(layer, **kwargs)
self.supports_masking = True
def call(self, inputs):
"""
NOTE: call is assumed to take no extra arguments
kwargs = {}
if has_arg(self.layer.call, 'training'):
kwargs['training'] = traininginp
"""
return self.layer.call(inputs)
def compute_mask(self, inputs, mask=None):
return mask
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
```
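A small sketch of the mask computation documented above (hedged: assumes it runs in the same module as ObtenerMask); frames that are entirely zero are treated as padding and masked out:
```python
import tensorflow as tf

# one example with 2 real frames followed by 3 zero-padded frames
x = tf.concat([tf.ones([1, 2, 4, 4]), tf.zeros([1, 3, 4, 4])], axis=1)  # [batch, frames, rows, cols]
mask = ObtenerMask().compute_mask(x)
print(mask)  # [[ True  True False False False]]
```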
#### File: src/pipeline/pipeline.py
```python
import abc
"""
Abstract class that defines the implementation of the training
Pipeline
"""
class Pipeline:
"""
Method that fits the model to the dataset
Args:
train_descrip: DataDescripcion object with the description
of the training dataset
test_descrip: DataDescripcion object with the description
of the test dataset
setup: dictionary with hyperparameters and training setup
"""
@abc.abstractmethod
def fit(self, train_descrip, test_descrip, setup):
pass
"""
Method that returns the pipeline configuration
"""
@abc.abstractmethod
def get_config(self):
pass
``` |
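A minimal concrete implementation sketch of the interface above (hedged: the class name, setup keys and training call are illustrative, not taken from the project):
```python
class BasicPipeline(Pipeline):
    def __init__(self, model):
        self.model = model
        self.setup = None

    def fit(self, train_descrip, test_descrip, setup):
        self.setup = setup
        # e.g. build input pipelines from the descriptions, then train:
        # self.model.fit(train_data, validation_data=test_data, epochs=setup["epochs"])
        return self.model

    def get_config(self):
        return {"model": type(self.model).__name__, "setup": self.setup}
```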
{
"source": "jlujan/python-crashreporter",
"score": 2
} |
#### File: python-crashreporter/crashreporter/main.py
```python
from six import print_
from pprint import pprint
import argparse
from crashreporter.crash_report_pb2 import CrashReport
CRASH_REPORT_HEADER_LENGTH = 8
def load_crash_report(data):
#load the crash report data
cr = CrashReport()
cr.ParseFromString(data)
return cr
def pb_fields_to_dict(obj, ignore=None):
if not hasattr(obj, "ListFields"):
raise Exception("Object not a ProtoBuf Object.")
ignore = list() if ignore is None else ignore
fields = []
for desc, val in obj.ListFields():
if desc.enum_type is not None:
val = desc.enum_type.values_by_number[val].name
fields.append((desc.name, val))
return {k: v for k, v in fields if k not in ignore}
def get_summary(report):
return {"System Info": pb_fields_to_dict(report.system_info),
"Application Info": pb_fields_to_dict(report.application_info),
"Signal Info": pb_fields_to_dict(report.signal),
"Excpetion Info": pb_fields_to_dict(report.exception, ignore=['frames']),
}
def print_summary(report):
""" Print simple crash information
"""
pprint(get_summary(report))
print_()
def build_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("crash_reports", nargs="*", type=str,
help="One or more crash files.")
return parser
def main(cr_file):
cr = ""
print_("Processing {}\n".format(cr_file))
with open(cr_file, 'rb') as fp:
#eat the header
fp.read(CRASH_REPORT_HEADER_LENGTH)
cr = load_crash_report(fp.read())
print_summary(cr)
def run():
parser = build_arg_parser()
args = parser.parse_args()
if len(args.crash_reports) == 0:
parser.print_help()
raise SystemExit("\nError: Must provide path to crash report!")
for cr_file in args.crash_reports:
main(cr_file)
if __name__ == "__main__":
run()
``` |
{
"source": "jluk-codi/ci-utils",
"score": 3
} |
#### File: tungsten_ci_utils/mirror_maven_repo/scrapy-maven.py
```python
import scrapy
import re
# crawls a website starting at the provided link
# extracts all links from the website and either follows them and repeats the process
# or prints out the link when it's pointing to an artifact
# an 'artifact' is considered to be a file with an extension
# ignores / does not follow links from the IGNORE_LIST
# run command: scrapy runspider scrapy-maven.py -a start_url='http://localhost:8443/maven-repo'
class MavenRepositorySpider(scrapy.Spider):
name = 'maven-repo-spider'
custom_settings = {
'LOG_LEVEL': 'DEBUG'
}
IGNORE_LIST = ['javadoc/']
EXTENSION_REGEX = '\.[a-zA-Z]*$'
def __init__(self, *args, **kwargs):
super(MavenRepositorySpider, self).__init__(*args, **kwargs)
self.start_urls = [self.start_url]
def parse(self, response):
for next_page in response.css('a'):
text = next_page.css('a ::text').extract_first()
if re.search(self.EXTENSION_REGEX, text):
print('{}'.format(response.url + text))
elif not any([ignored in text for ignored in self.IGNORE_LIST]):
yield response.follow(next_page, self.parse)
``` |
{
"source": "jlukose12/RedditWordCloudSentimentAnalysis",
"score": 3
} |
#### File: RedditWordCloudSentimentAnalysis/main/subaccess.py
```python
import sys
import praw
import config
from prawcore.exceptions import NotFound
from praw.exceptions import ClientException
class SubredditAccess(object):
def __init__(self, sub_name):
self.SUBREDDIT_NAME = sub_name.replace(' ', '')
self.USER_AGENT = "Subreddit Sentiment Analysis v0.1.0 for /r/" + self.SUBREDDIT_NAME
self.SUBMISSION_LIMIT = 1000
self.VALID_SUB = False
self.REDDIT_INSTANCE = praw.Reddit(username=config.username,
password=<PASSWORD>,
client_id=config.client_id,
client_secret=config.client_secret,
user_agent=self.USER_AGENT)
self.SUBREDDIT = None
def access_sub(self):
self.validate_sub()
if self.VALID_SUB:
self.SUBREDDIT = self.REDDIT_INSTANCE.subreddit(self.SUBREDDIT_NAME)
return self.SUBREDDIT
else:
sys.exit(1) # Not necessary, remove later
def validate_sub(self):
self.VALID_SUB = False
try:
response = self.REDDIT_INSTANCE.subreddits.search_by_name(self.SUBREDDIT_NAME, exact=True)
if len(response) == 0:
raise ClientException("Invalid input, must not be empty")
else:
self.VALID_SUB = True # Only valid here, must not throw any Exception and must not be empty input
except NotFound:
self.VALID_SUB = False
print("This subreddit does not exist, make sure you input a valid subreddit")
sys.exit(1)
except Exception:
print("Unexpected Error Occurred")
raise
``` |
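A short usage sketch of the class above (hedged: assumes config.py holds valid Reddit API credentials; the subreddit name is only an example):
```python
sub_access = SubredditAccess("learnpython")   # hypothetical subreddit
subreddit = sub_access.access_sub()
for submission in subreddit.hot(limit=5):
    print(submission.title)
```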
{
"source": "jlumbroso/princeton-scraper-cos-people",
"score": 3
} |
#### File: src/princeton_scraper_cos_people/output.py
```python
import datetime
import json
import typing
import comma
import princeton_scraper_cos_people.cos_directory
import princeton_scraper_cos_people.parsing
__author__ = "<NAME> <<EMAIL>>"
__all__ = [
"json_output",
"csv_output",
]
# noinspection PyBroadException
def json_output(
person_types: typing.Optional[typing.List[princeton_scraper_cos_people.parsing.CosPersonType]],
download_images=False,
) -> typing.Optional[str]:
try:
data = princeton_scraper_cos_people.cos_directory.fetch_cos_people_directory(
person_types=person_types,
download_images=download_images,
)
return json.dumps({
"source": "https://github.com/jlumbroso/princeton-scraper-cos-people/",
"timestamp": datetime.datetime.now().isoformat(),
"data": data,
}, indent=2)
except Exception:
raise
return
def csv_output(
person_types: typing.Optional[typing.List[princeton_scraper_cos_people.parsing.CosPersonType]],
) -> typing.Optional[str]:
try:
data = princeton_scraper_cos_people.cos_directory.fetch_cos_people_directory(
person_types=person_types,
download_images=False,
)
# for row in data:
# del row["research"]
# row["affiliations"] = ";".join(row["affiliations"])
return comma.dumps(data)
except Exception:
return
```
#### File: princeton-scraper-cos-people/tests/test_princeton_scraper_cos_people.py
```python
import requests
import bs4
#from princeton_scraper_seas_faculty import __version__
SOME_LOW_TENS_NUMBER = 14
# def test_version():
# assert __version__ == '0.1.0'
def test_faculty_format_dom():
r = requests.get("https://www.cs.princeton.edu/people/faculty")
assert r.ok
s = bs4.BeautifulSoup(r.content, features="html.parser")
assert s is not None
people = s.find_all("div", {"class": "people"})
person = s.find_all("div", {"class": "person"})
assert len(people) == 1
assert len(person) > SOME_LOW_TENS_NUMBER
``` |
{
"source": "jlumbroso/princeton-scraper-seas-faculty",
"score": 4
} |
#### File: src/princeton_scraper_seas_faculty/helpers.py
```python
import typing
__author__ = "<NAME> <<EMAIL>>"
__all__ = [
"split_name",
]
def split_name(name: str) -> typing.Tuple[str, str]:
"""
Returns a likely `(first, last)` split given a full name. This uses
very simple heuristics, and assumes Western usage.
:param name: A full name (first and last name).
:return: A split pair with the first names, and the last name.
"""
words = name.split()
first_bits = words[:-1]
last_bits = words[-1:]
while len(first_bits) > 0 and first_bits[-1][0].islower():
last_bits = [first_bits[-1]] + last_bits
first_bits = first_bits[:-1]
first_joined = " ".join(first_bits)
last_joined = " ".join(last_bits)
return first_joined, last_joined
``` |
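Two examples of the heuristic above: lowercase particles are pulled into the last name:
```python
split_name("Ada Lovelace")          # -> ("Ada", "Lovelace")
split_name("Ludwig van Beethoven")  # -> ("Ludwig", "van Beethoven")
```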
{
"source": "jlumbroso/python-slack-scim",
"score": 3
} |
#### File: slack_scim/v1/service_provider_configs.py
```python
from typing import Optional, Any, List, TypeVar, Callable, Type, cast
T = TypeVar("T")
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_none(x: Any) -> Any:
assert x is None
return x
def from_union(fs, x):
for f in fs:
try:
return f(x)
except:
pass
assert False
def from_bool(x: Any) -> bool:
assert isinstance(x, bool)
return x
def from_int(x: Any) -> int:
assert isinstance(x, int) and not isinstance(x, bool)
return x
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
assert isinstance(x, list)
return [f(y) for y in x]
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
class AuthenticationScheme:
description: Optional[str]
name: Optional[str]
primary: Optional[bool]
spec_url: Optional[str]
type: Optional[str]
def __init__(self, description: Optional[str], name: Optional[str], primary: Optional[bool], spec_url: Optional[str], type: Optional[str]) -> None:
self.description = description
self.name = name
self.primary = primary
self.spec_url = spec_url
self.type = type
@staticmethod
def from_dict(obj: Any) -> 'AuthenticationScheme':
assert isinstance(obj, dict)
description = from_union([from_str, from_none], obj.get("description"))
name = from_union([from_str, from_none], obj.get("name"))
primary = from_union([from_bool, from_none], obj.get("primary"))
spec_url = from_union([from_str, from_none], obj.get("specUrl"))
type = from_union([from_str, from_none], obj.get("type"))
return AuthenticationScheme(description, name, primary, spec_url, type)
def to_dict(self) -> dict:
result: dict = {}
result["description"] = from_union([from_str, from_none], self.description)
result["name"] = from_union([from_str, from_none], self.name)
result["primary"] = from_union([from_bool, from_none], self.primary)
result["specUrl"] = from_union([from_str, from_none], self.spec_url)
result["type"] = from_union([from_str, from_none], self.type)
return result
class Bulk:
max_operations: Optional[int]
max_payload_size: Optional[int]
supported: Optional[bool]
def __init__(self, max_operations: Optional[int], max_payload_size: Optional[int], supported: Optional[bool]) -> None:
self.max_operations = max_operations
self.max_payload_size = max_payload_size
self.supported = supported
@staticmethod
def from_dict(obj: Any) -> 'Bulk':
assert isinstance(obj, dict)
max_operations = from_union([from_int, from_none], obj.get("maxOperations"))
max_payload_size = from_union([from_int, from_none], obj.get("maxPayloadSize"))
supported = from_union([from_bool, from_none], obj.get("supported"))
return Bulk(max_operations, max_payload_size, supported)
def to_dict(self) -> dict:
result: dict = {}
result["maxOperations"] = from_union([from_int, from_none], self.max_operations)
result["maxPayloadSize"] = from_union([from_int, from_none], self.max_payload_size)
result["supported"] = from_union([from_bool, from_none], self.supported)
return result
class ChangePassword:
supported: Optional[bool]
def __init__(self, supported: Optional[bool]) -> None:
self.supported = supported
@staticmethod
def from_dict(obj: Any) -> 'ChangePassword':
assert isinstance(obj, dict)
supported = from_union([from_bool, from_none], obj.get("supported"))
return ChangePassword(supported)
def to_dict(self) -> dict:
result: dict = {}
result["supported"] = from_union([from_bool, from_none], self.supported)
return result
class Filter:
max_results: Optional[int]
supported: Optional[bool]
def __init__(self, max_results: Optional[int], supported: Optional[bool]) -> None:
self.max_results = max_results
self.supported = supported
@staticmethod
def from_dict(obj: Any) -> 'Filter':
assert isinstance(obj, dict)
max_results = from_union([from_int, from_none], obj.get("maxResults"))
supported = from_union([from_bool, from_none], obj.get("supported"))
return Filter(max_results, supported)
def to_dict(self) -> dict:
result: dict = {}
result["maxResults"] = from_union([from_int, from_none], self.max_results)
result["supported"] = from_union([from_bool, from_none], self.supported)
return result
class ServiceProviderConfigs:
authentication_schemes: Optional[List[AuthenticationScheme]]
bulk: Optional[Bulk]
change_password: Optional[ChangePassword]
etag: Optional[ChangePassword]
filter: Optional[Filter]
patch: Optional[ChangePassword]
sort: Optional[ChangePassword]
xml_data_format: Optional[ChangePassword]
def __init__(self, authentication_schemes: Optional[List[AuthenticationScheme]], bulk: Optional[Bulk], change_password: Optional[ChangePassword], etag: Optional[ChangePassword], filter: Optional[Filter], patch: Optional[ChangePassword], sort: Optional[ChangePassword], xml_data_format: Optional[ChangePassword]) -> None:
self.authentication_schemes = authentication_schemes
self.bulk = bulk
self.change_password = change_password
self.etag = etag
self.filter = filter
self.patch = patch
self.sort = sort
self.xml_data_format = xml_data_format
@staticmethod
def from_dict(obj: Any) -> 'ServiceProviderConfigs':
assert isinstance(obj, dict)
authentication_schemes = from_union([lambda x: from_list(AuthenticationScheme.from_dict, x), from_none], obj.get("authenticationSchemes"))
bulk = from_union([Bulk.from_dict, from_none], obj.get("bulk"))
change_password = from_union([ChangePassword.from_dict, from_none], obj.get("changePassword"))
etag = from_union([ChangePassword.from_dict, from_none], obj.get("etag"))
filter = from_union([Filter.from_dict, from_none], obj.get("filter"))
patch = from_union([ChangePassword.from_dict, from_none], obj.get("patch"))
sort = from_union([ChangePassword.from_dict, from_none], obj.get("sort"))
xml_data_format = from_union([ChangePassword.from_dict, from_none], obj.get("xmlDataFormat"))
return ServiceProviderConfigs(authentication_schemes, bulk, change_password, etag, filter, patch, sort, xml_data_format)
def to_dict(self) -> dict:
result: dict = {}
result["authenticationSchemes"] = from_union([lambda x: from_list(lambda x: to_class(AuthenticationScheme, x), x), from_none], self.authentication_schemes)
result["bulk"] = from_union([lambda x: to_class(Bulk, x), from_none], self.bulk)
result["changePassword"] = from_union([lambda x: to_class(ChangePassword, x), from_none], self.change_password)
result["etag"] = from_union([lambda x: to_class(ChangePassword, x), from_none], self.etag)
result["filter"] = from_union([lambda x: to_class(Filter, x), from_none], self.filter)
result["patch"] = from_union([lambda x: to_class(ChangePassword, x), from_none], self.patch)
result["sort"] = from_union([lambda x: to_class(ChangePassword, x), from_none], self.sort)
result["xmlDataFormat"] = from_union([lambda x: to_class(ChangePassword, x), from_none], self.xml_data_format)
return result
def service_provider_configs_from_dict(s: Any) -> ServiceProviderConfigs:
return ServiceProviderConfigs.from_dict(s)
def service_provider_configs_to_dict(x: ServiceProviderConfigs) -> Any:
return to_class(ServiceProviderConfigs, x)
```
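A small round-trip sketch for the converters above (hedged: the field values are illustrative):
```python
raw = {
    "bulk": {"maxOperations": 1000, "maxPayloadSize": 1048576, "supported": False},
    "filter": {"maxResults": 100, "supported": True},
    "patch": {"supported": True},
}
configs = service_provider_configs_from_dict(raw)
assert configs.filter.max_results == 100
assert service_provider_configs_to_dict(configs)["patch"] == {"supported": True}
```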
#### File: tests/v1/mock_server.py
```python
import json
import logging
import threading
from http import HTTPStatus
from http.server import HTTPServer, SimpleHTTPRequestHandler
from typing import Type
from unittest import TestCase
from urllib.parse import urlparse
from tests.v1 import is_prod_test_mode
class MockHandler(SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
default_request_version = "HTTP/1.1"
logger = logging.getLogger(__name__)
def is_valid_token(self):
return "authorization" in self.headers \
and str(self.headers["authorization"]).startswith("Bearer xoxp-")
def set_common_headers(self):
self.send_header("content-type", "application/json;charset=utf-8")
self.send_header("connection", "close")
self.end_headers()
def do_GET(self):
if self.is_valid_token():
parsed_path = urlparse(self.path)
if parsed_path.path == "/ServiceProviderConfigs":
with open("tests/fixture/v1_service_provider_configs.json") as f:
body = f.read()
elif parsed_path.path == "/Users":
if "startIndex=2" in parsed_path.query:
with open("tests/fixture/v1_users_2.json") as f:
body = f.read()
else:
with open("tests/fixture/v1_users_1.json") as f:
body = f.read()
elif parsed_path.path == "/Users/W111":
with open("tests/fixture/v1_user_1.json") as f:
body = f.read()
elif parsed_path.path == "/Users/W222":
with open("tests/fixture/v1_user_2.json") as f:
body = f.read()
elif parsed_path.path == "/Groups":
if "startIndex=2" in parsed_path.query:
with open("tests/fixture/v1_groups_2.json") as f:
body = f.read()
else:
with open("tests/fixture/v1_groups_1.json") as f:
body = f.read()
elif parsed_path.path == "/Groups/S111":
with open("tests/fixture/v1_group_1.json") as f:
body = f.read()
elif parsed_path.path == "/Groups/S222":
with open("tests/fixture/v1_group_2.json") as f:
body = f.read()
elif parsed_path.path == "/Groups/S333":
with open("tests/fixture/v1_group_3.json") as f:
body = f.read()
else:
body = "{}"
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(body.encode("utf-8"))
self.wfile.close()
else:
self.send_response(HTTPStatus.UNAUTHORIZED)
self.set_common_headers()
def do_POST(self):
if self.is_valid_token():
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
input = json.loads(post_body)
parsed_path = urlparse(self.path)
if parsed_path.path == "/Users":
with open("tests/fixture/v1_user_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "W111"
b["emails"] = input["emails"]
b["name"] = input["name"]
b["userName"] = input["userName"]
body = json.dumps(b)
elif parsed_path.path == "/Groups":
with open("tests/fixture/v1_group_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "S111"
if "displayName" in input:
b["displayName"] = input["displayName"]
if "members" in input:
b["members"] = input["members"]
body = json.dumps(b)
else:
body = "{}"
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(body.encode("utf-8"))
self.wfile.close()
else:
self.send_response(HTTPStatus.UNAUTHORIZED)
self.set_common_headers()
def do_PATCH(self):
if self.is_valid_token():
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
input = json.loads(post_body)
parsed_path = urlparse(self.path)
if parsed_path.path == "/Users/W111":
with open("tests/fixture/v1_user_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "W111"
if "emails" in input:
b["emails"] = input["emails"]
if "name" in input:
b["name"] = input["name"]
if "userName" in input:
b["userName"] = input["userName"]
body = json.dumps(b)
elif parsed_path.path == "/Groups/S111":
with open("tests/fixture/v1_group_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "S111"
if "displayName" in input:
b["displayName"] = input["displayName"]
if "members" in input:
b["members"] = input["members"]
body = json.dumps(b)
else:
body = "{}"
print(body)
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(body.encode("utf-8"))
self.wfile.close()
else:
self.send_response(HTTPStatus.UNAUTHORIZED)
self.set_common_headers()
def do_PUT(self):
if self.is_valid_token():
parsed_path = urlparse(self.path)
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
input = json.loads(post_body)
if parsed_path.path == "/Users/W111":
with open("tests/fixture/v1_user_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "W111"
if "emails" in input:
b["emails"] = input["emails"]
if "name" in input:
b["name"] = input["name"]
if "userName" in input:
b["userName"] = input["userName"]
body = json.dumps(b)
elif parsed_path.path == "/Groups/S111":
with open("tests/fixture/v1_group_1.json") as f:
body = f.read()
b = json.loads(body)
b["id"] = "S111"
if "displayName" in input:
b["displayName"] = input["displayName"]
if "members" in input:
b["members"] = input["members"]
body = json.dumps(b)
else:
body = "{}"
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(body.encode("utf-8"))
self.wfile.close()
else:
self.send_response(HTTPStatus.UNAUTHORIZED)
self.set_common_headers()
def do_DELETE(self):
if self.is_valid_token():
self.send_response(HTTPStatus.OK)
self.set_common_headers()
else:
self.send_response(HTTPStatus.UNAUTHORIZED)
self.set_common_headers()
class MockServerThread(threading.Thread):
def __init__(self, test: TestCase, handler: Type[SimpleHTTPRequestHandler] = MockHandler):
threading.Thread.__init__(self)
self.handler = handler
self.test = test
def run(self):
self.server = HTTPServer(('localhost', 8888), self.handler)
self.test.server_url = "http://localhost:8888"
self.test.host, self.test.port = self.server.socket.getsockname()
self.test.server_started.set() # threading.Event()
self.test = None
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
self.join()
def setup_mock_server(test: TestCase):
if is_prod_test_mode():
test.server_url = None
else:
test.server_started = threading.Event()
test.thread = MockServerThread(test)
test.thread.start()
test.server_started.wait()
def cleanup_mock_server(test: TestCase):
if not is_prod_test_mode():
test.thread.stop()
test.thread = None
``` |
{
"source": "jlumley0413/Wordle_Repo",
"score": 3
} |
#### File: Wordle_Repo/Reinforcement_Learning/Exploit.py
```python
import numpy as np
from GYB_checker import GYB_checker
from Read_Q_table import Read_Q_table
def Exploit(correct_word,Q_table):
num_tries = 0
remaining_Q_words = Q_table.copy()
original_length = len(remaining_Q_words)
guess = []
print("Made it to exploit")
while guess != correct_word:
num_tries += 1
current_length = len(remaining_Q_words)
percent_remaining = current_length / original_length
guess = Read_Q_table(percent_remaining, remaining_Q_words)
print(guess)
remaining_words = list(remaining_Q_words.index.values)
remaining_words, ignore_letters, colors = GYB_checker(guess, correct_word, remaining_words)
print(remaining_words)
remaining_Q_words = remaining_Q_words[remaining_Q_words.index.isin(remaining_words)]
print(len(remaining_Q_words))
print(remaining_Q_words)
return num_tries
``` |
{
"source": "jlumpe/analysis-project",
"score": 2
} |
#### File: test-project-1/scripts/script.py
```python
foo = 3
bar = 'bar'
def baz(x):
return x + 1
```
#### File: analysis-project/tests/test_project.py
```python
import os
import types
import pytest
from analysis_project.project import get_project, Project
def test_get_project(test_data_dir):
pd = test_data_dir / 'test-project-1'
# Test with root directory and subdirectory as path argument
paths = [pd, pd / 'scripts']
for path in paths:
# Test by passing path as argument
project = get_project(path)
assert project.rootpath == pd
assert project.configfile == pd / 'project.yaml'
# Test calling with no arguments, should go by working directory
os.chdir(path)
project2 = get_project()
assert project2.rootpath == pd
assert project2.configfile == pd / 'project.yaml'
class test_Project:
@pytest.fixture()
def project_dir(self, test_data_dir):
return test_data_dir / 'test-project-1'
@pytest.fixture()
def project(self, project_dir):
return Project(project_dir)
def test_construct(self, project_dir):
config_dict = {'_passed_as_arg': True}
# Test root directory or config file as path argument
cases = [
(project_dir, project_dir / 'project.yaml'),
(project_dir / 'project.yaml', project_dir / 'project.yaml'),
(project_dir / 'project2.yaml', project_dir / 'project2.yaml'),
]
for path, configfile in cases:
project = Project(path)
assert project.rootpath == project_dir
assert project.configfile == configfile
assert not project.config.get('_passed_as_arg', False)
# Try again passing explicit config dict
project2 = Project(path, config=config_dict)
assert project2.rootpath == project_dir
assert project2.configfile is None
assert project2.config['_passed_as_arg']
def test_import(self, project):
rel_path = 'scripts/script.py'
# Module only
mod = project._import(rel_path)
assert isinstance(mod, types.ModuleType)
assert mod.__file__ == str(project.rootpath / rel_path)
assert mod.foo == 3
# Attributes
foo, bar, baz = project.import_(rel_path, 'foo', 'bar', 'baz')
assert foo == mod.foo
assert bar == mod.bar
assert baz is mod.baz
def test_import_magic(self, project):
rel_path = 'scripts/script.py'
# Module only
project._import(rel_path, magic=True)
assert isinstance(script, types.ModuleType)
assert script.__file__ == str(project.rootpath / rel_path)
assert script.foo == 3
# Attributes
project.import_(rel_path, 'foo', 'bar', 'baz', magic=True)
assert foo == script.foo
assert bar == script.bar
assert baz is script.baz
``` |
{
"source": "jlumpe/lyve-set-conda",
"score": 2
} |
#### File: jlumpe/lyve-set-conda/build.py
```python
import os
import subprocess as sp
# Make rules to run
BASE_MAKE_RULES = [
'install-mkdir',
'install-SGELK',
'install-CGP',
'install-perlModules',
'install-config'
]
EXPENSIVE_MAKE_RULES = ['install-phast']
# Relative directory in conda env to install to
# Just put everything in a subdirectory of opt
INSTALL_DIR = 'opt/lyve-set'
# Files to install in conda ENV from working directory
INSTALL_FILES = [
'scripts',
'lib',
'plugins',
'docs',
'README.md',
'LICENSE',
]
def log(message, *args, **kwargs):
"""Write line to stdout with recognizable prefix.
:param str message: Message to write
:param \\*args: Positional arguments to format message with.
:param \\**kwargs: Keyword arguments to format message with.
"""
print('\n@@', message.format(*args, **kwargs), '\n')
def cmd(*args, **export):
"""Run a command in a subprocess.
Prints command before executing.
:param \\*args: Command and its arguments.
:param \\**export: Environment variables to export.
"""
# Print the command
msg = '$'
if export is not None:
for k, v in export.items():
msg += ' {}="{}"'.format(k, v)
for arg in args:
msg += ' ' + arg
log(msg)
# Environment variables
env = None
if export is not None:
env = dict(os.environ)
env.update(export)
# Run
p = sp.Popen(args, env=env)
rcode = p.wait()
# Check exit code
if rcode:
raise RuntimeError(
'Process returned non-zero exit code: {}'
.format(rcode)
)
def make_symlink_relative(path):
"""Replace a symbolic link with a relative one.
:param str path: Path to symbolic link.
"""
assert os.path.islink(path)
target = os.readlink(path)
# Skip if already relative
if not os.path.isabs(target):
return
rel = os.path.relpath(target, os.path.dirname(path))
os.unlink(path)
os.symlink(rel, path)
def build(work_dir, prefix, dirty=False):
"""Run the build process.
:param str work_dir: Working directory containing the repo's source code.
:param str prefix: Path to install to (should already exist).
:param bool dirty: Whether the build process has already been run in this
directory.
"""
log('Beginning build process')
os.chdir(work_dir)
# Makefile rules to run
make_rules = BASE_MAKE_RULES[:]
if dirty:
log(
'--dirty is set, skipping the following make rules: {}',
' '.join(EXPENSIVE_MAKE_RULES),
)
else:
make_rules += EXPENSIVE_MAKE_RULES
# Run Makefile
log('Running Makefile...')
cmd('make', *make_rules)
# Run "check" rule to check dependencies
log('Checking dependencies...')
cmd('make', 'check')
# Convert absolute symlink paths to relative paths
log('Fixing symlinks...')
for fname in os.listdir('scripts'):
fpath = os.path.join('scripts', fname)
if os.path.islink(fpath):
make_symlink_relative(fpath)
# Directory to install to
install_dir = os.path.join(prefix, INSTALL_DIR)
log('Installing to {}', install_dir)
cmd('mkdir', '-p', install_dir)
# Copy files
log('Copying files...')
for file in INSTALL_FILES:
cmd(
'cp',
'-r',
os.path.join(work_dir, file),
os.path.join(install_dir, file),
)
# Install wrapper script
script_src = os.path.join(work_dir, 'wrapper.sh')
script_dst = os.path.join(prefix, 'bin', 'lyve-set')
cmd('cp', script_src, script_dst)
cmd('chmod', '+x', script_dst)
# Done
log('Install script completed successfully')
if __name__ == '__main__':
if os.environ.get('CONDA_BUILD') != '1':
raise RuntimeError('CONDA_BUILD environment variable not set')
dirty = os.environ.get('DIRTY', '') == '1'
build(os.getcwd(), os.environ['PREFIX'], dirty=dirty)
``` |
{
"source": "jlumpe/nbconvert-terminal",
"score": 3
} |
#### File: nbconvert-terminal/nbcat/terminal_exporter.py
```python
import os
import os.path
from traitlets import Bool, Unicode, validate, TraitError, default
from traitlets.config import Config
from nbconvert.exporters import TemplateExporter
from pygments import highlight
from pygments.formatters import TerminalFormatter, Terminal256Formatter
from pygments.lexers import get_lexer_by_name
from pygments.styles import get_all_styles
# ANSI color reset
ANSI_RESET = '\x1b[0m'
_ansi_color_digits = {
'black': '0',
'red': '1',
'green': '2',
'yellow': '3',
'blue': '4',
'purple': '5',
'cyan': '6',
'white': '7',
}
# ANSI control sequences for FG colors
ANSI_COLORS = {}
for name, digit in _ansi_color_digits.items():
ANSI_COLORS[name] = '\x1b[3' + digit + 'm'
ANSI_COLORS['b' + name] = '\x1b[9' + digit + 'm' # Bright
def ansi_color_filter(text, color):
"""Jinja2 filter which applies ANSI color codes to text."""
return ANSI_COLORS[color] + str(text) + ANSI_RESET
def make_syntax_highlight_filter(FormatterCls, default_style):
def syntax_highlight_filter(code, lexername='python3', style=default_style, trim=True):
lexer = get_lexer_by_name(lexername)
if trim:
code = code.strip()
out = highlight(code, lexer, FormatterCls(style=style))
if trim:
out = out.strip()
return out
return syntax_highlight_filter
class TerminalExporter(TemplateExporter):
"""Exporter for viewing notebook in terminal with ANSI colors."""
output_mimetype = 'text/x-ansi'
use_256_colors = Bool(
default_value=False,
config=True,
help='Use ANSI 256 color mode',
)
@default('use_256_colors')
def _default_use_256_colors(self):
# Just check the $TERM environment variable
return os.environ.get('TERM', '') == 'xterm-256color'
syntax_style = Unicode(
default_value='default',
config=True,
help='Pygments style name for coloring output. This is ignored unless in 256 color mode.',
)
@validate('syntax_style')
def _validate_syntax_style(self, proposal):
style = proposal['value']
if style not in get_all_styles():
raise TraitError('{!r} is not a valid pygments style'.format(style))
return style
def _file_extension_default(self):
return '.txt'
@property
def template_path(self):
templates_dir = os.path.join(os.path.dirname(__file__), 'templates')
return super().template_path + [templates_dir]
@property
def environment(self):
env = super().environment
# ANSI escape sequences
env.globals['RESET'] = ANSI_RESET
for name, code in ANSI_COLORS.items():
env.globals[name.upper()] = code
env.filters['color'] = ansi_color_filter
# Syntax highlighting filter
FormatterCls = Terminal256Formatter if self.use_256_colors else TerminalFormatter
env.filters['syntax'] = make_syntax_highlight_filter(FormatterCls, self.syntax_style)
return env
def _template_file_default(self):
return 'terminal.tpl'
@property
def default_config(self):
c = Config({
'ExtractOutputPreprocessor': {'enabled': True}
})
c.merge(super().default_config)
return c
``` |
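A minimal usage sketch (hedged: assumes pygments and a compatible nbconvert are installed — the template_path property above targets the nbconvert 5.x API — and that example.ipynb is a hypothetical notebook path):
```python
from nbcat.terminal_exporter import TerminalExporter

exporter = TerminalExporter(syntax_style="monokai")
body, resources = exporter.from_filename("example.ipynb")
print(body)  # notebook rendered with ANSI colors, e.g. for piping to `less -R`
```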
{
"source": "jlumpe/progress-interface",
"score": 3
} |
#### File: progress-interface/progress_interface/base.py
```python
from abc import ABC, abstractmethod
import typing as t
from warnings import warn
#: Registry of string keys to :class:`.ProgressConfig` instances.
REGISTRY = dict()  # type: t.Dict[str, ProgressConfig]
class AbstractProgressMonitor(ABC):
"""
Abstract base class for an object which tracks the progress of a long-running task and possibly
displays it to the user.
Concrete subclasses must implement the :meth:`moveto` and :meth:`create` methods along with the
:attr:`n`, :attr:`total`, and :attr:`closed` attributes. They may also optionally override
:meth:`increment` and :meth:`close`.
Attributes
----------
n
Number of completed iterations. Do not modify directly, use the :meth:`increment` and
:meth:`moveto` methods instead.
total
Expected total number of iterations.
closed
Whether the monitor has been closed/completed.
"""
n: int
total: int
closed: bool
def increment(self, delta: int = 1):
"""Increment the position of the monitor by the given value."""
self.moveto(self.n + delta)
@abstractmethod
def moveto(self, n: int):
"""Set the monitor's position to the given value."""
pass
def close(self):
"""Stop tracking/displaying progress and perform whatever cleanup is necessary."""
pass
def __enter__(self) -> 'AbstractProgressMonitor':
return self
def __exit__(self, *args):
self.close()
@classmethod
@abstractmethod
def create(cls,
total: int,
*,
initial: int = 0,
desc: t.Optional[str] = None,
file: t.Optional[t.TextIO] = None,
**kw,
) -> 'AbstractProgressMonitor':
"""Factory function with standardized signature to create instances of the class.
Parameters
----------
total
Total number of iterations to completion.
initial
Initial value of :attr:`n`.
desc
Description to display to the user, if applicable.
file
File-like object to write text output to, if applicable. Defaults to ``sys.stderr``.
\\**kw
Additional options depending on the subclass.
"""
pass
@classmethod
def config(cls, **kw) -> 'ProgressConfig':
"""
Get a ``ProgressConfig`` which creates instances of the class with the given default
settings..
Keyword arguments are passed on to :meth:`create`.
"""
return ProgressConfig(cls.create, kw)
class NullProgressMonitor(AbstractProgressMonitor):
"""Progress monitor which does nothing."""
def increment(self, delta: int = 1):
pass
def moveto(self, n: int):
pass
def close(self):
pass
@classmethod
def create(cls, total: int, initial: int = 0, **kw):
return cls()
#: Type alias for a factory function with signature ``(total: int, **kw) -> AbstractProgressMonitor``.
ProgressFactoryFunc = t.Callable[[int], AbstractProgressMonitor]
class ProgressConfig:
"""Configuration settings used to create new progress monitor instances.
This allows callers to pass the desired progress monitor type and other settings to a function
without needing to know the total length and other details about the task, which can be
determined within the function body.
Attributes
----------
factory
The :meth:`.AbstractProgressMonitor.create` method of a concrete progress monitor type, or
another factory with the same signature which returns a progress monitor instance.
kw
Keyword arguments to pass to ``factory``.
"""
factory: ProgressFactoryFunc
kw: t.Dict[str, t.Any]
def __init__(self, factory: ProgressFactoryFunc, kw: t.Dict[str, t.Any]):
self.factory = factory
self.kw = kw
def create(self, total: int, **kw) -> AbstractProgressMonitor:
"""
Create a progress monitor instance by calling the factory function with the stored keyword
arguments.
The signature of this function is identical to :meth:`.AbstractProgressMonitor.create`.
"""
final_kw = dict(self.kw)
final_kw.update(kw)
return self.factory(total, **final_kw)
def update(self, *args: t.Mapping[str, t.Any], **kw) -> 'ProgressConfig':
"""Update keyword arguments and return a new instance."""
new_kw = dict(self.kw)
new_kw.update(*args, **kw)
return ProgressConfig(self.factory, new_kw)
def default_config() -> ProgressConfig:
"""Get the default :class:`.ProgressConfig` instance to use.
Currently attempts to use :class:`.TqdmProgressMonitor`, if ``tqdm`` is not importable prints a
warning and uses :class:`.NullProgressMonitor`.
"""
try:
from tqdm import tqdm
except ImportError:
warn('Could not import tqdm (not installed?), no default progress monitor type available.')
return NullProgressMonitor.config()
from .monitors import TqdmProgressMonitor
return TqdmProgressMonitor.config()
#: Type alias for argument to :func:`.get_config` and :func:`.get_progress`.
ProgressArg = t.Union[ProgressConfig, str, bool, type, ProgressFactoryFunc, None]
def progress_config(arg: ProgressArg, **kw) -> ProgressConfig:
"""Get a ``ProgressConfig`` instance from a variety argument types.
Accepts the following types/values for the argument:
- :class:`.ProgressConfig`
- ``None`` - uses :class:`.NullProgressBar`.
- ``True`` - uses value returned by :func:`.default_config`.
- ``False`` - same as ``None``.
- ``str`` key - Looks up progress bar class/factory function in :data:`.REGISTRY`.
- :class:`.AbstractProgressMonitor` subclass
- ``Callable`` - factory function. Must have same signature as :meth:`.AbstractProgressMonitor.create`.
Parameters
----------
arg
See above.
\\**kw
Additional keyword arguments to add to the returned config object.
"""
if arg is True:
arg = default_config()
if isinstance(arg, str):
arg = REGISTRY[arg]
if isinstance(arg, ProgressConfig):
return arg.update(kw) if kw else arg
if arg is None or arg is False:
return NullProgressMonitor.config()
if isinstance(arg, type) and issubclass(arg, AbstractProgressMonitor):
return ProgressConfig(arg.create, kw)
if callable(arg):
return ProgressConfig(arg, kw)
raise TypeError(arg)
def get_progress(arg: ProgressArg, total: int, initial: int = 0, **kw) -> AbstractProgressMonitor:
"""Create a progress monitor instance.
See :func:`.progress_config` for description of allowed types/values for the argument.
Parameters
----------
arg
total
Number of expected iterations.
initial
Initial position of progress monitor.
\\**kw
Additional keyword arguments to pass to progress monitor class or factory function defined by
``arg``.
"""
config = progress_config(arg)
return config.create(total, initial=initial, **kw)
class ProgressIterator(t.Iterator):
itr: t.Iterator
monitor: AbstractProgressMonitor
def __init__(self, iterable: t.Iterable, monitor: AbstractProgressMonitor):
self.itr = iter(iterable)
self.monitor = monitor
self._first = True
def __next__(self):
if not self._first:
self.monitor.increment()
self._first = False
try:
value = next(self.itr)
except StopIteration:
self.monitor.close() # Close on reaching end
raise
return value
def __enter__(self):
return self
def __exit__(self, *args):
self.monitor.close()
def iter_progress(iterable: t.Iterable,
progress: ProgressArg = True,
total: t.Optional[int] = None,
**kw,
) -> ProgressIterator:
"""Display a progress monitor while iterating over an object.
The returned iterator object can also be used as a context manager to ensure that the progress
monitor is closed properly even if iteration does not finish.
Parameters
----------
iterable
Iterable object.
progress
Passed to :func:`get_progress`.
total
Total number of expected iterations. Defaults to ``len(iterable)``.
\\**kw
Additional keyword arguments to pass to progress monitor factory.
Returns
-------
.ProgressIterator
Iterator over values in ``iterable`` which advances a progress monitor.
"""
if total is None:
total = len(iterable)
monitor = get_progress(progress, total, **kw)
return ProgressIterator(iterable, monitor)
def register(key: str, arg: t.Optional[ProgressArg] = None, *, overwrite: bool=False):
"""Register a progress monitor class or factory function under the given key.
If ``arg`` is not None, it is converted to a ``ProgressConfig`` instance and registered
immediately. Otherwise a decorator function is returned which registers its argument under the
given key.
Parameters
----------
key
Key to register under.
arg
None or any value that can be passed to :func:`.progress_config`.
overwrite
Whether to allow overwriting of existing keys.
Returns
-------
Union[ProgressConfig, Callable]
The ``ProgressConfig`` instance registered if ``arg`` is not None, otherwise a decorator
function which registers its argument and returns it unchanged.
"""
def decorator(_arg: t.Union[type, t.Callable]):
if not overwrite and key in REGISTRY:
raise ValueError(f'Key {key!r} already exists in the registry')
REGISTRY[key] = progress_config(_arg)
return _arg
if arg is None:
return decorator
else:
return decorator(arg)
```
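A short usage sketch of the API above (hedged: assumes tqdm is installed so the default config resolves to a tqdm-backed monitor):
```python
from progress_interface.base import iter_progress, get_progress

# Wrap an iterable; progress=True picks the default monitor type.
for _ in iter_progress(range(1000), progress=True, desc="processing"):
    pass

# Or drive a monitor manually.
with get_progress(True, 500, desc="manual") as monitor:
    for _ in range(500):
        monitor.increment()
```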
#### File: progress-interface/progress_interface/monitors.py
```python
import typing as t
from importlib import import_module
from .base import AbstractProgressMonitor, register
@register('tqdm')
class TqdmProgressMonitor(AbstractProgressMonitor):
"""Wrapper around a progress bar from the ``tqdm`` library."""
def __init__(self, pbar):
"""
Parameters
----------
pbar
``tqdm`` instance.
"""
self.pbar = pbar
@property
def n(self):
return self.pbar.n
@property
def total(self):
return self.pbar.total
@property
def closed(self):
return False # TODO
def increment(self, delta: int = 1):
self.pbar.update(delta)
def moveto(self, n: int):
self.pbar.moveto(n)
def close(self):
self.pbar.close()
@classmethod
def create(cls,
total: int,
*,
initial: int = 0,
desc: t.Optional[str] = None,
file: t.Optional[t.TextIO] = None,
tqdm: t.Union[type, str] = 'tqdm.auto:tqdm',
**kw,
):
"""
Parameters
----------
tqdm
``tqdm`` class to use. Can be a string formatted like ``'tqdm.std:tqdm'``.
\\**kw
Passed to ``tqdm`` constructor.
"""
if isinstance(tqdm, str):
modname, name = tqdm.split(':')
module = import_module(modname)
tqdm = getattr(module, name)
return cls(tqdm(total=total, desc=desc, initial=initial, file=file, **kw))
register('tqdm-std', TqdmProgressMonitor.config(tqdm='tqdm.std:tqdm'))
register('tqdm-notebook', TqdmProgressMonitor.config(tqdm='tqdm.notebook:tqdm'))
@register('click')
class ClickProgressMonitor(AbstractProgressMonitor):
"""Wrapper around a progress bar from the ``click`` library, using ``click.progressbar()``."""
def __init__(self, pbar):
"""
Parameters
----------
pbar
Progress bar object returned by ``click.progressbar``.
"""
self.pbar = pbar
@property
def n(self):
return self.pbar.pos
@property
def total(self):
return self.pbar.length
@property
def closed(self):
return self.pbar.finished
def increment(self, delta: int = 1):
self.pbar.update(delta)
def moveto(self, n: int):
self.pbar.update(n - self.pbar.pos)
def close(self):
self.pbar.finish()
@classmethod
def create(cls,
total: int,
*,
initial: int = 0,
desc: t.Optional[str] = None,
file: t.Optional[t.TextIO] = None,
**kw,
):
"""
Parameters
----------
\\**kw
Passed to ``click.progressbar``.
"""
import click
pbar = click.progressbar(length=total, label=desc, file=file, **kw)
if initial != 0:
pbar.update(initial)
return cls(pbar)
```
#### File: progress-interface/tests/test_base.py
```python
from contextlib import contextmanager
from unittest.mock import patch
import pytest
from progress_interface.base import default_config, progress_config, get_progress, iter_progress, \
NullProgressMonitor, REGISTRY
from progress_interface.test import TestProgressMonitor
from progress_interface.monitors import TqdmProgressMonitor, ClickProgressMonitor
@contextmanager
def no_import(name: str):
"""Context manager which makes a module not importable even if installed."""
# Setting value of a key to None in sys.modules will raise a ModuleNotFound error on import,
# even if the package is installed.
with patch.dict('sys.modules', {name: None}):
yield
@pytest.mark.parametrize('with_tqdm', [False, True])
class TestDefaultConfig:
"""Test the default_config() function and get_progress(True)."""
def test_default_config(self, with_tqdm):
"""Test default_config() function."""
if with_tqdm:
pytest.importorskip('tqdm')
conf = default_config()
assert conf.factory == TqdmProgressMonitor.create
else:
with no_import('tqdm'):
with pytest.warns(UserWarning):
conf = default_config()
assert conf.factory == NullProgressMonitor.create
def test_progress_config_true(self, with_tqdm):
"""Test passing True as argument to progress_config()."""
if with_tqdm:
pytest.importorskip('tqdm') # Skip if tqdm not available.
config = progress_config(True, foo=1)
assert config.factory == TqdmProgressMonitor.create
assert config.kw == dict(foo=1)
else:
with no_import('tqdm'):
with pytest.warns(UserWarning):
config = progress_config(True, foo=1)
assert config.factory == NullProgressMonitor.create
assert config.kw == dict(foo=1)
class TestProgressConfigFunc:
"""Test the progress_config() function.
The case where arg=True is tested in TestDefaultConfig.
"""
def test_null(self):
"""Test passing None and False as argument."""
for arg in [None, False]:
config = progress_config(arg)
assert config.factory == NullProgressMonitor.create
def test_cls(self):
"""Test passing AbstractProgressMonitor subclass as argument."""
for cls in [NullProgressMonitor, TestProgressMonitor]:
config = progress_config(cls, foo=1)
assert config.factory == cls.create
assert config.kw == dict(foo=1)
def test_str(self):
for key, config in REGISTRY.items():
config2 = progress_config(key, foo=1)
assert config2.factory == config.factory
assert config2.kw == {**config.kw, 'foo': 1}
def test_factory(self):
"""Test passing a factory function as argument."""
def factory(total, *, initial=None, **kw):
return TestProgressMonitor.create(total, initial=initial, foo=1, **kw)
config = progress_config(factory, foo=1)
assert config.factory == factory
assert config.kw == dict(foo=1)
def test_progressconfig(self):
"""Test passing a factory function as argument."""
config = TestProgressMonitor.config(foo=1, bar=2)
config2 = progress_config(config, bar=20, baz=3)
assert config2.factory == TestProgressMonitor.create
assert config2.kw == dict(foo=1, bar=20, baz=3)
def test_invalid(self):
with pytest.raises(TypeError):
get_progress(0, 100)
class TestGetProgress:
"""Test the get_progress() function.
The case where arg=True is tested in TestDefaultConfig.
"""
@pytest.fixture()
def total(self):
return 100
@pytest.fixture(params=[0, 10])
def initial(self, request):
return request.param
def test_null(self, total, initial):
"""Test passing None and False as argument."""
for arg in [None, False]:
assert isinstance(get_progress(arg, total, initial=initial), NullProgressMonitor)
def test_cls(self, total, initial):
"""Test passing AbstractProgressMonitor subclass as argument."""
for cls in [NullProgressMonitor, TestProgressMonitor]:
monitor = get_progress(cls, total, initial=initial)
assert isinstance(monitor, cls)
if cls is not NullProgressMonitor:
assert monitor.total == total
assert monitor.n == initial
def test_str(self, total, initial):
# TODO - use a type that doesn't require 3rd-party library
monitor = get_progress('click', total, initial=initial)
assert isinstance(monitor, ClickProgressMonitor)
assert monitor.total == total
assert monitor.n == initial
def test_factory(self, total, initial):
"""Test passing a factory function as argument."""
def factory(total, *, initial=None, **kw):
return TestProgressMonitor.create(total, initial=initial, foo=1, **kw)
monitor = get_progress(factory, total, initial=initial, bar=2)
assert isinstance(monitor, TestProgressMonitor)
assert monitor.total == total
assert monitor.n == initial
assert monitor.kw == dict(foo=1, bar=2)
def test_progressconfig(self, total, initial):
"""Test passing a factory function as argument."""
config = TestProgressMonitor.config(foo=1)
monitor = get_progress(config, total, initial=initial, bar=2)
assert isinstance(monitor, TestProgressMonitor)
assert monitor.total == total
assert monitor.n == initial
assert monitor.kw == dict(foo=1, bar=2)
def test_invalid(self):
with pytest.raises(TypeError):
get_progress(0, 100)
@pytest.mark.parametrize('pass_total', [False, True])
@pytest.mark.parametrize('abort_early', [False, True])
def test_iter_progress(pass_total, abort_early):
"""Test the iter_progress() function."""
import string
items = string.ascii_letters
abort_at = 10
if pass_total:
iterable = iter(items)
total = len(items)
else:
iterable = items
total = None
with iter_progress(iterable, TestProgressMonitor, total=total, foo=1) as itr:
assert isinstance(itr.monitor, TestProgressMonitor)
assert itr.monitor.total == len(items)
assert itr.monitor.kw == dict(foo=1)
assert itr.monitor.n == 0
assert not itr.monitor.closed
for i, val in enumerate(itr):
assert val == items[i]
assert itr.monitor.n == i
assert not itr.monitor.closed
if abort_early and i == abort_at:
break
if abort_early:
assert i == abort_at
assert itr.monitor.n == abort_at
assert not itr.monitor.closed
else:
assert i == len(items) - 1
assert itr.monitor.n == len(items)
assert itr.monitor.closed
assert itr.monitor.closed # Always closed after exiting context
class TestRegister:
"""Test the register() function."""
# TODO
``` |
{
"source": "jlumpe/pwtools",
"score": 3
} |
#### File: pwtools/tests/test_convert.py
```python
import pytest
import numpy as np
import triarray as tri
def test_array_conversion(indices, index_matrix, upper, diag_val):
"""Test conversion between matrix and flattened triangle arrays."""
mat_vals = tri.tri_to_matrix(indices, upper=upper, diag=diag_val)
assert np.array_equal(mat_vals, index_matrix)
assert mat_vals.dtype == indices.dtype
tri_vals = tri.matrix_to_tri(index_matrix, upper=upper)
assert np.array_equal(indices, tri_vals)
assert tri_vals.dtype == index_matrix.dtype
def test_squareform_conversion(n):
"""Test vs. scipy's squareform() function."""
try:
from scipy.spatial.distance import squareform
except ImportError:
pytest.skip('Could not import scipy')
indices = np.arange(tri.tri_n(n - 1), dtype=np.double)
matrix = squareform(indices)
assert np.array_equal(tri.tri_to_matrix(indices, upper=True), matrix)
```
#### File: pwtools/tests/test_matrix.py
```python
import numpy as np
import pytest
import triarray as tri
def check_getitem_same(mat1, mat2, index):
"""Check indexing returns the same results between two matrix objects."""
assert np.array_equal(mat1[index], mat2[index])
@pytest.fixture()
def trimatrix(indices, upper, diag_val):
"""TriMatrix that should correspond to the index_matrix fixture."""
return tri.TriMatrix(indices, upper=upper, diag_val=diag_val)
def test_attrs(n, indices, trimatrix, upper, diag_val):
"""Test basic attributes."""
assert trimatrix.size == n
assert trimatrix.upper == upper
assert trimatrix.diag_val == diag_val
# Check array uses same memory
# Not sure what trickery might happen with Numba but I think comparing the
# memoryview objects in the data attribute should work fine
assert indices.data == trimatrix.array.data
def test_invalid_array_size():
"""Test constructor with invalid array size."""
with pytest.raises(ValueError):
tri.TriMatrix(np.arange(11))
def test_index_conversion(trimatrix, index_matrix):
for row in range(trimatrix.size):
for col in range(trimatrix.size):
if row == col:
# Can't get index along diagonal
with pytest.raises(ValueError):
trimatrix.flat_index(row, col)
else:
idx = trimatrix.flat_index(row, col)
assert trimatrix.array[idx] == index_matrix[row, col]
assert trimatrix.flat_index(row, col) == idx
def test_to_array(trimatrix, index_matrix):
"""Test conversion to array using method and various indices."""
assert np.array_equal(trimatrix.to_array(), index_matrix)
assert np.array_equal(trimatrix[()], index_matrix)
assert np.array_equal(trimatrix[:], index_matrix)
assert np.array_equal(trimatrix[:, :], index_matrix)
def test_getitem_single(trimatrix, index_matrix, diag_val):
"""Test getting a single element from the matrix."""
for row in range(trimatrix.size):
for col in range(trimatrix.size):
assert trimatrix[row, col] == index_matrix[row, col]
assert trimatrix.get_item(row, col) == index_matrix[row, col]
def test_get_row_single(trimatrix, index_matrix):
"""Test various methods of getting single rows."""
out = np.zeros(trimatrix.size, dtype=index_matrix.dtype)
for row in range(trimatrix.size):
row_vals = index_matrix[row]
assert np.array_equal(trimatrix[row], row_vals)
assert np.array_equal(trimatrix[row, :], row_vals)
assert np.array_equal(trimatrix[:, row], row_vals)
assert np.array_equal(trimatrix.get_row(row), row_vals)
trimatrix.get_row(row, out=out)
assert np.array_equal(out, row_vals)
def test_get_row_array(n, trimatrix, index_matrix):
"""Test getting many rows by indexing with single integer array."""
def check_rows(rows):
check_getitem_same(trimatrix, index_matrix, rows)
check_getitem_same(trimatrix, index_matrix, (rows, slice(None)))
check_getitem_same(trimatrix, index_matrix, (slice(None), rows))
# 1D array - every 10th row
step = int(np.ceil(n / 10))
rows = np.arange(0, n, step)
check_rows(rows)
# 2D array
rows = np.arange(17 * 17).reshape(17, 17)
rows = (rows * 11) % n
check_rows(rows)
# Degenerate case of empty 1D array
rows = np.arange(0)
check_rows(rows)
def test_getitem_scalar_array(n, trimatrix, index_matrix):
"""Check indexing with a single integer and an array of integers."""
def check_rows(rows):
check_getitem_same(trimatrix, index_matrix, rows)
check_getitem_same(trimatrix, index_matrix, (rows, slice(None)))
check_getitem_same(trimatrix, index_matrix, (slice(None), rows))
# 1D array - every 10th row
step = int(np.ceil(n / 10))
rows = np.arange(0, n, step)
check_rows(rows)
# 2D array
rows = np.arange(17 * 17).reshape(17, 17)
rows = (rows * 11) % n
check_rows(rows)
# Degenerate case of empty 1D array
rows = np.arange(0)
check_rows(rows)
def test_invalid_index(trimatrix):
"""Test various invalid indices."""
# Too many
with pytest.raises(ValueError):
trimatrix[:, :, :]
# Float scalar
with pytest.raises(TypeError):
trimatrix[0.5]
# Float array
with pytest.raises(ValueError):
trimatrix[np.linspace(0, 10)]
def test_index_out_of_range(trimatrix):
"""Test row/column indices out of range result in an exception."""
def check_bad_index(*index):
with pytest.raises(ValueError) as exc_info:
trimatrix[tuple(index)]
assert str(exc_info.value) == 'Index out of range'
full_slice = slice(None)
valid_array = np.arange(0, trimatrix.size, 5)
for bad_int in (-1, trimatrix.size):
bad_array = valid_array.copy()
bad_array.flat[-1] = bad_int
for bad_index in [bad_int, bad_array]:
check_bad_index(bad_index)
check_bad_index(bad_index, 0)
check_bad_index(0, bad_index)
check_bad_index(bad_index, full_slice)
check_bad_index(full_slice, bad_index)
check_bad_index(bad_index, valid_array)
check_bad_index(valid_array, bad_index)
def test_get_partial_row(trimatrix, index_matrix, upper):
"""Check get_partial_row() and iter_partial_rows() methods."""
def get_partial_row(row):
return index_matrix[row, row + 1:] if upper else index_matrix[row, :row]
for i in range(trimatrix.size):
assert np.array_equal(trimatrix.get_partial_row(i), get_partial_row(i))
for i, row in enumerate(trimatrix.iter_partial_rows()):
assert np.array_equal(row, get_partial_row(i))
def test_index_int_types(trimatrix, index_matrix):
"""
Test indexing with Python integers and numpy integer objects of various
types.
"""
# TODO - uint64 can't be converted to intp
for np_type in (np.int32, np.uint32, np.int64):
for i in range(trimatrix.size):
assert trimatrix[0, i] == trimatrix[np_type(0), np_type(i)]
indices = np.arange(trimatrix.size).astype(np_type)
assert np.array_equal(trimatrix[0, indices], index_matrix[0, indices])
```
#### File: pwtools/triarray/convert.py
```python
import numpy as np
import numba as nb
from .math import tri_n, tri_root
@nb.jit(nopython=True)
def matrix_to_tril(matrix, out):
"""Get flattened lower triangle of a square matrix.
:param matrix: Square array.
:type matrix: numpy.ndarray.
:param out: Linear array of correct length to write values to.
:type out: numpy.ndarray
"""
N = matrix.shape[0]
i = 0
for row in range(N):
for col in range(row):
out[i] = matrix[row, col]
i += 1
@nb.jit(nopython=True)
def matrix_to_triu(matrix, out):
"""Get flattened upper triangle of a square matrix.
:param matrix: Square array.
:type matrix: numpy.ndarray.
:param out: Linear array of correct length to write values to.
:type out: numpy.ndarray
"""
N = matrix.shape[0]
i = 0
for row in range(N):
for col in range(row + 1, N):
out[i] = matrix[row, col]
i += 1
def matrix_to_tri(matrix, out=None, upper=False):
"""Get flattened lower/upper triangle of a square matrix.
Output will be an array containing the triangle's elements in "row-major"
order, that is, first the elements of the 0th row in the specified triangle,
then the 1st row, etc.
:param matrix: Square array.
:type matrix: numpy.ndarray.
:param out: Existing array to write to, if any. Must be 1D with the correct
number of elements (``tri_n(N - 1)`` where ``N`` is the size of the
matrix). If None one will be created with the same data type as
``matrix``.
:type out: numpy.ndarray
:param bool upper: If True get upper triangle of matrix, otherwise get
lower triangle.
:returns: 1D array containing the specified triangle of the matrix. Will be
the same array as ``out`` if it was given.
:rtype: numpy.ndarray
"""
N = matrix.shape[0]
tri_len = tri_n(N - 1)
if out is None:
out = np.empty(tri_len, dtype=matrix.dtype)
elif out.shape != (tri_len,):
raise ValueError('"out" has incorrect shape')
if upper:
matrix_to_triu(matrix, out)
else:
matrix_to_tril(matrix, out)
return out
@nb.jit(nopython=True)
def tril_to_matrix(array, diag, out):
"""Convert flattened lower triangle to full symmetrical square matrix.
:param array: 1D array containing elements of matrix's lower triangle, in
same format as output of :func:`.matrix_to_tril`.
:type array: numpy.ndarray
:param diag: Number to fill diagonal with.
:param out: Existing array to write to. Must be square with the correct
number of elements.
:type out: numpy.ndarray
"""
N = tri_root(len(array)) + 1
i = 0
for row in range(N):
out[row, row] = diag
for col in range(row):
out[row, col] = out[col, row] = array[i]
i += 1
@nb.jit(nopython=True)
def triu_to_matrix(array, diag, out):
"""Convert flattened upper triangle to full symmetrical square matrix.
:param array: 1D array containing elements of matrix's upper triangle, in
same format as output of :func:`.matrix_to_triu`.
:type array: numpy.ndarray
:param diag: Number to fill diagonal with.
:param out: Existing array to write to. Must be square with the correct
number of elements.
:type out: numpy.ndarray
"""
N = tri_root(len(array)) + 1
i = 0
for row in range(N):
out[row, row] = diag
for col in range(row + 1, N):
out[row, col] = out[col, row] = array[i]
i += 1
def tri_to_matrix(array, diag=0, out=None, upper=False):
"""Convert flattened lower/upper triangle to full symmetrical square matrix.
:param array: 1D array containing elements of matrix's lower/upper triangle,
in same format as output of :func:`.matrix_to_tri`.
:type array: numpy.ndarray
:param diag: Number to fill diagonal with.
:param out: Existing array to write to, if any. Must be square with the
correct number of elements. If None one will be created with the same
data type as ``array``.
:type out: numpy.ndarray
:param bool upper: Whether ``array`` contains the upper (True) or lower
(False) triangle of the matrix.
:returns: Full matrix. Will be the same array as ``out`` if it was given.
:rtype: numpy.ndarray
"""
N = tri_root(len(array)) + 1
if out is None:
out = np.zeros((N, N), dtype=array.dtype)
elif out.shape != (N, N):
raise ValueError('"out" has incorrect shape')
if upper:
triu_to_matrix(array, diag, out)
else:
tril_to_matrix(array, diag, out)
return out
```
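A short round-trip sketch (not part of the original file) illustrating the two public helpers above; the example matrix is arbitrary:
```python
# Illustrative round trip between a square matrix and its flattened upper triangle.
import numpy as np
import triarray as tri

mat = np.array([[0, 1, 2],
                [1, 0, 3],
                [2, 3, 0]])

flat = tri.matrix_to_tri(mat, upper=True)            # -> array([1, 2, 3])
full = tri.tri_to_matrix(flat, diag=0, upper=True)   # rebuild the symmetric matrix
assert np.array_equal(full, mat)
```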
#### File: pwtools/triarray/io.py
```python
import io
import numpy as np
from .util import get_tqdm
def write_tri_file(fobj, matrix, upper=False, dtype=None):
"""Write the lower/upper triangle of a matrix to a binary file object.
:param fobj: Writable file-like object in binary mode.
:param matrix: Square matrix.
:type matrix: numpy.ndarray
:param bool upper: Whether to write the upper (True) or lower (False)
portion of the matrix.
:param dtype: Numpy data type to write as. If None will use the data type
of ``matrix``.
"""
n = matrix.shape[0]
if matrix.shape != (n, n):
raise ValueError('Matrix is not square')
if dtype is not None:
dtype = np.dtype(dtype)
if dtype == matrix.dtype:
dtype = None
for row in range(n):
if upper:
row_vals = matrix[row, row + 1:]
else:
row_vals = matrix[row, :row]
if row_vals.size == 0:
continue
if dtype is not None:
row_vals = row_vals.astype(dtype)
fobj.write(row_vals.tobytes())
def read_tri_file_rows(fobj, n=None, dtype='f8', upper=False, keep=None,
progress=False):
"""
Iterate over partial rows of a matrix stored in a file in flattened
triangular format.
Given a file containing distance matrix data in the non-redundant form
created by :func:`.write_tri_file`, yields each partial row of the matrix
stored in the file. These will be the portions of the rows to the left of
the diagonal if ``upper`` is False or to the right if ``upper`` is True.
Row portions of zero length will be skipped.
:param fobj: Readable, seekable file-like object in binary mode.
:param int n: Size of full matrix stored in file. Exactly one of ``n`` or
``keep`` should be given.
:param dtype: Numpy data type of the stored data.
:type dtype: numpy.dtype
:param bool upper: Whether the file contains the upper (True) or lower
(False) triangle of the matrix. Should match value used when file was
created by :func:`write_tri_file`.
:param keep: If given, subset of rows/columns of matrix to pull from file.
Should be a boolean array with length matching the size of the full
stored matrix, with ``False`` values indicating a row/column should be
skipped. The returned values will then be filtered to include only
these row/columns of the full matrix. Exactly one of ``n`` or ``keep``
should be given.
:type keep: numpy.ndarray
:param progress: If True will display a progress bar with tqdm, using either
the standard or notebook version depending on whether the code is being
executed in a Jupyter notebook or not. You may also pass any other
function taking an iterable as an argument and returning another
iterable in the same manner as :func:`tqdm.tqdm`.
:returns: Generator yielding 2-tuples of ``int`` and :class:`numpy.ndarray`,
the row index and row values. If ``keep`` was given instead of ``n`` the
row indices will be for the filtered matrix, not the full matrix.
"""
if keep is not None:
if n is not None:
raise TypeError('Must give only one of "n" and "keep"')
n = keep.size
elif n is None:
raise TypeError('Should give one of "n" or "keep"')
# Make sure dtype is a numpy dtype object so that .itemsize can be used below
dtype = np.dtype(dtype)
# Use the appropriate flavor of tqdm if progress is True
if progress is True:
progress = get_tqdm()
# Iterator over rows - wrap in progress if needed
rowiter = range(n)
if progress:
rowiter = progress(rowiter)
sub_row = 0
for row in rowiter:
# Range of columns to read
col_begin = row + 1 if upper else 0
col_end = n if upper else row
row_len = col_end - col_begin
if keep is not None and not keep[row]:
# Row to be skipped, seek ahead to the next
fobj.seek(row_len * dtype.itemsize, io.SEEK_CUR)
else:
if row_len > 0:
# Read in all stored values of the row
data = fobj.read(row_len * dtype.itemsize)
row_vals = np.frombuffer(data, dtype=dtype, count=row_len)
if keep is None:
# Yield all columns
yield sub_row, row_vals
else:
# Yield subset of columns
keep_row = keep[col_begin:col_end]
if keep_row.sum() > 0:
yield sub_row, row_vals[keep_row]
sub_row += 1
def read_tri_file(fobj, n=None, dtype='f8', upper=False, keep=None,
diag=None, out=None, **kwargs):
"""Read a full matrix from a file created by :func:`.write_tri_file`.
:param fobj: Readable, seekable file-like object in binary mode.
:param int n: Size of full matrix stored in file. Exactly one of ``n`` or
``keep`` should be given.
:param dtype: Numpy data type of the stored data.
:type dtype: numpy.dtype
:param bool upper: Whether the file contains the upper (True) or lower
(False) triangle of the matrix. Should match value used when file was
created by :func:`write_tri_file`.
:param keep: If given, subset of rows/columns of matrix to pull from file.
Should be a boolean array with length matching the size of the full
stored matrix, with ``False`` values indicating a row/column should be
skipped. ``read_tri_file(..., keep=keep)`` should be identical to
``read_tri_file(...)[np.ix_(keep, keep)]``. Exactly one of ``n`` or
``keep`` should be given.
:type keep: numpy.ndarray
:param diag: Value to fill diagonal with. If None and ``out`` is given will
keep existing diagonal values in matrix, otherwise if ``out`` is omitted
will be zero.
:param out: Square array to write matrix values to. Should be of correct
shape (``(n, n)`` where ``n`` is given explicitly or otherwise
``n = sum(keep)``).
:type out: numpy.ndarray
:param \\**kwargs: Additional keyword arguments to
:func:`.read_tri_file_rows`.
:rtype: numpy.ndarray
:returns: Square array containing matrix values. If ``out`` was given will
be the same array, otherwise a new one will be created with the
appropriate data type.
"""
if keep is not None:
if n is not None:
raise TypeError('Must give only one of "n" and "keep"')
n = np.sum(keep)
elif n is None:
raise TypeError('Should give one of "n" or "keep"')
# Create destination array if necessary
if out is None:
out = np.zeros((n, n), dtype=dtype)
elif out.shape != (n, n):
raise ValueError('"out" does not have the expected shape.')
# Pass correct arguments to read_tri_file_rows()
read_args = dict(fobj=fobj, dtype=dtype, upper=upper, **kwargs)
if keep is None:
read_args['n'] = n
else:
read_args['keep'] = keep
# Read in rows
for row, row_vals in read_tri_file_rows(**read_args):
col_slice = slice(row + 1, None) if upper else slice(row)
out[row, col_slice] = row_vals
out[col_slice, row] = row_vals
# Fill diagonal
if diag is not None:
np.fill_diagonal(out, diag)
return out
```
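A small sketch (not from the original file) of writing and re-reading a matrix triangle through an in-memory buffer; the 3x3 matrix and dtype choice are illustrative:
```python
# Illustrative write/read round trip using an in-memory binary buffer.
import io
import numpy as np
from triarray.io import write_tri_file, read_tri_file

mat = np.array([[0., 1., 2.],
                [1., 0., 3.],
                [2., 3., 0.]])

buf = io.BytesIO()
write_tri_file(buf, mat, upper=False)
buf.seek(0)
restored = read_tri_file(buf, n=3, dtype=np.dtype('f8'), upper=False, diag=0.)
assert np.array_equal(restored, mat)
```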
#### File: pwtools/triarray/math.py
```python
import numpy as np
import numba as nb
@nb.vectorize([nb.intp(nb.intp)], nopython=True)
def tri_n(n):
"""tri_n(n)
Numpy ufunc. Get the nth triangular number.
:param int n: Nonnegative integer.
:rtype: int
"""
return n * (n + 1) // 2
@nb.vectorize([nb.intp(nb.intp)], nopython=True)
def tri_root(t):
"""tri_root(t)
Numpy ufunc. Get n such that t is the nth triangular number.
This is the fastest version of this function. Behavior is undefined when
t is not a triangular number.
:param int t: Triangular number.
:rtype: int
"""
s = 8 * t + 1
rs = nb.intp(np.sqrt(s) + .5)
return (rs - 1) // 2
@nb.vectorize([nb.intp(nb.intp)], nopython=True)
def tri_root_strict(t):
"""tri_root_stric(t)
Numpy ufunc. Get n such that t is the nth triangular number, or raise an
exception if t is not triangular.
:param int t: Triangular number.
:rtype: int
:raises ValueError: If t is not a triangular number.
"""
s = 8 * t + 1
rs = nb.intp(np.sqrt(s) + .5)
if rs ** 2 != s:
raise ValueError('Not a triangular number')
return (rs - 1) // 2
@nb.vectorize([nb.intp(nb.intp)], nopython=True)
def tri_root_trunc(t):
"""tri_root_trunc(t)
Numpy ufunc. Get n such that t is >= the nth triangular number and < the
(n+1)th triangular number.
:param int t: Nonnegative integer.
:rtype: int
"""
s = 8 * t + 1
rs = nb.intp(np.sqrt(s) + .5)
if rs ** 2 > s:
rs -= 1
return (rs - 1) // 2
@nb.jit(nopython=True)
def tri_root_rem(t):
"""Get n and r such that ``t == tri_n(n) + r``.
:param t: Scalar or array of nonnegative integers.
:returns: (n, r) tuple of arrays the same shape as ``t``.
:rtype: tuple(int, int)
"""
n = tri_root_trunc(t)
return n, t - tri_n(n)
``` |
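A few hand-checked values (added for illustration) for the triangular-number helpers above:
```python
# Illustrative values for the ufuncs defined above.
from triarray.math import tri_n, tri_root, tri_root_rem

tri_n(4)          # 10, since 1 + 2 + 3 + 4 == 10
tri_root(10)      # 4
tri_root_rem(12)  # (4, 2), since 12 == tri_n(4) + 2
```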
{
"source": "jlumpe/py-bioconductor",
"score": 3
} |
#### File: py-bioconductor/pybioc/s4vectors.py
```python
import numpy as np
from rpy2.robjects.packages import importr
def dataframe_to_pandas(dataframe):
"""Convert an S4Vector DataFrame to a Pandas DataFrame.
Requires the pandas package be installed, obviously.
:param dataframe: rpy2 S4 object corresponding to an S4Vectors DataFrame.
:type dataframe: rpy2.robjects.methods.RS4
:rtype: pandas.DataFrame
"""
import pandas as pd
rbase = importr('base')
colnames = list(rbase.colnames(dataframe))
data = list(map(np.array, dataframe.do_slot('listData')))
# DataFrame.from_items() was removed in newer pandas; build from the columns directly instead
df = pd.DataFrame(dict(zip(colnames, data)), columns=colnames)
df.index = dataframe.do_slot('rownames')
return df
``` |
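A hedged usage sketch (not from the original file); the toy DataFrame construction is an assumption and requires a working R installation with the S4Vectors package:
```python
# Illustrative only; assumes rpy2 plus R with S4Vectors installed.
from rpy2.robjects.packages import importr
from pybioc.s4vectors import dataframe_to_pandas

s4vectors = importr('S4Vectors')
rdf = s4vectors.DataFrame(x=1, y='a')  # hypothetical toy S4Vectors DataFrame
pdf = dataframe_to_pandas(rdf)
print(pdf)
```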
{
"source": "jlumpe/pydatatypes",
"score": 3
} |
#### File: pydatatypes/pydatatypes/test.py
```python
import pytest
from pydatatypes.typing import TypeConversionError
def assert_convert_success(converter, value, type_, eq=True):
converted = converter.convert(value, type_)
assert converter.isinstance(converted, type_)
if eq:
assert converted == value
def assert_convert_failure(converter, value, type_):
with pytest.raises(TypeConversionError):
converter.convert(value, type_)
class ExampleValues:
"""A set of test examples divided into groups."""
def __init__(self, groups, aliases):
self.groups = dict(groups)
self.aliases = dict(aliases)
def expand_aliases(self, keys):
keys = set(keys)
for key in list(keys):
try:
alias_keys = self.aliases[key]
except KeyError:
continue
keys.remove(key)
keys.update(alias_keys)
return keys
def getkeys(self, keys, complement=False, omit=None):
keys = self.expand_aliases(keys)
if complement:
keys = self.groups.keys() - keys
if omit is not None:
omit = self.expand_aliases(omit)
keys -= omit
return keys
def values(self, keys, complement=False, omit=None):
for key in self.getkeys(keys, complement, omit):
yield from self.groups[key]
``` |
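A sketch (assumed, not from the repository) of how the ExampleValues helper might be populated and queried in a test suite:
```python
# Illustrative grouping of test values; group and alias names are made up.
from pydatatypes.test import ExampleValues

examples = ExampleValues(
    groups={'int': [0, 1, -5], 'str': ['', 'abc'], 'none': [None]},
    aliases={'scalar': ['int', 'str']},
)

list(examples.values(['scalar']))                  # all ints and strings
list(examples.values(['none'], complement=True))   # everything except the 'none' group
```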
{
"source": "jlumpe/python-emacs",
"score": 3
} |
#### File: emacs/elisp/dsl.py
```python
from .ast import *
class ElispSingleton:
"""Class of the singleton :data:`.E`.
"""
__instance = None
def __new__(cls):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
def __getitem__(self, name) -> Symbol:
"""Indexing with string gets a Symbol."""
return Symbol(name)
def _convert_symbol_name(self, name):
"""Convert symbol name from Python style to Elisp style."""
return name.replace('_', '-')
def __getattr__(self, name):
"""Attribute access with lower-case name gets a symbol."""
if name[0] == name[0].lower() and not name.startswith('__'):
return Symbol(self._convert_symbol_name(name))
return object.__getattribute__(self, name)
def __call__(self, value) -> ElispAstNode:
"""Calling as function converts value."""
return to_elisp(value)
Q = staticmethod(quote)
C = staticmethod(Cons)
S = staticmethod(symbols)
R = staticmethod(Raw)
#: Singleton object which implements the Elisp DSL.
E = ElispSingleton()
``` |
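A short sketch (not part of the original file) of the DSL singleton; only behavior visible in the class above is used:
```python
# Illustrative use of the E singleton defined above.
from emacs.elisp.dsl import E

E.save_excursion   # attribute access -> Symbol('save-excursion') (underscores become dashes)
E['let*']          # indexing -> Symbol('let*')
E.Q(E.foo)         # quote a symbol via the quote() helper
E(42)              # convert a plain Python value with to_elisp()
```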
{
"source": "jlundy2/service-auto-analyzer",
"score": 2
} |
#### File: service-auto-analyzer/commons/log_merger.py
```python
import utils.utils as utils
import copy
class LogMerger:
@staticmethod
def merge_big_and_small_logs(logs, log_level_ids_to_add,
log_level_messages, log_level_ids_merged):
"""Merge big message logs with small ones"""
new_logs = []
for log in logs:
if not log["_source"]["message"].strip():
continue
log_level = log["_source"]["log_level"]
if log["_id"] in log_level_ids_to_add[log_level]:
merged_small_logs = utils.compress(log_level_messages["message"][log_level])
new_logs.append(LogMerger.prepare_new_log(
log, log["_id"], False, merged_small_logs))
for log_level in log_level_messages["message"]:
if not log_level_ids_to_add[log_level] and\
log_level_messages["message"][log_level].strip():
log = log_level_ids_merged[log_level]
new_log = LogMerger.prepare_new_log(
log, str(log["_id"]) + "_m", True,
utils.compress(log_level_messages["message"][log_level]),
fields_to_clean=["message", "detected_message", "only_numbers",
"detected_message_with_numbers", "stacktrace",
"found_exceptions_extended", "detected_message_extended",
"detected_message_without_params_extended",
"stacktrace_extended", "message_extended",
"message_without_params_extended",
"urls", "paths", "message_params",
"message_without_params_and_brackets",
"detected_message_without_params_and_brackets"])
for field in log_level_messages:
if field in ["message"]:
continue
new_log["_source"][field] = utils.compress(
log_level_messages[field][log_level])
new_log["_source"]["found_exceptions_extended"] = utils.compress(
utils.enrich_found_exceptions(log_level_messages["found_exceptions"][log_level]))
new_logs.append(new_log)
return new_logs
@staticmethod
def decompose_logs_merged_and_without_duplicates(logs):
"""Merge big logs with small ones without duplcates"""
log_level_messages = {"message": {}, "found_exceptions": {}, "potential_status_codes": {}}
log_level_ids_to_add = {}
log_level_ids_merged = {}
logs_unique_log_level = {}
for log in logs:
if not log["_source"]["message"].strip():
continue
log_level = log["_source"]["log_level"]
for field in log_level_messages:
if log_level not in log_level_messages[field]:
log_level_messages[field][log_level] = ""
if log_level not in log_level_ids_to_add:
log_level_ids_to_add[log_level] = []
if log_level not in logs_unique_log_level:
logs_unique_log_level[log_level] = set()
if log["_source"]["original_message_lines"] <= 2 and\
log["_source"]["original_message_words_number"] <= 100:
if log_level not in log_level_ids_merged:
log_level_ids_merged[log_level] = log
log_level_representative = log_level_ids_merged[log_level]
current_log_word_num = log["_source"]["original_message_words_number"]
main_log_word_num = log_level_representative["_source"]["original_message_words_number"]
if current_log_word_num > main_log_word_num:
log_level_ids_merged[log_level] = log
normalized_msg = " ".join(log["_source"]["message"].strip().lower().split())
if normalized_msg not in logs_unique_log_level[log_level]:
logs_unique_log_level[log_level].add(normalized_msg)
for field in log_level_messages:
splitter = "\r\n" if field == "message" else " "
log_level_messages[field][log_level] =\
log_level_messages[field][log_level] + log["_source"][field] + splitter
else:
log_level_ids_to_add[log_level].append(log["_id"])
return LogMerger.merge_big_and_small_logs(logs, log_level_ids_to_add,
log_level_messages, log_level_ids_merged)
@staticmethod
def prepare_new_log(old_log, new_id, is_merged, merged_small_logs, fields_to_clean=[]):
"""Prepare updated log"""
merged_log = copy.deepcopy(old_log)
merged_log["_source"]["is_merged"] = is_merged
merged_log["_id"] = new_id
merged_log["_source"]["merged_small_logs"] = merged_small_logs
for field in fields_to_clean:
merged_log["_source"][field] = ""
return merged_log
```
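A rough sketch of how the merger collapses small logs into big ones; the field values are guesses based on the fields the methods above actually read, and it relies on the repository's utils helpers:
```python
# Illustrative input; only the fields the methods above read are filled in.
from commons.log_merger import LogMerger

small_log = {"_id": 1, "_source": {
    "message": "connection refused", "log_level": 40000,
    "original_message_lines": 1, "original_message_words_number": 2,
    "found_exceptions": "", "potential_status_codes": ""}}
big_log = {"_id": 2, "_source": {
    "message": "very long stacktrace ...", "log_level": 40000,
    "original_message_lines": 50, "original_message_words_number": 500,
    "found_exceptions": "", "potential_status_codes": ""}}

merged = LogMerger.decompose_logs_merged_and_without_duplicates([small_log, big_log])
# -> one entry for the big log, with the small log compressed into its "merged_small_logs"
#    field; a synthetic "<id>_m" entry is only produced for levels that have no big logs.
```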
#### File: commons/object_saving/minio_client.py
```python
from minio import Minio
import json
import io
import logging
import pickle
logger = logging.getLogger("analyzerApp.minioClient")
class MinioClient:
def __init__(self, app_config):
self.minioClient = None
try:
self.minioClient = Minio(
app_config["minioHost"],
access_key=app_config["minioAccessKey"],
secret_key=app_config["minioSecretKey"],
secure=False,
region=app_config["minioRegion"]
)
logger.info("Minio intialized %s" % app_config["minioHost"])
except Exception as err:
logger.error(err)
def remove_project_objects(self, project_id, object_names):
if self.minioClient is None:
return
try:
bucket_name = project_id
if not self.minioClient.bucket_exists(bucket_name):
return
for object_name in object_names:
self.minioClient.remove_object(
bucket_name=bucket_name, object_name=object_name)
except Exception as err:
logger.error(err)
def put_project_object(self, data, project_id, object_name, using_json=False):
if self.minioClient is None:
return
try:
bucket_name = project_id
if not self.minioClient.bucket_exists(bucket_name):
logger.debug("Creating minio bucket %s" % bucket_name)
self.minioClient.make_bucket(bucket_name)
logger.debug("Created minio bucket %s" % bucket_name)
if using_json:
data_to_save = json.dumps(data).encode("utf-8")
else:
data_to_save = pickle.dumps(data)
data_stream = io.BytesIO(data_to_save)
data_stream.seek(0)
self.minioClient.put_object(
bucket_name=bucket_name, object_name=object_name,
data=data_stream, length=len(data_to_save))
logger.debug(
"Saved into bucket '%s' with name '%s': %s", bucket_name, object_name, data)
except Exception as err:
logger.error(err)
def get_project_object(self, project_id, object_name, using_json=False):
if self.minioClient is None:
return {}
try:
obj = self.minioClient.get_object(
bucket_name=project_id, object_name=object_name)
return json.loads(obj.data) if using_json else pickle.loads(obj.data)
except Exception:
return {}
def does_object_exists(self, project_id, object_name):
try:
self.minioClient.get_object(
bucket_name=project_id, object_name=object_name)
return True
except Exception:
return False
def get_folder_objects(self, project_id, folder):
object_names = []
for obj in self.minioClient.list_objects(project_id, prefix=folder):
object_names.append(obj.object_name)
return object_names
def remove_folder_objects(self, project_id, folder):
for obj in self.minioClient.list_objects(project_id, prefix=folder):
self.minioClient.remove_object(
bucket_name=project_id, object_name=obj.object_name)
```
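A minimal sketch of how the client above might be used; the configuration values are placeholders, and the keys mirror those read in __init__:
```python
# Illustrative only; host and credentials are placeholders.
from commons.object_saving.minio_client import MinioClient

app_config = {
    "minioHost": "localhost:9000",
    "minioAccessKey": "minio",
    "minioSecretKey": "minio123",
    "minioRegion": "us-east-1",
}
client = MinioClient(app_config)

client.put_project_object({"threshold": 0.8}, "project-123", "settings.json", using_json=True)
settings = client.get_project_object("project-123", "settings.json", using_json=True)
```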
#### File: service-auto-analyzer/commons/similarity_calculator.py
```python
from utils import utils
from scipy import spatial
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
class SimilarityCalculator:
def __init__(self, config, weighted_similarity_calculator=None):
self.weighted_similarity_calculator = weighted_similarity_calculator
self.config = config
self.similarity_dict = {}
self.object_id_weights = {}
self.fields_mapping_for_weighting = {
"message": ["detected_message", "stacktrace"],
"message_without_params_extended": [
"detected_message_without_params_extended", "stacktrace_extended"],
"message_extended": ["detected_message_extended", "stacktrace_extended"]
}
self.artificial_columns = ["namespaces_stacktrace"]
def find_similarity(self, all_results, fields):
for field in fields:
if field in self.similarity_dict:
continue
self.similarity_dict[field] = {}
log_field_ids = {}
index_in_message_array = 0
count_vector_matrix = None
all_messages = []
all_messages_needs_reweighting = []
needs_reweighting_wc = False
for log, res in all_results:
for obj in [log] + res["hits"]["hits"]:
if obj["_id"] not in log_field_ids:
if field not in self.artificial_columns and not obj["_source"][field].strip():
log_field_ids[obj["_id"]] = -1
else:
text = []
needs_reweighting = 0
if self.config["number_of_log_lines"] == -1 and\
field in self.fields_mapping_for_weighting:
fields_to_use = self.fields_mapping_for_weighting[field]
text = self.weighted_similarity_calculator.message_to_array(
obj["_source"][fields_to_use[0]],
obj["_source"][fields_to_use[1]])
elif field == "namespaces_stacktrace":
gathered_lines = []
weights = []
for line in obj["_source"]["stacktrace"].split("\n"):
line_words = utils.split_words(
line,
min_word_length=self.config["min_word_length"])
for word in line_words:
part_of_namespace = ".".join(word.split(".")[:2])
if part_of_namespace in self.config["chosen_namespaces"]:
gathered_lines.append(" ".join(line_words))
weights.append(
self.config["chosen_namespaces"][part_of_namespace])
if obj["_id"] == log["_id"] and len(gathered_lines):
text = gathered_lines
self.object_id_weights[obj["_id"]] = weights
else:
text = []
for line in obj["_source"]["stacktrace"].split("\n"):
text.append(" ".join(utils.split_words(
utils.clean_from_brackets(line),
min_word_length=self.config["min_word_length"])))
text = utils.filter_empty_lines(text)
self.object_id_weights[obj["_id"]] = [1] * len(text)
elif field.startswith("stacktrace"):
if utils.does_stacktrace_need_words_reweighting(obj["_source"][field]):
needs_reweighting = 1
text = self.weighted_similarity_calculator.message_to_array(
"", obj["_source"][field])
else:
text = utils.filter_empty_lines([" ".join(utils.split_words(
obj["_source"][field],
min_word_length=self.config["min_word_length"]))])
if not text:
log_field_ids[obj["_id"]] = -1
else:
all_messages.extend(text)
all_messages_needs_reweighting.append(needs_reweighting)
log_field_ids[obj["_id"]] = [index_in_message_array,
len(all_messages) - 1]
index_in_message_array += len(text)
if all_messages:
needs_reweighting_wc = all_messages_needs_reweighting and\
sum(all_messages_needs_reweighting) == len(all_messages_needs_reweighting)
vectorizer = CountVectorizer(
binary=not needs_reweighting_wc,
analyzer="word", token_pattern="[^ ]+")
count_vector_matrix = np.asarray(vectorizer.fit_transform(all_messages).toarray())
for log, res in all_results:
sim_dict = self._calculate_field_similarity(
log, res, log_field_ids, count_vector_matrix, needs_reweighting_wc, field)
for key in sim_dict:
self.similarity_dict[field][key] = sim_dict[key]
def reweight_words_weights_by_summing(self, count_vector_matrix):
count_vector_matrix_weighted = np.zeros_like(count_vector_matrix, dtype=float)
whole_sum_vector = np.sum(count_vector_matrix, axis=0)
for i in range(len(count_vector_matrix)):
for j in range(len(count_vector_matrix[i])):
if whole_sum_vector[j] > 1 and count_vector_matrix[i][j] > 0:
count_vector_matrix_weighted[i][j] = max(0.1, 1 - whole_sum_vector[j] * 0.2)
else:
count_vector_matrix_weighted[i][j] = count_vector_matrix[i][j]
return count_vector_matrix_weighted
def multiply_vectors_by_weight(self, rows, weights):
return np.dot(np.reshape(weights, [-1]), rows)
def normalize_weights(self, weights):
normalized_weights = np.asarray(weights) / np.min(weights)
return np.clip(normalized_weights, a_min=1.0, a_max=3.0)
def _calculate_field_similarity(
self, log, res, log_field_ids, count_vector_matrix, needs_reweighting_wc, field):
all_results_similarity = {}
for obj in res["hits"]["hits"]:
group_id = (obj["_id"], log["_id"])
index_query_message = log_field_ids[log["_id"]]
index_log_message = log_field_ids[obj["_id"]]
if (isinstance(index_query_message, int) and index_query_message < 0) and\
(isinstance(index_log_message, int) and index_log_message < 0):
all_results_similarity[group_id] = {"similarity": 1.0, "both_empty": True}
elif (isinstance(index_query_message, int) and index_query_message < 0) or\
(isinstance(index_log_message, int) and index_log_message < 0):
all_results_similarity[group_id] = {"similarity": 0.0, "both_empty": False}
else:
query_vector = count_vector_matrix[index_query_message[0]:index_query_message[1] + 1]
log_vector = count_vector_matrix[index_log_message[0]:index_log_message[1] + 1]
if field == "namespaces_stacktrace":
query_vector = self.multiply_vectors_by_weight(
query_vector, self.normalize_weights(self.object_id_weights[log["_id"]]))
log_vector = self.multiply_vectors_by_weight(
log_vector, self.normalize_weights(self.object_id_weights[obj["_id"]]))
else:
if needs_reweighting_wc:
query_vector = self.reweight_words_weights_by_summing(query_vector)
log_vector = self.reweight_words_weights_by_summing(log_vector)
query_vector = self.weighted_similarity_calculator.weigh_data_rows(query_vector)
log_vector = self.weighted_similarity_calculator.weigh_data_rows(log_vector)
if needs_reweighting_wc:
query_vector *= 2
log_vector *= 2
similarity =\
round(1 - spatial.distance.cosine(query_vector, log_vector), 2)
all_results_similarity[group_id] = {"similarity": similarity, "both_empty": False}
return all_results_similarity
```
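The core of the class above is a binary bag-of-words cosine similarity; a stripped-down illustration of that idea (not the class API) follows:
```python
# Stripped-down illustration of the similarity computed above, outside the class.
from scipy import spatial
from sklearn.feature_extraction.text import CountVectorizer

messages = ["error connecting to database", "error connecting to db host"]
vectors = CountVectorizer(binary=True, analyzer="word",
                          token_pattern="[^ ]+").fit_transform(messages).toarray()
similarity = round(1 - spatial.distance.cosine(vectors[0], vectors[1]), 2)
```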
#### File: service-auto-analyzer/test/test_boosting_featurizer.py
```python
import unittest
import logging
import sure # noqa
from boosting_decision_making.boosting_featurizer import BoostingFeaturizer
from boosting_decision_making.suggest_boosting_featurizer import SuggestBoostingFeaturizer
from boosting_decision_making import weighted_similarity_calculator
from utils import utils
class TestBoostingFeaturizer(unittest.TestCase):
"""Tests boosting feature creation functionality"""
@utils.ignore_warnings
def setUp(self):
self.one_hit_search_rs_explained = "one_hit_search_rs_explained.json"
self.two_hits_search_rs_explained = "two_hits_search_rs_explained.json"
self.log_message = "log_message.json"
self.log_message_wo_stacktrace = "log_message_wo_stacktrace.json"
self.one_hit_search_rs_explained_wo_stacktrace =\
"one_hit_search_rs_explained_wo_stacktrace.json"
self.log_message_only_small_logs = "log_message_only_small_logs.json"
self.one_hit_search_rs_small_logs = "one_hit_search_rs_small_logs.json"
self.two_hits_search_rs_small_logs = "two_hits_search_rs_small_logs.json"
self.three_hits_search_rs_explained = "three_hits_search_rs_explained.json"
self.one_hit_search_rs_explained_wo_params = "one_hit_search_rs_explained_wo_params.json"
self.epsilon = 0.0001
model_settings = utils.read_json_file("", "model_settings.json", to_json=True)
self.weights_folder = model_settings["SIMILARITY_WEIGHTS_FOLDER"]
logging.disable(logging.CRITICAL)
@utils.ignore_warnings
def tearDown(self):
logging.disable(logging.DEBUG)
@staticmethod
@utils.ignore_warnings
def get_default_config(
filter_fields=["detected_message", "stacktrace"],
filter_fields_any=[]):
"""Get default config"""
return {
"max_query_terms": 50,
"min_should_match": 0.8,
"min_word_length": 0,
"filter_min_should_match": filter_fields,
"filter_min_should_match_any": filter_fields_any,
"number_of_log_lines": -1
}
@utils.ignore_warnings
def test_normalize_results(self):
tests = [
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(),
"result": [],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": [[{"_score": 158.08437,
"normalized_score": 1.0, }]],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.two_hits_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": [[{"_score": 158.08437,
"normalized_score": 1.0,
},
{"_score": 77.53298,
"normalized_score": 0.4904,
}, ]],
},
]
weight_log_sim = weighted_similarity_calculator.\
WeightedSimilarityCalculator(folder=self.weights_folder)
for idx, test in enumerate(tests):
with sure.ensure('Error in the test case number: {0}', idx):
_boosting_featurizer = BoostingFeaturizer(
test["elastic_results"],
test["config"],
[],
weighted_log_similarity_calculator=weight_log_sim)
_boosting_featurizer.all_results.should.have.length_of(len(test["result"]))
for i in range(len(test["result"])):
for j in range(len(test["result"][i])):
for field in test["result"][i][j]:
elastic_res = _boosting_featurizer.all_results[i][1][j]
elastic_res[field].should.equal(test["result"][i][j][field],
epsilon=self.epsilon)
@utils.ignore_warnings
def test_find_most_relevant_by_type(self):
tests = [
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {},
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {"AB001": {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(
self.log_message, to_json=True),
"score": 1.0, },
}
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.two_hits_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {"AB001": {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(
self.log_message, to_json=True),
"score": 0.6709, },
"PB001": {"mrHit": {"_score": 77.53298,
"_id": "2"},
"compared_log": utils.get_fixture(
self.log_message, to_json=True),
"score": 0.3291, },
}
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.two_hits_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {"AB001": {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(
self.log_message, to_json=True),
"score": 0.8031, },
"PB001": {"mrHit": {"_score": 77.53298,
"_id": "2"},
"compared_log": utils.get_fixture(
self.log_message, to_json=True),
"score": 0.1969, },
}
},
]
weight_log_sim = weighted_similarity_calculator.\
WeightedSimilarityCalculator(folder=self.weights_folder)
for idx, test in enumerate(tests):
with sure.ensure('Error in the test case number: {0}', idx):
_boosting_featurizer = BoostingFeaturizer(
test["elastic_results"],
test["config"],
[],
weighted_log_similarity_calculator=weight_log_sim)
scores_by_issue_type = _boosting_featurizer.find_most_relevant_by_type()
scores_by_issue_type.should.have.length_of(len(test["result"]))
for issue_type in test["result"]:
scores_by_issue_type.keys().should.contain(issue_type)
elastic_res = scores_by_issue_type[issue_type]
for field in test["result"][issue_type]:
if type(test["result"][issue_type][field]) != dict:
elastic_res[field].should.equal(test["result"][issue_type][field],
epsilon=self.epsilon)
else:
for field_dict in test["result"][issue_type][field]:
result_field_dict = test["result"][issue_type][field][field_dict]
elastic_res[field][field_dict].should.equal(result_field_dict,
epsilon=self.epsilon)
@utils.ignore_warnings
def test_filter_by_min_should_match(self):
tests = [
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[]),
"result": [],
},
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"detected_message", "stacktrace"]),
"result": [],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"detected_message", "stacktrace"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"message"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message_wo_stacktrace, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_stacktrace, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"message"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message_wo_stacktrace, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_stacktrace, to_json=True))]
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message_wo_stacktrace, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_stacktrace, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"detected_message", "stacktrace"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message_wo_stacktrace, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_stacktrace, to_json=True))]
},
{
"elastic_results": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.one_hit_search_rs_small_logs, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"detected_message", "stacktrace"]),
"result": []
},
{
"elastic_results": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.two_hits_search_rs_small_logs, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[
"detected_message", "stacktrace"]),
"result": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.two_hits_search_rs_small_logs, to_json=True))]
},
]
weight_log_sim = weighted_similarity_calculator.\
WeightedSimilarityCalculator(folder=self.weights_folder)
for idx, test in enumerate(tests):
with sure.ensure('Error in the test case number: {0}', idx):
_boosting_featurizer = BoostingFeaturizer(
test["elastic_results"],
test["config"],
[],
weighted_log_similarity_calculator=weight_log_sim)
all_results = test["elastic_results"]
for field in test["config"]["filter_min_should_match"]:
all_results = _boosting_featurizer.filter_by_min_should_match(all_results, field=field)
all_results.should.have.length_of(len(test["result"]))
for idx, (log, hits) in enumerate(all_results):
log["_id"].should.equal(test["result"][idx][0]["_id"])
for i, hit in enumerate(hits["hits"]["hits"]):
hit["_id"].should.equal(test["result"][idx][1]["hits"]["hits"][i]["_id"])
@utils.ignore_warnings
def test_find_most_relevant_by_type_for_suggests(self):
tests = [
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {},
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {1: {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(self.log_message, to_json=True),
"score": 1.0, },
}
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.two_hits_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {1: {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(self.log_message, to_json=True),
"score": 1.0, },
2: {"mrHit": {"_score": 77.53298,
"_id": "2"},
"compared_log": utils.get_fixture(self.log_message, to_json=True),
"score": 0.4905, },
}
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.two_hits_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.three_hits_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(),
"result": {1: {"mrHit": {"_score": 158.08437,
"_id": "1"},
"compared_log": utils.get_fixture(self.log_message, to_json=True),
"score": 0.9392, },
2: {"mrHit": {"_score": 168.31,
"_id": "2"},
"compared_log": utils.get_fixture(self.log_message, to_json=True),
"score": 1.0, }
}
},
]
weight_log_sim = weighted_similarity_calculator.\
WeightedSimilarityCalculator(folder=self.weights_folder)
for idx, test in enumerate(tests):
with sure.ensure('Error in the test case number: {0}', idx):
_boosting_featurizer = SuggestBoostingFeaturizer(
test["elastic_results"],
test["config"],
[],
weighted_log_similarity_calculator=weight_log_sim)
scores_by_issue_type = _boosting_featurizer.find_most_relevant_by_type()
scores_by_issue_type.should.have.length_of(len(test["result"]))
for issue_type in test["result"]:
scores_by_issue_type.keys().should.contain(issue_type)
elastic_res = scores_by_issue_type[issue_type]
for field in test["result"][issue_type]:
if type(test["result"][issue_type][field]) != dict:
elastic_res[field].should.equal(test["result"][issue_type][field],
epsilon=self.epsilon)
else:
for field_dict in test["result"][issue_type][field]:
result_field_dict = test["result"][issue_type][field][field_dict]
elastic_res[field][field_dict].should.equal(result_field_dict,
epsilon=self.epsilon)
@utils.ignore_warnings
def test_filter_by_min_should_match_any(self):
tests = [
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(filter_fields=[],
filter_fields_any=[]),
"result": [],
},
{
"elastic_results": [],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message"]),
"result": [],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message",
"detected_message_without_params_extended"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))],
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_params, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message",
"detected_message_without_params_extended"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_params, to_json=True))]
},
{
"elastic_results": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True)),
(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(
self.one_hit_search_rs_explained_wo_params, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message"]),
"result": [(utils.get_fixture(self.log_message, to_json=True),
utils.get_fixture(self.one_hit_search_rs_explained, to_json=True))]
},
{
"elastic_results": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.one_hit_search_rs_small_logs, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message",
"detected_message_without_params_extended"]),
"result": []
},
{
"elastic_results": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.two_hits_search_rs_small_logs, to_json=True))],
"config": TestBoostingFeaturizer.get_default_config(
filter_fields=[],
filter_fields_any=["detected_message",
"detected_message_without_params_extended"]),
"result": [(utils.get_fixture(self.log_message_only_small_logs, to_json=True),
utils.get_fixture(self.two_hits_search_rs_small_logs, to_json=True))]
},
]
weight_log_sim = weighted_similarity_calculator.\
WeightedSimilarityCalculator(folder=self.weights_folder)
for idx, test in enumerate(tests):
with sure.ensure('Error in the test case number: {0}', idx):
_boosting_featurizer = SuggestBoostingFeaturizer(
test["elastic_results"],
test["config"],
[],
weighted_log_similarity_calculator=weight_log_sim)
all_results = test["elastic_results"]
all_results = _boosting_featurizer.filter_by_min_should_match_any(
all_results,
fields=test["config"]["filter_min_should_match_any"])
all_results.should.have.length_of(len(test["result"]))
for idx, (log, hits) in enumerate(all_results):
log["_id"].should.equal(test["result"][idx][0]["_id"])
for i, hit in enumerate(hits["hits"]["hits"]):
hit["_id"].should.equal(test["result"][idx][1]["hits"]["hits"][i]["_id"])
``` |
{
"source": "JLUNeverMore/FAR-HO",
"score": 2
} |
#### File: FAR-HO/far_ho/hyper_gradients.py
```python
from __future__ import absolute_import, print_function, division
import sys
from collections import defaultdict, deque
import tensorflow as tf
from tensorflow.python.training import slot_creator
from tensorflow.contrib.opt import ScipyOptimizerInterface
from far_ho import utils
from far_ho.optimizer import OptimizerDict
from far_ho.utils import dot, maybe_add, reduce_all_sums
RAISE_ERROR_ON_DETACHED = False
class HyperGradient(object):
def __init__(self, name):
self._optimizer_dicts = set()
self._inner_objectives = None
self._hypergrad_dictionary = defaultdict(list) # dictionary (hyperparameter, list of hypergradients)
self._ts = None
self._initialization = None
self._iteration = None
self._state = None
self._name = name
_ERROR_NOT_OPTIMIZER_DICT = """
Looks like {} is not an `OptimizerDict`. Use optimizers in far_ho.optimizers for obtaining an OptimizerDict.
"""
_ERROR_HYPER_DETACHED = """
Hyperparameter {} is detached from this optimization dynamics.
"""
def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):
# Doesn't do anything useful here. To be overridden.
"""
Function overridden by specific methods.
:param optimizer_dict: OptimizerDict object resulting from the inner objective optimization.
:param outer_objective: A loss function for the hyperparameters (scalar tensor)
:param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the
hyperparameter collection in the current scope.
:return: list of hyperparameters involved in the computation
"""
assert isinstance(optimizer_dict, OptimizerDict), HyperGradient._ERROR_NOT_OPTIMIZER_DICT.format(optimizer_dict)
self._optimizer_dicts.add(optimizer_dict)
if hyper_list is None: # get default hyperparameters
hyper_list = utils.hyperparameters(tf.get_variable_scope().name)
return hyper_list
@property
def initialization(self):
if self._initialization is None:
self._initialization = [opt_dict.initialization for opt_dict in sorted(self._optimizer_dicts)]
return self._initialization
@property
def iteration(self):
if self._iteration is None:
self._iteration = [opt_dict.iteration for opt_dict in sorted(self._optimizer_dicts)]
return self._iteration
@property
def state(self):
for opt_dict in sorted(self._optimizer_dicts):
for v in opt_dict.state:
yield v
@property
def inner_objectives(self):
if self._inner_objectives is None:
self._inner_objectives = [opt.objective if hasattr(opt, 'objective') else tf.constant(False)
for opt in sorted(self._optimizer_dicts)]
return self._inner_objectives
@property
def ts(self):
if self._ts is None:
self._ts = tf.group(*[opt_dict.ts for opt_dict in sorted(self._optimizer_dicts)])
return self._ts
def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,
initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):
"""
Runs the inner optimization dynamics for T iterations (T_or_generator can indeed be a generator) and computes
the hypergradients in the meanwhile.
:param T_or_generator: integer or generator that should yield a step. Expresses either a total number of
iterations of the inner objective optimization dynamics, or implements a stopping
condition, or a variable number of steps.
:param inner_objective_feed_dicts: Optional feed dictionary for the inner objective
:param outer_objective_feed_dicts: Optional feed dictionary for the outer objective
(note that this is not used in ForwardHG since hypergradients are not
variables)
:param initializer_feed_dict: Optional feed dictionary for the initialization
:param global_step: Optional global step; when given, it is evaluated and passed to the feed dictionary callables
:param session: Optional session (otherwise will take the default session)
:param online: Performs the computation of the hypergradient in the online (or "real time") mode. Note that
`ReverseHG` and `ForwardHG` behave differently.
:param callback: callback function for the forward optimization
"""
raise NotImplementedError()
def hgrads_hvars(self, hyper_list=None, aggregation_fn=None, process_fn=None):
"""
Method for getting hypergradient and hyperparameters as required by apply_gradient methods from tensorflow
optimizers.
:param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the
hyperparameter collection in the current scope.
:param aggregation_fn: Optional operation to aggregate multiple hypergradients (for the same hyperparameter),
by default reduce_mean
:param process_fn: Optional operation like clipping to be applied.
:return:
"""
if hyper_list is None:
hyper_list = utils.hyperparameters(tf.get_variable_scope().name)
assert all([h in self._hypergrad_dictionary for h in hyper_list]), 'FINAL ERROR!'
if aggregation_fn is None:
aggregation_fn = lambda hgrad_list: tf.reduce_mean(hgrad_list, axis=0)
def _aggregate_process_manage_collection(_hg_lst):
if len(_hg_lst) == 1: # avoid useless operations...
aggr = _hg_lst[0]
else:
with tf.name_scope(_hg_lst[0].op.name):
aggr = aggregation_fn(_hg_lst) if len(_hg_lst) > 1 else _hg_lst[0]
if process_fn is not None:
with tf.name_scope('process_gradients'):
aggr = process_fn(aggr)
tf.add_to_collection(utils.GraphKeys.HYPERGRADIENTS, aggr)
return aggr
return [(_aggregate_process_manage_collection(self._hypergrad_dictionary[h]),
h) for h in hyper_list]
@property
def name(self):
return self._name
@staticmethod
def need_scalar_hyperparameters():
return False
# noinspection PyMethodMayBeStatic
def _make_callback(self):
"""
Template for callbacks
"""
values = []
# noinspection PyUnusedLocal
def _callback(t, feed_dict, session):
values.append(0)  # these should not depend on any feed dictionary
return values, _callback
def __str__(self):
return self._name
class ReverseHG(HyperGradient):
def __init__(self, history=None, name='ReverseHG'):
super(ReverseHG, self).__init__(name)
self._alpha_iter = tf.no_op()
self._reverse_initializer = tf.no_op()
self._history = history if history is not None else []
@staticmethod
def _truncated(max_items, name='TruncatedReverseHG'):
"""
Utility method to initialize truncated reverse HG (not necessarily online)
:param max_items: Maximum number of iterations that will be stored
:param name: a name for the operations and variables that will be created
:return: ReverseHG object
"""
return ReverseHG(deque(maxlen=max_items + 1), name=name)
# noinspection SpellCheckingInspection
def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):
"""
Function that adds to the computational graph all the operations needed for computing
the hypergradients in a "dynamic" way, without unrolling the entire optimization graph.
The resulting computation, while being roughly 2x more expensive than unrolling the
optimization dynamics, requires much less (GPU) memory and is more flexible, allowing
one to set a termination condition on the parameter optimization routine.
:param optimizer_dict: OptimizerDict object resulting from the inner objective optimization.
:param outer_objective: A loss function for the hyperparameters (scalar tensor)
:param hyper_list: Optional list of hyperparameters to consider. If not provided will get all variables in the
hyperparameter collection in the current scope.
:return: list of hyperparameters involved in the computation
"""
hyper_list = super(ReverseHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)
# derivative of outer objective w.r.t. state
with tf.variable_scope(outer_objective.op.name):  # for some reason without this there is a catastrophic
# failure...
doo_ds = tf.gradients(outer_objective, list(optimizer_dict.state))
alphas = self._create_lagrangian_multipliers(optimizer_dict, doo_ds)
alpha_vec = utils.vectorize_all(alphas)
dyn_vec = utils.vectorize_all(list(optimizer_dict.dynamics))
lag_phi_t = utils.dot(alpha_vec, dyn_vec, name='iter_wise_lagrangian_part1')
# TODO outer_objective might be a list... handle this case
# iterative computation of hypergradients
doo_dypers = tf.gradients(outer_objective, hyper_list) # (direct) derivative of outer objective w.r.t. hyp.
alpha_dot_B = tf.gradients(lag_phi_t, hyper_list)
# check that optimizer_dict has initial ops (phi_0)
if optimizer_dict.init_dynamics is not None:
lag_phi0 = utils.dot(alpha_vec, utils.vectorize_all([d for (s, d) in optimizer_dict.init_dynamics]))
alpha_dot_B0 = tf.gradients(lag_phi0, hyper_list)
else:
alpha_dot_B0 = [None] * len(hyper_list)
# here, if some of this is None it may mean that the hyperparameter appears only inside phi_0: check that and
# if it is not the case raise error...
hyper_grad_vars, hyper_grad_step = [], tf.no_op()
for dl_dh, doo_dh, a_d_b0, hyper in zip(alpha_dot_B, doo_dypers, alpha_dot_B0, hyper_list):
assert dl_dh is not None or a_d_b0 is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)
hgv = None
if dl_dh is not None: # "normal hyperparameter"
hgv = self._create_hypergradient(hyper, doo_dh)
hyper_grad_step = tf.group(hyper_grad_step, hgv.assign_add(dl_dh))
if a_d_b0 is not None:
hgv = hgv + a_d_b0 if hgv is not None else a_d_b0
# here hyper_grad_step has nothing to do...
hyper_grad_vars.append(hgv) # save these...
with tf.control_dependencies([hyper_grad_step]):  # first update the hypergradient, then the alphas.
_alpha_iter = tf.group(*[alpha.assign(dl_ds) for alpha, dl_ds
in zip(alphas, tf.gradients(lag_phi_t, list(optimizer_dict.state)))])
self._alpha_iter = tf.group(self._alpha_iter, _alpha_iter)  # put all the backward iterations together
[self._hypergrad_dictionary[h].append(hg) for h, hg in zip(hyper_list, hyper_grad_vars)]
self._reverse_initializer = tf.group(self._reverse_initializer,
tf.variables_initializer(alphas),
tf.variables_initializer([h for h in hyper_grad_vars
if hasattr(h, 'initializer')])) # some ->
# hypergradients (those coming from the initial dynamics) might be just tensors and not variables...
return hyper_list
@staticmethod
def _create_lagrangian_multipliers(optimizer_dict, doo_ds):
lag_mul = [slot_creator.create_slot(v.initialized_value(), utils.val_or_zero(der, v), 'alpha') for v, der
in zip(optimizer_dict.state, doo_ds)]
[tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm) for lm in lag_mul]
utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, *lag_mul)
# this prevents the 'automatic' initialization with tf.global_variables_initializer.
return lag_mul
@staticmethod
def _create_hypergradient(hyper, doo_dhypers):
"""
Creates one hyper-gradient as a variable. doo_dhypers: initialization, that is the derivative of
the outer objective w.r.t this hyper
"""
hgs = slot_creator.create_slot(hyper, utils.val_or_zero(doo_dhypers, hyper), 'hypergradient')
utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, hgs)
return hgs
def _state_feed_dict_generator(self, history, T_or_generator):
for t, his in zip(utils.solve_int_or_generator(T_or_generator), history):
yield t, utils.merge_dicts(
*[od.state_feed_dict(h) for od, h in zip(sorted(self._optimizer_dicts), his)]
)
def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,
initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):
# callback may be a pair, the first for the forward pass, the second for the reverse pass
callback = utils.as_tuple_or_list(callback)
# same thing for T
T_or_generator = utils.as_tuple_or_list(T_or_generator)
ss = session or tf.get_default_session()
self._history.clear()
if not online:
_fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))
self._save_history(ss.run(self.initialization, feed_dict=_fd))
# else: # not totally clear if i should add this
# self._save_history(ss.run(list(self.state)))
T = 0 # this is useful if T_or_generator is indeed a generator...
for t in utils.solve_int_or_generator(T_or_generator[0]):
# nonlocal t  # with nonlocal the variable T would not be necessary... but nonlocal is not compatible with 2.7
_fd = utils.maybe_call(inner_objective_feed_dicts, t)
self._save_history(ss.run(self.iteration, feed_dict=_fd))
T = t
utils.maybe_call(callback[0], t, _fd, ss) # callback
# initialization of support variables (supports stochastic evaluation of outer objective via global_step ->
# variable)
# TODO (maybe tf bug or oddity) for some strange reason, if some variable's initializer depends on
# a placeholder, then the initializer of alpha SEEMS TO DEPEND ALSO ON THAT placeholder,
# as if the primary variable should be reinitialized as well, but, I've checked, the primary variable is NOT
# actually reinitialized. This doesn't make sense since the primary variable is already initialized
# and Tensorflow seems not to care... should maybe look better into this issue
reverse_init_fd = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))
# now also adding the initializer_feed_dict because of the tf quirk above...
maybe_init_fd = utils.maybe_call(initializer_feed_dict, utils.maybe_eval(global_step, ss))
reverse_init_fd = utils.merge_dicts(reverse_init_fd, maybe_init_fd)
ss.run(self._reverse_initializer, feed_dict=reverse_init_fd)
del self._history[-1] # do not consider last point
for pt, state_feed_dict in self._state_feed_dict_generator(reversed(self._history), T_or_generator[-1]):
# this should be fine also for truncated reverse... but check again the index t
t = T - pt - 1 # if T is int then len(self.history) is T + 1 and this numerator
# shall start at T-1
_fd = utils.merge_dicts(state_feed_dict, utils.maybe_call(inner_objective_feed_dicts, t))
ss.run(self._alpha_iter, _fd)
if len(callback) == 2: utils.maybe_call(callback[1], t, _fd, ss)
def _save_history(self, weights):
self._history.append(weights)
def hypergrad_callback(self, hyperparameter=None, flatten=True):
"""callback that records the partial hypergradients on the reverse pass"""
values = []
gs = list(self._hypergrad_dictionary.values()) if hyperparameter is None else \
self._hypergrad_dictionary[hyperparameter]
if flatten: gs = utils.vectorize_all(gs)
# noinspection PyUnusedLocal
def _callback(_, __, ss):
values.append(ss.run(gs))  # these should not depend on any feed dictionary
return values, _callback
class ReverseHg(ReverseHG):
def __init__(self, history=None):
print('WARNING, DEPRECATED: please use the class ReverseHG', file=sys.stderr)
super(ReverseHg, self).__init__(history)
class ForwardHG(HyperGradient):
def __init__(self, name='ForwardHG'):
super(ForwardHG, self).__init__(name)
self._forward_initializer = tf.no_op()
self._zs = {} # hyperparameter - zs dictionary
self._z_iter = tf.no_op()
self._iteration = None
self.A_dot_zs = {}
_HYPER_RANK_ERROR_MESSAGE = """
ForwardHG: Only scalar hyperparameters accepted.\n
Hyperparameter tensor {} has rank {}.\n
Use keyword argument far_ho.get_hyperparameter(..., scalar=True) on hyperparameter creation.
"""
def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):
hyper_list = super(ForwardHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)
# scalar_hyper_list
with tf.variable_scope(outer_objective.op.name):
# dynamics_vec = vectorize_all(optimizer_dict.dynamics) # in the new implementation there's no need of
# vectorizing... it might be more efficient since it's better to avoid too many reshaping operations...
d_oo_d_state = tf.gradients(outer_objective, list(optimizer_dict.state))
with tf.name_scope('DUMMY'): # variables to compute forward propagation
# TODO avoid this computation if optimizer_dict has already been seen.
aux_vs = [tf.zeros_like(v) for v in optimizer_dict.state]
dynamics_dot_aux_v = reduce_all_sums(list(optimizer_dict.dynamics), aux_vs)
der_dynamics_dot_aux_v = tf.gradients(dynamics_dot_aux_v, list(optimizer_dict.state))
# this is a list of jacobians times aux_vs that have the same dimension of states variables.
init_dynamics_dot_aux_v = None
if optimizer_dict.init_dynamics:
# init_dynamics_dot_aux_v = dot(vectorize_all(optimizer_dict.init_dynamics), aux_v_vec) # old impl
init_dynamics_dot_aux_v = reduce_all_sums(
optimizer_dict.init_dynamics, aux_vs)
for hyp in hyper_list:
assert hyp.shape.ndims == 0, ForwardHG._HYPER_RANK_ERROR_MESSAGE.format(hyp, hyp.shape.ndims)
d_init_dyn_d_hyp = None if init_dynamics_dot_aux_v is None else \
tf.gradients(init_dynamics_dot_aux_v, hyp)[0]
d_dyn_d_hyp = tf.gradients(dynamics_dot_aux_v, hyp)[0]
d_oo_d_hyp = tf.gradients(outer_objective, hyp)[0]
# ------------------------------------------------------------
# check detached hyperparameters (for which hypergradient would be always null)
hyper_ok = d_init_dyn_d_hyp is not None or d_dyn_d_hyp is not None or d_oo_d_hyp is not None
if RAISE_ERROR_ON_DETACHED:
# try:
assert hyper_ok, HyperGradient._ERROR_HYPER_DETACHED.format(hyp)
# ex
else:
if not hyper_ok:
print(HyperGradient._ERROR_HYPER_DETACHED.format(hyp), file=sys.stderr)
hyper_list.remove(hyp)
# -------------------------------------------------------------
# UPDATE OF TOTAL DERIVATIVE OF STATE W.R.T. HYPERPARAMETER
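# Interpretive note (not part of the original code): zs below plays the role of
# Z_t = ds_t/dlambda, propagated forward as Z_{t+1} = A_t Z_t + B_t with
# A_t = dPhi_t/ds_t and B_t = dPhi_t/dlambda; the hypergradient is then
# dE/ds_T . Z_T + (direct) dE/dlambda, which is what d_E_T and hg compute below.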
zs = ForwardHG._create_zs(
optimizer_dict, hyp, None if d_init_dyn_d_hyp is None else tf.gradients(d_init_dyn_d_hyp, aux_vs)
) # this is one z for each variable
self._zs[hyp] = zs # store a reference for the total derivatives for easy access
Bs = tf.gradients(d_dyn_d_hyp, aux_vs)
A_dot_zs = tf.gradients(reduce_all_sums(der_dynamics_dot_aux_v, zs), aux_vs)
self.A_dot_zs[hyp] = A_dot_zs
_z_iter = tf.group(*[
z.assign(maybe_add(A_dot_z, B)) for z, A_dot_z, B
in zip(zs, A_dot_zs, Bs)
])
self._z_iter = tf.group(self._z_iter, _z_iter)
# -- HYPERGRADIENT -----
d_E_T = [dot(d_oo_d_s, z) for d_oo_d_s, z in zip(d_oo_d_state, zs)
if d_oo_d_s is not None and z is not None] # list of dot products
hg = maybe_add(tf.reduce_sum(d_E_T), d_oo_d_hyp) # sum the partial dot products and possibly ->
# adds the ''direct derivative'' term d(E( . , \lambda))/d \lambda
self._hypergrad_dictionary[hyp].append(hg)
self._forward_initializer = tf.group(self._forward_initializer,
tf.variables_initializer(zs))
return hyper_list
@staticmethod
def _create_zs(optimizer_dict, hyper, d_init_dynamics_d_hyper):
if d_init_dynamics_d_hyper is None: d_init_dynamics_d_hyper = [None] * len(optimizer_dict)
with tf.variable_scope('Z'):
z = [slot_creator.create_slot(v, utils.val_or_zero(der, v), hyper.op.name) for v, der
in zip(optimizer_dict.state, d_init_dynamics_d_hyper)]
[tf.add_to_collection(utils.GraphKeys.ZS, lm) for lm in z]
# in this case it is completely fine to keep zs into the global variable...
return z
def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,
initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):
ss = session or tf.get_default_session()
if not online:
self._run_batch_initialization(ss, utils.maybe_call(
initializer_feed_dict, utils.maybe_eval(global_step, ss)))
for t in utils.solve_int_or_generator(T_or_generator):
_fd = utils.maybe_call(inner_objective_feed_dicts, t)
self._forward_step(ss, _fd)
utils.maybe_call(callback, t, _fd, ss)
def _forward_step(self, ss, _fd):
ss.run(self._z_iter, _fd)
ss.run(self.iteration, _fd)
def _run_batch_initialization(self, ss, fd):
ss.run(self.initialization, feed_dict=fd)
ss.run(self._forward_initializer, feed_dict=fd)
@staticmethod
def need_scalar_hyperparameters():
return True
@property
def w_dots(self):
# if hyper: return self._zs[hyper]
return [{h: self._zs[h][k] for h in self._zs} for k, _ in enumerate(self.state)]
def z_callback(self, hyperparameter=None, flatten=True):
zs_values = []
zs = list(self._zs.values()) if hyperparameter is None else self._zs[hyperparameter]
if flatten: zs = utils.vectorize_all(zs)
# noinspection PyUnusedLocal
def _callback(_, __, ss):
zs_values.append(ss.run(zs))  # these should not depend on any feed dictionary
return zs_values, _callback
class ImplicitHG(HyperGradient):
"""
Implementation follows Pedregosa's algorithm HOAG
"""
def __init__(self, linear_system_solver_gen=None, tolerance=None, name='ImplicitHG'):
super(ImplicitHG, self).__init__(name)
if linear_system_solver_gen is None:
linear_system_solver_gen = lambda _obj, var_list, _tolerance: ScipyOptimizerInterface(
_obj, var_list=var_list, options={'maxiter': 100}, method='cg', tol=_tolerance)
self.linear_system_solver = linear_system_solver_gen
if tolerance is None:
tolerance = lambda _k: 0.1 * (0.9 ** _k)
self.tolerance = tolerance
self._lin_sys = []
self._qs = []
def compute_gradients(self, outer_objective, optimizer_dict, hyper_list=None):
hyper_list = super(ImplicitHG, self).compute_gradients(outer_objective, optimizer_dict, hyper_list)
state = list(optimizer_dict.state)
with tf.variable_scope(outer_objective.op.name):
g1 = utils.vectorize_all(tf.gradients(outer_objective, state))
grads_inner_obj_vec = utils.vectorize_all(tf.gradients(optimizer_dict.objective, state))
q = self._create_q(g1)
obj = tf.norm(
utils.vectorize_all(tf.gradients(utils.dot(grads_inner_obj_vec, q), state)) - g1
) # using the norm seems to produce better results than the squared norm...
# (even though is more costly)
self._lin_sys.append(lambda _tolerance: self.linear_system_solver(obj, [q], _tolerance))
g2s = tf.gradients(outer_objective, hyper_list)
cross_ders = tf.gradients(utils.dot(grads_inner_obj_vec, q), hyper_list)
for g2, cd, hyper in zip(g2s, cross_ders, hyper_list):
assert g2 is not None or cd is not None, HyperGradient._ERROR_HYPER_DETACHED.format(hyper)
hg = utils.maybe_add(-cd, g2)
if hg is None: # this would be strange...
print(('WARNING, outer objective is only directly dependent on hyperparameter {}. '
'Direct optimization would be better!').format(hyper))
hg = g2
self._hypergrad_dictionary[hyper].append(hg)
return hyper_list
def _create_q(self, d_oo_d_state):
self._qs.append(slot_creator.create_zeros_slot(d_oo_d_state, 'q'))
return self._qs[-1]
def run(self, T_or_generator, inner_objective_feed_dicts=None, outer_objective_feed_dicts=None,
initializer_feed_dict=None, global_step=None, session=None, online=False, callback=None):
ss = session or tf.get_default_session()
inner_objective_feed_dicts = utils.as_tuple_or_list(inner_objective_feed_dicts)
if not online:
self._run_batch_initialization(ss, utils.maybe_call(
initializer_feed_dict, utils.maybe_eval(global_step, ss)))
for t in utils.solve_int_or_generator(T_or_generator):
_fd = utils.maybe_call(inner_objective_feed_dicts[0], t)
self._forward_step(ss, _fd)
utils.maybe_call(callback, t, _fd, ss)
# end of optimization. Solve linear systems.
tol_val = utils.maybe_call(self.tolerance, utils.maybe_eval(global_step, ss)) # decreasing tolerance (seq.)
# feed dictionaries (could...in theory, implement stochastic solution of this linear system...)
_fd = utils.maybe_call(inner_objective_feed_dicts[-1], -1)
_fd_outer = utils.maybe_call(outer_objective_feed_dicts, utils.maybe_eval(global_step, ss))
_fd = utils.merge_dicts(_fd, _fd_outer)
for lin_sys in self._lin_sys:
lin_sys(tol_val).minimize(ss, _fd) # implicitly warm restarts with previously found q
def _forward_step(self, ss, _fd):
ss.run(self.iteration, _fd)
def _run_batch_initialization(self, ss, fd):
ss.run(self.initialization, feed_dict=fd)
```
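For orientation, the sketch below shows how the pieces above are meant to fit together: an `OptimizerDict` produced by an inner optimizer from `far_ho.optimizer`, `compute_gradients` to wire the hypergradient graph, `hgrads_hvars` to feed a plain TensorFlow optimizer, and `run` to execute the forward and reverse sweeps. It is a minimal sketch, not code from the repository: the toy losses are invented, and the `far.GradientDescentOptimizer(...).minimize(...)` call returning an `OptimizerDict` is an assumption based on the error message in `compute_gradients` above.
```python
# Minimal usage sketch (assumptions noted above), not code from the FAR-HO repository.
import tensorflow as tf
import far_ho as far
from far_ho.hyper_gradients import ReverseHG

w = tf.get_variable('w', initializer=tf.zeros([10]))        # inner (model) variable
lmbd = far.get_hyperparameter('lmbd', 0.1, scalar=True)     # hyperparameter (see error message above)
inner_loss = tf.reduce_sum(tf.square(w - lmbd))             # toy training loss
outer_loss = tf.reduce_sum(tf.square(w - 1.0))              # toy validation loss

# Assumed API: an optimizer from far_ho.optimizer whose minimize() returns an OptimizerDict.
opt_dict = far.GradientDescentOptimizer(0.1).minimize(inner_loss)

hg = ReverseHG()                                            # or ForwardHG() / ImplicitHG()
hypers = hg.compute_gradients(outer_loss, opt_dict, hyper_list=[lmbd])
outer_step = tf.train.AdamOptimizer(1e-2).apply_gradients(hg.hgrads_hvars(hyper_list=hypers))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):          # outer iterations
        hg.run(100)              # 100 inner steps, then the reverse sweep
        sess.run(outer_step)     # update the hyperparameter
```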
{
"source": "j-luo93/lingvo",
"score": 2
}
#### File: core/ops/generic_input_op_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import os
import numpy as np
from six.moves import range
import tensorflow as tf
from lingvo.core.ops import py_x_ops
class GenericInputOpTest(tf.test.TestCase):
def testBasic(self):
# Generate a test file w/ 100 records.
tmp = os.path.join(tf.test.get_temp_dir(), 'basic')
with tf.python_io.TFRecordWriter(tmp) as w:
for i in range(100):
w.write('%08d' % i)
g = tf.Graph()
with g.as_default():
# A simple string parsing routine. Just convert a string to a
# number.
def str_to_num(s):
return np.array(float(s), dtype=np.float32)
# A record processor written in TF graph.
def _process(record):
num, = tf.py_func(str_to_num, [record], [tf.float32])
return record, tf.stack([num, tf.square(num)]), tf.to_int32(1)
# Samples random records from the data files and processes them
# to generate batches.
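# (Note) the third value returned by _process is the bucket key: an example goes into
# the first bucket whose bucket_upper_bound is >= its key, and a batch is emitted once
# bucket_batch_limit examples have accumulated there -- hence the fixed batch size of 8
# asserted below.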
strs, vals = py_x_ops.generic_input(
file_pattern='tfrecord:' + tmp,
file_random_seed=0,
file_buffer_size=32,
file_parallelism=4,
bucket_upper_bound=[1],
bucket_batch_limit=[8],
processor=_process)
with self.session(graph=g) as sess:
record_seen = set()
for i in range(100):
ans_strs, ans_vals = sess.run([strs, vals])
for s in ans_strs:
record_seen.add(s)
self.assertEqual(ans_strs.shape, (8,))
self.assertEqual(ans_vals.shape, (8, 2))
self.assertAllEqual(np.square(ans_vals[:, 0]), ans_vals[:, 1])
for i in range(100):
self.assertTrue('%08d' % i in record_seen)
def testPadding(self):
# Generate a test file w/ 50 records of different lengths.
tmp = os.path.join(tf.test.get_temp_dir(), 'basic')
with tf.python_io.TFRecordWriter(tmp) as w:
for n in range(1, 50):
w.write(pickle.dumps(np.full([n, 3, 3], n, np.int32)))
g = tf.Graph()
with g.as_default():
# A record processor written in TF graph.
def _process(record):
num = tf.py_func(lambda x: pickle.loads(x), [record], tf.int32)
bucket_key = tf.shape(num)[0]
return num, tf.transpose(num, [1, 0, 2]), bucket_key
# Samples random records from the data files and processes them
# to generate batches.
vals_t, transposed_vals_t = py_x_ops.generic_input(
file_pattern='tfrecord:' + tmp,
file_random_seed=0,
file_buffer_size=32,
file_parallelism=4,
bucket_upper_bound=[10],
bucket_batch_limit=[8],
processor=_process,
dynamic_padding_dimensions=[0, 1],
dynamic_padding_constants=[0] * 2)
with self.session(graph=g) as sess:
for i in range(10):
vals, transposed_vals = sess.run([vals_t, transposed_vals_t])
print(vals, np.transpose(transposed_vals, [0, 2, 1, 3]))
self.assertEqual(vals.shape[0], 8)
self.assertEqual(vals.shape[2], 3)
self.assertEqual(vals.shape[3], 3)
largest = np.amax(vals)
self.assertLessEqual(largest, 10)
self.assertEqual(vals.shape[1], largest)
for j in range(8):
n = vals[j, 0, 0, 0]
self.assertTrue(np.all(vals[j, :n] == n))
self.assertTrue(np.all(vals[j, n:] == 0))
self.assertAllEqual(vals, np.transpose(transposed_vals, [0, 2, 1, 3]))
if __name__ == '__main__':
tf.test.main()
```
#### File: tasks/asr/encoder.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.python.ops import inplace_ops
from lingvo.core import base_encoder
from lingvo.core import base_layer
from lingvo.core import layers
from lingvo.core import plot
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import rnn_layers
from lingvo.core import summary_utils
from lingvo.core import model_helper
ConvLSTMBlock = collections.namedtuple('ConvLSTMBlock', ('rnn', 'cnn'))
class AsrEncoder(base_encoder.BaseEncoder):
"""Speech encoder version 1."""
@classmethod
def Params(cls):
"""Configs for AsrEncoder."""
p = super(AsrEncoder, cls).Params()
p.Define('lstm_tpl', rnn_cell.LSTMCellSimple.Params(),
'Configs template for the RNN layer.')
p.Define('cnn_tpl', layers.ConvLayer.Params(),
'Configs template for the conv layer.')
p.Define('proj_tpl', layers.ProjectionLayer.Params(),
'Configs template for the projection layer.')
p.Define(
'highway_skip', False,
'If set, residual connections from different layers are gated. '
'Will only be used if residual_start is enabled.')
p.Define('highway_skip_tpl', layers.HighwaySkipLayer.Params(),
'Configs template for the highway skip layer.')
p.Define('conv_lstm_tpl', rnn_cell.ConvLSTMCell.Params(),
'Configs template for ConvLSTMCell.')
p.Define(
'after_conv_lstm_cnn_tpl', layers.ConvLayer.Params(),
'Configs template for the cnn layer that immediately follows the'
' convlstm layer.')
p.Define('conv_filter_shapes', None, 'Filter shapes for each conv layer.')
p.Define('conv_filter_strides', None, 'Filter strides for each conv layer.')
p.Define('input_shape', [None, None, None, None],
'Shape of the input. This should be a TensorShape with rank 4.')
p.Define('lstm_cell_size', 256, 'LSTM cell size for the RNN layer.')
p.Define('num_cnn_layers', 2, 'Number of conv layers to create.')
p.Define('num_conv_lstm_layers', 1, 'Number of conv lstm layers to create.')
p.Define('num_lstm_layers', 3, 'Number of rnn layers to create')
p.Define('project_lstm_output', True,
'Include projection layer after each encoder LSTM layer.')
p.Define('pad_steps', 6,
'Extra zero-padded timesteps to add to the input sequence. ')
p.Define(
'residual_start', 0, 'Start residual connections from this lstm layer. '
'Disabled if 0 or greater than num_lstm_layers.')
p.Define('residual_stride', 1,
'Number of lstm layers to skip per residual connection.')
p.Define(
'bidi_rnn_type', 'func', 'Options: func, native_cudnn. '
'func: BidirectionalFRNN, '
'native_cudnn: BidirectionalNativeCuDNNLSTM.')
# TODO(yonghui): Maybe move those configs to a separate file.
# Set some reasonable default values.
#
# NOTE(yonghui): The default config below assumes the following encoder
# architecture:
#
# cnn/batch-norm/relu ->
# cnn/batch-norm/relu ->
# bidirectional conv-lstm ->
# cnn/batch-norm/relu
# bidirectional lstm ->
# projection/batch-norm/relu ->
# bidirectional lstm ->
# projection/batch-norm/relu ->
# bidirectional lstm
#
# Default config for the rnn layer.
p.lstm_tpl.params_init = py_utils.WeightInit.Uniform(0.1)
# Default config for the convolution layer.
p.input_shape = [None, None, 80, 3]
p.conv_filter_shapes = [(3, 3, 3, 32), (3, 3, 32, 32)]
p.conv_filter_strides = [(2, 2), (2, 2)]
p.cnn_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
# TODO(yonghui): Disable variational noise logic.
# NOTE(yonghui): Fortunately, variational noise logic is currently not
# implemented for ConvLayer yet (as of sep 22, 2016).
# Default config for the projection layer.
p.proj_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
# TODO(yonghui): Disable variational noise logic.
# NOTE(yonghui): Fortunately, variational noise logic is currently not
# implemented for ProjectionLayer yet (as of sep 22, 2016).
p.conv_lstm_tpl.filter_shape = [1, 3] # height (time), width (frequency)
p.conv_lstm_tpl.inputs_shape = [None, None, None, None]
p.conv_lstm_tpl.cell_shape = [None, None, None, None]
p.conv_lstm_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
p.after_conv_lstm_cnn_tpl.filter_shape = [3, 3, None, None]
p.after_conv_lstm_cnn_tpl.params_init = (
py_utils.WeightInit.TruncatedGaussian(0.1))
p.after_conv_lstm_cnn_tpl.filter_stride = [1, 1]
return p
@base_layer.initializer
def __init__(self, params):
super(AsrEncoder, self).__init__(params)
p = self.params
assert p.packed_input is False, ('Packed inputs are not yet supported for '
'AsrEncoder.')
name = p.name
with tf.variable_scope(name):
# First create the conv layers.
assert p.num_cnn_layers == len(p.conv_filter_shapes)
assert p.num_cnn_layers == len(p.conv_filter_strides)
params_conv_layers = []
for i in range(p.num_cnn_layers):
conv_p = p.cnn_tpl.Copy()
conv_p.name = 'conv_L%d' % (i)
conv_p.filter_shape = p.conv_filter_shapes[i]
conv_p.filter_stride = p.conv_filter_strides[i]
conv_p.is_eval = p.is_eval
params_conv_layers.append(conv_p)
self.CreateChildren('conv', params_conv_layers)
conv_output_shape = tf.TensorShape(p.input_shape)
for i in range(p.num_cnn_layers):
conv_output_shape = self.conv[i].OutShape(conv_output_shape)
conv_output_shape = conv_output_shape.as_list()
assert len(conv_output_shape) == 4 # batch, height, width, channel.
params_conv_lstm_rnn = []
params_conv_lstm_cnn = []
for i in range(p.num_conv_lstm_layers):
# NOTE(yonghui): We assume that output from ConvLSTMBlock has the same
# shape as its input.
_, _, width, in_channel = conv_output_shape
f_conv_lstm_p = p.conv_lstm_tpl.Copy()
f_conv_lstm_p.name = 'f_conv_lstm_%d' % (i)
f_conv_lstm_p.inputs_shape = [None, 1, width, in_channel]
f_conv_lstm_p.cell_shape = [None, 1, width, in_channel]
b_conv_lstm_p = f_conv_lstm_p.Copy()
b_conv_lstm_p.name = 'b_conv_lstm_%d' % (i)
conv_lstm_rnn_p = self.CreateConvLstmLayerParams()
conv_lstm_rnn_p.name = 'conv_lstm_rnn'
conv_lstm_rnn_p.fwd = f_conv_lstm_p
conv_lstm_rnn_p.bak = b_conv_lstm_p
params_conv_lstm_rnn.append(conv_lstm_rnn_p)
cnn_p = p.after_conv_lstm_cnn_tpl.Copy()
cnn_p.name = 'conv_lstm_cnn_%d' % (i)
cnn_p.filter_shape[2] = 2 * in_channel
cnn_p.filter_shape[3] = in_channel
params_conv_lstm_cnn.append(cnn_p)
# TODO(yonghui): Refactor ConvLSTMBlock into a layer.
self.CreateChildren('conv_lstm_rnn', params_conv_lstm_rnn)
self.CreateChildren('conv_lstm_cnn', params_conv_lstm_cnn)
(self._first_lstm_input_dim,
self._first_lstm_input_dim_pad) = self.FirstLstmLayerInputDimAndPadding(
conv_output_shape, pad_to_multiple=16)
# Now create all the rnn layers and projection layers.
# TODO(yonghui): take care of device placement.
params_rnn_layers = []
params_proj_layers = []
params_highway_skip_layers = []
for i in range(p.num_lstm_layers):
if i == 0:
input_dim = self._first_lstm_input_dim
else:
input_dim = 2 * p.lstm_cell_size
forward_p = p.lstm_tpl.Copy()
forward_p.name = 'fwd_rnn_L%d' % (i)
forward_p.num_input_nodes = input_dim
forward_p.num_output_nodes = p.lstm_cell_size
backward_p = forward_p.Copy()
backward_p.name = 'bak_rnn_L%d' % (i)
rnn_p = self.CreateBidirectionalRNNParams(forward_p, backward_p)
rnn_p.name = 'brnn_L%d' % (i)
params_rnn_layers.append(rnn_p)
if p.project_lstm_output and (i < p.num_lstm_layers - 1):
proj_p = p.proj_tpl.Copy()
proj_p.input_dim = 2 * p.lstm_cell_size
proj_p.output_dim = 2 * p.lstm_cell_size
proj_p.name = 'proj_L%d' % (i)
proj_p.is_eval = p.is_eval
params_proj_layers.append(proj_p)
# add the skip layers
residual_index = i - p.residual_start + 1
if p.residual_start > 0 and residual_index >= 0 and p.highway_skip:
highway_skip = p.highway_skip_tpl.Copy()
highway_skip.name = 'enc_hwskip_%d' % len(params_highway_skip_layers)
highway_skip.input_dim = 2 * p.lstm_cell_size
params_highway_skip_layers.append(highway_skip)
self.CreateChildren('rnn', params_rnn_layers)
self.CreateChildren('proj', params_proj_layers)
self.CreateChildren('highway_skip', params_highway_skip_layers)
@property
def _use_functional(self):
return True
def CreateBidirectionalRNNParams(self, forward_p, backward_p):
return model_helper.CreateBidirectionalRNNParams(self.params, forward_p,
backward_p)
def CreateConvLstmLayerParams(self):
return rnn_layers.BidirectionalFRNN.Params()
def FirstLstmLayerInputDimAndPadding(self,
conv_output_shape,
pad_to_multiple=16):
lstm_input_shape = conv_output_shape
# Makes sure the lstm input dim is a multiple of 16 (alignment
# requirement from FRNN).
first_lstm_input_dim_unpadded = lstm_input_shape[2] * lstm_input_shape[3]
if self._use_functional and (first_lstm_input_dim_unpadded % pad_to_multiple
!= 0):
first_lstm_input_dim = int(
(first_lstm_input_dim_unpadded + pad_to_multiple - 1) /
pad_to_multiple) * pad_to_multiple
else:
first_lstm_input_dim = first_lstm_input_dim_unpadded
first_lstm_input_dim_padding = (
first_lstm_input_dim - first_lstm_input_dim_unpadded)
return first_lstm_input_dim, first_lstm_input_dim_padding
@property
def supports_streaming(self):
return False
def zero_state(self, batch_size):
return py_utils.NestedMap()
def FProp(self, theta, batch, state0=None):
"""Encodes source as represented by 'inputs' and 'paddings'.
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
batch: A NestedMap with fields:
src_inputs - The inputs tensor. It is expected to be of shape [batch,
time, feature_dim, channels].
paddings - The paddings tensor. It is expected to be of shape [batch,
time].
state0: Recurrent input state. Not supported/ignored by this encoder.
Returns:
(outputs, out_paddings, state1) tuple. Outputs is of the shape
[time, batch, depth], and out_paddings is of the shape [time, batch]
"""
p = self.params
inputs, paddings = batch.src_inputs, batch.paddings
with tf.name_scope(p.name):
# Add a few extra padded timesteps at the end. This is for ensuring the
# correctness of the conv-layers at the edges.
if p.pad_steps > 0:
# inplace_update() is not supported by TPU for now. Since we have done
# padding on the input_generator, we may avoid this additional padding.
assert not py_utils.use_tpu()
inputs_pad = tf.zeros(
inplace_ops.inplace_update(tf.shape(inputs), 1, p.pad_steps),
inputs.dtype)
paddings_pad = tf.ones(
inplace_ops.inplace_update(tf.shape(paddings), 1, p.pad_steps),
paddings.dtype)
inputs = tf.concat([inputs, inputs_pad], 1, name='inputs')
paddings = tf.concat([paddings, paddings_pad], 1)
def ReshapeForPlot(tensor, padding, name):
"""Transposes and flattens channels to [batch, dim, seq_len] shape."""
# Flatten any dimensions beyond the third into the third.
batch_size = tf.shape(tensor)[0]
max_len = tf.shape(tensor)[1]
plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1])
plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name)
return (plot_tensor, summary_utils.SequenceLength(padding))
plots = [
ReshapeForPlot(
tf.transpose(inputs, [0, 1, 3, 2]), paddings, 'inputs')
]
conv_out = inputs
out_padding = paddings
for i, conv_layer in enumerate(self.conv):
conv_out, out_padding = conv_layer.FProp(theta.conv[i], conv_out,
out_padding)
plots.append(
ReshapeForPlot(
tf.transpose(conv_out, [0, 1, 3, 2]), out_padding,
'conv_%d_out' % i))
def TransposeFirstTwoDims(t):
first_dim = tf.shape(t)[0]
second_dim = tf.shape(t)[1]
t_new = tf.transpose(
tf.reshape(t, [first_dim, second_dim, -1]), [1, 0, 2])
t_shape_new = tf.concat([[second_dim], [first_dim], tf.shape(t)[2:]], 0)
return tf.reshape(t_new, t_shape_new)
# Now the conv-lstm part.
conv_lstm_out = conv_out
conv_lstm_out_padding = out_padding
for i, (rnn, cnn) in enumerate(
zip(self.conv_lstm_rnn, self.conv_lstm_cnn)):
conv_lstm_in = conv_lstm_out
# Move time dimension to be the first.
conv_lstm_in = TransposeFirstTwoDims(conv_lstm_in)
conv_lstm_in = tf.expand_dims(conv_lstm_in, 2)
conv_lstm_in_padding = tf.expand_dims(
tf.transpose(conv_lstm_out_padding), 2)
lstm_out = rnn.FProp(theta.conv_lstm_rnn[i], conv_lstm_in,
conv_lstm_in_padding)
# Move time dimension to be the second.
cnn_in = TransposeFirstTwoDims(lstm_out)
cnn_in = tf.squeeze(cnn_in, 2)
cnn_in_padding = conv_lstm_out_padding
cnn_out, cnn_out_padding = cnn.FProp(theta.conv_lstm_cnn[i], cnn_in,
cnn_in_padding)
conv_lstm_out, conv_lstm_out_padding = cnn_out, cnn_out_padding
plots.append(
ReshapeForPlot(conv_lstm_out, conv_lstm_out_padding,
'conv_lstm_%d_out' % i))
# Need to do a reshape before starting the rnn layers.
conv_lstm_out = py_utils.HasRank(conv_lstm_out, 4)
conv_lstm_out_shape = tf.shape(conv_lstm_out)
new_shape = tf.concat([conv_lstm_out_shape[:2], [-1]], 0)
conv_lstm_out = tf.reshape(conv_lstm_out, new_shape)
if self._first_lstm_input_dim_pad:
conv_lstm_out = tf.pad(
conv_lstm_out,
[[0, 0], [0, 0], [0, self._first_lstm_input_dim_pad]])
conv_lstm_out = py_utils.HasShape(conv_lstm_out,
[-1, -1, self._first_lstm_input_dim])
# Transpose to move the time dimension to be the first.
rnn_in = tf.transpose(conv_lstm_out, [1, 0, 2])
rnn_padding = tf.expand_dims(tf.transpose(conv_lstm_out_padding), 2)
# rnn_in is of shape [time, batch, depth]
# rnn_padding is of shape [time, batch, 1]
# Now the rnn layers.
num_skips = 0
for i in range(p.num_lstm_layers):
rnn_out = self.rnn[i].FProp(theta.rnn[i], rnn_in, rnn_padding)
residual_index = i - p.residual_start + 1
if p.residual_start > 0 and residual_index >= 0:
if residual_index % p.residual_stride == 0:
residual_in = rnn_in
if residual_index % p.residual_stride == p.residual_stride - 1:
# Highway skip connection.
if p.highway_skip:
rnn_out = self.highway_skip[num_skips].FProp(
theta.highway_skip[num_skips], residual_in, rnn_out)
num_skips += 1
else:
# Residual skip connection.
rnn_out += py_utils.HasShape(residual_in, tf.shape(rnn_out))
if p.project_lstm_output and (i < p.num_lstm_layers - 1):
# Projection layers.
rnn_out = self.proj[i].FProp(theta.proj[i], rnn_out, rnn_padding)
if i == p.num_lstm_layers - 1:
rnn_out *= (1.0 - rnn_padding)
plots.append(
ReshapeForPlot(
tf.transpose(rnn_out, [1, 0, 2]),
tf.transpose(rnn_padding, [1, 0, 2]), 'rnn_%d_out' % i))
rnn_in = rnn_out
final_out = rnn_in
if self.cluster.add_summary:
fig = plot.MatplotlibFigureSummary(
'encoder_example', figsize=(8, len(plots) * 3.5))
# Order layers from bottom to top.
plots.reverse()
for tensor, seq_len in plots:
fig.AddSubplot(
[tensor, seq_len],
summary_utils.TrimPaddingAndPlotSequence,
title=tensor.name,
xlabel='Time')
fig.Finalize()
rnn_padding = tf.squeeze(rnn_padding, [2])
return final_out, rnn_padding, py_utils.NestedMap()
```
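For orientation, a configuration sketch for the encoder above is given below. It only sets fields that `AsrEncoder.Params()` defines; how the params object is instantiated into a layer is left to the surrounding lingvo task configuration, since that call is not shown in this file.
```python
# Configuration sketch only: every field below is defined in AsrEncoder.Params() above;
# the chosen values are illustrative, not defaults from any particular lingvo task.
p = AsrEncoder.Params()
p.name = 'asr_encoder'
p.input_shape = [None, None, 80, 3]                      # [batch, time, feature_dim, channels]
p.conv_filter_shapes = [(3, 3, 3, 32), (3, 3, 32, 32)]   # two conv layers
p.conv_filter_strides = [(2, 2), (2, 2)]                 # each halves time and frequency
p.num_cnn_layers = 2
p.num_conv_lstm_layers = 1
p.num_lstm_layers = 4
p.lstm_cell_size = 512
p.project_lstm_output = True
p.residual_start = 2                                     # skip connections from LSTM layer 2 on
p.highway_skip = True                                    # gate those skips
p.bidi_rnn_type = 'func'                                 # BidirectionalFRNN
```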
#### File: tasks/lm/layers.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import layers
from lingvo.core import layers_with_attention
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import rnn_layers
def get_basis_init(p, shape):
if p.trainable_basis:
return p.params_init
else:
init_val = np.random.normal(size=shape,
scale=1.0 / math.sqrt(shape[-1]))
init = py_utils.WeightInit.Constant(scale=init_val)
return init
class BaseLanguageModel(base_layer.BaseLayer):
"""Abstract base class for a language model layer."""
@classmethod
def Params(cls):
p = super(BaseLanguageModel, cls).Params()
p.Define('vocab_size', 0, 'Number of vocabulary tokens.')
return p
@base_layer.initializer
def __init__(self, params):
super(BaseLanguageModel, self).__init__(params)
def zero_state(self, batch_size):
raise NotImplementedError('Abstract method')
def FProp(self, theta, inputs, paddings, state0, *args, **kwargs):
"""Computes xent loss given the language model inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: a tensor of shape [time, batch] or [time, batch, dims].
paddings: a 0/1 tensor of shape [time, batch].
state0: A `.NestedMap` containing the initial recurrent state.
*args: optional extra arguments.
**kwargs: optional extra keyword arguments.
Returns:
(xent_output, state1). `xent_output` is a `.NestedMap` as defined by
`SoftmaxLayer`'s return value and `state1` is the next recurrent state.
"""
raise NotImplementedError('Abstract method')
def Logits(self, theta, inputs, paddings, *args, **kwargs):
"""FProp and returns the logits for the whole sequence."""
xent_output, _ = self.FProp(
theta,
inputs,
paddings,
state0=self.zero_state(tf.shape(inputs)[1]),
*args,
**kwargs)
return xent_output.logits
@classmethod
def StepOutputDimension(cls, params):
"""Returns dimensions of `Step()`'s output dimension.
Args:
params: Params for this layer.
Returns:
A `.NestedMap` with fields
logits: a python int.
The vocab size.
last_hidden: a python int.
The last hidden layer's dimension.
"""
raise NotImplementedError('Abstract method')
def Step(self, theta, inputs, paddings, state0, *args, **kwargs):
"""FProp one step.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: a tensor of shape [batch] or [batch, dims].
paddings: a 0/1 tensor of shape [batch].
state0: A `.NestedMap` containing the initial recurrent state.
*args: optional extra arguments.
**kwargs: optional extra keyword arguments.
Returns:
A tuple (output, state1).
output: A `.NestedMap` with fields.
logits:
[batch, vocab_size].
log_probs:
[batch, vocab_size].
last_hidden:
[batch, dims].
state1:
The new recurrent state.
"""
def ExpandTime(x):
return tf.expand_dims(x, axis=0)
xent_output, state1 = self.FProp(
theta=theta,
inputs=ExpandTime(inputs),
paddings=ExpandTime(paddings),
state0=state0,
*args,
**kwargs)
output = py_utils.NestedMap()
output.log_probs = tf.squeeze(xent_output.log_probs, axis=0)
output.probs = tf.squeeze(xent_output.probs, axis=0)
output.last_hidden = tf.squeeze(xent_output.last_hidden, axis=0)
if 'logits' in xent_output:
# FstLm doesn't return logits.
output.logits = tf.squeeze(xent_output.logits, axis=0)
return output, state1
def GetFeedDict(self):
"""Returns an optional feed dict with str keys and Tensor values."""
return {}
class NullLm(BaseLanguageModel):
"""A trivial language model does nothing really."""
def zero_state(self, batch_size):
return py_utils.NestedMap(
m=tf.zeros([batch_size, 0], dtype=self.params.dtype))
def FProp(self, theta, inputs, paddings, state0, *args, **kwargs):
p = self.params
time = tf.shape(inputs)[0]
batch = tf.shape(inputs)[1]
logits = tf.zeros([time, batch, p.vocab_size], dtype=p.dtype)
return py_utils.NestedMap(
logits=logits,
probs=tf.nn.softmax(logits),
log_probs=tf.nn.log_softmax(logits),
last_hidden=tf.zeros([time, batch, 0], dtype=p.dtype)), state0
def Logits(self, theta, inputs, paddings, *args, **kwargs):
"""FProp and returns the logits for the whole sequence."""
p = self.params
del theta, paddings
time, batch = tf.unstack(tf.shape(inputs)[:2])
return tf.zeros([time, batch, p.vocab_size], dtype=p.dtype)
@classmethod
def StepOutputDimension(cls, params):
"""Returns dimensions of `Step()`'s output dimension."""
return py_utils.NestedMap(logits=params.vocab_size, last_hidden=0)
def Step(self, theta, inputs, paddings, state0, *args, **kwargs):
"""FProp one step."""
p = self.params
batch = tf.shape(inputs)[0]
logits = tf.zeros([batch, p.vocab_size], dtype=p.dtype)
return py_utils.NestedMap(
logits=logits,
log_probs=tf.nn.log_softmax(logits),
probs=tf.nn.softmax(logits),
last_hidden=tf.zeros([batch, 0], dtype=p.dtype)), state0
def _RnnOutputSize(rnns):
cell = rnns.cell_tpl[-1]
return cell.num_output_nodes
class RnnLmNoEmbedding(BaseLanguageModel):
"""Stacked RNN based language model layer."""
@classmethod
def Params(cls):
p = super(RnnLmNoEmbedding, cls).Params()
p.Define('rnns', rnn_layers.StackedCuDNNLSTM.Params(),
'The stacked-RNNs layer params.')
p.Define('softmax', layers.SimpleFullSoftmax.Params(),
'The softmax layer params.')
p.Define('pred_proj', layers.ProjectionLayer.Params(),
'The projection layer params.')
p.Define('pred_rnn', rnn_layers.StackedCuDNNLSTM.Params(),
'The rnn layer for chunk prediction')
p.Define(
'direct_features_dim', 0,
'If > 0, then the number of dimensions of direct features '
'that bypass the RNN and are provided directly to the softmax '
'input.')
p.Define('decoded_filler_keep_prob', 1.0, 'Keep prob for the decoded (noisy) filler embedding')
p.Define('num_word_roles', 0, 'Number of roles on word level')
p.Define('num_sent_roles', 0, 'Number of top/sentence level roles')
p.Define('sent_role_anneal_steps', None, 'Anneal to 1.0 until this step.')
p.Define('use_chunks', False, 'Whether to include chunk loss')
p.Define('pred_mode', 'trigram', 'Prediction mode')
p.Define('trainable_basis', True, 'trainable basis embeddings')
return p
@base_layer.initializer
def __init__(self, params):
super(RnnLmNoEmbedding, self).__init__(params)
p = self.params
if not isinstance(p.rnns.cell_tpl, (list, tuple)):
p.rnns.cell_tpl = [p.rnns.cell_tpl]
p.rnns.allow_implicit_capture = p.allow_implicit_capture
cell_output_size = _RnnOutputSize(p.rnns)
output_layer_size = cell_output_size + p.direct_features_dim
if p.use_chunks:
output_layer_size //= 2
actual_output_size = output_layer_size * max(1, p.num_word_roles)
if actual_output_size != p.softmax.input_dim:
raise ValueError(
'Output layer size %d does not match softmax input size %d! '
'cell_output_size: %d direct_features_dim: %d ' %
(actual_output_size, p.softmax.input_dim, cell_output_size,
p.direct_features_dim))
if p.softmax.num_classes != p.vocab_size:
raise ValueError(
'softmax number of classes %d does not match vocabulary size %d!' %
(p.softmax.num_classes, p.vocab_size))
with tf.variable_scope(p.name):
self.CreateChild('rnns', p.rnns)
self.CreateChild('softmax', p.softmax)
if p.use_chunks:
assert p.num_sent_roles == len(p.sent_role_anneal_steps) + 1, 'provide anneal steps for all roles!'
sp = layers.SimpleFullSoftmax.Params()
sp.name = 'lower_softmax'
sp.num_classes = p.num_sent_roles
input_dim = p.rnns.cell_tpl[-1].num_output_nodes // 2
sp.input_dim = input_dim # Note the output is split into two parts
self.CreateChild('lower_softmax', sp)
cc_dim = p.rnns.cell_tpl[0].num_input_nodes
if p.pred_mode == 'bigram':
cc_inp = cc_dim
elif p.pred_mode == 'trigram':
cc_inp = 2 * cc_dim
elif p.pred_mode == 'rnn':
cc_inp = cc_dim
else:
raise ValueError('Unknown pred_mode: %s' % p.pred_mode)
if p.pred_mode == 'rnn':
self.CreateChild('pred_rnn', p.pred_rnn)
else:
self.CreateChild('pred_proj', p.pred_proj)
SOS_pc = py_utils.WeightParams(
shape=[cc_dim], # HACK
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
EOS_pc = py_utils.WeightParams(
shape=[p.num_sent_roles, cc_dim], # HACK
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('chunk_SOS', SOS_pc)
self.CreateVariable('chunk_EOS', EOS_pc)
# used for constructing two orthogonal contextualized word embeddings
A_pc = py_utils.WeightParams(
shape=[p.rnns.cell_tpl[0].num_input_nodes, 2 * p.rnns.cell_tpl[0].num_input_nodes], # HACK
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('A', A_pc)
R_shape = [p.num_sent_roles, p.rnns.cell_tpl[0].num_input_nodes]
R_pc = py_utils.WeightParams(
shape=R_shape, # HACK
init=get_basis_init(p, R_shape),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('R', R_pc, trainable=p.trainable_basis)
def zero_state(self, batch_size):
return self.rnns.zero_state(batch_size)
@classmethod
def StepOutputDimension(cls, params):
return py_utils.NestedMap(
logits=params.vocab_size, last_hidden=params.softmax.input_dim)
def Step(self,
theta,
inputs,
paddings,
state0,
direct_features=None,
*args,
**kwargs):
"""FProp one step.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: a tensor of shape [batch] or [batch, dims].
paddings: a 0/1 tensor of shape [batch].
state0: A `.NestedMap` containing the initial recurrent state.
direct_features: If not None, a tensor of [batch, direct_feature_dims]
that is concatenated to the output of the last RNN layer.
*args: optional extra arguments.
**kwargs: optional extra keyword arguments.
Returns:
A tuple (output, state1).
output: A `.NestedMap` with fields.
logits:
[batch, vocab_size].
last_hidden:
[batch, dims].
state1:
The new recurrent state.
"""
def ExpandTime(x):
return tf.expand_dims(x, axis=0)
if direct_features is not None:
direct_features = py_utils.HasRank(direct_features, 2)
direct_features = ExpandTime(direct_features)
xent_output, state1 = self.FProp(
theta=theta,
inputs=ExpandTime(inputs),
paddings=ExpandTime(paddings),
state0=state0,
direct_features=direct_features,
*args,
**kwargs)
output = py_utils.NestedMap()
output.logits = tf.squeeze(xent_output.logits, axis=0)
output.probs = tf.squeeze(xent_output.probs, axis=0)
output.log_probs = tf.squeeze(xent_output.log_probs, axis=0)
output.last_hidden = tf.squeeze(xent_output.last_hidden, axis=0)
if 'cce' in xent_output:
output.cce = tf.squeeze(xent_output.cce, axis=-2)
# TODO(jmluo) HACKY
if 'gating_probs' in xent_output:
output.gating_probs = tf.squeeze(xent_output.gating_probs, axis=0)
return output, state1
def FProp(self,
theta,
inputs,
paddings,
state0,
labels=None,
direct_features=None,
emb_weights=None,
chunk_ids=None,
step_inference=False,
ids=None):
"""Computes xent loss given the language model input activations.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: input activation. A tensor of shape [time, batch, dims].
paddings: a 0/1 tensor of shape [time, batch].
state0: A `.NestedMap` containing the initial recurrent state.
labels: If not None, a `.NestedMap` containing the following fields.
- class_weights, a tensor with shape [time, batch] containing the
weights for each target word.
- class_ids, a tensor with shape [time, batch] of int32 dtype containing
the target class labels.
- class_probabilities, a tensor with shape [time, batch, vocab_size] of
float values indicating class-membership probabilities.
direct_features:
If not None, a tensor of [time, batch, direct_feature_dims] that is
concatenated to the output of the last RNN layer.
Returns:
If `labels` is not None, returns (xent_output, state1), where
`xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
value and `state1` is the next recurrent state. Otherwise,
`xent_output` contains the softmax logits, probabilities (.probs) and
log-probabilities (.log_probs).
"""
inputs = py_utils.HasRank(inputs, 3)
seqlen, batch, _ = tf.unstack(tf.shape(inputs), num=3)
paddings = py_utils.HasShape(paddings, [seqlen, batch])
assert state0 is not None
p = self.params
# Storage for intermediate results
inter_res = py_utils.NestedMap(emb_word=inputs)
activation, state1 = self.rnns.FProp(theta.rnns, inputs,
tf.expand_dims(paddings, 2), state0)
if direct_features is not None:
direct_features = py_utils.HasRank(direct_features, 3)
activation = tf.concat([activation, direct_features], axis=2)
# retrieve word level representations from the sentence level ones.
if p.use_chunks > 0:
with tf.name_scope('predict_sent_role'):
sent_act, activation = tf.split(activation, 2, axis=-1)
lower_logits = self.lower_softmax.Logits(theta=theta.lower_softmax, inputs=tf.reshape(sent_act, [seqlen * batch, -1]))
lower_sent_role_probs = tf.nn.softmax(lower_logits)
inter_res.h_word = activation
inter_res.h_sent = sent_act
inter_res.logits_sent = lower_logits
inter_res.role_probs = lower_sent_role_probs
# sanity check -- one role only
# lower_sent_role_probs = tf.stack([tf.ones([seqlen * batch]), tf.zeros([seqlen * batch])], axis=-1)
# lower_sent_roles = py_utils.Matmul(lower_sent_role_probs, theta.R) # sl*bs x d
def forward(softmax_name, act, h=None): # TODO(jmluo) may wanna rename activation
softmax_layer = getattr(self, softmax_name)
softmax_theta = getattr(theta, softmax_name)
if labels is None:
# We can only compute the logits here.
logits = softmax_layer.Logits(
theta=softmax_theta,
inputs=tf.reshape(act, [seqlen * batch, -1]),
activation=h,
return_gating=p.softmax.gating)
if p.softmax.gating:
logits, gating_probs = logits
xent_output = py_utils.NestedMap(
logits=tf.reshape(logits, [seqlen, batch, -1]))
xent_output.probs = tf.nn.softmax(xent_output.logits)
xent_output.log_probs = tf.nn.log_softmax(xent_output.logits)
if p.softmax.gating:
xent_output.gating_probs = tf.reshape(gating_probs, [seqlen, batch, -1])
elif 'class_ids' in labels:
print(softmax_layer)
xent_output = softmax_layer.FProp(
theta=softmax_theta,
inputs=act,
class_weights=labels.class_weights,
class_ids=labels.class_ids,
activation=h,
emb_weights=emb_weights)
else:
assert 'class_probabilities' in labels
xent_output = softmax_layer.FProp(
theta=softmax_theta,
inputs=act,
class_weights=labels.class_weights,
class_probabilities=labels.class_probabilities,
activation=h)
xent_output.last_hidden = activation
return xent_output
p = self.params
if p.num_word_roles == 0:
return forward('softmax', activation), state1
else:
assert emb_weights is not None
preceding_shape = tf.shape(activation)[:-1]
f_noisy = self.emb.decode(tf.expand_dims(activation, axis=-2), emb_weights.r) # This is actually a bit hacky -- there is no guarantee that this layer has an emb attribute
if p.decoded_filler_keep_prob < 1.0 and not p.is_eval:
f_noisy = tf.nn.dropout(f_noisy, p.decoded_filler_keep_prob)
cat = tf.reshape(f_noisy, tf.concat([preceding_shape, [p.softmax.input_dim]], axis=0))
out = forward('softmax', cat, h=activation)
if p.num_sent_roles > 0:
out.lower_roles = lower_sent_role_probs
out.emb = inputs
batch_major = True
if p.use_chunks and not step_inference: # skip chunk loss in step inference mode
with tf.name_scope('chunk_prediction'):
last_dim = tf.shape(sent_act)[-1]
if batch_major:
bm_inputs = tf.transpose(inputs, perm=[1, 0, 2]) # bs x sl x d
w = tf.reshape(tf.matmul(tf.reshape(bm_inputs, [-1, last_dim]), theta.A), [-1, p.num_sent_roles, last_dim])
rw = HRREmbeddingLayer.static_circular_conv(theta.R, w)
else:
w = tf.reshape(tf.matmul(tf.reshape(inputs, [-1, last_dim]), theta.A), [-1, p.num_sent_roles, last_dim])
rw = HRREmbeddingLayer.static_circular_conv(theta.R, w)
inter_res.chunk_ids = chunk_ids
inter_res.w = w
inter_res.rw = rw
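# Interpretive note: static_circular_conv binds each sentence-level role vector in
# theta.R to its filler w via circular convolution (the HRR binding operation);
# static_circular_corr, used further below on the predictions, is the approximate
# inverse (unbinding).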
if batch_major:
bm_lower_sent_role_probs = tf.reshape(tf.transpose(tf.reshape(lower_sent_role_probs, [seqlen, batch, p.num_sent_roles]), perm=[1, 0, 2]), [batch * seqlen, p.num_sent_roles])
clean_w = tf.expand_dims(bm_lower_sent_role_probs, axis=-1) * w # bsxsl x 2 x d
clean_w = tf.reshape(clean_w, [batch, seqlen, p.num_sent_roles, last_dim])
clean_w = [clean_w[:, :, ri] for ri in range(p.num_sent_roles)]
else:
clean_w = tf.expand_dims(lower_sent_role_probs, axis=-1) * w # size: sl*bs x 2 x d
# clean_w = [clean_w[:, ri] for ri in range(p.num_sent_roles)] # bs x sl x d for each role
clean_w = tf.transpose(tf.reshape(clean_w, [seqlen, batch, p.num_sent_roles, last_dim]), perm=[1, 2, 0, 3]) # size: bs x 2 x sl x d
out.cce = clean_w
inter_res.w_clean = clean_w
bs_indices = tf.tile(tf.expand_dims(tf.range(batch), axis=0), [seqlen, 1])
sl_indices = tf.tile(tf.expand_dims(tf.range(seqlen), axis=1), [1, batch])
clen = tf.reduce_max(chunk_ids) + 1
indices = tf.stack([bs_indices, chunk_ids, sl_indices], axis=-1) # size: sl x bs x 3
sm_shape = [batch, clen, seqlen] # size: bs x cl x sl
ones = tf.ones_like(chunk_ids)
sm = tf.to_float(tf.scatter_nd(indices, ones, sm_shape))
# TODO(jmluo): I don't even remember what sm stands for. Summation matrix?
inter_res.sm = sm
non_empty = tf.reduce_max(sm, axis=-1) # size: bs x cl
last_chunk_id = tf.to_int32(tf.reduce_max(chunk_ids, axis=0)) # size: bs
chunk_weights = tf.concat([tf.to_float(non_empty > 0)[:, :-1], tf.ones([batch, 1])], axis=-1) # size: bs x cl
# chunk weight offset positions
if batch_major:
bm_bound_w = tf.reduce_sum(tf.expand_dims(bm_lower_sent_role_probs, axis=-1) * rw, axis=-2) # bs*sl x d
bound_w = tf.reshape(bm_bound_w, [batch, seqlen, last_dim])
else:
bound_w = tf.reduce_sum(tf.expand_dims(lower_sent_role_probs, axis=-1) * rw, axis=-2) # size: sl*bs x d
bound_w = tf.transpose(tf.reshape(bound_w, [seqlen, batch, last_dim]), perm=[1, 0, 2]) # size: bs x sl x d
chunk_emb = tf.matmul(sm, bound_w, name='chunk_e') # size: bs x cl x d
if batch_major:
# clean_chunk_emb = tf.matmul(tf.tile(tf.expand_dims(sm, axis=1), [1, p.num_sent_roles, 1, 1]), clean_w, name='chunk_f') # size: bs x 2 x cl x d
# clean_chunk_emb = [clean_chunk_emb[:, ri] for ri in range(p.num_sent_roles)]
clean_chunk_emb = [tf.matmul(sm, cw) for cw in clean_w] # bs x cl x d for each role
else:
clean_chunk_emb = [tf.matmul(sm, cw) for cw in clean_w] # bs x cl x d for each role
# clean_chunk_emb = tf.matmul(tf.tile(tf.expand_dims(sm, axis=1), [1, p.num_sent_roles, 1, 1]), clean_w, name='chunk_f') # size: bs x 2 x cl x d
inter_res.bound_w = bound_w
inter_res.ce = chunk_emb
inter_res.cce = clean_chunk_emb
# get input chunks and target chunks
SOS_emb = tf.tile(tf.reshape(theta.chunk_SOS, [1, 1, -1]), [batch, 1, 1])
input_chunk_emb = tf.concat([SOS_emb, chunk_emb[:, 1:]], axis=1) # replace the first chunk with chunk_emb embedding
# input_chunk_emb = tf.nn.l2_normalize(input_chunk_emb, axis=-1)
# EOS_emb = tf.tile(tf.reshape(theta.chunk_EOS, [1, p.num_sent_roles, 1, -1]), [batch, 1, 1, 1])
# target_chunk_emb = tf.concat([clean_chunk_emb[:, :, 1:], EOS_emb], axis=2) # move EOS_emb to the end of sentences. After all paddings!
EOS_emb = tf.tile(tf.reshape(theta.chunk_EOS, [1, p.num_sent_roles, 1, -1]), [batch, 1, 1, 1])
EOS_embs = tf.unstack(EOS_emb, axis=1)
target_chunk_emb = [tf.concat([clean_chunk_emb[ri][:, 1:], EOS_embs[ri]], axis=1) for ri in range(p.num_sent_roles)] # move EOS_emb to the end of sentences. After all paddings!
# only normalize target embeddings (these are ground truth embeddings)
target_chunk_emb = [tf.nn.l2_normalize(tce, axis=-1) for tce in target_chunk_emb]
inter_res.input_chunk_emb = input_chunk_emb
inter_res.target_chunk_emb = target_chunk_emb
def mm3by2(x, y, transpose=False):
with tf.name_scope('mm3by2'):
py_utils.HasRank(x, 3)
py_utils.HasRank(y, 2)
bs, sl, dx = tf.unstack(tf.shape(x))
dy = tf.shape(y)[0 if transpose else 1]
return tf.reshape(tf.matmul(tf.reshape(x, [bs * sl, dx]), y, transpose_b=transpose), [bs, sl, dy])
def get_predictions(chunk_emb):
if p.pred_mode == 'rnn':
input_ = tf.transpose(chunk_emb, [1, 0, 2])
sent_state0 = self.pred_rnn.zero_state(batch)
sent_paddings = tf.expand_dims(1.0 - tf.transpose(chunk_weights), 2) # NOTE this happens before deltas are applied
h_chunk, _ = self.pred_rnn.FProp(theta.pred_rnn, input_, sent_paddings, sent_state0)
# return h_chunk # NOTE seqlen major to get rid of one transpose
return tf.transpose(h_chunk, [1, 0, 2])
elif p.pred_mode == 'bigram':
cat = chunk_emb
elif p.pred_mode == 'trigram':
# note that length dim is the second axis
bs, cl, d = tf.unstack(tf.shape(chunk_emb))
prev = tf.concat([tf.zeros([bs, 1, d]), chunk_emb[:, :-1]], axis=1)
cat = tf.concat([prev, chunk_emb], axis=-1)
elif p.pred_mode == 'rnn':
cat = chunk_emb
# h_chunk = mm3by2(tf.tanh(cat), theta.pred) # size: bs x cl x d
h_chunk = self.pred_proj.FProp(theta.pred_proj, cat)
return h_chunk
h_chunk = get_predictions(input_chunk_emb)
last_pred_pos_indices = tf.stack([tf.range(batch), last_chunk_id], axis=-1) # size: bs x 2
# if p.pred_mode == 'rnn':
# rnn_last_pred_pos_indices = tf.stack([last_chunk_id, tf.range(batch)], axis=-1) # size: bs x 2
# f_chunk = HRREmbeddingLayer.static_circular_corr(theta.R, tf.expand_dims(h_chunk, axis=-2)) # size: cl x bs x 2 x d
# last_pred = tf.reshape(tf.gather_nd(f_chunk, rnn_last_pred_pos_indices), [1, batch, p.num_sent_roles, -1])
# f_chunk = tf.concat([f_chunk[:-1], last_pred], axis=0)
# f_hat1, f_hat2 = tf.unstack(f_chunk, axis=-2) # cl x bs x d
# else:
f_chunk = HRREmbeddingLayer.static_circular_corr(theta.R, tf.expand_dims(h_chunk, axis=-2)) # size: bs x cl x 2 x d
last_pred = tf.reshape(tf.gather_nd(f_chunk, last_pred_pos_indices), [batch, 1, p.num_sent_roles, -1])
f_chunk = tf.concat([f_chunk[:, :-1], last_pred], axis=1)
f_hats = tf.unstack(f_chunk, axis=-2)
inter_res.h_chunk = h_chunk
inter_res.f_chunk = f_chunk
inter_res.f_hats = f_hats
# gold1, gold2 = tf.unstack(target_chunk_emb, axis=1)
gold_embs = target_chunk_emb
# if p.pred_mode == 'rnn':
# merged_indices = tf.transpose(tf.reshape(tf.range(batch * clen), [batch, -1]), perm=[1, 0]) # cl x bs
# else:
merged_indices = tf.reshape(tf.range(batch * clen), [batch, -1])
dots = [mm3by2(f_hat, tf.reshape(gold_emb, [batch * clen, -1]), transpose=True) for f_hat, gold_emb in zip(f_hats, gold_embs)]
# dot1 = mm3by2(f_hat1, tf.reshape(gold1, [batch * clen, -1]), transpose=True) # bs x cl x bs*cl / cl x bs x bs*cl (using rnn)
# dot2 = mm3by2(f_hat2, tf.reshape(gold2, [batch * clen, -1]), transpose=True) # bs x cl x bs*cl
global_step = tf.to_float(py_utils.GetOrCreateGlobalStep())
temperatures = [tf.minimum(tf.constant(sras), global_step) / sras for sras in p.sent_role_anneal_steps]
for i, t in enumerate(temperatures):
tf.summary.scalar('temperature_sent_role_%d' %i, t)
den_dot = sum([dots[0]] + [dot * temperature for dot, temperature in zip(dots[1:], temperatures)])
inter_res.gold_embs = gold_embs
inter_res.dots = dots
inter_res.dot = den_dot
with tf.name_scope('chunk_loss'):
delta = tf.scatter_nd(last_pred_pos_indices, -tf.ones([batch]), [batch, clen])
chunk_weights = chunk_weights + delta
one_hot_target = tf.one_hot(merged_indices, batch * clen, off_value=1e-8)
den_dot = den_dot + tf.reshape(chunk_weights * 99.0 - 99.0, [-1])
chunk_log_probs = tf.reduce_sum(one_hot_target * tf.nn.log_softmax(den_dot), axis=-1)
# if p.pred_mode == 'rnn':
# out.chunk_log_probs = chunk_log_probs * tf.transpose(chunk_weights, [1, 0])
# else:
out.chunk_log_probs = chunk_log_probs * chunk_weights
out.num_chunks = tf.reduce_sum(chunk_weights) + 1e-8
inter_res.w_chunk = chunk_weights
inter_res.target = one_hot_target
inter_res.masked_dot = den_dot
inter_res.clp = out.chunk_log_probs
inter_res.num_chunks = out.num_chunks
out.inter_res = inter_res
return out, state1
else:
return out, state1
class RnnLm(RnnLmNoEmbedding):
"""Stacked RNN based language model layer."""
@classmethod
def Params(cls):
p = super(RnnLm, cls).Params()
p.Define('emb', layers.EmbeddingLayer.Params(),
'The embedding layer params.')
p.Define('embedding_dropout_keep_prob', 1.0, 'Embedding dropout keep prob.')
p.Define('embedding_dropout_seed', None, 'Embedding dropout seed.')
p.Define('tie', False, 'Tie input and output embeddings.')
p.emb.max_num_shards = 1
return p
# TODO(zhifengc): Consider merge Params() and CommonParams().
@classmethod
def CommonParams(cls,
vocab_size,
emb_dim=1024,
num_layers=2,
rnn_dims=2048,
rnn_hidden_dims=0,
residual_start=1,
softmax_max_alloc=None):
"""A LM model parameterized by vocab size, etc.
Args:
vocab_size: Vocab size.
emb_dim: Embedding dimension.
num_layers: The number of rnn layers.
rnn_dims: Each RNN layer has this many output nodes.
rnn_hidden_dims: If > 0, each RNN layer has this many hidden nodes.
residual_start: index of the first layer with a residual connection;
higher index layers also have residuals.
softmax_max_alloc: If set to a positive integer the soft-max
computation is chunked into allocations of at most
`softmax_max_alloc`; when left to its default value of None no
chunking is done.
Returns:
A `RnnLm` parameter object.
"""
p = cls.Params()
p.vocab_size = vocab_size
init_scale = 1.0 / math.sqrt(rnn_dims)
# Embedding.
p.emb.vocab_size = vocab_size
p.emb.embedding_dim = emb_dim
p.emb.scale_sqrt_depth = False
p.emb.params_init = py_utils.WeightInit.Uniform(init_scale)
# RNNs
p.rnns.num_layers = num_layers
# Which layer starts to have the residual connection.
p.rnns.skip_start = residual_start
if num_layers > 1:
p.rnns.cell_tpl = [
rnn_cell.LSTMCellSimple.Params().Set(
num_input_nodes=emb_dim,
num_output_nodes=rnn_dims,
num_hidden_nodes=rnn_hidden_dims),
rnn_cell.LSTMCellSimple.Params().Set(
num_input_nodes=rnn_dims,
num_output_nodes=rnn_dims,
num_hidden_nodes=rnn_hidden_dims)
]
else:
p.rnns.cell_tpl = [
rnn_cell.LSTMCellSimple.Params().Set(
num_input_nodes=emb_dim,
num_output_nodes=rnn_dims,
num_hidden_nodes=rnn_hidden_dims)
]
# Softmax
p.softmax.input_dim = rnn_dims
p.softmax.num_classes = vocab_size
p.softmax.params_init = py_utils.WeightInit.Uniform(init_scale)
if softmax_max_alloc:
# If the vocab is very large, computes the softmax chunk-by-chunk.
p.softmax.chunk_size = max(1, int(softmax_max_alloc / vocab_size))
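# For example, softmax_max_alloc=2**20 with vocab_size=32768 gives
# chunk_size=32 (illustrative numbers only).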
return p
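# Illustrative usage sketch (hypothetical values, not taken from this file):
# p = RnnLm.CommonParams(vocab_size=32000, emb_dim=1024, num_layers=2,
#                        rnn_dims=2048, rnn_hidden_dims=0, residual_start=1)
# p.name = 'word_level_lm'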
@base_layer.initializer
def __init__(self, params):
super(RnnLm, self).__init__(params)
p = self.params
assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
p.emb.vocab_size, p.vocab_size))
assert p.emb.embedding_dim == p.rnns.cell_tpl[0].num_input_nodes, (
'{} vs. {}'.format(p.emb.embedding_dim,
p.rnns.cell_tpl[0].num_input_nodes))
with tf.variable_scope(p.name):
self.CreateChild('emb', p.emb)
def FProp(self,
theta,
inputs,
paddings,
state0,
labels=None,
direct_features=None,
chunk_ids=None,
step_inference=False,
ids=None):
"""Computes xent loss given the language model input activations.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: input ids. An int32 tensor of shape [time, batch].
paddings: a 0/1 tensor of shape [time, batch].
state0: A `.NestedMap` containing the initial recurrent state.
labels: If not None, a `.NestedMap` containing the following fields:
- class_weights, a tensor with shape [time, batch] containing the
weights for each target word.
- class_ids, a tensor with shape [time, batch] of int32 dtype containing
the target class labels.
- class_probabilities, a tensor with shape [time, batch, vocab_size] of
float values indicating class-membership probabilities.
direct_features:
If not None, a tensor of [time, batch, direct_feature_dims] that is
concatenated to the output of the last RNN layer.
chunk_ids: If not None, an int32 tensor of shape [time, batch] giving the
chunk id of each token, used for the chunk-level loss.
step_inference: If True, runs in single-step inference mode and skips the
chunk loss.
ids: Unused; the token ids are taken from `inputs`.
Returns:
If `labels` is not None, returns (xent_output, state1), where
`xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
value and `state1` is the next recurrent state. Otherwise,
`xent_output` only contains the softmax logits.
"""
p = self.params
ids = py_utils.HasRank(inputs, 2)
paddings = py_utils.HasShape(paddings, tf.shape(ids))
assert state0
def forward(activation):
# Dropout on embeddings is only applied in training.
if p.embedding_dropout_keep_prob < 1.0 and not p.is_eval:
activation = tf.nn.dropout(
activation,
keep_prob=p.embedding_dropout_keep_prob,
seed=p.embedding_dropout_seed)
return super(RnnLm, self).FProp(theta, activation, paddings, state0,
labels=labels,
direct_features=direct_features,
emb_weights=emb_weights,
chunk_ids=chunk_ids,
step_inference=step_inference,
ids=ids)
# TODO(jmluo) may wanna get rid of this assertion to obtain a baseline (nr > 0 but w/o HRR)
# also, should move this into __init__.
if p.num_word_roles > 0:
assert p.emb.cls == HRREmbeddingLayer
assert p.tie
if p.emb.cls == HRREmbeddingLayer:
activation, signature, emb_weights = self.emb.EmbLookup(theta.emb, ids)
else:
activation = self.emb.EmbLookup(theta.emb, ids)
emb_weights = None
if p.tie:
try:
num_shards = len(theta.emb.wm)
except (AttributeError, KeyError): # lazy HRR embeddings keep their shards under theta.emb.s
num_shards = len(theta.emb.s.wm)
def transpose_or_not(w):
transpose = (p.softmax.num_sampled == 0)
if transpose:
return tf.transpose(w)
else:
return w
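# Note: with the full softmax (num_sampled == 0) the tied softmax weights are
# consumed as [input_dim, num_classes], so the [vocab, dim] embedding shards
# are transposed; with sampled softmax the [vocab, dim] layout is kept.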
if p.emb.cls == HRREmbeddingLayer:
if p.num_word_roles > 0:
# for i in xrange(p.num_roles):
# softmax_theta = getattr(theta, 'softmax_%d' %i)
if p.emb.lazy:
pass # NOTE lazy mode means don't share the softmax weights directly
# for shard_ind in xrange(num_shards):
# theta.softmax['weight_%d' %shard_ind] = transpose_or_not(theta.emb.s.wm[shard_ind])
else:
for shard_ind in xrange(num_shards):
f_shard = emb_weights.f[shard_ind]
reshaped_f_shard = tf.reshape(f_shard, [-1, p.softmax.input_dim])
theta.softmax['weight_%d' %shard_ind] = transpose_or_not(reshaped_f_shard)
else:
for shard_ind in xrange(num_shards):
theta.softmax['weight_%d' %shard_ind] = transpose_or_not(emb_weights.e[shard_ind]) # was `emb.e`, which is undefined in this scope
else:
for shard_ind in xrange(num_shards):
main = transpose_or_not(theta.emb.wm[shard_ind])
theta.softmax['weight_%d' %shard_ind] = main
res = forward(activation)
xent_output = res[0]
return res
class MoeLm(BaseLanguageModel):
"""Mixture of experts language modeling class."""
@classmethod
def Params(cls):
p = super(MoeLm, cls).Params()
p.Define(
'emb',
layers.EmbeddingLayer.Params().Set(max_num_shards=1),
'The embedding layer params.')
p.Define('shared_emb', True, 'If true, uses a single embedding')
p.Define(
'add_postgating_rnn', True, 'If true, add an RNNLM post gating. '
'If false, add only a softmax on top.')
p.Define('rnns', rnn_layers.StackedFRNNLayerByLayer.Params(),
'The stacked-RNNs layer params.')
p.Define('number_of_experts', 7, 'Number of experts.')
p.Define('merge', RnnLmNoEmbedding.Params(),
'The LM to use for the merged LM')
return p
@base_layer.initializer
def __init__(self, params):
super(MoeLm, self).__init__(params)
p = self.params
if not isinstance(p.rnns.cell_tpl, (list, tuple)):
p.rnns.cell_tpl = [p.rnns.cell_tpl]
assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
p.emb.vocab_size, p.vocab_size))
assert p.emb.embedding_dim == p.rnns.cell_tpl[0].num_input_nodes, (
'{} vs. {}'.format(p.emb.embedding_dim,
p.rnns.cell_tpl[0].num_input_nodes))
if p.add_postgating_rnn:
assert p.merge.vocab_size == p.vocab_size, ('{} vs. {}'.format(
p.merge.vocab_size, p.vocab_size))
with tf.variable_scope(p.name):
# Embeddings
if p.shared_emb:
self.CreateChild('emb', p.emb)
else:
# 0-th embedding is for the domain predictor.
self.CreateChildren(
'emb', [
p.emb.Copy().Set(name='emb_%d' % i)
for i in range(1 + p.number_of_experts)
])
# Rnns
# 0-th rnns is for the domain predictor.
self.CreateChildren(
'rnns', [p.rnns.Copy() for i in range(1 + p.number_of_experts)])
# Softmax
rnn_output_size = _RnnOutputSize(p.rnns)
sm_params = layers.SimpleFullSoftmax.Params()
sm_params.name = 'domain_predictor_softmax'
sm_params.input_dim = rnn_output_size
sm_params.num_classes = p.number_of_experts
self.CreateChild('domain_predictor_softmax', sm_params)
# Merge
if p.add_postgating_rnn:
self.CreateChild('merge', p.merge)
else:
output_sm_params = layers.SimpleFullSoftmax.Params()
output_sm_params.name = 'output_softmax'
output_sm_params.input_dim = rnn_output_size
output_sm_params.num_classes = p.vocab_size
self.CreateChild('output_softmax', output_sm_params)
def zero_state(self, batch_size):
p = self.params
if p.add_postgating_rnn:
return py_utils.NestedMap(
rnns=[x.zero_state(batch_size) for x in self.rnns],
merge=self.merge.zero_state(batch_size))
else:
return py_utils.NestedMap(
rnns=[x.zero_state(batch_size) for x in self.rnns])
def FProp(self, theta, inputs, paddings, state0, labels=None):
"""Forward compute."""
p = self.params
ids = py_utils.HasRank(inputs, 2)
paddings = py_utils.HasShape(paddings, tf.shape(ids))
seqlen, batch = tf.unstack(tf.shape(inputs), num=2)
assert state0
paddings_3d = tf.expand_dims(paddings, axis=2)
# RNNs
if p.shared_emb:
emb_act = [self.emb.EmbLookup(theta.emb, inputs)
] * (1 + p.number_of_experts)
else:
emb_act = [
self.emb[i].EmbLookup(theta.emb[i], inputs)
for i in range(1 + p.number_of_experts)
]
state1 = py_utils.NestedMap(rnns=[])
rnns_act = []
for i, act in enumerate(emb_act):
act, state = self.rnns[i].FProp(theta.rnns[i], act, paddings_3d,
state0.rnns[i])
act = py_utils.HasRank(act, 3)
rnns_act += [act]
state1.rnns += [state]
# [time, batch, experts, dims].
expert_stacked = tf.stack(rnns_act[1:], axis=2)
# Compute gating softmax. The 0-th rnns is used as the expert
# predictor. Because SoftmaxLayer.Logits takes a matrix as input,
# we reshape rnns_act[0], the domain predictor activation, to a
# matrix here.
act = tf.reshape(rnns_act[0], [seqlen * batch, -1])
logits = self.domain_predictor_softmax.Logits(
theta.domain_predictor_softmax, act)
# [time, batch, experts]
gating = tf.reshape(tf.nn.softmax(logits), [seqlen, batch, -1])
# Mix the experts.
# [time, batch, dims]
combined = tf.squeeze(
tf.matmul(
# [time, batch, 1, experts]
tf.expand_dims(gating, axis=2),
# [time, batch, experts, dims]
expert_stacked),
axis=2)
if p.add_postgating_rnn:
# Note that this layer includes 1 or more RNN layers followed
# by a softmax.
xent_loss, state1.merge = self.merge.FProp(theta.merge, combined,
paddings, state0.merge, labels)
else:
xent_loss = self.output_softmax.FProp(
theta=theta.output_softmax,
inputs=combined,
class_weights=labels.class_weights,
class_ids=labels.class_ids)
# return xent_loss, state1
return xent_loss, state1
class TransformerLmNoEmbedding(BaseLanguageModel):
"""Transformer language model."""
@classmethod
def Params(cls):
p = super(TransformerLmNoEmbedding, cls).Params()
p.Define('position_emb', layers.PositionalEmbeddingLayer.Params(),
'Position embedding layer params.')
p.Define(
'model_dim', 512, 'Model dimension that applies to embedding '
'layers and all Transformer layers.')
p.Define('num_trans_layers', 6, 'Number of Transformer layers.')
p.Define('trans_tpl', layers_with_attention.TransformerLayer.Params(),
'Transformer Layer params.')
p.Define('input_dropout_prob', 0.0, 'Prob at which we do input dropout.')
p.Define(
'residual_dropout_prob', 0.0, 'Dropout prob to the output of '
'each sub-layer before it is added to the sub-layer input.')
p.Define(
'atten_dropout_prob', 0.0, 'Dropout prob to the attention '
'weights in each Transformer attention sub-layer.')
p.Define(
'relu_dropout_prob', 0.0, 'Dropout prob to the inner layer '
'output (ReLU activation) in each Transformer feed-forward '
'sub-layer.')
p.Define('softmax', layers.SimpleFullSoftmax.Params(),
'The softmax layer params.')
# Default config for the transformer layers.
p.trans_tpl.is_decoder = False
p.trans_tpl.mask_self_atten = True
p.trans_tpl.tr_atten_tpl.num_attention_heads = 8
p.trans_tpl.tr_atten_tpl.atten_tpl.enable_ctx_pre_proj = True
p.trans_tpl.tr_atten_tpl.atten_tpl.enable_ctx_post_proj = True
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 2048
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerLmNoEmbedding, self).__init__(params)
p = self.params
p.trans_tpl.tr_atten_tpl.residual_dropout_prob = p.residual_dropout_prob
p.trans_tpl.tr_atten_tpl.atten_dropout_prob = p.atten_dropout_prob
p.trans_tpl.tr_fflayer_tpl.residual_dropout_prob = p.residual_dropout_prob
p.trans_tpl.tr_fflayer_tpl.relu_dropout_prob = p.relu_dropout_prob
with tf.variable_scope(p.name):
p.position_emb.embedding_dim = p.model_dim
self.CreateChild('position_emb', p.position_emb)
dropout_tpl = layers.DropoutLayer.Params().Set(
keep_prob=(1.0 - p.input_dropout_prob))
self.CreateChild('input_dropout', dropout_tpl)
params_trans_layers = []
for i in range(p.num_trans_layers):
params = p.trans_tpl.Copy()
params.source_dim = p.model_dim
params.name = 'layer_%d' % i
params_trans_layers.append(params)
self.CreateChildren('trans', params_trans_layers)
p.softmax.input_dim = p.model_dim
p.softmax.num_classes = p.vocab_size
self.CreateChild('softmax', p.softmax)
def zero_state(self, batch_size):
p = self.params
return py_utils.NestedMap({
'layer_%d' % layer: py_utils.NestedMap({
'key': tf.zeros([batch_size, 0, p.model_dim]),
'value': tf.zeros([batch_size, 0, p.model_dim]),
}) for layer in range(p.num_trans_layers)
})
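# Each layer's 'key'/'value' prefix starts empty (length 0 along the time
# axis) and is extended by ExtendStep inside Step() as decoding proceeds.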
@classmethod
def StepOutputDimension(cls, params):
return py_utils.NestedMap(
logits=params.vocab_size, last_hidden=params.softmax.input_dim)
def Step(self, theta, inputs, paddings, state0, *args, **kwargs):
"""FProp one step.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: a tensor of shape [batch, model_dim].
paddings: a 0/1 tensor of shape [batch]. Unused here.
state0: A `.NestedMap` containing the prefix states up to step t-1.
*args: optional extra arguments.
**kwargs: optional extra keyword arguments.
Returns:
A tuple (output, state1).
output: A `.NestedMap` with fields.
logits:
[batch, vocab_size].
last_hidden:
[batch, model_dims].
state1:
The updated prefix states including step t.
"""
_, prefix_len = py_utils.GetShape(state0['layer_0'].key, 2)
# [1, model_dim]
posit_embs = self.position_emb.FProp(theta.position_emb,
prefix_len + 1)[-1:, :]
# [batch, model_dim]
input_embs = inputs + posit_embs
input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
# Make a copy of the input.
state1 = state0.Pack(state0.Flatten())
layer_in = input_embs
for i, (layer, layer_theta) in enumerate(zip(self.trans, theta.trans)):
layer_prefix_states = state0['layer_%i' % i]
# [batch, model_dim]
layer_out, _, updated_prefix_states = layer.ExtendStep(
layer_theta, layer_in, layer_prefix_states)
state1['layer_%i' % i] = updated_prefix_states
layer_in = layer_out
# [batch, vocab_size]
logits = self.softmax.Logits(theta=theta.softmax, inputs=layer_out)
output = py_utils.NestedMap(logits=logits, last_hidden=layer_out)
return output, state1
def FProp(self, theta, inputs, paddings, state0=None, labels=None):
"""Computes xent loss given the language model input activations.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: Input activation. A tensor of shape [time, batch, model_dim].
paddings: A 0/1 tensor of shape [time, batch].
state0: Not used for Transformer.
labels: If not None, a `.NestedMap` containing the following fields:
- class_weights, a tensor with shape [time, batch] containing the
weights for each target word.
- class_ids, a tensor with shape [time, batch] of int32 dtype containing
the target class labels.
- class_probabilities, a tensor with shape [time, batch, vocab_size] of
float values indicating class-membership probabilities.
Returns:
If `labels` is not None, returns (xent_output, None), where
`xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
value. Otherwise, `xent_output` only contains the softmax logits.
"""
p = self.params
inputs = py_utils.HasRank(inputs, 3)
seqlen, batch, _ = tf.unstack(tf.shape(inputs), num=3)
inputs = py_utils.HasShape(inputs, [seqlen, batch, p.model_dim])
paddings = py_utils.HasShape(paddings, [seqlen, batch])
# [time, 1, model_dim]
posit_embs = tf.expand_dims(
self.position_emb.FProp(theta.position_emb, seqlen), 1)
# [time, batch, model_dim]
input_embs = inputs + posit_embs
input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
layer_in = input_embs
for layer, layer_theta in zip(self.trans, theta.trans):
# [time, batch, model_dim]
layer_out, _ = layer.FProp(layer_theta, layer_in, paddings)
layer_in = layer_out
if labels is None:
# We can only compute the logits here.
logits = self.softmax.Logits(
theta=theta.softmax,
inputs=tf.reshape(layer_out, [seqlen * batch, -1]))
xent_output = py_utils.NestedMap(
logits=tf.reshape(logits, [seqlen, batch, -1]))
elif 'class_ids' in labels:
xent_output = self.softmax.FProp(
theta=theta.softmax,
inputs=layer_out,
class_weights=labels.class_weights,
class_ids=labels.class_ids)
else:
assert 'class_probabilities' in labels
xent_output = self.softmax.FProp(
theta=theta.softmax,
inputs=layer_out,
class_weights=labels.class_weights,
class_probabilities=labels.class_probabilities)
xent_output.last_hidden = layer_out
return xent_output, None
class TransformerLm(TransformerLmNoEmbedding):
"""Stacked RNN based language model layer."""
@classmethod
def Params(cls):
p = super(TransformerLm, cls).Params()
p.Define('emb', layers.EmbeddingLayer.Params(),
'The embedding layer params.')
p.emb.max_num_shards = 1
return p
@classmethod
def CommonParams(cls,
model_dim,
hidden_dim,
num_heads,
num_layers,
learning_rate,
warmup_steps,
vocab_size,
input_dropout_prob=0.0,
residual_dropout_prob=0.1,
atten_dropout_prob=0.0,
relu_dropout_prob=0.0,
softmax_max_alloc=None):
"""Common setup for Transformer language models.
Args:
model_dim: model dimension.
hidden_dim: hidden dimension of feed-forward inner layer.
num_heads: number of attention heads.
num_layers: number of layers in the transformer LM.
learning_rate: learning rate.
warmup_steps: warmup steps for TransformerLearningRateSchedule.
vocab_size: vocab size.
input_dropout_prob: dropout prob to the sums of the token embeddings and
the position embeddings.
residual_dropout_prob: dropout prob to the output of each sub-layer before
it is added to the sub-layer input.
atten_dropout_prob: dropout prob to the attention weights in each
Transformer attention sub-layer.
relu_dropout_prob: dropout prob to the inner layer output (ReLU
activation) in each Transformer feed-forward sub-layer.
softmax_max_alloc: If set to a positive integer the soft-max
computation is chunked into allocations of at most
softmax_max_alloc; when left to its default value of None no
chunking is done.
Returns:
A Params object containing the parameters that set up a Transformer LM.
"""
p = cls.Params()
p.name = 'transformerlm'
p.model_dim = model_dim
p.vocab_size = vocab_size
p.num_trans_layers = num_layers
p.input_dropout_prob = input_dropout_prob
p.residual_dropout_prob = residual_dropout_prob
p.atten_dropout_prob = atten_dropout_prob
p.relu_dropout_prob = relu_dropout_prob
default_params_init = py_utils.WeightInit.Xavier(1.0)
emb_params_init = py_utils.WeightInit.Gaussian(1.0 / math.sqrt(p.model_dim))
p.emb.Set(
vocab_size=vocab_size,
embedding_dim=p.model_dim,
max_num_shards=16,
params_init=emb_params_init,
scale_sqrt_depth=True)
p.position_emb.Set(embedding_dim=p.model_dim, trainable_scaling=False)
p.trans_tpl.is_decoder = False
p.trans_tpl.mask_self_atten = True
p.trans_tpl.tr_atten_tpl.Set(
num_attention_heads=num_heads, params_init=default_params_init)
p.trans_tpl.tr_atten_tpl.atten_tpl.Set(
enable_ctx_pre_proj=True, enable_ctx_post_proj=True)
p.trans_tpl.tr_fflayer_tpl.Set(
hidden_dim=hidden_dim, params_init=default_params_init)
p.softmax.Set(
num_classes=vocab_size, num_shards=16, params_init=emb_params_init)
if softmax_max_alloc:
# If the vocab is very large, computes the softmax chunk-by-chunk.
p.softmax.chunk_size = max(1, int(softmax_max_alloc / vocab_size))
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerLm, self).__init__(params)
p = self.params
assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
p.emb.vocab_size, p.vocab_size))
assert p.emb.embedding_dim == p.position_emb.embedding_dim, (
'{} vs. {}'.format(p.emb.embedding_dim, p.position_emb.embedding_dim))
assert p.emb.embedding_dim == p.model_dim, ('{} vs. {}'.format(
p.emb.embedding_dim, p.model_dim))
with tf.variable_scope(p.name):
self.CreateChild('emb', p.emb)
def FProp(self, theta, inputs, paddings, state0=None, labels=None):
"""Computes xent loss given the language model input activations.
Args:
theta: A `.NestedMap` object containing weights' values of this
layer and its children layers.
inputs: Input ids. An int32 tensor of shape [time, batch].
paddings: A 0/1 tensor of shape [time, batch].
state0: Not used for Transformer.
labels: If not None, a `.NestedMap` containing the following fields:
- class_weights, a tensor with shape [time, batch] containing the
weights for each target word.
- class_ids, a tensor with shape [time, batch] of int32 dtype containing
the target class labels.
- class_probabilities, a tensor with shape [time, batch, vocab_size] of
float values indicating class-membership probabilities.
Returns:
If `labels` is not None, returns (xent_output, state1), where
`xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
value and `state1` is the next recurrent state. Otherwise,
`xent_output` only contains the softmax logits.
"""
ids = py_utils.HasRank(inputs, 2)
paddings = py_utils.HasShape(paddings, tf.shape(ids))
activation = self.emb.EmbLookup(theta.emb, ids)
return super(TransformerLm, self).FProp(
theta, activation, paddings, labels=labels)
class HRREmbeddingLayer(base_layer.BaseLayer):
"""HRR embedding layer"""
@classmethod
def Params(cls):
p = super(HRREmbeddingLayer, cls).Params()
p.Define('embedding_dim', 0, 'Embedding size')
p.Define('num_roles', 0, 'Number of different roles (n)')
# TODO(jmluo)
# might want to use different m values for different roles.
p.Define('num_fillers_per_role', 20,
'Number of different fillers for each role (m)')
p.Define('e_l', layers.EmbeddingLayer.Params(), 'Lexicalized embedding')
# note that s is used num_roles times
p.Define('s', layers.EmbeddingLayer.Params(), 'Signature embedding')
# p.Define('rs', layers.EmbeddingLayer.Params(), 'Role signature')
p.Define('mode', 'basic', 'Modes')
p.Define('merge', False, 'Flag to merge all collections of filler matrices into a big one')
p.Define('lazy', True, 'Flag to merge all collections of filler matrices into a big one')
# TODO(jmluo)
p.Define('vocab_size', 0, 'Vocabulary size')
p.Define('actual_shards', -1, 'Actual number of shards used. This should not be specified, but computed during __init__ call')
p.Define('trainable_basis', True, 'trainable basis embeddings')
return p
@base_layer.initializer
def __init__(self, params):
super(HRREmbeddingLayer, self).__init__(params)
p = self.params
assert p.embedding_dim > 0
assert p.num_roles > 0
assert p.num_fillers_per_role > 0
assert p.vocab_size > 0
assert p.e_l.vocab_size == p.vocab_size == p.s.vocab_size
assert p.e_l.embedding_dim == p.embedding_dim
assert p.s.embedding_dim == p.num_fillers_per_role * p.num_roles
assert p.mode in ['basic', 'rs', 'dec_only']
if p.merge:
assert p.mode == 'rs', 'Other modes not supported yet'
r_shape = [p.num_roles, p.embedding_dim]
r_pc = py_utils.WeightParams(
shape=r_shape,
init=get_basis_init(p, r_shape),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
F_shape = [p.num_roles, p.num_fillers_per_role, p.embedding_dim]
F_pc = py_utils.WeightParams(
shape=F_shape,
init=get_basis_init(p, F_shape),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
with tf.variable_scope(p.name):
# TODO(jmluo) disabled for now
# self.CreateChild('e_l', p.e_l)
if p.mode == 'rs':
rr_pc = py_utils.WeightParams(
shape=[p.num_roles, p.embedding_dim],
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
rs = p.s.Copy()
rs.embedding_dim = 2 * p.num_roles
rs.name = 'rs'
rs.params_init = py_utils.WeightInit.PositiveUniform()
# const = [[1., 0.], [0., 1.]]
# const = [const] * rs.vocab_size
# rs.params_init = py_utils.WeightInit.Constant(scale=const)
# rs.trainable = False
self.CreateChild('rs', rs)
self.CreateVariable('rR', rr_pc)
self.CreateChild('s', p.s)
self.CreateVariable('F', F_pc)
elif p.mode == 'basic':
self.CreateChild('s', p.s)
self.CreateVariable('r', r_pc, trainable=p.trainable_basis)
self.CreateVariable('F', F_pc, trainable=p.trainable_basis)
else:
self.CreateChild('e_l', p.e_l)
self.CreateVariable('r', r_pc)
def _circular_conv(self, a, b):
with tf.name_scope('circular_conv'):
a_fft = tf.fft(tf.complex(a, 0.0))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(a_fft * b_fft)
res = tf.cast(tf.real(ifft), 'float32')
return res
def _circular_corr(self, a, b):
with tf.name_scope('circular_corr'):
a_fft = tf.conj(tf.fft(tf.complex(a, 0.0)))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(a_fft * b_fft)
res = tf.cast(tf.real(ifft), 'float32')
return res
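# Both helpers use the circular convolution theorem: circular convolution is
# element-wise multiplication of FFTs, and circular correlation conjugates the
# first operand, which implements HRR binding (conv) and unbinding (corr).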
def decode(self, x, r):
# r_weight: nr x d
# x: ? x d
with tf.name_scope('HRR_decode'):
res = self._circular_corr(r, x)
return res
@staticmethod
def static_circular_conv(a, b):
with tf.name_scope('static_circular_conv'):
a_fft = tf.fft(tf.complex(a, 0.0))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(a_fft * b_fft)
res = tf.cast(tf.real(ifft), 'float32')
return res
@staticmethod
def static_circular_corr(a, b):
with tf.name_scope('static_circular_corr'):
a_fft = tf.conj(tf.fft(tf.complex(a, 0.0)))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(a_fft * b_fft)
res = tf.cast(tf.real(ifft), 'float32')
return res
@staticmethod
def static_decode(x, r):
# r_weight: nr x d
# x: ? x d
with tf.name_scope('static_HRR_decode'):
res = HRREmbeddingLayer.static_circular_corr(r, x)
return res
def EmbLookup(self, theta, ids, role_anneal=False):
"""Looks up embedding vectors for ids.
Args:
theta: Named tuple with the weight matrix for the embedding.
ids: A rank-N int32 tensor.
Returns:
embs: A rank-(N+1) params.dtype tensor. embs[indices, :] is the
embedding vector for ids[indices].
"""
p = self.params
with tf.name_scope('HRR_emb_lookup'):
if p.lazy:
emb_weights = self._Emb2WeightLazy(theta)
emb = emb_weights.func(ids).e
else:
emb_weights = self._Emb2Weight(theta, role_anneal=role_anneal)
emb = tf.nn.embedding_lookup(emb_weights.e, ids, partition_strategy=p.s.partition_strategy)
s_cat = None
# distribution constraint
# mean, variance = tf.nn.moments(emb, axes=[2]) # size: l x bs, l x bs
# mean = tf.expand_dims(mean, axis=2)
# variance = tf.expand_dims(variance, axis=2)
# d = tf.shape(emb)[2]
# (emb - mean) / tf.sqrt(variance * d)
return emb, s_cat, emb_weights
def _Emb2Weight(self, theta, role_anneal=False):
p = self.params
e_weights = list()
rf_weights = list()
f_weights = list()
if p.mode == 'rs':
bases = self._circular_conv(tf.expand_dims(theta.rR, axis=1), theta.F) # size: nr x nf x d
for rs_shard, s_shard in zip(theta.rs.wm, theta.s.wm):
rs_shard = tf.reshape(rs_shard, [-1, p.num_roles, 2])
s_shard = tf.reshape(s_shard, [-1, p.num_roles, p.num_fillers_per_role])
coeffs = tf.matmul(tf.transpose(rs_shard, perm=[0, 2, 1]), s_shard) # size: V/n_shards x nr x nf
coeffs_t = tf.transpose(coeffs, [1, 0, 2])
rf_shard = tf.matmul(coeffs_t, bases) # size: nr x V/n_shards x d
e_shard = tf.reduce_sum(rf_shard, axis=0)
# old
# rf_shard = self._circular_conv(hid_r_shard, hid_f_shard)
# e_shard = tf.reduce_sum(rf_shard, axis=1) # size: V/n_shards x d
e_weights.append(e_shard)
rf_weights.append(rf_shard)
# real f shard
f_shard = self._circular_corr(theta.rR, tf.expand_dims(e_shard, axis=1))
f_weights.append(f_shard)
# f_weights.append(hid_f_shard)
r_weights = theta.rR
elif p.mode == 'basic':
for s_shard in theta.s.wm:
s_shard = tf.reshape(s_shard, [-1, p.num_roles, p.num_fillers_per_role])
f_shard_list = list()
for role_ind in xrange(p.num_roles):
f_shard_i = tf.matmul(s_shard[:, role_ind], theta.F[role_ind]) # size: V/n_shards x d
f_shard_list.append(f_shard_i)
f_shard = tf.stack(f_shard_list, axis=1) # size: V/n_shards x nr x d
# TODO(jmluo) revert this
# if role_anneal:
# prob_1 = tf.ones(shape=tf.shape(f_shard_list[0]))
# global_step = tf.to_float(py_utils.GetOrCreateGlobalStep())
# temperature = tf.minimum(tf.constant(3000.0), global_step) / 3000
# probs = tf.stack([prob_1, prob_1 * temperature], axis=1)
# f_shard = f_shard * probs
# f_shard = tf.transpose(tf.matmul(tf.transpose(s_shard, perm=[1, 0, 2]), theta.F), perm=[1, 0, 2]) # |V|/n_shards x nr x d
# f_shard = tf.reduce_sum(s_shard * theta.F, axis=2) # size: V/n_shards x nr x d
rf_shard = self._circular_conv(theta.r, f_shard)
e_shard = tf.reduce_sum(rf_shard, axis=1)
e_weights.append(e_shard)
rf_weights.append(rf_shard)
f_weights.append(f_shard)
# noisy_f_shard = self._circular_corr(theta.r, tf.expand_dims(e_shard, axis=1))
# f_weights.append(noisy_f_shard)
r_weights = theta.r
else:
e_weights = list()
f_weights = list()
r_weights = theta.r
for e_shard in theta.e_l.wm:
e_weights.append(e_shard)
e_shard = tf.reshape(e_shard, [-1, 1, p.embedding_dim])
f_shard = self._circular_corr(theta.r, e_shard) # size: V/n_shard x nr x d
f_weights.append(f_shard)
# NOTE all following weights are sharded along the |V| axis, except r_weights which are
# not sharded.
return py_utils.NestedMap(e=e_weights,
# rf=rf_weights,
r=r_weights,
f=f_weights)
def _Emb2WeightLazy(self, theta):
'''
Returns a function handle instead of relevant tensors
'''
p = self.params
assert p.mode == 'basic'
def _get_e_f_from_samples(samples):
e_weights = list()
rf_weights = list()
f_list = list()
all_weights = tf.concat(theta.s.wm, axis=0)
sampled_weights = tf.nn.embedding_lookup(theta.s.wm, samples, partition_strategy=p.s.partition_strategy)
sample_shape = tf.shape(samples)
s_weights = tf.reshape(sampled_weights, [-1, p.num_roles, p.num_fillers_per_role])
f_shard_list = list()
for role_ind in xrange(p.num_roles):
f_i = tf.matmul(s_weights[:, role_ind], theta.F[role_ind]) # size: V/ns x d
f_list.append(f_i)
f = tf.stack(f_list, axis=1) # size: V/n_shards x nr x d
rf = self._circular_conv(theta.r, f)
e = tf.reduce_sum(rf, axis=1)
e = tf.reshape(e, tf.concat([sample_shape, [p.embedding_dim]], axis=0))
f = tf.reshape(f, tf.concat([sample_shape, [p.num_roles, p.embedding_dim]], axis=0))
return py_utils.NestedMap(e=e, f=f)
return py_utils.NestedMap(func=_get_e_f_from_samples, r=theta.r, num_shards=len(theta.s.wm), ids=tf.range(0, p.vocab_size, dtype=tf.int64))
```
#### File: mt/params/base_config.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from lingvo.core import layers
from lingvo.core import lr_schedule
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.tasks.mt import decoder
from lingvo.tasks.mt import encoder
from lingvo.tasks.mt import input_generator
from lingvo.tasks.mt import model
def InitTrainDatasetParams(vocab_size=None, params=None):
"""Initializes typical values for train datasets.
Args:
vocab_size: the number of tokens in your vocabulary. The default is None
because this parameter is often not used.
params: initial Params value, e.g. `NmtInput.Params()`.
Returns:
a `Params` object.
"""
if params is None:
params = input_generator.NmtInput.Params()
params.is_nmt_example = True
params.file_random_seed = 0
# How many threads to run in parallel.
params.file_parallelism = 16
# Note, for training, we prefer to use big file_buffer_size (as long as all
# fits in RAM), to more thoroughly randomize the training examples. when the
# file_buffer_size too small, we run the risk of sequentially going over the
# example as they are stored in the sstable which may not be random (e.g.
# maybe alphabetically ordered).
params.file_buffer_size = 10000000
if vocab_size is not None:
params.tokenizer.vocab_size = vocab_size
# The bucket upper bound is determined based on an exponentially growing
# scheme, with _GenerateBuckets(10, 100) resulting buckets starting from
# minimum bucket size of 10 to maximum bucket size of 137.
# For word and sub-word level NMT, we train on sequences up to maximum
# bucket size and discard the examples that are longer than 137.
params.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 137]
# The bucket batch limit determines how many examples are there in each
# batch during training. We reduce the batch size for the buckets that
# have higher upper bound (batches that consist of longer sequences eg.
# 98, 137) in order to prevent out of memory issues.
# Note that this hyperparameter varies widely based on the model and language.
# Larger models may warrant smaller batches in order to fit in memory, for
# example, and ideographic languages like Chinese may benefit from more
# buckets.
params.bucket_batch_limit = [128] * 8 + [64]
return params
def InitTestDatasetParams(vocab_size=None, params=None):
"""Initializes typical values for test and dev datasets.
Args:
vocab_size: the number of tokens in your vocabulary.
params: initial Params value, e.g. `NmtInput.Params()`.
Returns:
a `Params` object.
"""
if params is None:
params = input_generator.NmtInput.Params()
params.file_random_seed = 27182818
# How many threads to run in parallel.
params.file_parallelism = 1
# In order to make exactly one pass over the dev/test sets, we set buffer
# size to 1. Greater numbers may cause inaccurate dev/test scores.
params.file_buffer_size = 1
if vocab_size is not None:
params.tokenizer.vocab_size = vocab_size
# The largest bucket upper bound must be larger than the longest sequence
# length in dev/test set. Since we discard sequences longer than the
# max(bucket_upper_bound) we may end up having scores based on only shorter
# sequences only if we mistakenly set this to be too small.
params.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 137, 200]
params.bucket_batch_limit = [128] * 8 + [64] + [32]
return params
def InitTransformerTestBuckets(params):
params.bucket_upper_bound = [10, 14, 19, 26, 36, 50, 70, 98, 137, 200]
params.bucket_batch_limit = [16] * 10
return params
def InitTransformerTrainBuckets(params):
params.bucket_upper_bound = [8, 12, 16, 24, 32, 48, 64, 96]
params.bucket_batch_limit = [512, 341, 256, 170, 128, 85, 64, 42]
return params
def SetupTransformerParams(name,
vocab_size,
model_dim,
hidden_dim,
num_heads,
num_layers,
learning_rate,
warmup_steps,
residual_dropout_prob=0.1,
input_dropout_prob=0.0,
atten_dropout_prob=0.0,
relu_dropout_prob=0.0,
label_smoothing_uncertainty=0.1,
is_transparent=False):
"""Common model setup for different transformer models.
Args:
name: An identifier for an instance of a transformer model.
vocab_size: an integer representing the size of the vocabulary, probably
16000 or 32000.
model_dim: dimension of the transformer block (column)
hidden_dim: dimension of Feed-Forward neural network in each layer
num_heads: number of attention heads to use for the transformer
num_layers: number of layers in the transformer
learning_rate: learning rate for Adam. For the base model, we use 1.0; for
the big model, 3.0
warmup_steps: warmup steps for `TransformerLearningRateSchedule`. For the
base model, we use 4000; for the big model, 40000
residual_dropout_prob: dropout prob to the output of each sub-layer before
it is added to the sub-layer input
input_dropout_prob: dropout prob to the sums of the token embeddings and the
position embeddings
atten_dropout_prob: dropout prob to the attention weights in each
Transformer attention sub-layer
relu_dropout_prob: dropout prob to the inner layer output (ReLU activation)
in each Transformer feed-forward sub-layer
label_smoothing_uncertainty: if this value is 0, no label smoothing will be
applied
is_transparent: If set, decoder layers attend to weighted combinations of
encoder layers.
Returns:
A Params object containing the parameters that specify a transformer model
(Vaswani 2017)
"""
p = model.TransformerModel.Params()
p.name = name
# Transformer encoder and decoder setup
p.encoder = SetupTransformerEncoder(
model_dim, vocab_size, num_layers, num_heads, hidden_dim,
residual_dropout_prob, input_dropout_prob, atten_dropout_prob,
relu_dropout_prob, is_transparent)
p.decoder = SetupTransformerDecoder(
model_dim, vocab_size, num_layers, num_heads, hidden_dim,
residual_dropout_prob, input_dropout_prob, atten_dropout_prob,
relu_dropout_prob, label_smoothing_uncertainty, is_transparent)
p.train.Set(
learning_rate=learning_rate,
optimizer=optimizer.Adam.ParamsB(),
clip_gradient_norm_to_value=0.0,
grad_norm_to_clip_to_zero=0.0,
lr_schedule=lr_schedule.TransformerLearningRateSchedule.Params().Set(
warmup_steps=warmup_steps, worker_replicas=1, model_dim=model_dim))
p.eval.samples_per_summary = 12000
return p
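# Illustrative call using the base-model values suggested in the docstring
# (learning_rate=1.0, warmup_steps=4000); the other arguments are hypothetical:
# p = SetupTransformerParams('transformer_base', vocab_size=32000,
#                            model_dim=512, hidden_dim=2048, num_heads=8,
#                            num_layers=6, learning_rate=1.0,
#                            warmup_steps=4000)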
def SetupTransformerDecoder(model_dim,
vocab_size,
num_layers,
num_heads,
hidden_dim,
residual_dropout_prob=0.1,
input_dropout_prob=0.0,
atten_dropout_prob=0.0,
relu_dropout_prob=0.0,
label_smoothing_uncertainty=0.1,
is_transparent=False):
"""Common setup for transformer model decoder."""
disable_vn = py_utils.VariationalNoiseParams(1.0, False, False)
default_params_init = py_utils.WeightInit.Xavier(1.0)
emb_params_init = py_utils.WeightInit.Gaussian(1.0 / math.sqrt(model_dim))
# Decoder
decoder_params = decoder.TransformerDecoder.Params()
decoder_params.source_dim = model_dim
decoder_params.model_dim = model_dim
decoder_params.num_trans_layers = num_layers
decoder_params.input_dropout_prob = input_dropout_prob
decoder_params.token_emb.Set(
vocab_size=vocab_size,
embedding_dim=model_dim,
max_num_shards=16,
params_init=emb_params_init,
vn=disable_vn,
scale_sqrt_depth=True)
decoder_params.position_emb.Set(
embedding_dim=model_dim, trainable_scaling=False, vn=disable_vn)
decoder_params.trans_tpl.source_dim = model_dim
decoder_params.trans_tpl.tr_atten_tpl.Set(
source_dim=model_dim,
num_attention_heads=num_heads,
residual_dropout_prob=residual_dropout_prob,
atten_dropout_prob=atten_dropout_prob,
params_init=default_params_init,
vn=disable_vn)
decoder_params.trans_tpl.tr_atten_tpl.atten_tpl.Set(
enable_ctx_pre_proj=True,
enable_ctx_post_proj=True,
context_dim=model_dim,
vn=disable_vn)
decoder_params.trans_tpl.tr_fflayer_tpl.Set(
input_dim=model_dim,
hidden_dim=hidden_dim,
residual_dropout_prob=residual_dropout_prob,
relu_dropout_prob=relu_dropout_prob,
params_init=default_params_init,
vn=disable_vn)
decoder_params.softmax.Set(
num_classes=vocab_size,
vn=disable_vn,
params_init=emb_params_init,
num_shards=16)
decoder_params.per_word_avg_loss = True
decoder_params.label_smoothing = layers.UniformLabelSmoother.Params()
decoder_params.label_smoothing.num_classes = vocab_size
decoder_params.label_smoothing.uncertainty = label_smoothing_uncertainty
if is_transparent:
decoder_params.is_transparent = True
return decoder_params
def SetupTransformerEncoder(model_dim,
vocab_size,
num_layers,
num_heads,
hidden_dim,
residual_dropout_prob=0.1,
input_dropout_prob=0.0,
atten_dropout_prob=0.0,
relu_dropout_prob=0.0,
is_transparent=False):
"""Common setup for transformer model encoder.
Args:
model_dim: specifies dimension of transformer layers, token embeddings,
and positional embeddings as well context vectors (attention values).
vocab_size: for token embeddings.
num_layers: number of transformer layers.
num_heads: number of attention heads.
hidden_dim: in transformer feedforward layer.
residual_dropout_prob: used in transformer feedforward and attention layer.
input_dropout_prob: input dropout.
atten_dropout_prob: used in attention layer.
relu_dropout_prob: used in transformer feedforward layer.
is_transparent: if set, outputs a merger of embeddings and layer outputs.
Returns:
Encoder params.
"""
disable_vn = py_utils.VariationalNoiseParams(1.0, False, False)
default_params_init = py_utils.WeightInit.Xavier(1.0)
emb_params_init = py_utils.WeightInit.Gaussian(1.0 / math.sqrt(model_dim))
# Encoder
encoder_params = encoder.TransformerEncoder.Params()
encoder_params.token_emb.Set(
embedding_dim=model_dim,
max_num_shards=16,
params_init=emb_params_init,
vocab_size=vocab_size,
vn=disable_vn,
scale_sqrt_depth=True)
encoder_params.position_emb.Set(
embedding_dim=model_dim, trainable_scaling=False, vn=disable_vn)
# Encoder TransformerStack params
encoder_params.model_dim = model_dim
encoder_params.transformer_stack.model_dim = model_dim
encoder_params.transformer_stack.num_transformer_layers = num_layers
encoder_params.input_dropout_prob = input_dropout_prob
encoder_params.transformer_stack.transformer_tpl.tr_atten_tpl.Set(
num_attention_heads=num_heads,
residual_dropout_prob=residual_dropout_prob,
atten_dropout_prob=atten_dropout_prob,
params_init=default_params_init,
vn=disable_vn)
encoder_params.transformer_stack.transformer_tpl.tr_atten_tpl.atten_tpl.Set(
num_attention_heads=num_heads,
enable_ctx_pre_proj=True,
enable_ctx_post_proj=True,
context_dim=model_dim,
vn=disable_vn)
encoder_params.transformer_stack.transformer_tpl.tr_fflayer_tpl.Set(
hidden_dim=hidden_dim,
residual_dropout_prob=residual_dropout_prob,
relu_dropout_prob=relu_dropout_prob,
params_init=default_params_init,
vn=disable_vn)
if is_transparent:
encoder_params.transformer_stack.is_transparent = True
return encoder_params
```
#### File: tasks/punctuator/model_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from lingvo.core import py_utils
from lingvo.core import test_helper
from lingvo.tasks.mt import decoder
from lingvo.tasks.mt import encoder
from lingvo.tasks.mt import model
from lingvo.tasks.punctuator import input_generator
_TF_RANDOM_SEED = 93820986
class PunctuatorModelTest(tf.test.TestCase):
"""Tests for the Punctuator model.
Overriding parameters and inheriting
tests from TransformerModelTest.
"""
def _InputParams(self):
p = input_generator.PunctuatorInput.Params()
input_file = test_helper.test_src_dir_path('tasks/lm/testdata/lm1b_100.txt')
p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
'tasks/punctuator/testdata/test_vocab.txt')
p.file_pattern = 'text:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [4]
p.source_max_length = 40
p.target_max_length = 40
return p
def _EncoderParams(self):
p = encoder.TransformerEncoder.Params()
p.name = 'encoder'
p.random_seed = 1234
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5
return p
def _DecoderParams(self):
p = decoder.TransformerDecoder.Params()
p.name = 'decoder'
p.random_seed = 1234
p.source_dim = 4
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.trans_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
p.trans_tpl.tr_fflayer_tpl.input_dim = 4
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 8
p.softmax.num_shards = 1
p.target_seq_len = 5
return p
def _testParams(self):
p = model.TransformerModel.Params()
p.name = 'test_mdl'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.train.learning_rate = 2e-4
return p
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.cls(p)
print('vars = ', mdl.vars)
flatten_vars = mdl.vars.Flatten()
print('vars flattened = ', flatten_vars)
self.assertEqual(len(flatten_vars), 238)
# Should match tf.trainable_variables().
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFProp(self, dtype=tf.float32):
with self.session() as sess:
tf.set_random_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.dtype = dtype
mdl = p.cls(p)
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
tf.global_variables_initializer().run()
vals = []
for _ in range(3):
vals += [sess.run((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
expected_vals = [[371.16153, 10.382141], [415.236511, 10.380913],
[415.484863, 10.387121]]
self.assertAllClose(vals, expected_vals)
def testBProp(self):
with self.session() as sess:
tf.set_random_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.cls(p)
mdl.FPropDefaultTheta()
mdl.BProp()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
tf.global_variables_initializer().run()
vals = []
for _ in range(3):
vals += [sess.run((loss, logp, mdl.train_op))[:2]]
print('BProp actual vals = ', vals)
expected_vals = [[371.16153, 10.382141], [415.046509, 10.376163],
[415.137573, 10.378439]]
self.assertAllClose(vals, expected_vals)
def testFPropEvalMode(self):
with self.session() as sess:
tf.set_random_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.is_eval = True
mdl = p.cls(p)
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
tf.global_variables_initializer().run()
vals = []
for _ in range(3):
vals += [sess.run((loss, logp))]
print('actual vals = ', vals)
expected_vals = [[371.16153, 10.382141], [415.236511, 10.380913],
[415.484863, 10.387121]]
self.assertAllClose(vals, expected_vals)
if __name__ == '__main__':
tf.test.main()
``` |
{
"source": "jluscher/SCANIT",
"score": 2
} |
#### File: jluscher/SCANIT/scanit_v031.py
```python
import sys, string, time
#
from pathlib import Path
#
from tkinter import *
from tkinter import font
from tkinter import filedialog
from tkinter.ttk import Progressbar
# from tkinter import ttk
# from tkinter.scrolledtext import *
# from serial import *
import tkinter.messagebox as mBox
# import tkinter.simpledialog as simpledialog
import matplotlib
from matplotlib.widgets import Cursor
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy
from numpy import searchsorted
siTitle = 'SCANIT for RetroSPEX [v031]' # Program name and version
TANBG = '#F8E2CD' # Background color
WARNC = '#FFBBFF' # Warning color (pinkish)
ACTIVB = '#F07748' # activebackground color for buttons
jjltest = True # print messages, testing
#=====================================================================
## SCANIT Window (GUI window for Spectrometer Control & Data Capture)
#
siWinW = 1260 # width
siWinH = 760 # height
#
siWin = Tk()
siWin.title(siTitle)
siWin['bg'] = TANBG # background color
if jjltest:
siWin.geometry('+670+50') # shift to right for testing
transGeom = '+780+250' # ... for 'transient' screens
else:
siWin.geometry('{}x{}+0+0'.format(siWinW,siWinH))
transGeom = '+110+200' # ... for 'transient' screens
#siWin.geometry('{}x{}+80+50'.format(siWinW,siWinH))
#siWin.geometry('+50+50') # window in upper-left of screen
#
monoFont10 = font.Font(family='Ubuntu Mono', size=10)
monoFont12 = font.Font(family='Ubuntu Mono', size=12)
monoFont14 = font.Font(family='Ubuntu Mono', size=14)
monoFont16 = font.Font(family='Ubuntu Mono', size=16)
monoFont24 = font.Font(family='Ubuntu Mono', size=24)
#=====================================================================
## Global variables (for Spectrometer Control & Data Capture)
#
#==============
# settings: configuration data (from 'settings.txt')
#
#
#
#
# Transient (settable but not saved/restored)
offLine = True # No Spectrometer connection made (serial - USB)
#
# User Default Settings (settable and saved/restored)
varEXinc = StringVar() # Setting EX Inc Wavelength (nm)
varEMinc = StringVar() # Setting EM Inc Wavelength (nm)
varTMinc = StringVar() # Setting TM Inc time (s)
varEXslit = StringVar() # Slit size EX (nm)
varEXslit = StringVar() # Slit size EX (nm)
varEMslit = StringVar() # Slit size EM (nm)
varREFhv = StringVar() # REF PMT high voltage (v)
varREFdiode = StringVar() # REF DIODE Gain setting [0,1,2,3]
#
#==============
# scan data acquired
#
scanDataX = [] # X value sample was taken at (wavelength / time)
scanDataY = [] # Y value of sample - PMT counts
#
#==============
# background: input data from previous scan (for reference)
#
inputFileHdr = [] # Header section from fileLoad
inputFileData = [] # Data section from fileload
#
backgroundDataX = [] # X value sample was taken at (wavelength / time)
backgroundDataY = [] # Y value of sample - PMT counts
#
#==============
# dayfile: data about the experiments being done today
#
dayFileData = [] # Data section from fileload / or for writing
#
varDayDate = StringVar() # Date this data was entered
varDayMeaning1 = StringVar() # Meaning of Experiment
varDayMeaning2 = StringVar() # Meaning of Experiment
varDayMeaning3 = StringVar() # Meaning of Experiment
varDayEXslit = StringVar() # Excitation slit wavelength nm
varDayEMslit = StringVar() # Emission slit Wavelength nm
varDayBulb = StringVar() # Bulb Intensity
varDayNotebook = StringVar() # Notebook Page
varDayOther1 = StringVar() # Other comments
varDayOther2 = StringVar() # Other comments
varDayOther3 = StringVar() # Other comments
#
#==============
# type of scan
EXscan = 0
EMscan = 1
TMscan = 2
varScanMode = IntVar() # Determines type of scan taken
varRTDsignal = StringVar() # Real Time Data
varRTDreference = StringVar() # Real Time Data
#
varEXwaveStart = StringVar() # Excitation Start Wavelength nm
varEXwaveEnd = StringVar() # Excitation End Wavelength nm
varEXwaveInc = StringVar() # Excitation Inc Wavelength nm
#
varEMwaveStart = StringVar() # Emission Start Wavelength nm
varEMwaveEnd = StringVar() # Emission End Wavelength nm
varEMwaveInc = StringVar() # Emission Inc Wavelength nm
#
varTMwavePause = StringVar() # Pause (s)
varTMwaveEnd = StringVar() # End (s)
varTMwaveInc = StringVar() # Inc time (s)
#
varEXslit = StringVar() # Slit size EX (nm)
varEMslit = StringVar() # Slit size EM (nm)
#
varSpecimenDetails = StringVar() # Description of sample
#
varEXstepsNm = StringVar() # EX StepMotor steps per (nm)
varEMstepsNm = StringVar() # EM StepMotor steps per (nm)
#
varEXposition = StringVar() # EX monochrometer position (nm)
varEMposition = StringVar() # EM monochrometer position (nm)
#
varPCTdone = IntVar() # % completion of scan
varPCTdone.set(35) # testing: software completion % ;-)
#
MINnm = 200 # Minimum nanoMeters for monochrometer position
MAXnm = 1000 # Maximum nanoMeters for monochrometer position
#
def setScanMode_FR(mode): # Forward Reference for setting Scan Mode
if jjltest:
print('CALLED: setScanMode_FR(mode) => pass')
pass
#
def setScanMode(mode):
if jjltest:
print('CALLED: setScanMode(mode) => setScanMode_FR(mode)')
setScanMode_FR(mode)
def updatePlot_FR(): # Forward Reference FUNCTION NAME ... for updating Plot
pass
#
def updatePlot(event=None): # Call the function defined later on...
updatePlot_FR() # ... maps old references to the new routine
#===================
## Utility functions
#
# Set and Read High Voltage Power Supply
#
# D 1 FFFF -> 1000 v (neg) Emission PMT
# 0 ~ 65535 -> 1000 v : 65.535 / volt
#
# HV 1:
# SET: [ 900 to E666] :: E666/FFFF -> 0.90000 (58982/65535)*1000 = 900.00
# READ: [BCD4 to 900] :: BCD4/D1B6 -> 0.90042 (48340/53686)*1000 = 900.42
#
# 2.048/2.5 = 0.8192 ** ratio of DAC/ADC reference voltages
# 65.535 * 0.8192 = 53.686 ** ADC conversion divisor (53686) / D1B6
#
#
# HV 1:
# VOLTStoHEX = hex(int(volts * 65.535))[2:]
# 900.0 * 65.535 => hex( int( 58982 ))[2:] = 'e666'
# HEXtoVOLTS = int(setHV1str,16) /( 65.535 * 0.8192 )
# (BCD4) 48340 / 53.686 => 900.42
#
#----
def VOLTStoHEX(volts):
""" DAC: 1000.0 volts full scale (FFFF).
(for setting DAC output)
VOLTStoHEX(1000.0) => 'FFFF'
VOLTStoHEX( 900.0) => 'E665' """
return hex(int(volts * 65.535))[2:].upper()
#
#----
def HEXtoVOLTS(ADChexStr):
"""ADC: 1000.0 volts full scale (D1B6).
(for scaling ADC input)
HEXtoVOLTS('D1B6') => 1000
HEXtoVOLTS('BCD4') => 900 """
return int(int(ADChexStr,16) / 53.686 + 0.5)
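#
# Illustrative worked example for the two helpers above (hedged: the expected
# values follow from the formulas in the comments, not from the original
# program):
#   VOLTStoHEX(900.0)  -> 'E665'   since int(900.0 * 65.535) = 58981 = 0xE665
#   HEXtoVOLTS('BCD4') -> 900      since 0xBCD4 = 48340; 48340 / 53.686 = 900.42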
#
def digitsOnly(text):
s = ''
for c in text:
if c in string.digits:
s = s + c
if s.strip() == '':
s = '0'
return str( int(s) ) # no leading zeros
def floatOnly(text):
    '''Filter text down to a float-like string (at most 3 decimal places).'''
point = False
s = ''
r = ''
for c in text:
if point == False: # no decimal yet
if c in string.digits:
s = s + c
elif c == '.':
point = True
else:
if c in string.digits:
r = r + c
    # suppress leading zeros
s = s.lstrip('0')
# but keep at least one zero(!)
if len(s) == 0:
s = '0'
# resolution limited to mS
if len(r) > 3:
r = r[0:3]
s = s+ '.' +r
return s
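#
# Illustrative examples for the two filters above (assumed values, for
# documentation only; derived from the code itself):
#   digitsOnly('007abc')  -> '7'        keeps digits only, drops leading zeros
#   floatOnly('012.3456') -> '12.345'   fraction truncated to 3 digits (ms resolution)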
def getVarInt(v):
'''get StringVar's value as int().'''
s = v.get()
if s.strip() == '':
return 0
return int(s)
def getVarFloat(v):
    '''get StringVar's value as float().'''
s = v.get()
if s.strip() == '':
return 0.0
return float(s)
def setFocus(obj):
obj.focus_set()
return
def toggleBtnVar(var, btn, iconOff, iconOn):
'''Toggle boolean state of Button and set matching icon.'''
if var:
var = False
btn['image'] = iconOff
else:
var = True
btn['image'] = iconOn
return var
def getDateToday():
t = time.localtime()
return '{}-{:02d}-{:02d}'.format(t[0],t[1],t[2])
def timeNow():
t = time.localtime()
return '{}-{:02d}-{:02d};{:02d}:{:02d}'.format(t[0],t[1],t[2],t[3],t[4])
def writePositions():
'''Write monochrometer positions to "positions.txt" file.'''
#
global varEXposition,varEMposition
#
data = 'EX: ' + varEXposition.get() + ' EM: ' + varEMposition.get() + '\n'
fo = open('positions.txt','w')
fo.write(data)
fo.close()
return
def readPositions():
'''Recover monochrometer positions from "positions.txt" file.'''
#
global varEXposition,varEMposition
#
try: # one line file: "EX: nnnn EM: mmmm"
tmpFile = open('positions.txt').read().splitlines()
for s in tmpFile:
t = s.split()
if len(t) == 4 and t[0] == 'EX:' and t[2] == 'EM:':
varEXposition.set(t[1])
varEMposition.set(t[3])
tmpFile = None
except:
varEXposition.set('0')
varEMposition.set('0')
writePositions()
return
def dataFileREAD():
    '''Read Data file, separate into lists of header and data.'''
#
global inputFileHdr # Header section from fileLoad
global inputFileData # Data section from fileload
#
inputFileHdr = []
inputFileData = []
#
dataFileTypes = [("Data ", ("*.txt","*.TXT")), ]
dataDir = '~/SCANS'
fInp = filedialog.askopenfilename(filetypes = dataFileTypes
,initialdir=dataDir)
#
tmpFile = open(fInp).read().splitlines()
#
header = True # looking for header lines first
#
for line in tmpFile: # examine each line in list
if header:
if line.startswith('...'): # end of Header line mark
header = False
else:
inputFileHdr.append(line.strip()) # save Header lines
else:
if line.startswith('___'): # end of Data line mark
break
else:
inputFileData.append(line.strip()) # save data lines
tmpFile = None # discard temp file data now
return
#
## 'sanityCheck' functions
#
# COMPARISONS:
# Within [min <= var <= max]
# Order [var1 < var2]
# Min [min <= var]
#
# Button lookup dictionary - defined as buttons are created below
btnLookup = {} # entries of form: 'EXS':<EX-Start_button>
# 'EXE':<EX-END_button>
# test variable min max EntryType
chkEntVal =[ [['Within', varEXwaveStart, MINnm, MAXnm] , 'EXS' ]
, [['Within', varEXwaveEnd, MINnm, MAXnm] , 'EXE' ]
, [['Order' , varEXwaveStart, varEXwaveEnd] , 'EXE' ]
, [['Min' , varEXwaveInc, 1] , 'EXI' ]
, [['Within', varEMwaveStart, MINnm, MAXnm] , 'EMS' ]
, [['Within', varEMwaveEnd, MINnm, MAXnm] , 'EME' ]
, [['Order' , varEMwaveStart, varEMwaveEnd] , 'EME' ]
, [['Min' , varEMwaveInc, 1] , 'EMI' ]
, [['Min' , varTMwaveEnd, 0.100] , 'TME' ]
, [['Order' , varTMwavePause, varTMwaveEnd] , 'TME' ]
, [['Min' , varTMwaveInc, 0.001] , 'TMI' ]
]
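#
# Illustrative reading of one row above (documentation only; no new checks
# are added here):
#   [['Within', varEXwaveStart, MINnm, MAXnm], 'EXS'] means that the value in
#   the EX-Start Entry ('EXS') must satisfy MINnm <= value <= MAXnm.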
#
def scanSanityCheck(warn = False):
    '''Check that measurement parameters have "sane" values.
    If not, color the offending Entry field with the WARNC color.
    If the "warn" argument is True, also generate a popup message.'''
#
isSane = True # start assuming that no errors were found ;-)
#
for e in chkEntVal:
test,entryType = e # get test list and Entry-Type
#
# are any of these Entry objects 'DISABLED'?
# - don't check values for disabled Entry fields
if btnLookup[entryType]['state'] == DISABLED:
continue # try next test
#
if test[0] == 'Min': # is entry at least equal to the minimum
#if jjltest:
#print('scanSanityCheck()...: test[0]={}'.format(test[0]))
#print('.........entryType={}'.format(entryType))
if entryType[0] == 'T': # float value
var = getVarFloat(test[1])
else:
var = getVarInt(test[1])
#print('.........var={} < min={}'.format(var,test[2]))
if var < test[2]: # BELOW minimum = Error
isSane = False
bgColor = WARNC
else:
bgColor = 'white'
elif test[0] == 'Within': # entry not OUTSIDE limits
#if jjltest:
#print('scanSanityCheck()...: test[0]={}'.format(test[0]))
#print('.........entryType={}'.format(entryType))
var = getVarInt(test[1])
#print('.........var={} < min={}'.format(var,test[2]))
limLow = test[2]
limHi = test[3]
#print('.........limLow={} < limHi={}'.format(limLow,limHi))
if var < limLow or var > limHi: # outside range
isSane = False
bgColor = WARNC
else:
bgColor = 'white'
elif test[0] == 'Order': # entry 1 less-than entry 2
#if jjltest:
#print('scanSanityCheck()...: test[0]={}'.format(test[0]))
#print('.........entryType={}'.format(entryType))
if entryType[0] == 'T': # float value
print('scanSanityCheck() #318... test[1]={}, '
'test[2]={}'.format(test[1],test[2]))
var1 = getVarFloat(test[1])
var2 = getVarFloat(test[2])
print('scanSanityCheck() #322... var1={}, var2={}'.format(
var1,var2))
else:
var1 = getVarInt(test[1])
var2 = getVarInt(test[2])
#print('.........var1={} < var2={}'.format(var1,var2))
if var1 >= var2: # improper order
isSane = False
bgColor = WARNC
else:
bgColor = 'white'
#
# set the selected color for the Entry object
btnObj = btnLookup[entryType]
btnObj['bg'] = bgColor # set button color
return isSane
#
## 'legacy' data file input functions
def dataFileMGET():
    '''Read Data file, separate into header and data.
Parse header into measurement parameters.
Set the parameters for taking another scan.
'''
global inputFileHdr # Header section from fileLoad
#
dataFileREAD() # read in data file, prepare header list
#
# Parse Header information - "classic format"
# Emission only - for now
scanIs = None
for line in inputFileHdr:
if line.startswith('Emission Scan'):
scanIs = EMscan
break
#
if scanIs != EMscan: # Error
if jjltest:
print("Can't handle non-Emission Scan yet.")
sys.exit(0)
#
setScanMode(EMscan)
#
# varEMwaveStart = StringVar() # Emission Start Wavelength nm
# varEMwaveEnd = StringVar() # Emission End Wavelength nm
# varEMwaveInc = StringVar() # Emission Inc Wavelength nm
# varTMwaveInc = StringVar() # Time Inc time S
# varEXwaveStart = StringVar() # Excitation Start Wavelength nm
#
for line in inputFileHdr:
if line.startswith('Start '): # Start , End
s,e = line.split(',')
s = s.split(' ')[1] # "Start 5.000000e+002"
n = int( float( s))
varEMwaveStart.set( str(n))
#
e = e.strip()
e = e.split(' ')[1] # "End 7.000000e+002"
n = int( float( e))
varEMwaveEnd.set( str(n))
continue
if line.startswith('Increment '):
c,t = line.split(',')
c = c.split(' ')[1] # "Increment 1.000000e+000"
n = int( float( c))
varEMwaveInc.set( str(n))
#
t = t.strip()
t = t.split(' ')[2] # "Integration Time 1.000000e-001"
n = float( t)
varTMwaveInc.set( str(n))
continue
if line.startswith('Excit Mono Slits:'):
continue
if line.startswith('Excit Mono'):
x = line.split(' ')[2] # "Excit Mono 4.880000e+002"
n = int( float( x))
varEXwaveStart.set( str(n))
scanSanityCheck()
return
def dataFileLOAD():
    '''Read Data file, separate into header and data.
Parse header into measurement parameters.
Parse data into x,y values for plotting.
'''
global inputFileData # Data section from fileload
global backgroundDataX # X value sample was taken at (wavelength / time)
global backgroundDataY # Y value of sample - PMT counts
#
dataFileMGET() # Read data file, setup measurement parameters.
#
backgroundDataX = []
backgroundDataY = []
#
for line in inputFileData:
pos,val = line.split('\t')
backgroundDataX.append( int( float( pos )))
backgroundDataY.append( float( val ))
updatePlot()
#
## 'dayfile.txt' - functions for recording Experimental Plan
#
#
# 'dayfile.txt' format:
#
# DATE: 2015-01-29
# Meaning of Experiment:
# #m#... (text: additional lines of meaning)
# Slit Widths EX: 2 (integer in nm)
# Slit Widths EM: 2 (integer in nm)
# Bulb Intensity: ?? (integer in ??)
# Notebook page: ?? (text)
# Other comments:
# #c#... (text: additional lines of comments)
#
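#
# Illustrative 'dayfile.txt' (values assumed; layout follows the format above):
#
#   DATE: 2015-01-29
#   Meaning of Experiment:
#   # dye sample A, repeat of yesterday's run
#   Slit Widths EX: 2
#   Slit Widths EM: 2
#   Bulb Intensity: 100
#   Notebook page: 42
#   Other comments:
#   # room temperature 22 C
#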
# dayFileData = [] # Data section from fileload
# #
# varDayDate = StringVar() # Date this data was entered
# varDayMeaning1 = StringVar() # Meaning of Experiment
# varDayMeaning2 = StringVar() # Meaning of Experiment
# varDayMeaning3 = StringVar() # Meaning of Experiment
# varEXslit = StringVar() # Excitation slit size nm
# varEMslit = StringVar() # Emission slit size nm
# varDayBulb = StringVar() # Measured Bulb Intensity
# varDayNotebook = StringVar() # Notebook Page for Experiment Data
# varDayOther1 = StringVar() # Other comments
# varDayOther2 = StringVar() # Other comments
# varDayOther3 = StringVar() # Other comments
#
def makeDayFile():
'''Create new GUI screen for entering Experimental Data.
This data is constant for each day and recorded with data scans.'''
#
if jjltest:
print('makeDayFile()')
#
varDayDate.set( getDateToday() )
#
froot = Toplevel()
froot.title('Edit Experiment Information for {}'.format(varDayDate.get()))
froot.geometry(transGeom)
#siWin.withdraw()
#
# ========
#
#-------
frootFrame = Frame(froot, bg = TANBG)
frootFrame.grid()
#-------
dayTopFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
,text=' Meaning of Experiment: '
, font=monoFont14)
dayTopFrame.grid(row = 0, padx=4, pady=4, sticky=NSEW)
#
#
#-------
varDayMeaning1.set('')
dayMeanEnt1 = Entry(dayTopFrame, textvariable=varDayMeaning1
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayMeanEnt1.grid(row=1, padx=4, pady=0, sticky=EW)
dayMeanEnt1.focus_set()
#-------
varDayMeaning2.set('')
dayMeanEnt2 = Entry(dayTopFrame, textvariable=varDayMeaning2
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayMeanEnt2.grid(row=2, padx=4, pady=0, sticky=EW)
dayMeanEnt1.bind("<Return>", lambda e: setFocus(dayMeanEnt2))
#-------
varDayMeaning3.set('')
dayMeanEnt3 = Entry(dayTopFrame, textvariable=varDayMeaning3
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayMeanEnt3.grid(row=3, padx=4, pady=0, sticky=EW)
dayMeanEnt2.bind("<Return>", lambda e: setFocus(dayMeanEnt3))
#
# ========
#
#-------
dayMidFrame = Frame(frootFrame, bg = TANBG, borderwidth=0)
dayMidFrame.grid(row = 1, sticky=NSEW)
#
# Slit Width EX:
#-------
daySlitExLab = Label(dayMidFrame, text='Slit Width EX:'
, font=monoFont14, bg = TANBG )
daySlitExLab.grid(row=0, sticky=W)
#-------
daySlitExEnt = Entry(dayMidFrame, textvariable=varEXslit
,border=2, relief=SUNKEN, width=20
,font=monoFont14 )
daySlitExEnt.grid(row=0, column=1, padx=4, pady=4, sticky=E)
dayMeanEnt3.bind("<Return>", lambda e: setFocus(daySlitExEnt))
#
# Slit Width EM:
#-------
daySlitEmLab = Label(dayMidFrame, text='Slit Width EM:'
, font=monoFont14, bg = TANBG )
daySlitEmLab.grid(row=1, sticky=W)
#-------
daySlitEmEnt = Entry(dayMidFrame, textvariable=varEMslit
,border=2, relief=SUNKEN, width=20
,font=monoFont14 )
daySlitEmEnt.grid(row=1, column=1, padx=4, pady=4, sticky=E)
daySlitExEnt.bind("<Return>", lambda e: setFocus(daySlitEmEnt))
#
# Bulb Intensity:
#-------
dayBulbIntLab = Label(dayMidFrame, text='Bulb Intensity:'
, font=monoFont14, bg = TANBG )
dayBulbIntLab.grid(row=2, sticky=W)
#-------
dayBulbIntEnt = Entry(dayMidFrame, textvariable=varDayBulb
,border=2, relief=SUNKEN, width=20
,font=monoFont14 )
dayBulbIntEnt.grid(row=2, column=1, padx=4, pady=4, sticky=E)
daySlitEmEnt.bind("<Return>", lambda e: setFocus(dayBulbIntEnt))
#
# Notebook Page:
#-------
dayNbPageLab = Label(dayMidFrame, text='Notebook Page:'
, font=monoFont14, bg = TANBG )
dayNbPageLab.grid(row=3, sticky=W)
#-------
dayNbPageEnt = Entry(dayMidFrame, textvariable=varDayNotebook
,border=2, relief=SUNKEN, width=20
,font=monoFont14 )
dayNbPageEnt.grid(row=3, column=1, padx=4, pady=4, sticky=E)
dayBulbIntEnt.bind("<Return>", lambda e: setFocus(dayNbPageEnt))
#
# Other Comments:
#-------
dayBotFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
,text=' Other comments: ', font=monoFont14)
dayBotFrame.grid(row = 2, padx=4, pady=4, sticky=NSEW)
#-------
dayOtherEnt1 = Entry(dayBotFrame, textvariable=varDayOther1
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayOtherEnt1.grid(padx=4, pady=0, sticky=EW)
dayNbPageEnt.bind("<Return>", lambda e: setFocus(dayOtherEnt1))
#-------
dayOtherEnt2 = Entry(dayBotFrame, textvariable=varDayOther2
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayOtherEnt2.grid(padx=5, pady=0, sticky=EW)
dayOtherEnt1.bind("<Return>", lambda e: setFocus(dayOtherEnt2))
#-------
dayOtherEnt3 = Entry(dayBotFrame, textvariable=varDayOther3
,border=2, relief=SUNKEN, width=60
,font=monoFont14 )
dayOtherEnt3.grid(padx=6, pady=0, sticky=EW)
dayOtherEnt2.bind("<Return>", lambda e: setFocus(dayOtherEnt3))
#
# ========
#
def makeDayFileDone(root=froot):
#siWin.deiconify()
print('makeDayFileDone(root=froot): [A]')
froot.destroy()
print('makeDayFileDone(root=froot): [b]')
return
#
#-------
dayButFrame = Frame(frootFrame, bg = TANBG, borderwidth=4)
dayButFrame.grid(row = 3, padx=2, pady=2, sticky=NSEW)
#-------
dayButBut = Button(dayButFrame, bg = TANBG, borderwidth=4
,text = 'DONE', command = makeDayFileDone
,activebackground=ACTIVB, font=monoFont16)
dayButBut.grid()
dayOtherEnt3.bind("<Return>", lambda e: setFocus(dayButBut))
dayButBut.bind("<Return>", makeDayFileDone)
#
froot.transient(siWin)
froot.grab_set()
siWin.wait_window(froot)
#
# ======== NOW write out the data that was entered
#
dayFileData = [ 'DATE: ' + getDateToday()
, 'Meaning of Experiment: '
]
dayFileData.append( '# ' + varDayMeaning1.get() )
dayFileData.append( '# ' + varDayMeaning2.get() )
dayFileData.append( '# ' + varDayMeaning3.get() )
dayFileData.extend(
[ 'Slit Widths EX: ' + varEXslit.get()
, 'Slit Widths EM: ' + varEMslit.get()
, 'Bulb Intensity: ' + varDayBulb.get()
, 'Notebook page: ' + varDayNotebook.get()
, 'Other comments: '
] )
dayFileData.append( '# ' + varDayOther1.get() )
dayFileData.append( '# ' + varDayOther2.get() )
dayFileData.append( '# ' + varDayOther3.get() )
#
print('dayFileData: {}'.format(dayFileData))
#
#
dayf = open('dayfile.txt','w')
dayf.write( '\n'.join(dayFileData) )
dayf.close()
return
#
def checkDayFile():
'''Read 'dayfile.txt' and if not created today, update it.'''
global dayFileData
#
if jjltest:
print('checkDayFile()')
try:
dayf = open('dayfile.txt','r')
except:
print('dayfile.txt does not exist, CREATE (and write) it.')
makeDayFile()
return
#
# Check that the day file is for TODAY's date
dayFileData = dayf.read().splitlines()
dayf.close()
# file have data ?
if len(dayFileData)<1: # not one line !
makeDayFile() # create a new file
return
# examine the previous date
print('len(dayFileData): {}'.format(len(dayFileData)))
today = dayFileData[0]
print('checkDayFile(): dayFile.txt, line #1: {}'.format(today))
#
date = dayFileData[0].strip() # look at first line of file
print('checkDayFile() READ: {}'.format(date))
if date.startswith( 'DATE: ' + getDateToday()) :
print('checkDayFile() CURRENT data in file')
return # file has current data
# create a new file
makeDayFile()
return
#
## Settings Read (default settings, etc.) for measurement
#
def readSettings():
'''Read 'settings.txt' and recover default values.'''
if jjltest:
print('readSettings()')
#
#
# "Factory Default Settings" (if no others are established)
#
# "EXinc: 1" # Setting EX Inc Wavelength (nm)
varEXinc.set('1')
# "EMinc: 1" # Setting EM Inc Wavelength (nm)
varEMinc.set('1')
# "TMinc: 0.1" # Setting TM Inc time (s)
varTMinc.set('0.1')
# "varEXslit: 2.9" # Setting EX slit width (nm)
varEXslit.set('2.9')
# "varEMslit: 2.9" # Setting EM slit width (nm)
varEMslit.set('2.9')
# "EMhv: -900" # Setting EM PMT high voltage (v)
varEMhv.set('-900')
# "REFdiode: 0" # Setting REF DIODE Gain setting [0,1,2,3]
varREFdiode.set('0')
# "REFhv: -450" # Setting REF PMT high voltage (v)
varREFhv.set('0')
    # CALIBRATION SETTINGS:
    # "EXstepsNm: 10"       # EX Stepper Motor Cal: steps/nm
    varEXstepsNm.set('10')
    # "EMstepsNm: 10"       # EM Stepper Motor Cal: steps/nm
    varEMstepsNm.set('10')
#
# Now OVER WRITE FACTORY with SITE'S SETTINGS
try:
tmpFile = open('settings.txt','r').read().splitlines()
for line in tmpFile:
print('line = {}'.format(line))
items = line.split()
#
# parse 'settings.txt' for 'site default values'
# (SITE DEFAULT SETTINGS)
# EXinc: 1
# EMinc: 1
# TMinc: 0.1
# (SITE ESTABLISHED SETTINGS)
# EXslit: 2.9
# EMslit: 2.9
# EMhv: -900
# REFhv: -450
# EXstepsNm: 10
# EMstepsNm: 10
#
if items[0] == "EXinc:":
varEXinc.set(items[1])
elif items[0] == "EMinc:":
varEMinc.set(items[1])
elif items[0] == "TMinc:":
varTMinc.set(items[1])
elif items[0] == "EXslit:":
varEXslit.set(items[1])
elif items[0] == "EMslit:":
varEMslit.set(items[1])
elif items[0] == "EMhv:":
varEMhv.set(items[1])
elif items[0] == "REFdiode:":
varREFdiode.set(items[1])
elif items[0] == "REFhv:":
varREFhv.set(items[1])
elif items[0] == "EXstepsNm:":
varEXstepsNm.set(items[1])
elif items[0] == "EMstepsNm:":
varEMstepsNm.set(items[1])
except:
pass # no SITE SETTINGS WERE SAVED
if jjltest:
print('settings.txt does not exist!')
return
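#
# Illustrative 'settings.txt' (values assumed; this is the same key/value
# layout that editSettings() writes back out):
#   # site default settings
#   EXinc: 1
#   EMinc: 1
#   TMinc: 0.1
#   # site calibrated settings
#   EXslit: 2.9
#   EMslit: 2.9
#   EMhv: -900
#   REFdiode: 0
#   REFhv: -450
#   EXstepsNm: 10
#   EMstepsNm: 10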
#
## Settings Edit (default settings, etc.) for measurement
def editSettings():
'''Edit 'settings.txt' to alter default values.'''
#
edset = Toplevel()
edset.geometry(transGeom)
edset.title("Spectrometer Settings")
#
#-------
edsetTop = Frame(edset, bg = TANBG)
edsetTop.grid()
#
# User Default Settings SETTINGS - defaults to load for editing
#
# varEXinc = StringVar() # Setting EX Inc Wavelength (nm)
# varEMinc = StringVar() # Setting EM Inc Wavelength (nm)
# varTMinc = StringVar() # Setting TM Inc time (s)
# varEXslit = StringVar() # Setting EX Slit Opening (nm)
# varEMslit = StringVar() # Setting EM Slit Opening (nm)
# varEMhv = StringVar() # Setting EM PMT high voltage (v)
# varREFdiode = StringVar() # Setting for REF DIODE Gain
# varREFhv = StringVar() # Setting REF PMT high voltage (v)
#
#-------
edsetPf = LabelFrame(edsetTop, text="Site Default Settings."
,bg = TANBG, font=monoFont16
,borderwidth=6)
edsetPf.grid(row=0, padx=4, pady=4, sticky=EW)
#
# EX default increment (nm)
#-------
EXiPL = Label(edsetPf, text = "EX default increment (nm):"
, bg = TANBG, font=monoFont14)
EXiPL.grid(row=0, column=0, padx=4, sticky=W)
#-------
EXiPE = Entry(edsetPf, textvariable = varEXinc, font=monoFont14)
EXiPE.grid(row=0, column=1, padx=4, sticky=E)
#
# EM default increment (nm)
#-------
EMiPL = Label(edsetPf, text = "EM default increment (nm):"
, bg = TANBG, font=monoFont14)
EMiPL.grid(row=1, column=0, padx=4, sticky=W)
#-------
EMiPE = Entry(edsetPf, textvariable = varEMinc, font=monoFont14)
EMiPE.grid(row=1, column=1, padx=4, sticky=E)
#
# TM default increment (S)
#-------
TMiPL = Label(edsetPf, text = "TM default increment (S):"
, bg = TANBG, font=monoFont14)
TMiPL.grid(row=2, column=0, padx=4, sticky=W)
#-------
TMiPE = Entry(edsetPf, textvariable = varTMinc, font=monoFont14)
TMiPE.grid(row=2, column=1, padx=4, sticky=E)
#
# Site Established Settings - due to instrument setup. I.E.
# CALIBRATION SETTINGS - measured during calibration of spectrometer
# {stepper motor calibration values - should not need changing}
# varEXstepsNm = StringVar() # EX StepMotor steps per (nm)
# varEMstepsNm = StringVar() # EM StepMotor steps per (nm)
#
#-------
edsetCf = LabelFrame(edsetTop, text="Site Established Settings."
,bg = TANBG, font=monoFont16
,borderwidth=6)
edsetCf.grid(row=1, padx=4, pady=4, sticky=EW)
#
# EX Slit size (nm)
#-------
EXiPL = Label(edsetCf, text = "EX Slit size (nm):"
, bg = TANBG, font=monoFont14)
EXiPL.grid(row=0, column=0, padx=4, sticky=W)
#-------
EXiPE = Entry(edsetCf, textvariable = varEXslit, font=monoFont14)
EXiPE.grid(row=0, column=1, padx=4, sticky=E)
#
# EM Slit size (nm)
#-------
EMiPL = Label(edsetCf, text = "EM Slit size (nm):"
, bg = TANBG, font=monoFont14)
EMiPL.grid(row=1, column=0, padx=4, sticky=W)
#-------
EMiPE = Entry(edsetCf, textvariable = varEMslit, font=monoFont14)
EMiPE.grid(row=1, column=1, padx=4, sticky=E)
#
# EM PMT high voltage (v)
#-------
EMhvL = Label(edsetCf, text = "EM PMT high voltage (v):"
, bg = TANBG, font=monoFont14)
EMhvL.grid(row=2, column=0, padx=4, sticky=W)
#-------
EMhvE = Entry(edsetCf, textvariable = varEMhv, font=monoFont14)
EMhvE.grid(row=2, column=1, padx=4, sticky=E)
#
# REF DIODE Gain setting [0,1,2,3]
#-------
REFhvL = Label(edsetCf, text = "REF DIODE Gain Setting:"
, bg = TANBG, font=monoFont14)
REFhvL.grid(row=3, column=0, padx=4, sticky=W)
#-------
# varREFdiode = StringVar() # REF DIODE Gain setting [0,1,2,3]
REFhvE = Entry(edsetCf, textvariable = varREFdiode, font=monoFont14)
REFhvE.grid(row=3, column=1, padx=4, sticky=E)
#
# REF PMT high voltage (v)
#-------
REFhvL = Label(edsetCf, text = "REF PMT high voltage (v):"
, bg = TANBG, font=monoFont14)
REFhvL.grid(row=4, column=0, padx=4, sticky=W)
#-------
REFhvE = Entry(edsetCf, textvariable = varREFhv, font=monoFont14)
REFhvE.grid(row=4, column=1, padx=4, sticky=E)
#
    # EX Stepper Motor Cal: steps/nm
#-------
EXnmCL = Label(edsetCf, text = "EX motor steps/nm:"
, bg = TANBG, font=monoFont14)
EXnmCL.grid(row=5, column=0, padx=4, sticky=W)
#-------
EXnmCE = Entry(edsetCf, textvariable = varEXstepsNm, font=monoFont14)
EXnmCE.grid(row=5, column=1, padx=4, sticky=E)
#
    # EM Stepper Motor Cal: steps/nm
#-------
EMnmCL = Label(edsetCf, text = "EM motor steps/nm:"
, bg = TANBG, font=monoFont14)
EMnmCL.grid(row=6, column=0, padx=4, sticky=W)
#-------
EMnmCE = Entry(edsetCf, textvariable = varEMstepsNm, font=monoFont14)
EMnmCE.grid(row=6, column=1, padx=4, sticky=E)
#
#
# DONE
def edsetDone(x=None):
# Write out Settings to 'settings.txt'
fo = open('settings.txt','w')
tempData = [ '# site default settings'
, 'EXinc: ' + varEXinc.get()
, 'EMinc: ' + varEMinc.get()
, 'TMinc: ' + varTMinc.get()
, '# site calibrated settings'
, 'EXslit: ' + varEXslit.get()
, 'EMslit: ' + varEMslit.get()
, 'EMhv: ' + varEMhv.get()
, 'REFdiode: ' + varREFdiode.get()
, 'REFhv: ' + varREFhv.get()
, 'EXstepsNm: ' + varEXstepsNm.get()
, 'EMstepsNm: ' + varEMstepsNm.get()
]
#
fo.write( '\n'.join(tempData) )
fo.close()
# next read in (apply) settings
readSettings()
# lastly Close Edit window
edset.destroy()
return # ignore
#
bDone = Button(edsetTop, text = 'DONE', bg = TANBG, borderwidth=4
,command = edsetDone
,activebackground=ACTIVB, font=monoFont16)
bDone.grid(row=2,padx=4, pady=2, sticky=W)
#
edset.transient(siWin)
edset.grab_set()
siWin.wait_window(edset)
if jjltest:
print( 'edsetDone!')
return
#
## Calibration Input (odometer settings) for monochrometer
#
# varEXposition = StringVar() # EX monochrometer position (nm)
# varEMposition = StringVar() # EM monochrometer position (nm)
#
def monoCal():
'''Get 'odometer' values for the monochrometers.
(i.e. Calibrate SPEX monochrometers; EX and EM.)'''
#
cal = Toplevel()
cal.geometry(transGeom)
    cal.title("Monochrometer Calibration")
#
calTop = Frame(cal, bg = TANBG)
calTop.grid()
#
calf = LabelFrame(calTop, text="Verify odometer values."
,bg = TANBG, font=monoFont16
,borderwidth=6)
calf.grid(padx=4,pady=4)
#
lEX = Label(calf, text = "EXcitation:"
, bg = TANBG, font=monoFont14)
lEX.grid(row=0, column=0, padx=4, sticky=E)
eEX = Entry(calf, textvariable = varEXposition, font=monoFont14)
eEX.grid(row=0, column=1, padx=4, sticky=E)
def eEXchk(x=None):
eEX['bg'] = 'white'
return
eEX.bind('<KeyRelease>',eEXchk)
eEX.focus_set()
#
lEM = Label(calf, text = "EMission:"
, bg = TANBG, font=monoFont14)
lEM.grid(row=1, column=0, padx=4, sticky=E)
eEM = Entry(calf, textvariable = varEMposition, font=monoFont14)
eEM.grid(row=1, column=1, padx=4, sticky=E)
def eEMchk(x=None):
eEM['bg'] = 'white'
return
eEM.bind('<KeyRelease>',eEMchk)
#
#
def monoCheck(val, ent):
        '''Return True if val is in the 'legal' range, False otherwise.
        Colors the Entry field with WARNC when val is outside the range.'''
n = getVarInt(val)
if n >= MINnm and n<= MAXnm:
ent['bg'] = 'white' # 'legal' value
return True
else:
ent['bg'] = WARNC # 'illegal' value
ent.focus_set()
return False
#
def monoCalDone(x=None):
# Close window if both values are in 'normal' range
if monoCheck(varEXposition, eEX) and monoCheck(varEMposition, eEM):
writePositions() # save Verified positions to file
cal.destroy()
return # ignore
#
bDone = Button(calTop, text = 'DONE', bg = TANBG, borderwidth=4
,command = monoCalDone
,activebackground=ACTIVB, font=monoFont16)
bDone.grid(row=1, column=0, padx=4, pady=2, sticky=W)
#
cal.transient(siWin)
cal.grab_set()
siWin.wait_window(cal)
print( 'done!')
#
## Power Up - operations to sequence initialization of hardware/software
#
def PowerUp():
'''Load "settings" and calibrate SPEX.'''
#
#TODO read (overwrite or establish default values) 'settings.txt' file
readSettings()
#
#TODO establish serial connection to RetroSPEX controller
print("TODO: establish serial connection to RetroSPEX controller")
#( => simulate commands for now)
#
#TODO if connected: Move Monochrometers by 10nm (anti-backlash)
print("TODO: if connected: Move Monochrometers by 10nm (anti-backlash)")
readPositions()
# #TODO recover monochrometer positions 'positions.txt' file
# print("TODO: recover monochrometer positions 'positions.txt' file")
#
# perform monochrometer calibration (verification)
monoCal()
    #TODO ( => always move POS dir (or step NEG val+10 and then POS 10)
#TODO ( => real time display values initialize)
#
#TODO read (overwrite or establish default values) 'settings.txt' file
readSettings()
print("TODO: read (or establish default) 'settings.txt' file")
#
#TODO read (or have user create) 'dayfile.txt' file
print("TODO: read (or have user create) 'dayfile.txt' file")
checkDayFile()
#
return
#
## Power Down - operations to sequence shutdown of hardware/software
#
def PowerDown():
#
    #TODO stop scanning if one is in progress
    print("TODO: stop scanning if one is in progress")
#
#TODO log data such as monochrometer position on shutdown
print("TODO: log data such as monochrometer position on shutdown")
#
return
#====================================
## Scan Control Frame
#
#-------
controlsFrame = Frame(siWin, bg = TANBG, borderwidth=0)
controlsFrame.grid(row=0,column=0, sticky=N)
#
#-------
scfScanControlFrame = LabelFrame(controlsFrame,text='Control',
bg = TANBG, borderwidth=4)
scfScanControlFrame.grid(row=0,column=0, sticky=N)
## Scan; START/STOP - Spectrometer scan control
#
scanStopIcon = PhotoImage(file='icons/icon_scanSTOP.gif')
scanStartIcon = PhotoImage(file='icons/icon_scanSTART.gif')
runOn = False # default == OFF
#
def toggleScan():
'''Scan Start/Stop - Spectrometer scan control'''
global runOn
if runOn: # then STOP the scan !!
if jjltest:
print('STOPPING NOT IMPLEMENTED YET ;-)')
runOn = False
runScfB00['image'] = scanStartIcon
else: # START up a scan
# perform sanity checks before starting scan
sane = scanSanityCheck( warn = True )
if jjltest:
print('STARTING A SCAN NOT IMPLEMENTED YET ;-)')
sane = False
if sane:
runOn = True
runScfB00['image'] = scanStopIcon
return
#
#-------
runScfB00 = Button(scfScanControlFrame,image=scanStartIcon
,borderwidth = 0,activebackground=ACTIVB
,bg = TANBG, command = toggleScan )
runScfB00.grid(column=0,row=0, padx=2)
## HV - On/Off - High Voltage (red: safety concern)
#
hvOffIcon = PhotoImage(file='icons/icon_hvOff.gif')
hvOnIcon = PhotoImage(file='icons/icon_hvOn.gif')
hvOn = False # default == OFF
#
def toggleHV():
'''HV - On/Off - High Voltage (red: safety concern)'''
global hvOn
hvOn = toggleBtnVar(hvOn, hvScfB01, hvOffIcon, hvOnIcon)
return
#
#-------
hvScfB01 = Button(scfScanControlFrame, image = hvOffIcon
,activebackground=ACTIVB
,borderwidth = 0, bg = TANBG, command = toggleHV)
hvScfB01.grid(column=0,row=1)
#====================================
## Scan Data Frame -- Load previous Scan Data for Reference or Settings recall
#
#-------
filesFrame = LabelFrame(controlsFrame,text='Scan Data',
bg = TANBG, borderwidth=4)
filesFrame.grid(row=1,column=0, padx=2, sticky=NW)
#
# LOAD experimental settings from disk
dataLoadIcon = PhotoImage(file='icons/icon_dataLOAD.gif')
#
#-------
fileFileDataLoad = Button(filesFrame, image=dataLoadIcon
, bg = TANBG, activebackground=ACTIVB
,command = dataFileLOAD
,borderwidth = 0, font=monoFont14 )
fileFileDataLoad.grid(row=0, column=0, sticky=NW)
#
#
dataMgetIcon = PhotoImage(file='icons/icon_dataMGET.gif')
#
#-------
fileSettingsGet = Button(filesFrame, image=dataMgetIcon, bg = TANBG
,command = dataFileMGET,activebackground=ACTIVB
,borderwidth = 0, font=monoFont14 )
fileSettingsGet.grid(row=1, column=0,sticky=NW)
#====================================
## Macro Files Frame
#
#-------
macroFrame = LabelFrame(controlsFrame,text='Macro Files',
bg = TANBG, borderwidth=4)
macroFrame.grid(row=2,column=0, padx=2, sticky=NW)
#
# LOAD scan settings from disk
macroLoadIcon = PhotoImage(file='icons/icon_macroLOAD.gif')
#
#-------
macroFileLoad = Button(macroFrame, image=macroLoadIcon, bg = TANBG
,borderwidth = 0
,activebackground=ACTIVB, font=monoFont14 )
macroFileLoad.grid(row=0, column=0,sticky=NW)
#
#
macroEditIcon = PhotoImage(file='icons/icon_macroEDIT.gif')
#
#-------
macroFileEdit = Button(macroFrame, image=macroEditIcon, bg = TANBG
, borderwidth = 0
,activebackground=ACTIVB, font=monoFont14 )
macroFileEdit.grid(row=1, column=0,sticky=NW)
#====================================
## Settings Frame
#
#-------
settingsFrame = LabelFrame(controlsFrame,text='Settings',
bg = TANBG, borderwidth=4)
settingsFrame.grid(row=12,column=0, sticky=S)
#
#
settingsIcon = PhotoImage(file='icons/icon_settings.gif')
#
#-------
settingsBtn = Button(settingsFrame, image=settingsIcon, bg = TANBG
,borderwidth = 0, command = editSettings
,activebackground=ACTIVB, font=monoFont14 )
settingsBtn.grid()
#====================================
## Quit Frame
#
def quitCommand():
#
# Shutdown equipment
#
PowerDown()
#
siWin.destroy()
#-------
quitFrame = LabelFrame(controlsFrame,text='Quit',
bg = TANBG, borderwidth=4)
quitFrame.grid(row=13,column=0, sticky=S)
#
#
quitIcon = PhotoImage(file='icons/icon_quit.gif')
#
#-------
quitBtn = Button(quitFrame, image=quitIcon, bg = TANBG, borderwidth = 0
,command = quitCommand
,activebackground=ACTIVB, font=monoFont14 )
quitBtn.grid()
#====================================
## Experiment Frame -- Window to right of Control frame
#
#-------
efFrame = Frame(siWin, bg = TANBG, borderwidth=0)
efFrame.grid(row=0,column=1,sticky=NW)
#====================================
## Experiment Settings Frame
#
#-------
esfFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
esfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer / Specimen Box Frame
#
#-------
ssbFrame = Frame(esfFrame, bg = TANBG, borderwidth=0)
ssbFrame.grid(row=0,column=0,sticky=EW)
#====================================
## Spectrometer Settings Frame
#
#-------
ssfFrame = LabelFrame(ssbFrame,text='Spectrometer Settings',
bg = TANBG, borderwidth=4)
ssfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer EX Frame - EXcitation
#
# EX scan
#
#-------
sEXfFrame = Frame(ssfFrame, bg = TANBG)
sEXfFrame.grid(row=0,column=0,sticky=NW)
#
#
sEXfB00_FR = NotImplemented # forward reference to Button
sEMfB00_FR = NotImplemented # forward reference to Button
sTMfB00_FR = NotImplemented # forward reference to Button
#
exIconT = PhotoImage(file='icons/icon_modeEXt.gif')
exIconF = PhotoImage(file='icons/icon_modeEXf.gif')
#
emIconT = PhotoImage(file='icons/icon_modeEMt.gif')
emIconF = PhotoImage(file='icons/icon_modeEMf.gif')
#
tmIconT = PhotoImage(file='icons/icon_modeTMt.gif')
tmIconF = PhotoImage(file='icons/icon_modeTMf.gif')
#
def buttonEX():
'''Display/Change scanning mode: to EXcitation.'''
setScanMode(EXscan)
return
#
#-------
sEXfB00 = Button(sEXfFrame, image = exIconT, bg = TANBG
,borderwidth=0, command = buttonEX,activebackground=ACTIVB)
sEXfB00.grid(row=0,column=0,sticky=W)
sEXfB00_FR = sEXfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEXwavFrame = Frame(sEXfFrame, bg = TANBG)
sEXwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEXwavSLabel = Label(sEXwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEXwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEXwavELabel = Label(sEXwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEXwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEXwavILabel = Label(sEXwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEXwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEXwaveStart(eventKeyRelease):
sEXwavSEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEXwavSEntry = Entry(sEXwavFrame, textvariable=varEXwaveStart,
border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEXwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=W)
sEXwavSEntry.bind('<KeyRelease>',validateEXwaveStart)
#
btnLookup['EXS'] = sEXwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEXwaveEnd(eventKeyRelease):
sEXwavEEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEXwavEEntry = Entry(sEXwavFrame, textvariable=varEXwaveEnd,
border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEXwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=W)
sEXwavEEntry.bind('<KeyRelease>',validateEXwaveEnd)
#
btnLookup['EXE'] = sEXwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEXwaveInc(eventKeyRelease):
sEXwavIEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEXwavIEntry = Entry(sEXwavFrame, textvariable=varEXwaveInc,
border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEXwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sEXwavIEntry.bind('<KeyRelease>',validateEXwaveInc)
#
btnLookup['EXI'] = sEXwavIEntry # put button into dictionary by name
#====================================
## Spectrometer EM Frame - EMission
#
# EM scan
#
#-------
sEMfFrame = Frame(ssfFrame, bg = TANBG)
sEMfFrame.grid(row=0,column=1,sticky=NW)
#
def buttonEM():
'''Display/Change scanning mode: to EMission.'''
setScanMode(EMscan)
return
#
#-------
sEMfB00 = Button(sEMfFrame, image = emIconF, bg = TANBG
,borderwidth=0, activebackground=ACTIVB, command = buttonEM)
sEMfB00.grid(row=0,column=0,sticky=W)
sEMfB00_FR = sEMfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEMwavFrame = Frame(sEMfFrame, bg = TANBG)
sEMwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEMwavSLabel = Label(sEMwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEMwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEMwavELabel = Label(sEMwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEMwavILabel = Label(sEMwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEMwaveStart(eventKeyRelease):
sEMwavSEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEMwavSEntry = Entry(sEMwavFrame, textvariable=varEMwaveStart,
border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEMwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=E)
sEMwavSEntry.bind('<KeyRelease>',validateEMwaveStart)
#
btnLookup['EMS'] = sEMwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEMwaveEnd(eventKeyRelease):
sEMwavEEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEMwavEEntry = Entry(sEMwavFrame, textvariable=varEMwaveEnd,
border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sEMwavEEntry.bind('<KeyRelease>',validateEMwaveEnd)
#
btnLookup['EME'] = sEMwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEMwaveInc(eventKeyRelease):
sEMwavIEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sEMwavIEntry = Entry(sEMwavFrame, textvariable=varEMwaveInc,
border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=EW)
sEMwavIEntry.bind('<KeyRelease>',validateEMwaveInc)
#
btnLookup['EMI'] = sEMwavIEntry # put button into dictionary by name
#====================================
## Spectrometer TM Frame - TiMe
#
# TM scan
#
#-------
sTMfFrame = Frame(ssfFrame, bg = TANBG)
sTMfFrame.grid(row=0,column=2,sticky=NW)
#
def buttonTM():
    '''Display/Change scanning mode: to TiMe.'''
setScanMode(TMscan)
return
#
#-------
sTMfB00 = Button(sTMfFrame, image = tmIconF, bg = TANBG,
borderwidth=0,activebackground=ACTIVB, command = buttonTM)
sTMfB00.grid(row=0,column=0,sticky=W)
sTMfB00_FR = sTMfB00 # resolve the forward reference to this button
#
#
# Time Setting (frame)
#-------
sTMwavFrame = Frame(sTMfFrame, bg = TANBG)
sTMwavFrame.grid(row=0,column=1,sticky=NW)
#
# Pause step# - Label
#-------
sTMwavPLabel = Label(sTMwavFrame, text='Pause(S)', font=monoFont12, bg = TANBG )
sTMwavPLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# End step# - Label
#-------
sTMwavELabel = Label(sTMwavFrame, text='End (S)', font=monoFont12, bg = TANBG )
sTMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Increment Time - Label
#-------
sTMwavILabel = Label(sTMwavFrame, text='Inc (S)', font=monoFont12, bg = TANBG )
sTMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
#
# Pause (step#) - Enter
#
def validateTMwavePause(eventKeyRelease):
sTMwavPEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sTMwavPEntry = Entry(sTMwavFrame, textvariable=varTMwavePause,
border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavPEntry.grid(row=1, column=0, padx=4, pady=2, sticky=EW)
sTMwavPEntry.bind('<KeyRelease>',validateTMwavePause)
#
btnLookup['TMP'] = sTMwavPEntry # put button into dictionary by name
#
# End step# - Enter
#
def validateTMwaveEnd(eventKeyRelease):
sTMwavEEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sTMwavEEntry = Entry(sTMwavFrame, textvariable=varTMwaveEnd,
border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sTMwavEEntry.bind('<KeyRelease>',validateTMwaveEnd)
#
btnLookup['TME'] = sTMwavEEntry # put button into dictionary by name
#
# Increment Time - Enter
#
def validateTMwaveInc(eventKeyRelease):
sTMwavIEntry['bg'] = 'white' # set button color 'white' on edit
return
#
#-------
sTMwavIEntry = Entry(sTMwavFrame, textvariable=varTMwaveInc,
border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sTMwavIEntry.bind('<KeyRelease>',validateTMwaveInc)
#
btnLookup['TMI'] = sTMwavIEntry # put button into dictionary by name
#====================================
## S+R Frame - record Reference data?
#
# S+R
#
#-------
srFrame = Frame(ssfFrame, bg = TANBG)
srFrame.grid(row=0,column=3,sticky=NW)
#
# Reference Data - On/Off - 'S'(signal) alone or with 'R'(reference) too?
#
refOffIcon = PhotoImage(file='icons/icon_refOff.gif')
refOnIcon = PhotoImage(file='icons/icon_refOn.gif')
refOn = False # default == OFF (i.e. 'S' and 'R')
#
def toggleRef():
'''Ref - On/Off - 'S'(signal) alone or with 'R'(reference) too?'''
global refOn
refOn = toggleBtnVar(refOn, refScfB02, refOffIcon, refOnIcon)
return
#
#-------
refScfB02 = Button(srFrame, image = refOffIcon, borderwidth = 0
,bg = TANBG,activebackground=ACTIVB, command = toggleRef)
refScfB02.grid(row=0,column=0,sticky=W)
#====================================
## Set 'scan mode' - complete forward reference
#
def setScanMode(mode):
    '''Select the type of spectrometer scan to perform.
    Sets the EX, EM and TM Icons to indicate the scan type.
    Sets the 'state' (NORMAL/DISABLED) for the scan setting parameters.'''
#
# any change?
if varScanMode.get() == mode:
if jjltest:
print('setScanMode(): NO change.')
return # no change
#
varScanMode.set(mode) # set the scan mode
#
# update icons
if varScanMode.get() == EXscan :
sEXfB00_FR['image'] = exIconT # SCAN MODE - back to Default
sEMfB00_FR['image'] = emIconF
sTMfB00_FR['image'] = tmIconF
elif varScanMode.get() == EMscan :
sEXfB00_FR['image'] = exIconF
sEMfB00_FR['image'] = emIconT # SCAN MODE
sTMfB00_FR['image'] = tmIconF
elif varScanMode.get() == TMscan :
sEXfB00_FR['image'] = exIconF
sEMfB00_FR['image'] = emIconF
sTMfB00_FR['image'] = tmIconT # SCAN MODE
else:
if jjltest:
print('Bad scan mode found in setScanMode(mode)')
sys.exit(0)
#
updatePlot() # synchronize plot with scan mode
#
# set the correct 'state' for wavelength/time icons
#
if varScanMode.get() == EXscan:
sEXwavSLabel['text'] = 'Start (nm)' # EXscan - Start wavelength
sEXwavELabel['text'] = 'End (nm)' # - End label set
sEXwavEEntry['state'] = NORMAL # - End entry enabled
sEXwavILabel['text'] = 'Inc (nm)' # - Inc label set
sEXwavIEntry['state'] = NORMAL # - Inc entry enabled
sEMwavSLabel['text'] = 'Park (nm)' # EMscan - EM wavelength Parked
sEMwavELabel['text'] = '' # - End label cleared
sEMwavEEntry['state'] = DISABLED # - End entry disabled
sEMwavILabel['text'] = '' # - Inc label cleared
sEMwavIEntry['state'] = DISABLED # - Inc entry disabled
        sTMwavPLabel['text'] = ''           # TMscan - Pause label cleared
sTMwavPEntry['state'] = DISABLED # - Pause entry disabled
sTMwavELabel['text'] = '' # - End label cleared
sTMwavEEntry['state'] = DISABLED # - End entry disabled
elif varScanMode.get() == EMscan:
sEXwavSLabel['text'] = 'Park (nm)' # EXscan - EX wavelength Parked
sEXwavELabel['text'] = '' # - End label cleared
sEXwavEEntry['state'] = DISABLED # - End entry disabled
sEXwavILabel['text'] = '' # - Inc label cleared
        sEXwavIEntry['state'] = DISABLED    #        - Inc entry disabled
sEMwavSLabel['text'] = 'Start (nm)' # EMscan - EM wavelength set
sEMwavELabel['text'] = 'End (nm)' # - End label set
sEMwavEEntry['state'] = NORMAL # - End entry enabled
sEMwavILabel['text'] = 'Inc (nm)' # - Inc label set
sEMwavIEntry['state'] = NORMAL # - Inc entry enabled
        sTMwavPLabel['text'] = ''           # TMscan - Pause label cleared
sTMwavPEntry['state'] = DISABLED # - Pause entry disabled
sTMwavELabel['text'] = '' # - End label cleared
sTMwavEEntry['state'] = DISABLED # - End entry disabled
elif varScanMode.get() == TMscan:
sEXwavSLabel['text'] = 'Park (nm)' # EXscan - EX wavelength Parked
sEXwavELabel['text'] = '' # - End label cleared
sEXwavEEntry['state'] = DISABLED # - End entry disabled
sEXwavILabel['text'] = '' # - Inc label cleared
        sEXwavIEntry['state'] = DISABLED    #        - Inc entry disabled
sEMwavSLabel['text'] = 'Park (nm)' # EMscan - EM wavelength Parked
sEMwavELabel['text'] = '' # - End label cleared
sEMwavEEntry['state'] = DISABLED # - End entry disabled
sEMwavILabel['text'] = '' # - Inc label cleared
sEMwavIEntry['state'] = DISABLED # - Inc entry disabled
        sTMwavPLabel['text'] = 'Pause(S)'   # TMscan - Pause label set
sTMwavPEntry['state'] = NORMAL # - Pause entry enabled
sTMwavELabel['text'] = 'End (S)' # - End label set
sTMwavEEntry['state'] = NORMAL # - End entry enabled
else:
        err = 'Internal Error: undefined scan mode: {} !'.format(varScanMode.get())
mBox.showerror(title='Fatal Error',message=err)
sys.exit(0)
#
scanSanityCheck() # update out-of-bounds parameter coloring
return
#
setScanMode_FR = setScanMode # resolve the Forward Reference to function
#====================================
## Specimen Details Frame
#
#-------
sdFrame = LabelFrame(ssbFrame,text='Specimen Details', bg = TANBG, borderwidth=0)
sdFrame.grid(row=1,column=0, pady=4, sticky=NW)
sdEntry = Entry(sdFrame, textvariable=varSpecimenDetails ,
width=96, bg = 'white', border=2, relief=SUNKEN, font=monoFont14)
sdEntry.grid(row=0, column=0, padx=20, pady=2, sticky=EW)
sdEntry.bind('<KeyRelease>',updatePlot)
#====================================
## Real Time data Frame -- frame inside Experiment Frame
#
# Frame to hold real time data
#-------
rtdmFrame = LabelFrame(esfFrame, text='Live Data', bg = TANBG, borderwidth=4)
rtdmFrame.grid(row=0,column=1, padx=4, pady=2,sticky=NS+E)
#
#
# Real Time Data -- Row 0 => Signal
#-------
rtdmLabel00 = Label(rtdmFrame, text='S:', font=monoFont14, bg = TANBG )
rtdmLabel00.grid(row=0, column=0,sticky=E)
#-------
rtdmLabel00 = Label(rtdmFrame, textvariable=varRTDsignal
,border=0, relief=FLAT, bg='white'
,width=15, font=monoFont12, anchor=E )
rtdmLabel00.grid(row=0, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 1 => Reference
#-------
rtdmLabel10 = Label(rtdmFrame, text='R:', font=monoFont14, bg = TANBG )
rtdmLabel10.grid(row=1, column=0,sticky=E)
#-------
rtdmLabel11 = Label(rtdmFrame, textvariable=varRTDreference
,border=0, relief=FLAT, bg='white'
,width=15, font=monoFont12, anchor=E )
rtdmLabel11.grid(row=1, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 2 => PCT (%) scan complete
#-------
rtdmLabel40 = Label(rtdmFrame, text='%:', font=monoFont14, bg = TANBG )
rtdmLabel40.grid(row=2, column=0,sticky=E)
rtdmProgress41 = Progressbar(rtdmFrame, orient='horizontal'
,mode='determinate', variable=varPCTdone
,length=124)
rtdmProgress41.grid(row=2, column=1, padx=4, pady=2,sticky=W)
#
#
# FRAME for Real Time Data2 -- EX/EM position and HV readings
#
rtdmFrame2 = Frame(rtdmFrame, bg = TANBG)
rtdmFrame2.grid(row=3,columnspan=2, padx=0, pady=0,sticky=NSEW)
#
# Real Time Data2 -- Row 0,[Col 0&1] => EX monochrometer position (nm)
#-------
rtdm2Label00 = Label(rtdmFrame2, text='EX:', font=monoFont14, bg = TANBG )
rtdm2Label00.grid(row=0, column=0,sticky=E)
#-------
rtdm2Label01 = Label(rtdmFrame2, textvariable=varEXposition
,border=0, relief=FLAT, bg='white'
,width=4, font=monoFont12, anchor=E )
rtdm2Label01.grid(row=0, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 0,[Col 2&3] => EM monochrometer position (nm)
#-------
rtdm2Label02 = Label(rtdmFrame2, text='EM:', font=monoFont14, bg = TANBG )
rtdm2Label02.grid(row=0, column=2,sticky=E)
#-------
rtdm2Label03 = Label(rtdmFrame2, textvariable=varEMposition
,border=0, relief=FLAT, bg='white'
,width=4, font=monoFont12, anchor=E )
rtdm2Label03.grid(row=0, column=3, padx=2, pady=2, sticky=W)
#
# Real Time Data2 -- Row 1,[Col 0&1] => EM PMT HV readings (v)
#-------
rtdm2Label10 = Label(rtdmFrame2, text='HVm:', font=monoFont14, bg = TANBG )
rtdm2Label10.grid(row=1, column=0,sticky=E)
#-------
rtdm2Label11 = Label(rtdmFrame2, textvariable=varEMhv
,border=0, relief=FLAT, bg='white'
,width=4, font=monoFont12, anchor=E )
rtdm2Label11.grid(row=1, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 1,[Col 2&3] => REF PMT HV readings (v)
#-------
rtdm2Label22 = Label(rtdmFrame2, text='HVr:', font=monoFont14, bg = TANBG )
rtdm2Label22.grid(row=1, column=2,sticky=E)
#-------
rtdm2Label23 = Label(rtdmFrame2, textvariable=varREFhv
,border=0, relief=FLAT, bg='white'
,width=4, font=monoFont12, anchor=E )
rtdm2Label23.grid(row=1, column=3, padx=2, pady=2, sticky=W)
#====================================
## Plotting Frame
#
#-------
plotFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
plotFrame.grid(row=2,column=0, sticky=NSEW)
#
fig = Figure(figsize = (11.56,6), dpi=100) # TopLevel container for all plot elements
#
# initialize the "plot" element as "ax"
#
ax = fig.add_subplot(111, axisbg='w')
#
canvas = FigureCanvasTkAgg(fig, master=plotFrame)
canvas.get_tk_widget().grid(row=0,column=0, padx=2)
#
def updatePlot():
global ax
global scanDataX,scanDataY
global backgroundDataX,backgroundDataY
# #
# # returns Axes instance for single plot
# try:
# fig.axes.remove(ax)
# except:
# pass
#print('CALLED: updatePlot() len(scanDataX)={}'.format(len(scanDataX)))
#
# remove 'old' lines before re-draw
while len(ax.lines):
ax.lines.remove(ax.lines[-1])
#
# Get correct scaling for X axis
#
minX = 200
maxX = 1000
sm = varScanMode.get()
if sm == EXscan:
if jjltest:
print('Error: EXscan not implemented.')
else:
mBox.showerror(message='Error: EXscan not implemented.')
startX = minX
endX = maxX
elif sm == EMscan:
if getVarInt(varEMwaveEnd) - getVarInt(varEMwaveStart) < 2:
startX = minX
endX = maxX
else:
startX = getVarInt(varEMwaveStart)
endX = getVarInt(varEMwaveEnd)
elif sm == TMscan:
if jjltest:
print('Error: TMscan not implemented.')
else:
mBox.showerror(message='Error: TMscan not implemented.')
startX = minX
endX = maxX
else:
mErr('Error: updatePlot() invalid varScanMode')
sys.exit(0)
#
# Get correct scaling for Y axis
#
if len(scanDataY) < 2 :
maxScanY = 5000 # default if NO scan data
else:
maxScanY = 1.1*max(scanDataY)
#
if len(backgroundDataY) < 2 :
maxInputY = 5000 # default if NO input (reference) data
else:
maxInputY = 1.1*max(backgroundDataY)
#
maxY = max(5000, maxScanY, maxInputY)
#
# set the X & Y sizes for axes now
#
ax.axis([startX, endX, 0, maxY ])
#
ax.set_title( timeNow() + ' - Specimen Details:\n'
+ varSpecimenDetails.get() )
ax.set_ylabel('counts')
#
# plot "background" waveform (IF one has been loaded)
if len(backgroundDataX) > 1:
if jjltest:
print('\nbefore: len(ax.lines)={}'.format(len(ax.lines)))
#ax.plot(scanDataX, scanDataY, 'b')
if jjltest:
print('mid: len(ax.lines)={}'.format(len(ax.lines)))
ax.plot(backgroundDataX, backgroundDataY, 'g')
if jjltest:
print('after: len(ax.lines)={}'.format(len(ax.lines)))
if jjltest:
print('len(backgroundDataX):{}, len(backgroundDataY):{}'.format(len(backgroundDataX),len(backgroundDataY)))
#
# xlabel depends upon type of scan: EXscan = 0, EMscan = 1, TMscan = 2; varScanMode
#
if varScanMode.get() == TMscan:
ax.set_xlabel('time (S)') # scan by time
else:
ax.set_xlabel('wavelength (nm)') # scan by wavelength
#
# set up "cursor" to display values from plot
#
cursor = Cursor(ax, horizOn=False, useblit=True, color='red', linewidth=2 )
#cursor = Cursor(ax, horizOn=False, color='red', linewidth=2 )
#
canvas.show()
#
updatePlot_FR = updatePlot # resolve the Forward Reference to updatePlot()
# ========================
#=================
## Start up Window
#
setScanMode(EMscan)     # establish default EM scan type
updatePlot() # draw the graph
#
PowerUp() # initialize settings & calibrate SPEX
#
siWin.mainloop()
``` |
{
"source": "jluss0ll1/casa-de-cambio",
"score": 4
} |
#### File: jluss0ll1/casa-de-cambio/sistema.py
```python
from operacoes import Operacao
from typing import List, Dict
from time import sleep
import pickle
registros: List[Operacao] = []
def main() -> None:
menu()
def menu() -> None:
print('=====================================================')
print('================== MUITO DINHEIRO ===================')
print('================== Casa de Câmbio ===================')
print('=====================================================')
print('Seja bem-vindo! Selecione uma opção: ')
print('1 - Cadastrar Operação')
print('2 - Listar Operações')
print('3 - Buscar Operações por Cliente e Intervalo de Tempo')
print('4 - Relatório: Valor Total das Operações')
print('5 - Relatório: Valor Total das Taxas Cobradas')
print('6 - Relatório: Valores por Cliente e Intervalo de Tempo')
print('7 - Salvar Registros')
print('8 - Carregar Registros')
print('9 - Sair do Sistema')
opcao: int = int(input('Selecione uma opção: '))
if opcao == 1:
cadastrar_operacao()
elif opcao == 2:
listar_operacoes()
elif opcao == 3:
nomecliente = input('Informe o nome do cliente: ')
datainicial = input('Informe a data inicial do intervalo: ')
datafinal = input('Informe a data final do intervalo: ')
        operacao: List[Operacao] = busca_operacoes_por_cliente(nomecliente, datainicial, datafinal)
print('----------')
for o in operacao:
print(o)
print('----------')
sleep(1)
menu()
elif opcao == 4:
soma_op = round(total_operacoes(), 2)
print(f'Valor total das operações: BRL {soma_op}')
sleep(1)
menu()
elif opcao == 5:
soma_taxas = round(total_taxas(), 2)
print(f'Valor total das taxas cobradas: BRL {soma_taxas}')
sleep(1)
menu()
elif opcao == 6:
nomecliente = input('Informe o nome do cliente: ')
datainicial = input('Informe a data inicial do intervalo: ')
datafinal = input('Informe a data final do intervalo: ')
        operacao: tuple = totais_por_cliente(nomecliente, datainicial, datafinal)
print('----------')
print(f'Valor total das operações realizadas pelo cliente {nomecliente} do dia {datainicial} até o dia {datafinal}:')
print(f'BRL {operacao[0]}')
print(f'Valor total das taxas cobradas do cliente {nomecliente} do dia {datainicial} até o dia {datafinal}:')
print(f'BRL {operacao[1]}')
print('----------')
sleep(2)
menu()
elif opcao == 7:
salvar_operacoes()
elif opcao == 8:
abrir_registros()
elif opcao == 9:
sair()
else:
print('Opção inválida. Tente novamente.')
sleep(1)
menu()
# Option 1 - Register Operation
def cadastrar_operacao() -> None:
print('Cadastro de Operação: ')
print('======================')
try:
nome: str = input('Informe o nome do cliente: ')
moeda_origem: str = input('Informe a moeda de origem: ')
moeda_destino: str = input('Informe a moeda de destino: ')
        valor_original: float = float(input('Informe o valor original: '))
operacao: Operacao = Operacao(nome, moeda_origem, moeda_destino, valor_original)
registros.append(operacao)
print('----------------')
print(operacao)
print(f'A operação para o cliente {operacao.nome} foi concluída com sucesso!')
sleep(2)
menu()
except:
print('Erro na operação! Favor tentar novamente.')
sleep(1)
menu()
# Option 2 - List Operations
def listar_operacoes() -> None:
if len(registros) > 0:
print('Lista de operações')
print('==================')
for registro in registros:
print(registro)
print('==================')
sleep(1)
else:
print('Não há operações registradas no sistema!')
sleep(1)
menu()
# Option 3 - Search Operations by Client and Time Interval
def busca_operacoes_por_cliente(nome: str, data_inicial: str, data_final: str) -> List[Operacao]:
    o: List[Operacao] = []
for registro in registros:
if (registro.nome == nome) and (registro.data_operacao >= data_inicial) and (registro.data_operacao <= data_final):
o.append(registro)
return o
# Option 4 - Report: Total Value of Operations
def total_operacoes():
lista_op = []
for registro in registros:
lista_op.append(float(registro.valor_real))
total_op = sum(lista_op)
return total_op
# Option 5 - Report: Total Value of Fees Charged
def total_taxas():
lista_op = []
for registro in registros:
lista_op.append(float(registro.valor_real))
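    # Note: as written, the line below multiplies the grand total by the fee
    # rate of the last record iterated ('registro' after the loop ends), not
    # by each record's own rate.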
total_taxas = (sum(lista_op))*registro.taxa_cobrada
return total_taxas
# Option 6 - Report: Values by Client and Time Interval
def totais_por_cliente(nome: str, data_inicial: str, data_final: str) -> tuple:
valores = []
taxas = []
for registro in registros:
if (registro.nome == nome) and (registro.data_operacao >= data_inicial) and (registro.data_operacao <= data_final):
valores.append(float(registro.valor_real))
taxas.append(float(registro.taxa_operacao))
total_valores = round(sum(valores), 2)
total_taxas = round(sum(taxas), 2)
return (total_valores, total_taxas)
# Option 7 - Save Records
def salvar_operacoes():
nome_arquivo = "registros.pkl"
abrir_arquivo = open(nome_arquivo, "wb")
for registro in registros:
pickle.dump(registro, abrir_arquivo)
abrir_arquivo.close()
print('Salvo com sucesso!')
sleep(3)
menu()
# Option 8 - Load Records
def abrir_registros():
nome_arquivo = "registros.pkl"
objetos = []
with (open(nome_arquivo, "rb")) as openfile:
while True:
try:
objetos.append(pickle.load(openfile))
except EOFError:
break
for obj in range(len(objetos)):
print('---------')
print(objetos[obj])
registros.append(objetos[obj])
print('---------')
sleep(1)
menu()
# Option 9 - Exit the System
def sair():
exit('Volte sempre!')
if __name__ == '__main__':
main()
``` |
{
"source": "jlustigy/approxposterior",
"score": 4
} |
#### File: approxposterior/approxposterior/priors.py
```python
__all__ = ["Prior", "UniformPrior", "GaussianPrior", "get_lnprior",
"get_prior_unit_cube", "get_theta_bounds", "get_theta_names"]
# Generic packages
import numpy as np
import scipy as sp
import scipy.stats  # ensure sp.stats (used for the distribution objects below) is available
from scipy.special import erfcinv
################################################################################
# P r i o r C l a s s
################################################################################
class Prior(object):
"""
Prior probability class meant for subclassing.
Warning
-------
:class:`Prior` is a base class to construct specific prior distribution
classes and instances. It cannot be used directly as a prior. See
:class:`UniformPrior` and :class:`GaussianPrior` for functional
subclasses.
Parameters
----------
theta_name : str
State vector parameter name
"""
def __init__(self, theta_name = None):
self.theta_name = theta_name
return
def __call__(self, x):
"""
Returns the log-prior probability of ``x``
"""
return self.lnprior(x)
def __repr__(self):
"""
"""
return "%s(%s=%.3f, %s=%.3f)" %(self.__class__.__name__,
list(self.__dict__.keys())[0],
list(self.__dict__.values())[0],
list(self.__dict__.keys())[1],
list(self.__dict__.values())[1])
def __str__(self):
"""
"""
return self.__repr__()
def lnprior(self, x):
"""
Returns the natural log of the prior probability
Parameters
----------
x : float
State at which to evaluate the log-prior
Returns
-------
lnprior : float
Log of the prior probability
"""
        raise NotImplementedError("You must specify `lnprior` function in a subclass.")
    def random_sample(self, size=None):
"""
Returns a sample from the prior probability distribution function
Parameters
----------
size : int or None
Number of random samples to return; default ``None`` will return a
float, otherwise a numpy array is returned.
Returns
-------
x0 : float or numpy.array
Randomly drawn sample from the prior
"""
        raise NotImplementedError("You must specify `random_sample` function in a subclass.")
def transform_uniform(self, r):
"""
        Transformation from hypercube to physical parameters. The MultiNest native space is a unit hyper-cube
        in which all the parameters are uniformly distributed in [0, 1]. The user is required to transform
the hypercube parameters to physical parameters. This transformation is described in Sec 5.1
of arXiv:0809.3437.
These functions are based on the prior transformations provided here:
https://github.com/JohannesBuchner/MultiNest/blob/master/src/priors.f90
Parameters
----------
r : float
Hypercube value
Returns
-------
r2 : float
Transformed parameter value
"""
        raise NotImplementedError("`transform_uniform` must be specified by a specific subclass.")
def get_bounds(self):
"""
Returns a tuple of the strict boundaries
Returns
-------
bounds : tuple
Hard bounds ``(xmin, xmax)``
"""
        raise NotImplementedError("You must specify `get_bounds` in a subclass.")
################################################################################
# U n i f o r m P r i o r
################################################################################
class UniformPrior(Prior):
"""
Uniform prior subclass. This distribution is constant between low and
high.
Parameters
----------
low : float
Minimum parameter value
high : float
Maximum parameter value
Attributes
----------
dist : scipy.stats.uniform
A uniform continuous random variable instance
"""
def __init__(self, low, high, **kwargs):
self.low = low
self.high = high
self.dist = sp.stats.uniform(loc = self.low, scale = self.high - self.low)
super(UniformPrior, self).__init__(**kwargs)
return
def lnprior(self, x):
"""
Returns the natural log of the prior probability
Parameters
----------
x : float
State at which to evaluate the log-prior
Returns
-------
lnprior : float
Log of the prior probability
"""
#if x >= self.low and x <= self.high:
# lp = 0.0
#else:
# lp = -np.inf
return self.dist.logpdf(x) #lp
def random_sample(self, size=None):
"""
Returns a sample from the prior probability distribution function
Parameters
----------
size : int or None
Number of random samples to return; default ``None`` will return a
float, otherwise a numpy array is returned.
Returns
-------
x0 : float or numpy.array
Randomly drawn sample from the prior
"""
return self.dist.rvs(size=size)
def transform_uniform(self, r):
"""
        Transformation from hypercube to physical parameters. The MultiNest native space is a unit hyper-cube
        in which all the parameters are uniformly distributed in [0, 1]. The user is required to transform
the hypercube parameters to physical parameters. This transformation is described in Sec 5.1
of arXiv:0809.3437.
These functions are based on the prior transformations provided here:
https://github.com/JohannesBuchner/MultiNest/blob/master/src/priors.f90
Parameters
----------
r : float
Hypercube value
Returns
-------
r2 : float
Transformed parameter value
"""
# Parse attributes
x1 = self.low
x2 = self.high
# Calculate transformation
u=x1+r*(x2-x1)
return u
def get_bounds(self):
"""
Returns a tuple of the strict boundaries
Returns
-------
bounds : tuple
Hard bounds ``(xmin, xmax)``
"""
return (self.low, self.high)
################################################################################
# G a u s s i a n P r i o r
################################################################################
class GaussianPrior(Prior):
"""
Gaussian prior object.
The probability density for the Gaussian distribution is
.. math::
p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }} e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
where :math:`\mu` is the mean and :math:`\sigma` the standard
deviation. The square of the standard deviation, :math:`\sigma^2`,
is called the variance.
Parameters
----------
mu : float
Mean of the normal distribution
sigma : float
Standard deviation of the normal distribution
Attributes
----------
dist : scipy.stats.norm
A normal continuous random variable instance
"""
def __init__(self, mu, sigma, **kwargs):
self.mu = mu
self.sigma = sigma
self.dist = sp.stats.norm(loc = self.mu, scale = self.sigma)
super(GaussianPrior, self).__init__(**kwargs)
return
def lnprior(self, x):
"""
Returns the natural log of the prior probability
Parameters
----------
x : float
State at which to evaluate the log-prior
Returns
-------
lnprior : float
Log of the prior probability
"""
#p = (1.0 / (2.0 * np.pi * self.sigma**2.0)) * np.exp(- ((x - self.mu)**2.0) / (2.0 * self.sigma**2.0))
return self.dist.logpdf(x)
def random_sample(self, size=None):
"""
Returns a sample from the prior probability distribution function
Parameters
----------
size : int or None
Number of random samples to return; default ``None`` will return a
float, otherwise a numpy array is returned.
Returns
-------
x0 : float or numpy.array
Randomly drawn sample from the prior
"""
return self.dist.rvs(size=size)
def transform_uniform(self, r):
"""
        Transformation from hypercube to physical parameters. The MultiNest native space is a unit hyper-cube
        in which all the parameters are uniformly distributed in [0, 1]. The user is required to transform
the hypercube parameters to physical parameters. This transformation is described in Sec 5.1
of arXiv:0809.3437.
These functions are based on the prior transformations provided here:
https://github.com/JohannesBuchner/MultiNest/blob/master/src/priors.f90
Parameters
----------
r : float
Hypercube value
Returns
-------
r2 : float
Transformed parameter value
"""
# Calculate transformation
u = self.mu + self.sigma * np.sqrt(2.0) * erfcinv(2.0*(1.0 - r))
return u
def get_bounds(self, Nstd = 5.0):
"""
Returns a tuple of the strict boundaries
Parameters
----------
Nstd : float, optional
Number of standard deviations away from the mean to define hard bounds
Returns
-------
bounds : tuple
Hard bounds ``(xmin, xmax)``
"""
return (self.dist.mean() - Nstd*self.dist.std(), self.dist.mean() + Nstd*self.dist.std())
################################################################################
# P r i o r U t i l i t i e s
################################################################################
def get_lnprior(theta, priors):
"""
Returns the summed log-prior probability of ``theta`` given ``priors``.
Parameters
----------
theta : list
State vector
priors : list of Prior
:class:`Prior` vector
Returns
-------
    lp : float
Log-prior probability
"""
assert len(theta) == len(priors)
lp = 0.0
# Loop over all parameters
for i, prior in enumerate(priors):
# Sum lnprobs
lp += prior.lnprior(theta[i])
return lp
def get_prior_unit_cube(cube, priors):
"""
Returns the transformed unit cube for MultiNest.
Parameters
----------
cube : list or numpy.array
Unit cube [0,1]
priors : list of instantiated Prior objects
:class:`Prior` vector
Returns
-------
cube : list or numpy.array
Physical parameters
"""
# Loop over all parameters
for i, prior in enumerate(priors):
# Transform from uniform to physical
cube[i] = prior.transform_uniform(cube[i])
return cube
def get_theta_bounds(priors):
"""
Returns the state vector parameters bounds.
Parameters
----------
priors : list of instantiated Prior objects
:class:`Prior` vector
Returns
-------
bounds : list
List of (min, max) tuples
"""
return [prior.get_bounds() for prior in priors]
def get_theta_names(priors):
"""
Returns a list of state vector names.
Parameters
----------
priors : list of instantiated Prior objects
:class:`Prior` vector
Returns
-------
theta_names : list
List of parameter names
"""
return [prior.theta_name for prior in priors]
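# ------------------------------------------------------------------------------
# Example usage (illustrative sketch; the parameter choices below are arbitrary):
# build a prior vector, evaluate the joint log-prior, and map a MultiNest-style
# unit-cube sample to physical parameters with the utilities defined above.
if __name__ == "__main__":
    example_priors = [UniformPrior(-5.0, 5.0, theta_name="x0"),
                      GaussianPrior(0.0, 1.0, theta_name="x1")]
    theta = [0.5, 0.2]
    print(get_lnprior(theta, example_priors))               # summed log-prior at theta
    print(get_prior_unit_cube([0.5, 0.5], example_priors))  # -> [0.0, 0.0] (interval midpoint, Gaussian mean)
    print(get_theta_bounds(example_priors))
    print(get_theta_names(example_priors))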
``` |
{
"source": "jlustigy/coronagraph",
"score": 3
} |
#### File: coronagraph/scripts/transit_demo.py
```python
from __future__ import (division as _, print_function as _,
absolute_import as _, unicode_literals as _)
# Import some standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys, os
# The location to *this* file
RELPATH = os.path.dirname(__file__)
# Import coronagraph model
import coronagraph as cg
from coronagraph import plot_setup
plot_setup.setup()
def earth_analog_transits(d = 10., ntran = 10, nout = 2):
'''
Simulate the transmission spectrum of Earth transiting a Sun-like star that
is `d` parsecs away.
Parameters
----------
d : float
Distance to system [pc]
ntran : int
Number of transits
nout : int
Number of out-of-transit transit durations to observe
Example
-------
>>> from transit_demo import earth_analog_transits
>>> earth_analog_transits()
.. plot::
:align: center
from scripts import transit_demo
from coronagraph import plot_setup
plot_setup.setup()
transit_demo.earth_analog_transits()
'''
# Read-in high-res Earth model data
lam, tdepth, fplan, fstar = cg.get_earth_trans_spectrum()
# Set telescope parameters
telescope = cg.Telescope(Tput = 0.5,
D = 15.,
R = 70,
lammin = 0.5,
lammax = 2.0)
planet = cg.Planet(a = 1.0,
d = d,
Rp = 1.0)
star = cg.Star(Rs = 1.0, Teff = 5700.)
# Instantiate transit noise model
tn = cg.TransitNoise(tdur = 8.0 * 60 * 60,
telescope = telescope,
planet = planet,
star = star,
ntran = ntran,
nout = nout)
# Calculate count rates
tn.run_count_rates(lam, tdepth, fstar)
# Plot the spectrum
fig, ax = tn.plot_spectrum()
plt.show()
# Plot the SNR
fig, ax = tn.plot_SNRn()
plt.show()
# Plot the number of transits to given SNR
fig, ax = tn.plot_ntran_to_wantsnr()
plt.show()
# This is the SNR we want on the max difference in planet radius
wantvsnr = 3
# Calculate the SNR we want for the transit depths to get the right
# SNR on the radius difference
wantsnr = wantvsnr * np.mean(tn.RpRs2) / (np.max(tn.RpRs2) - np.min(tn.RpRs2))
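    # Rationale (sketch): if sigma is the per-channel uncertainty on the transit
    # depth, the per-channel SNR is ~mean(RpRs2)/sigma while the SNR on the
    # max-min depth difference is ~(max(RpRs2) - min(RpRs2))/sigma. Requiring the
    # latter to equal wantvsnr therefore rescales the per-channel target to the
    # wantsnr value computed above.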
tn.recalc_wantsnr(wantsnr = wantsnr)
# Plot the number of transits to new SNR
fig, ax = tn.plot_ntran_to_wantsnr()
plt.show()
# Plot the time to new SNR
fig, ax = tn.plot_time_to_wantsnr()
plt.show()
# Plot the count rates
fig, ax = tn.plot_count_rates()
plt.show()
if __name__ == '__main__':
earth_analog_transits()
``` |
{
"source": "jlustigy/libra",
"score": 2
} |
#### File: libra/libra/variability.py
```python
import os
import numpy as np
from scipy.interpolate import interp1d
gp_path = os.path.join(os.path.dirname(__file__), 'data',
'spitzer_4.5um_gp.txt')
__all__ = ['spitzer_variability']
def spitzer_variability(times, seed=None):
"""
Mimic unidentified variability observed at 4.5 um in Spitzer.
Mimic the variability observed at 4.5 um in the Spitzer observations
    of TRAPPIST-1 from Delrez et al. 2018, by interpolating from a Gaussian
    process fit to the observations with transits and flares removed.
Parameters
----------
times : `~numpy.ndarray`
    seed : int, optional
random seed (can be specified for reproducibility)
Returns
-------
f : `~numpy.ndarray`
Fluxes to be multiplied by your time series
"""
if seed is not None:
np.random.seed(seed)
duration = times.max() - times.min()
gp_time, gp_flux = np.loadtxt(gp_path, unpack=True)
f = interp1d(gp_time, gp_flux, kind='linear', bounds_error=False,
fill_value=0)
if duration > gp_time.max() - gp_time.min():
raise NotImplementedError()
t_start = (gp_time.ptp() - duration) * np.random.rand()
times_from_zero = times - times.min()
return f(times_from_zero + t_start) + 1
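# Example usage (illustrative sketch; the module path follows this file's
# location, and the time grid/seed below are arbitrary -- `times` must span less
# than the baseline of the bundled GP file or a NotImplementedError is raised):
#
#     import numpy as np
#     from libra.variability import spitzer_variability
#
#     times = np.linspace(0.0, 0.25, 500)                            # days
#     flux = np.ones_like(times) * spitzer_variability(times, seed=42)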
``` |
{
"source": "jlustigy/samurai",
"score": 2
} |
#### File: samurai/samurai/fitlc_params.py
```python
import numpy as np
#--------------------------------------------------------------------
# Parameters
#--------------------------------------------------------------------
NUM_MCMC = 10000
NUM_MCMC_BURNIN = 0
SEED_AMP = 0.5
#REGULARIZATION = None
REGULARIZATION = 'GP'
#REGULARIZATION = 'GP2'
#REGULARIZATION = 'Tikhonov'
SIGMA_Y = 3.0
NOISELEVEL = 0.01
FLAG_REG_AREA = False
FLAG_REG_ALBD = False
#n_slice = 4
N_TYPE = 2
deg2rad = np.pi/180.
N_SIDE = 32
#INFILE = "data/raddata_12_norm"
##INFILE = "data/raddata_2_norm"
#INFILE = "mockdata/mock_simple_1_data"
#INFILE = "mockdata/mock_simple_3types_1_data"
# INFILE = 'mockdata/mock_simple_1_scattered0.01_data_with_noise'
INFILE = 'mockdata/simpleIGBP_quadrature_lc'
WAVEBAND_CENTERS = np.array([550., 650., 750., 850.])
WAVEBAND_WIDTHS = np.array([100., 100., 100., 100.])
HDF5_COMPRESSION = 'lzf'
def calculate_walkers(n_dim):
return 10*n_dim
```
#### File: samurai/samurai/main_fitlc_mcmc_EPOXI_NEW.py
```python
import numpy as np
import healpy as hp
import emcee
from scipy.optimize import minimize
import sys
import datetime
import multiprocessing
import os
from pdb import set_trace as stop
import h5py
__all__ = ["run_lightcurve_mcmc"]
from fitlc_params import NUM_MCMC, NUM_MCMC_BURNIN, SEED_AMP, SIGMA_Y, NOISELEVEL, \
REGULARIZATION, N_TYPE, deg2rad, N_SIDE, INFILE, calculate_walkers, HDF5_COMPRESSION, \
WAVEBAND_CENTERS, WAVEBAND_WIDTHS
import prior
import reparameterize
from map_utils import generate_tex_names, save2hdf5
NCPU = multiprocessing.cpu_count()
# March 2008
#LAT_S = -0.5857506 # sub-solar latitude
#LON_S = 267.6066184 # sub-solar longitude
#LAT_O = 1.6808370 # sub-observer longitude
#LON_O = 210.1242232 # sub-observer longitude
#===================================================
# basic functions
#===================================================
N_REGPARAM = 0
if REGULARIZATION is not None:
if REGULARIZATION == 'Tikhonov' :
N_REGPARAM = 1
elif REGULARIZATION == 'GP' :
N_REGPARAM = 3
elif REGULARIZATION == 'GP2' :
N_REGPARAM = 2
else :
N_REGPARAM = 0
#---------------------------------------------------
def lnprob(Y_array, *args):
"""
Misfit-function to be minimized
"""
# Unpack args
Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, flip, verbose = args
n_slice = len(Obs_ij)
n_band = len(Obs_ij[0])
# Parameter conversion
if (N_REGPARAM > 0):
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(Y_array[:-1*N_REGPARAM], N_TYPE, n_band, n_slice )
else:
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(Y_array, N_TYPE, n_band, n_slice )
# Model
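    # Index convention (inferred from the array names): i = time sample,
    # l = surface slice, k = surface type, j = wavelength band, so the model
    # light curve is the geometric kernel applied to the slice-area fractions
    # mixed with the per-type albedo spectra.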
Model_ij = np.dot(Kernel_il, np.dot(X_area_lk, X_albd_kj))
# Chi-squared statistic
Diff_ij = ( Obs_ij - Model_ij ) / Obsnoise_ij
Chi2_i = np.diag(np.dot( Diff_ij, Diff_ij.T ))
chi2 = np.sum(Chi2_i)
# Flat prior for albedo
Y_albd_kj = Y_array[0:N_TYPE*n_band].reshape([N_TYPE, n_band])
ln_prior_albd = prior.get_ln_prior_albd( Y_albd_kj )
# flat prior for area fraction
Y_area_lk = Y_array[N_TYPE*n_band:N_TYPE*n_band+n_slice*(N_TYPE-1)].reshape([n_slice, N_TYPE-1])
ln_prior_area = prior.get_ln_prior_area_new( Y_area_lk, X_area_lk[:,:-1] )
# flat ordering prior for labeling degeneracy
ln_prior_order = prior.get_ln_prior_ordering(X_albd_kj, X_area_lk)
# regularization
# ---Tikhonov Regularization
if REGULARIZATION is not None:
if ( REGULARIZATION == 'Tikhonov' ):
regparam = Y_array[-1*N_REGPARAM]
regterm_area = prior.regularize_area_tikhonov( X_area_lk, regparam )
# ---Gaussian Process
elif ( REGULARIZATION == 'GP' ):
regparam = ( Y_array[-1*N_REGPARAM], Y_array[-1*N_REGPARAM+1], Y_array[-1*N_REGPARAM+2] )
regterm_area = prior.regularize_area_GP( X_area_lk, regparam )
# ---Gaussian Process without constraint
elif ( REGULARIZATION == 'GP2' ):
regparam = ( Y_array[-1*N_REGPARAM], Y_array[-1*N_REGPARAM+1] )
regterm_area = prior.regularize_area_GP2( X_area_lk, regparam )
# ---Others
else :
regterm_area = 0.
# verbose
if verbose :
print 'chi2', chi2 - ln_prior_albd - ln_prior_area, chi2, ln_prior_albd, ln_prior_area
print 'chi2/d.o.f.', chi2 / (len(Y_array)*1.-1.), len(Y_array)
answer = - chi2 + ln_prior_albd + ln_prior_area + ln_prior_order + regterm_area
# Check for nans
if np.isnan(answer):
answer = -np.inf
if flip :
return -1. * answer
else :
return answer, Model_ij
#---------------------------------------------------
def run_initial_optimization(lnlike, data, guess, method="Nelder-Mead", run_dir=""):
print "Finding initial best-fit values using %s method..." %method
# Decompose data
Obs_ij = data[0]
n_slice = len(Obs_ij)
n_band = len(Obs_ij[0])
n_regparam = data[3]
# Run optimization
output = minimize(lnlike, guess, args=data, method=method)
# Get best-fitting params
best_fit = output["x"]
print "initial best-fit:", best_fit
# Get best-lnlike and BIC
lnprob_bestfit = lnlike( output['x'], *data )
BIC = 2.0 * lnprob_bestfit + len( output['x'] ) * np.log( len(Obs_ij.flatten()) )
print 'BIC: ', BIC
# Transform back to physical params
if (n_regparam > 0):
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(output["x"][:-1*n_regparam], N_TYPE, n_band, n_slice )
else:
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(output["x"], N_TYPE, n_band, n_slice )
#X_albd_kj, X_area_lk = reparameterize.transform_Y2X(output["x"], N_TYPE, n_band, n_slice )
X_albd_kj_T = X_albd_kj.T
# Flatten best-fitting physical parameters
bestfit = np.r_[ X_albd_kj.flatten(), X_area_lk.T.flatten() ]
# Calculate residuals
residuals = Obs_ij - np.dot( X_area_lk, X_albd_kj )
#print "residuals", residuals
# Create dictionaries of initial results to convert to hdf5
# datasets and attributes
init_dict_datasets = {
"best_fity" : best_fit,
"X_area_lk" : X_area_lk,
"X_albd_kj_T" : X_albd_kj_T,
"best_fitx" : bestfit
}
init_dict_attrs = {
"best_lnprob" : lnprob_bestfit,
"best_BIC" : BIC
}
"""
# Save initialization run as npz
print "Saving:", run_dir+"initial_minimize.npz"
np.savez(run_dir+"initial_minimize.npz", data=data, best_fity=best_fit, \
lnprob_bestfit=lnprob_bestfit, BIC=BIC, X_area_lk=X_area_lk, \
X_albd_kj_T=X_albd_kj_T, residuals=residuals, best_fitx =bestfit)
"""
return (init_dict_datasets, init_dict_attrs)
#===================================================
#if __name__ == "__main__":
def run_lightcurve_mcmc():
"""
"""
# print start time
now = datetime.datetime.now()
print now.strftime("%Y-%m-%d %H:%M:%S")
# Create directory for this run
startstr = now.strftime("%Y-%m-%d--%H-%M")
run_dir = "mcmc_output/" + startstr + "/"
os.mkdir(run_dir)
print "Created directory:", run_dir
# Save THIS file and the param file for reproducibility!
thisfile = os.path.basename(__file__)
paramfile = "fitlc_params.py"
newfile = os.path.join(run_dir, thisfile)
commandString1 = "cp " + thisfile + " " + newfile
commandString2 = "cp "+paramfile+" " + os.path.join(run_dir,paramfile)
os.system(commandString1)
os.system(commandString2)
print "Saved :", thisfile, " &", paramfile
# input data
Obs_ij = np.loadtxt(INFILE)
n_slice = len(Obs_ij)
n_band = len(Obs_ij[0])
Time_i = np.arange( n_slice )
Obsnoise_ij = ( NOISELEVEL * Obs_ij )
# set kernel
# Kernel_il = kernel(Time_i, n_slice)
Kernel_il = np.identity( n_slice )
# Sigma_ll = np.identity(n_slice)
# print 1/0
# set initial condition
# Y0_array = np.ones(N_TYPE*n_band+n_slice*(N_TYPE-1))
X0_albd_kj = 0.3+np.zeros([N_TYPE, n_band])
X0_area_lk = 0.1+np.zeros([n_slice, N_TYPE])
""" # Load perfect starting position from file
temp = np.load("mockdata/mock_simple_3types_1_albd_area.npz")
X0_albd_kj = temp["X0_albd_kj"]
X0_area_lk = temp["X0_area_lk"]
"""
# Create list of strings for Y & X parameter names
Y_names, X_names = generate_tex_names(N_TYPE, n_band, n_slice)
Y0_array = reparameterize.transform_X2Y(X0_albd_kj, X0_area_lk)
if ( N_REGPARAM > 0 ) :
Y0_array = np.append(Y0_array, np.array([10.]*N_REGPARAM) )
n_dim = len(Y0_array)
print '# of parameters', n_dim
# Y0_albd_kj = np.zeros([N_TYPE, len(Obs_ij[0])])
# Y0_area_lk = np.zeros([n_slice, N_TYPE-1])
# Y0_area_lk[:,0] = 1.
# Y0_list = [Y0_albd_kj, Y0_area_lk]
# print "Y0_array", Y0_array
if (N_REGPARAM > 0):
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(Y0_array[:-1*N_REGPARAM], N_TYPE, n_band, n_slice )
else:
X_albd_kj, X_area_lk = reparameterize.transform_Y2X(Y0_array, N_TYPE, n_band, n_slice )
# print "X_area_lk", X_area_lk
# print "X_albd_kj", X_albd_kj
########## use optimization for mcmc initial guesses ##########
data = (Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, True, False)
init_dict_datasets, init_dict_attrs = run_initial_optimization(lnprob, data, Y0_array, method="Nelder-Mead", run_dir=run_dir)
best_fit = init_dict_datasets["best_fity"]
########## Run MCMC ##########
# Number of dimensions is number of free parameters
n_dim = len(Y0_array)
# Number of walkers
n_walkers = calculate_walkers(n_dim)
# Data tuple to pass to emcee
data = (Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, False, False)
# Initialize emcee EnsembleSampler object
sampler = emcee.EnsembleSampler(n_walkers, n_dim, lnprob, args=data, threads=NCPU)
# Set starting guesses as gaussian noise ontop of intial optimized solution
# note: consider using emcee.utils.sample_ball(p0, std) (std: axis-aligned standard deviation.)
# to produce a ball of walkers around an initial parameter value.
p0 = SEED_AMP*np.random.rand(n_dim * n_walkers).reshape((n_walkers, n_dim)) + best_fit
if NUM_MCMC_BURNIN > 0:
print "MCMC until burn-in..."
# Run MCMC
pos, prob, state = sampler.run_mcmc( p0, NUM_MCMC_BURNIN )
# Save initial positions of chain[n_walkers, steps, n_dim]
burnin_chain = sampler.chain[:, :, :].reshape((-1, n_dim))
# Save chain[n_walkers, steps, n_dim] as npz
now = datetime.datetime.now()
print "Finished Burn-in MCMC:", now.strftime("%Y-%m-%d %H:%M:%S")
print "Saving:", run_dir+"mcmc_burnin.npz"
np.savez(run_dir+"mcmc_burnin.npz", pos=pos, prob=prob, burnin_chain=burnin_chain)
print "MCMC from burn-in..."
# Set initial starting position to the current state of chain
p0 = pos
# Reset sampler for production run
sampler.reset()
else:
print "MCMC from initial optimization..."
# Run MCMC
sampler.run_mcmc( p0, NUM_MCMC )
# Get emcee chain samples
original_samples = sampler.chain
# Get model evaluations
blobs = sampler.blobs
shape = (len(blobs), len(blobs[0]), len(blobs[0][0]), len(blobs[0][0][0]))
model_ij = np.reshape(blobs, shape)
############ Save HDF5 File ############
# Specify hdf5 save file and group names
hfile = os.path.join(run_dir, "samurai_out.hdf5")
grp_init_name = "initial_optimization"
grp_mcmc_name = "mcmc"
grp_data_name = "data"
compression = HDF5_COMPRESSION
# print
print "Saving:", hfile
# dictionary for global run metadata
hfile_attrs = {
"N_TYPE" : N_TYPE,
"N_SLICE" : n_slice,
"N_REGPARAM" : N_REGPARAM
}
# Create dictionaries for mcmc data and metadata
mcmc_dict_datasets = {
"samples" : original_samples,
"model_ij" : model_ij,
"p0" : p0
}
mcmc_dict_attrs = {
"Y_names" : Y_names,
"X_names" : X_names,
}
# Create dictionaries for observation data and metadata
data_dict_datasets = {
"Obs_ij" : Obs_ij,
"Obsnoise_ij" : Obsnoise_ij,
"Kernel_il" : Kernel_il,
"lam_j" : WAVEBAND_CENTERS,
"dlam_j" : WAVEBAND_WIDTHS
}
data_dict_attrs = {
"datafile" : INFILE
}
# Create hdf5 file
f = h5py.File(hfile, 'w')
# Add global metadata
for key, value in hfile_attrs.iteritems(): f.attrs[key] = value
# Create hdf5 groups (like a directory structure)
grp_init = f.create_group(grp_init_name) # f["initial_optimization/"]
grp_data = f.create_group(grp_data_name) # f["data/"]
grp_mcmc = f.create_group(grp_mcmc_name) # f[mcmc/]
# Save initial run datasets
for key, value in init_dict_datasets.iteritems():
grp_init.create_dataset(key, data=value, compression=compression)
# Save initial run metadata
for key, value in init_dict_attrs.iteritems():
grp_init.attrs[key] = value
# Save data datasets
for key, value in data_dict_datasets.iteritems():
grp_data.create_dataset(key, data=value, compression=compression)
# Save data metadata
for key, value in data_dict_attrs.iteritems():
grp_data.attrs[key] = value
# Save mcmc run datasets
for key, value in mcmc_dict_datasets.iteritems():
grp_mcmc.create_dataset(key, data=value, compression=compression)
# Save mcmc run metadata
for key, value in mcmc_dict_attrs.iteritems():
grp_mcmc.attrs[key] = value
# Close hdf5 file stream
f.close()
```
#### File: samurai/samurai/main_map_EPOXI_mcmc.py
```python
import numpy as np
import sys
import datetime
import multiprocessing
from scipy.optimize import minimize
import os
from pdb import set_trace as stop
import healpy as hp
import emcee
import h5py
import geometry
import prior
from reparameterize import *
from map_utils import generate_tex_names, save2hdf5
__all__ = ["run_map_mcmc"]
#--------------------------------------------------------------------
# Parameters
#--------------------------------------------------------------------
from map_EPOXI_params import N_TYPE, N_SLICE, MONTH, NOISELEVEL, \
NUM_MCMC, NUM_MCMC_BURNIN, SEED_AMP, N_SIDE, OMEGA, REGULARIZATION, \
calculate_walkers, HDF5_COMPRESSION, WAVEBAND_CENTERS, WAVEBAND_WIDTHS
NCPU = multiprocessing.cpu_count()
#--------------------------------------------------------------------
# set-up
#--------------------------------------------------------------------
if ( MONTH == 'March' ):
# from spectroscopic data
# Sub-Sun Lon/Lat = 97.091 -0.581 / W longitude, degrees
# Sub-SC Lon/Lat = 154.577 1.678 / W longitude, degrees
LAT_S = -0.581 # sub-solar latitude
LON_S = 262.909 # sub-solar longitude
LAT_O = 1.678 # sub-observer latitude
LON_O = 205.423 # sub-observer longitude
INFILE = "data/raddata_1_norm"
Time_i = np.arange(25)*1.
elif ( MONTH == 'June' ):
# from spectroscopic data
# Sub-Sun Lon/Lat = 79.023 22.531 / W longitude, degrees
# Sub-SC Lon/Lat = 154.535 0.264 / W longitude, degrees
LON_S = 280.977
LAT_S = 22.531
LON_O = 205.465
LAT_O = 0.264
# LON_O = 165.4663412
# LAT_O = -0.3521857
# LON_S = 239.1424068
# LAT_S = 21.6159766
INFILE = "data/raddata_2_norm"
Time_i = np.arange(25)*1.
elif ( MONTH == 'test' ):
# from spectroscopic data
# Sub-Sun Lon/Lat = 97.091 -0.581 / W longitude, degrees
# Sub-SC Lon/Lat = 154.577 1.678 / W longitude, degrees
LON_S = 280.977
LAT_S = 22.531
LON_O = 205.465
LAT_O = 0.264
# INFILE = "mockdata/mock_simple_JuneKernel_scattered0.01_data_with_noise"
INFILE = "mockdata/mock_simple_3types_JuneKernel_scattered0.01_data_with_noise"
Time_i = np.arange(25)*1.
elif ( MONTH == 'simpleIGBP' ):
LON_S = 90.0
LAT_S = 0.0
LON_O = 0.0
LAT_O = 0.0
INFILE = 'mockdata/simpleIGBP_quadrature_lc'
Time_i = np.arange(7)/7.*24.
else :
print 'ERROR: Invalid MONTH'
sys.exit()
N_REGPARAM = 0
if REGULARIZATION is not None:
if REGULARIZATION == 'Tikhonov' :
N_REGPARAM = 1
elif REGULARIZATION == 'GP' :
N_REGPARAM = 3
elif REGULARIZATION == 'GP2' :
N_REGPARAM = 2
else :
N_REGPARAM = 0
#--------------------------------------------------------------------
# log ( posterior probability )
#--------------------------------------------------------------------
def lnprob(Y_array, *args):
"""
Misfit-function to be minimized
"""
Obs_ij, Obsnoise_ij, Kernel_il, n_regparam, flip, verbose = args
n_band = len(Obs_ij[0])
# parameter conversion
if ( n_regparam > 0 ):
X_albd_kj, X_area_lk = transform_Y2X(Y_array[:-1*n_regparam], N_TYPE, n_band, N_SLICE)
else:
X_albd_kj, X_area_lk = transform_Y2X(Y_array, N_TYPE, n_band, N_SLICE)
# making matrix...
Model_ij = np.dot(Kernel_il, np.dot(X_area_lk, X_albd_kj))
Diff_ij = ( Obs_ij - Model_ij ) / Obsnoise_ij
Chi2_i = np.diag(np.dot( Diff_ij, Diff_ij.T ))
chi2 = np.sum( Chi2_i )
# flat prior for albedo
Y_albd_kj = Y_array[0:N_TYPE*n_band].reshape([N_TYPE, n_band])
ln_prior_albd = prior.get_ln_prior_albd( Y_albd_kj )
# flat prior for area fraction
Y_area_lk = Y_array[N_TYPE*n_band:N_TYPE*n_band+N_SLICE*(N_TYPE-1)].reshape([N_SLICE, N_TYPE-1])
ln_prior_area = prior.get_ln_prior_area_new( Y_area_lk, X_area_lk[:,:-1] )
# flat ordering prior for labeling degeneracy
ln_prior_order = prior.get_ln_prior_ordering(X_albd_kj, X_area_lk)
# regularization
# ---Tikhonov Regularization
if REGULARIZATION is not None:
if ( REGULARIZATION == 'Tikhonov' ):
regparam = Y_array[-1*n_regparam]
regterm_area = prior.regularize_area_tikhonov( X_area_lk, regparam )
# ---Gaussian Process
elif ( REGULARIZATION == 'GP' ):
regparam = ( Y_array[-1*n_regparam], Y_array[-1*n_regparam+1], Y_array[-1*n_regparam+2] )
regterm_area = prior.regularize_area_GP( X_area_lk, regparam )
# ---Gaussian Process without constraint
elif ( REGULARIZATION == 'GP2' ):
regparam = ( Y_array[-1*n_regparam], Y_array[-1*n_regparam+1] )
regterm_area = prior.regularize_area_GP2( X_area_lk, regparam )
# ---Others
else :
regterm_area = 0.
# verbose
if verbose :
print 'chi2', chi2 - ln_prior_albd - ln_prior_area, chi2, ln_prior_albd, ln_prior_area
print 'chi2/d.o.f.', chi2 / (len(Y_array)*1.-1.), len(Y_array)
answer = - chi2 + ln_prior_albd + ln_prior_area + ln_prior_order + regterm_area
# Check for nans
if np.isnan(answer):
answer = -np.inf
if flip :
return -1. * answer
else :
return answer, Model_ij
#===================================================
#if __name__ == "__main__":
def run_map_mcmc():
"""
"""
# print start time
now = datetime.datetime.now()
print now.strftime("%Y-%m-%d %H:%M:%S")
# Create directory for this run
startstr = now.strftime("%Y-%m-%d--%H-%M")
run_dir = os.path.join("mcmc_output", startstr)
os.mkdir(run_dir)
print "Created directory:", run_dir
# Save THIS file and the param file for reproducibility!
thisfile = os.path.basename(__file__)
paramfile = "map_EPOXI_params.py"
newfile = os.path.join(run_dir, thisfile)
commandString1 = "cp " + thisfile + " " + newfile
commandString2 = "cp "+paramfile+" " + os.path.join(run_dir,paramfile)
os.system(commandString1)
os.system(commandString2)
print "Saved :", thisfile, " &", paramfile
# input data
Obs_ij = np.loadtxt(INFILE)
Obsnoise_ij = ( NOISELEVEL * Obs_ij )
n_band = len(Obs_ij[0])
# set kernel
param_geometry = ( LAT_O, LON_O, LAT_S, LON_S, OMEGA )
Kernel_il = geometry.kernel( Time_i, N_SLICE, N_SIDE, param_geometry )
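    # Kernel_il weights each surface slice l at each observation time i according
    # to the illumination/visibility geometry above, so that the model becomes
    # Model_ij = Kernel_il . (X_area_lk . X_albd_kj) as evaluated in lnprob().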
# initialize the fitting parameters
X0_albd_kj = 0.3+np.zeros([N_TYPE, n_band])
X0_area_lk = 0.2+np.zeros([N_SLICE, N_TYPE])
Y0_array = transform_X2Y(X0_albd_kj, X0_area_lk)
if ( N_REGPARAM > 0 ) :
Y0_array = np.append(Y0_array, np.array([10.]*N_REGPARAM) )
n_dim = len(Y0_array)
print 'Y0_array', Y0_array
print '# of parameters', n_dim
print 'N_REGPARAM', N_REGPARAM
if (N_REGPARAM > 0):
X_albd_kj, X_area_lk = transform_Y2X(Y0_array[:-1*N_REGPARAM], N_TYPE, n_band, N_SLICE)
else:
X_albd_kj, X_area_lk = transform_Y2X(Y0_array, N_TYPE, n_band, N_SLICE)
# Create list of strings for Y & X parameter names
Y_names, X_names = generate_tex_names(N_TYPE, n_band, N_SLICE)
############ run minimization ############
# minimize
print "finding best-fit values..."
data = (Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, True, False)
output = minimize(lnprob, Y0_array, args=data, method="Nelder-Mead")
# output = minimize(lnprob, Y0_array, args=data, method="L-BFGS-B" )
best_fit = output["x"]
print "best-fit", best_fit
# more information about the best-fit parameters
data = (Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, True, False)
lnprob_bestfit = lnprob( output['x'], *data )
# compute BIC
BIC = 2.0 * lnprob_bestfit + len( output['x'] ) * np.log( len(Obs_ij.flatten()) )
print 'BIC: ', BIC
# best-fit values for physical parameters
if N_REGPARAM > 0:
X_albd_kj, X_area_lk = transform_Y2X(output["x"][:-1*N_REGPARAM], N_TYPE, n_band, N_SLICE)
else :
X_albd_kj, X_area_lk = transform_Y2X(output["x"], N_TYPE, n_band, N_SLICE)
X_albd_kj_T = X_albd_kj.T
# best-fit values for regularizing parameters
if REGULARIZATION is not None:
if REGULARIZATION == 'Tikhonov' :
print 'sigma', best_fit[-1]
elif REGULARIZATION == 'GP' :
print 'overall_amp', best_fit[-3]
print 'wn_rel_amp', np.exp( best_fit[-2] ) / ( 1. + np.exp( best_fit[-2] ) )
print 'lambda _angular', best_fit[-1] * ( 180. / np.pi )
elif REGULARIZATION == 'GP2' :
print 'overall_amp', best_fit[-2]
print 'lambda _angular', best_fit[-1]* ( 180. / np.pi )
# Flatten best-fitting physical parameters
bestfit = np.r_[ X_albd_kj.flatten(), X_area_lk.T.flatten() ]
# Create dictionaries of initial results to convert to hdf5
# datasets and attributes
init_dict_datasets = {
"best_fity" : best_fit,
"X_area_lk" : X_area_lk,
"X_albd_kj_T" : X_albd_kj_T,
"best_fitx" : bestfit
}
init_dict_attrs = {
"best_lnprob" : lnprob_bestfit,
"best_BIC" : BIC
}
"""
# Save initialization run as npz
print "Saving:", run_dir+"initial_minimize.npz"
np.savez(run_dir+"initial_minimize.npz", data=data, best_fity=best_fit, \
lnprob_bestfit=lnprob_bestfit, X_area_lk=X_area_lk, X_albd_kj_T=X_albd_kj_T)
"""
############ run MCMC ############
# Define MCMC parameters
n_dim = len(Y0_array)
n_walkers = calculate_walkers(n_dim)
# Define data tuple for emcee
data = (Obs_ij, Obsnoise_ij, Kernel_il, N_REGPARAM, False, False)
# Initialize emcee EnsembleSampler
sampler = emcee.EnsembleSampler(n_walkers, n_dim, lnprob, args=data, threads=NCPU)
# Guess starting position vector
p0 = SEED_AMP * np.random.rand(n_dim * n_walkers).reshape((n_walkers, n_dim)) + best_fit
# Do Burn-in run?
if NUM_MCMC_BURNIN > 0:
print "Running MCMC burn-in..."
# Run MCMC burn-in
pos, prob, state = sampler.run_mcmc( p0, NUM_MCMC_BURNIN )
# Save initial positions of chain[n_walkers, steps, n_dim]
burnin_chain = sampler.chain[:, :, :].reshape((-1, n_dim))
# Save chain[n_walkers, steps, n_dim] as npz
now = datetime.datetime.now()
print "Finished Burn-in MCMC:", now.strftime("%Y-%m-%d %H:%M:%S")
print "Saving:", run_dir+"mcmc_burnin.npz"
np.savez(run_dir+"mcmc_burnin.npz", pos=pos, prob=prob, burnin_chain=burnin_chain)
# Set initial starting position to the current state of chain
p0 = pos
# Reset sampler for production run
sampler.reset()
print "Running MCMC from burned-in position..."
else:
print "Running MCMC from initial optimization..."
# Run MCMC
sampler.run_mcmc( p0, NUM_MCMC )
# Extract chain from sampler
original_samples = sampler.chain
# Get model evaluations
blobs = sampler.blobs
shape = (len(blobs), len(blobs[0]), len(blobs[0][0]), len(blobs[0][0][0]))
model_ij = np.reshape(blobs, shape)
############ Save HDF5 File ############
# Specify hdf5 save file and group names
hfile = os.path.join(run_dir, "samurai_out.hdf5")
grp_init_name = "initial_optimization"
grp_mcmc_name = "mcmc"
grp_data_name = "data"
compression = HDF5_COMPRESSION
# print
print "Saving:", hfile
# dictionary for global run metadata
hfile_attrs = {
"N_TYPE" : N_TYPE,
"N_SLICE" : N_SLICE,
"N_REGPARAM" : N_REGPARAM
}
# Create dictionaries for mcmc data and metadata
mcmc_dict_datasets = {
"samples" : original_samples,
"model_ij" : model_ij,
"p0" : p0
}
mcmc_dict_attrs = {
"Y_names" : Y_names,
"X_names" : X_names,
}
# Create dictionaries for observation data and metadata
data_dict_datasets = {
"Obs_ij" : Obs_ij,
"Obsnoise_ij" : Obsnoise_ij,
"Kernel_il" : Kernel_il,
"lam_j" : WAVEBAND_CENTERS,
"dlam_j" : WAVEBAND_WIDTHS,
"Time_i" : Time_i
}
data_dict_attrs = {
"datafile" : INFILE,
"LON_S" : LON_S,
"LAT_S" : LAT_S,
"LON_O" : LON_O,
"LAT_O" : LAT_O
}
# Create hdf5 file
f = h5py.File(hfile, 'w')
# Add global metadata
for key, value in hfile_attrs.iteritems(): f.attrs[key] = value
# Create hdf5 groups (like a directory structure)
grp_init = f.create_group(grp_init_name) # f["initial_optimization/"]
grp_data = f.create_group(grp_data_name) # f["data/"]
grp_mcmc = f.create_group(grp_mcmc_name) # f[mcmc/]
# Save initial run datasets
for key, value in init_dict_datasets.iteritems():
grp_init.create_dataset(key, data=value, compression=compression)
# Save initial run metadata
for key, value in init_dict_attrs.iteritems():
grp_init.attrs[key] = value
# Save data datasets
for key, value in data_dict_datasets.iteritems():
grp_data.create_dataset(key, data=value, compression=compression)
# Save data metadata
for key, value in data_dict_attrs.iteritems():
grp_data.attrs[key] = value
# Save mcmc run datasets
for key, value in mcmc_dict_datasets.iteritems():
grp_mcmc.create_dataset(key, data=value, compression=compression)
# Save mcmc run metadata
for key, value in mcmc_dict_attrs.iteritems():
grp_mcmc.attrs[key] = value
# Close hdf5 file stream
f.close()
``` |
{
"source": "jluszcz/ListOfLists",
"score": 2
} |
#### File: jluszcz/ListOfLists/generator.py
```python
import argparse
import boto3
import htmlmin
import jinja2
import json
import logging
import os
import tempfile
from botocore.exceptions import ClientError
CARD_IMAGE_LOCATION = 'images/card.png'
def _get_file_for_read(file_name, bucket=None, local=False):
if local:
return file_name
f = tempfile.NamedTemporaryFile(delete=False)
logging.debug('Downloading %s to %s', file_name, f.name)
bucket.download_fileobj(file_name, f)
return f.name
def _get_file_for_write(file_name, local=False):
if local:
return file_name
return tempfile.NamedTemporaryFile(delete=False).name
def _read_template(template_file):
return jinja2.Template(template_file.read())
def read_template(bucket=None, local=False):
with open(_get_file_for_read('index.template', bucket, local)) as f:
return _read_template(f)
def _read_list(list_file):
return json.load(list_file)
def read_list(bucket=None, local=False):
with open(_get_file_for_read(f"{os.environ['SITE']}.json", bucket, local)) as f:
return _read_list(f)
def _card_image_exists(site_bucket, local):
if not site_bucket:
return False
try:
site_bucket.Object(CARD_IMAGE_LOCATION).load()
return True
except ClientError as e:
if e.response['Error']['Code'] != '404':
            logging.warning('Failed to check existence of %s: %s', CARD_IMAGE_LOCATION, e)
return False
def write_index(template, list_data, site_bucket=None, local=False, minify=True):
filename = _get_file_for_write('index.html', local)
template_data = {
'title': list_data['title'],
'lists': list_data['lists'],
'card_url': None,
}
if _card_image_exists(site_bucket, local):
template_data['card_url'] = f"https://{os.environ['SITE_URL']}/{CARD_IMAGE_LOCATION}"
rendered_site = template.render(**template_data)
if minify:
rendered_site = htmlmin.minify(rendered_site, remove_comments=True, remove_empty_space=True)
logging.debug('Minified index.html')
with open(filename, 'w') as f:
f.write(rendered_site)
if not local:
logging.debug('Uploading index.html')
with open(filename, 'r') as f:
site_bucket.put_object(Key='index.html', Body=f.read(), ContentType='text/html')
def parse_args():
parser = argparse.ArgumentParser(description='List of lists website generator')
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='If provided, log at DEBUG instead of INFO.')
parser.add_argument('--s3', action='store_true', help='If provided, use S3 rather than local files.')
return parser.parse_args()
def setup_logging(verbose=False):
"""Sets up logging using the default python logger, at INFO or DEBUG, depending on the value of verbose"""
logger = logging.getLogger()
logger.setLevel(logging.INFO if not verbose else logging.DEBUG)
for boto_module in ['boto3', 'botocore', 's3transfer']:
logging.getLogger(boto_module).setLevel(logging.CRITICAL)
def get_bucket(bucket_name, local=False):
if local:
return None
s3 = boto3.resource('s3')
return s3.Bucket(bucket_name)
def write_index_to_bucket(local=False):
gen_bucket = get_bucket(f"{os.environ['SITE_URL']}-generator", local)
site_bucket = get_bucket(os.environ['SITE_URL'], local)
template = read_template(gen_bucket, local)
list_data = read_list(gen_bucket, local)
write_index(template, list_data, site_bucket, local)
def lambda_handler(event, context):
"""Entry point for Lambda"""
setup_logging()
write_index_to_bucket()
def main():
"""Entry point for running as a CLI"""
args = parse_args()
setup_logging(args.verbose)
write_index_to_bucket(local=not args.s3)
if __name__ == '__main__':
main()
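# Example invocation (illustrative; the site name, URL, and JSON contents are
# hypothetical):
#
#     SITE=mylists SITE_URL=mylists.example.com python generator.py -v
#
# This reads `index.template` and `mylists.json` (locally, or from the
# "<SITE_URL>-generator" S3 bucket when --s3 is given); the JSON is expected to
# provide the keys consumed by the template, e.g.:
#
#     {
#       "title": "My Lists",
#       "lists": [...]
#     }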
``` |
{
"source": "jluszcz/Splitit",
"score": 2
} |
#### File: Splitit/splitit/app.py
```python
from chalice import Chalice, BadRequestError, NotFoundError, ConflictError
from chalicelib import splitit
app = Chalice(app_name='splitit')
app.debug = True
@app.route('/check', methods=['POST'])
def create_check():
request_body = app.current_request.json_body
try:
check = splitit.put_check(request_body.get('date'), request_body.get('description'))
except ValueError as e:
raise BadRequestError(e)
return check.to_json()
def _get_check(check_id):
check = splitit.get_check(check_id)
if not check:
raise NotFoundError('%s does not exist' % check_id)
return check
@app.route('/check/{check_id}', methods=['GET'])
def get_check(check_id):
return _get_check(check_id).to_json()
@app.route('/check/{check_id}/summary', methods=['GET'])
def get_check_summary(check_id):
check = _get_check(check_id)
summary = splitit.summarize_check(check)
return summary
@app.route('/check/{check_id}', methods=['PUT'])
def update_check(check_id):
request_body = app.current_request.json_body
check = _get_check(check_id)
try:
check = splitit.update_check(check, request_body.get('date'), request_body.get('description'))
except ValueError as e:
raise BadRequestError(e)
return check.to_json()
@app.route('/check/{check_id}', methods=['DELETE'])
def remove_check(check_id):
check = splitit.remove_check(check_id)
if check:
return check.to_json()
return {}
@app.route('/check/{check_id}/location', methods=['POST'])
def create_location(check_id):
request_body = app.current_request.json_body
check = _get_check(check_id)
try:
location = splitit.put_location(check, request_body.get('name'), request_body.get('taxInCents'),
request_body.get('tipInCents'))
except ValueError as e:
raise BadRequestError(e)
return location.to_json()
@app.route('/check/{check_id}/location/{location_id}', methods=['PUT'])
def update_location(check_id, location_id):
request_body = app.current_request.json_body
check = _get_check(check_id)
try:
location = splitit.update_location(check, location_id, request_body.get('name'), request_body.get('taxInCents'),
request_body.get('tipInCents'))
except ValueError as e:
raise BadRequestError(e)
except splitit.ConflictError as e:
raise ConflictError(e)
if not location:
        raise NotFoundError('No location found for %s' % location_id)
return location.to_json()
@app.route('/check/{check_id}/location/{location_id}', methods=['DELETE'])
def remove_location(check_id, location_id):
check = _get_check(check_id)
try:
location = splitit.delete_location(check, location_id)
except ValueError as e:
raise BadRequestError(e)
if location:
return location.to_json()
return {}
@app.route('/check/{check_id}/line-item', methods=['POST'])
def create_line_item(check_id):
request_body = app.current_request.json_body
check = _get_check(check_id)
try:
line_item = splitit.put_line_item(check, request_body.get('name'), request_body.get('locationId'),
request_body.get('owners'), request_body.get('amountInCents'))
except ValueError as e:
raise BadRequestError(e)
return line_item.to_json()
@app.route('/check/{check_id}/line-item/{line_item_id}', methods=['PUT'])
def update_line_item(check_id, line_item_id):
request_body = app.current_request.json_body
check = _get_check(check_id)
try:
line_item = splitit.update_line_item(check, line_item_id, request_body.get('name'), request_body.get('locationId'),
request_body.get('ownersToAdd'), request_body.get('ownersToRemove'),
request_body.get('amountInCents'))
except ValueError as e:
raise BadRequestError(e)
except KeyError as e:
raise NotFoundError(e)
return line_item.to_json()
@app.route('/check/{check_id}/line-item/{line_item_id}', methods=['DELETE'])
def remove_line_item(check_id, line_item_id):
check = _get_check(check_id)
line_item = splitit.delete_line_item(check, line_item_id)
if line_item:
return line_item.to_json()
return {}
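# Example interaction (illustrative sketch; IDs are placeholders). With the
# standard Chalice dev server (`chalice local`, which serves on port 8000 by
# default), a check can be created and then summarized like:
#
#     curl -X POST http://localhost:8000/check \
#          -H 'Content-Type: application/json' \
#          -d '{"date": "2019-05-20", "description": "Team dinner"}'
#
#     curl http://localhost:8000/check/<check_id>/summary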
```
#### File: Splitit/splitit/test_splitit.py
```python
import pytest
from chalicelib import splitit, model
ID = '88786937-E9FA-4013-9FA6-D419C3E16815'
VALID_DATE = '2019-05-20'
VALID_DESC = 'Foo bar baz'
VALID_LOCATION_NAME = 'Some Bar'
VALID_TAX = 100
VALID_TIP = 200
VALID_ITEM_NAME = 'Some Drink'
VALID_AMOUNT = 300
VALID_OWNERS = ['Foo', 'Bar', 'Baz']
@pytest.fixture(autouse=True)
def setup_fake_ddb(mocker):
mocker.patch('chalicelib.model.Check.save')
mocker.patch('chalicelib.model.Check.delete')
mocker.patch('chalicelib.model.LineItem.save')
mocker.patch('chalicelib.model.LineItem.delete')
def test_get_check_no_check(mocker):
mocker.patch('chalicelib.model.Check.get', side_effect=model.Check.DoesNotExist)
check = splitit.get_check(ID)
assert check is None
def test_get_check(mocker):
mocker.patch('chalicelib.model.Check.get', return_value=model.Check())
check = splitit.get_check(ID)
assert check is not None
def test_put_check_date_is_none():
with pytest.raises(ValueError, match=r'^Invalid date'):
splitit.put_check(date=None, description='Foo bar baz')
def test_put_check_date_is_bad():
with pytest.raises(ValueError, match=r'^Invalid date'):
splitit.put_check(date='20190520', description='Foo bar baz')
def test_put_check_description_is_none():
with pytest.raises(ValueError, match=r'^Invalid description'):
splitit.put_check(date=VALID_DATE, description=None)
def test_put_check_description_is_bad():
with pytest.raises(ValueError, match=r'^Invalid description'):
splitit.put_check(date=VALID_DATE, description='')
def test_put_check():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
assert check.check_id
assert check.create_timestamp
assert VALID_DATE == check.date
assert VALID_DESC == check.description
assert 1 == len(check.locations)
loc = check.locations[0]
assert not loc.name
assert 0 == loc.line_item_count
assert 0 == loc.tax_in_cents
assert 0 == loc.tip_in_cents
assert not check.line_item_ids
def test_update_check_no_changes():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
splitit.update_check(check, date=None, description=None)
model.Check.save.assert_not_called()
def test_update_check_invalid_date():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'^Invalid date.*'):
splitit.update_check(check, date='20190520', description=None)
def test_update_check_date():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
new_date = '2019-05-21'
assert VALID_DATE != new_date
splitit.update_check(check, date=new_date, description=None)
model.Check.save.assert_called_once()
assert new_date == check.date
assert VALID_DESC == check.description
def test_update_check_description():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
new_desc = 'Bar baz quux'
assert VALID_DESC != new_desc
splitit.update_check(check, date=None, description=new_desc)
model.Check.save.assert_called_once()
assert VALID_DATE == check.date
assert new_desc == check.description
def test_remove_check(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
mocker.patch('chalicelib.model.Check.get', return_value=check)
check = splitit.remove_check(check.check_id)
assert check is not None
model.Check.delete.assert_called_once()
def test_remove_no_check(mocker):
mocker.patch('chalicelib.model.Check.get', side_effect=model.Check.DoesNotExist)
check = splitit.remove_check(ID)
assert check is None
model.Check.delete.assert_not_called()
def _test_put_location_invalid_tax(tax):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'^Invalid tax'):
splitit.put_location(check, tax_in_cents=tax)
def test_put_location_tax_is_not_int():
_test_put_location_invalid_tax('100')
def test_put_location_tax_is_negative():
_test_put_location_invalid_tax(-100)
def _test_put_location_invalid_tip(tip):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'^Invalid tip'):
splitit.put_location(check, tip_in_cents=tip)
def test_put_location_tip_is_not_int():
_test_put_location_invalid_tip('100')
def test_put_location_tip_is_negative():
_test_put_location_invalid_tip(-100)
def test_put_location_duplicate_name():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'already exists'):
splitit.put_location(check)
def test_put_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.put_location(check, location_name=VALID_LOCATION_NAME, tax_in_cents=VALID_TAX, tip_in_cents=VALID_TIP)
assert 2 == len(check.locations)
assert location in check.locations
assert location.location_id
assert VALID_LOCATION_NAME == location.name
assert VALID_TAX == location.tax_in_cents
assert VALID_TIP == location.tip_in_cents
def test_put_location_no_tip():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.put_location(check, location_name=VALID_LOCATION_NAME, tax_in_cents=VALID_TAX)
assert 2 == len(check.locations)
assert location in check.locations
assert location.location_id
assert VALID_LOCATION_NAME == location.name
assert VALID_TAX == location.tax_in_cents
assert not location.tip_in_cents
def test_put_location_no_tax():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.put_location(check, location_name=VALID_LOCATION_NAME, tip_in_cents=VALID_TIP)
assert 2 == len(check.locations)
assert location in check.locations
assert location.location_id
assert VALID_LOCATION_NAME == location.name
assert not location.tax_in_cents
assert VALID_TIP == location.tip_in_cents
def test_update_non_existent_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = splitit.update_location(check, location_id=ID)
assert location is None
model.Check.save.assert_not_called()
def test_update_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = check.locations[0]
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
location = splitit.update_location(check, location_id=location.location_id, name=VALID_LOCATION_NAME, tip_in_cents=VALID_TIP,
tax_in_cents=VALID_TAX)
assert VALID_LOCATION_NAME == location.name
assert VALID_TAX == location.tax_in_cents
assert VALID_TIP == location.tip_in_cents
model.Check.save.assert_called_once()
def test_update_location_name():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = check.locations[0]
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
location = splitit.update_location(check, location_id=location.location_id, name=VALID_LOCATION_NAME)
assert VALID_LOCATION_NAME == location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
model.Check.save.assert_called_once()
def test_update_location_tip():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = check.locations[0]
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
location = splitit.update_location(check, location_id=location.location_id, tip_in_cents=VALID_TIP)
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP == location.tip_in_cents
model.Check.save.assert_called_once()
def test_update_location_tax():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = check.locations[0]
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX != location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
location = splitit.update_location(check, location_id=location.location_id, tax_in_cents=VALID_TAX)
assert VALID_LOCATION_NAME != location.name
assert VALID_TAX == location.tax_in_cents
assert VALID_TIP != location.tip_in_cents
model.Check.save.assert_called_once()
def test_update_location_no_change():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
model.Check.save.reset_mock()
location = check.locations[0]
splitit.update_location(check, location_id=location.location_id)
model.Check.save.assert_not_called()
def test_delete_only_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = check.locations[0]
with pytest.raises(ValueError, match=r'all locations'):
splitit.delete_location(check, location.location_id)
def test_delete_non_existent_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.delete_location(check, ID)
assert location is None
def test_delete_location_with_line_items():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = check.locations[0]
location.line_item_count += 1
with pytest.raises(ValueError, match=r'with line items'):
splitit.delete_location(check, location.location_id)
def test_delete_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.put_location(check, location_name=VALID_LOCATION_NAME)
model.Check.save.reset_mock()
assert 2 == len(check.locations)
deleted = splitit.delete_location(check, location.location_id)
assert 1 == len(check.locations)
assert location.location_id == deleted.location_id
model.Check.save.assert_called_once()
def test_put_line_item_no_description():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'Missing name'):
splitit.put_line_item(check, None)
def test_put_line_item_bad_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(KeyError, match=r'Location'):
splitit.put_line_item(check, VALID_ITEM_NAME, ID)
def test_put_line_item_check_has_one_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
default_location_id = check.locations[0].location_id
location = splitit.put_location(check, VALID_LOCATION_NAME)
splitit.delete_location(check, default_location_id)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
assert line_item is not None
assert location.location_id == line_item.location_id
def test_put_line_item_check_has_multiple_locations():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
location = splitit.put_location(check, VALID_LOCATION_NAME)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME, location.location_id)
assert line_item is not None
assert location.location_id == line_item.location_id
def test_put_line_item_check_default_location():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
default_location_id = check.locations[0].location_id
splitit.put_location(check, VALID_LOCATION_NAME)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
assert line_item is not None
assert default_location_id == line_item.location_id
def _test_put_line_item_invalid_amount(amount):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'^Invalid amount'):
splitit.put_line_item(check, VALID_ITEM_NAME, amount_in_cents=amount)
def test_put_line_item_invalid_amount_not_int():
_test_put_line_item_invalid_amount('100')
def test_put_line_item_invalid_amount_is_negative():
_test_put_line_item_invalid_amount(-100)
def test_put_line_item_duplicate_owners():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(ValueError, match=r'Duplicate owner'):
splitit.put_line_item(check, VALID_ITEM_NAME, owners=['Foo', 'Foo'])
def test_put_line_item():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
default_location_id = check.locations[0].location_id
line_item = splitit.put_line_item(check, VALID_ITEM_NAME, amount_in_cents=VALID_AMOUNT, owners=VALID_OWNERS)
assert line_item is not None
assert default_location_id == line_item.location_id
assert check.check_id == line_item.check_id
assert VALID_ITEM_NAME == line_item.name
assert VALID_AMOUNT == line_item.amount_in_cents
assert VALID_OWNERS == line_item.owners
def test_update_line_item_not_in_check(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', side_effect=model.LineItem.DoesNotExist)
with pytest.raises(KeyError, match=r'Line Item'):
splitit.update_line_item(check, line_item.line_item_id)
def test_update_non_existent_line_item():
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
with pytest.raises(KeyError, match=r'Line Item'):
splitit.update_line_item(check, ID)
def test_update_line_item_no_changes(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id)
model.LineItem.save.assert_not_called()
def test_update_line_item_name(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id, name='Modified %s' % VALID_ITEM_NAME)
assert VALID_ITEM_NAME != line_item.name
model.LineItem.save.assert_called_once()
def test_update_line_item_location(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
default_location = check.locations[0]
location = splitit.put_location(check, VALID_LOCATION_NAME)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.Check.save.reset_mock()
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id, location_id=location.location_id)
assert location.location_id == line_item.location_id
assert 0 == default_location.line_item_count
assert 1 == location.line_item_count
model.Check.save.assert_called_once()
model.LineItem.save.assert_called_once()
def test_update_line_item_to_non_existent_location(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
with pytest.raises(KeyError, match=r'Location'):
splitit.update_line_item(check, line_item.line_item_id, location_id=ID)
def test_update_line_item_add_owners(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id, owners_to_add=VALID_OWNERS)
assert VALID_OWNERS == line_item.owners
model.LineItem.save.assert_called_once()
def test_update_line_item_add_duplicate_owner(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME, owners=VALID_OWNERS)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
with pytest.raises(ValueError, match=r'Duplicate owners'):
splitit.update_line_item(check, line_item.line_item_id, owners_to_add=[VALID_OWNERS[0]])
def test_update_line_item_remove_owners(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME, owners=VALID_OWNERS)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id, owners_to_remove=[VALID_OWNERS[0]])
assert VALID_OWNERS[1:] == line_item.owners
model.LineItem.save.assert_called_once()
def test_update_line_item_amount(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.LineItem.save.reset_mock()
line_item = splitit.update_line_item(check, line_item.line_item_id, amount_in_cents=VALID_AMOUNT)
assert VALID_AMOUNT == line_item.amount_in_cents
model.LineItem.save.assert_called_once()
def test_remove_non_existent_line_item(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
mocker.patch('chalicelib.model.LineItem.get', side_effect=model.LineItem.DoesNotExist)
assert splitit.delete_line_item(check, ID) is None
def test_remove_line_item_not_in_check(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = model.LineItem()
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.Check.save.reset_mock()
line_item = splitit.delete_line_item(check, line_item.line_item_id)
assert line_item
model.Check.save.assert_not_called()
model.LineItem.delete.assert_called_once()
def test_remove_line_item(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
line_item = splitit.put_line_item(check, VALID_ITEM_NAME)
mocker.patch('chalicelib.model.LineItem.get', return_value=line_item)
model.Check.save.reset_mock()
line_item = splitit.delete_line_item(check, line_item.line_item_id)
location = check.locations[0]
assert line_item
assert 0 == location.line_item_count
model.Check.save.assert_called_once()
model.LineItem.delete.assert_called_once()
def test_summarize_check_no_line_items(mocker):
mocker.patch('chalicelib.model.LineItem.batch_get', return_value=[])
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
splitit.put_location(check, location_name=VALID_LOCATION_NAME)
summary = splitit.summarize_check(check)
assert check.description == summary['description']
assert check.date == summary['date']
assert 2 == len(summary['locations'])
assert not summary['amountInCentsByOwner']
def _create_line_item(location_id, owners, amount_in_cents):
line_item = model.LineItem()
line_item.name = VALID_ITEM_NAME
line_item.location_id = location_id
line_item.owners = owners
line_item.amount_in_cents = amount_in_cents
return line_item
def test_summarize_check(mocker):
check = splitit.put_check(date=VALID_DATE, description=VALID_DESC)
default_location_id = check.locations[0].location_id
splitit.update_location(check, default_location_id, tax_in_cents=500, tip_in_cents=1000)
location = splitit.put_location(check, location_name=VALID_LOCATION_NAME)
location_id = location.location_id
splitit.update_location(check, location_id, tax_in_cents=1500, tip_in_cents=2000)
line_items = [
_create_line_item(default_location_id, ['Foo', 'Bar', 'Baz'], 3000),
_create_line_item(location_id, ['Foo'], 500),
_create_line_item(location_id, ['Bar'], 1500),
_create_line_item(location_id, ['Baz'], 1000),
_create_line_item(location_id, ['Baz'], 2000),
]
mocker.patch('chalicelib.model.LineItem.batch_get', return_value=line_items)
summary = splitit.summarize_check(check)
assert check.description == summary['description']
assert check.date == summary['date']
assert 2 == len(summary['locations'])
assert 2350 == summary['amountInCentsByOwner']['Foo']
assert 4050 == summary['amountInCentsByOwner']['Bar']
assert 6600 == summary['amountInCentsByOwner']['Baz']
``` |
{
"source": "jluttine/d3py",
"score": 3
} |
#### File: d3py/d3py/plot.py
```python
from d3py import core
def line_chart(x, y):
# Build Javascript
js = core.populate_template(
core.read_lib('js', 'line_chart'),
data=core.dict_to_json([dict(x=xi, y=yi) for (xi, yi) in zip(x, y)]),
# x=core.array_to_json(x),
# y=core.array_to_json(y),
#width=width,
#height=height,
#colors=core.array_to_json(colors)
)
# Build CSS
css = core.populate_template(core.read_lib('css', 'line_chart'))
# Build graph HTML
return core.graph(js, css)
def chord_diagram(data, width=950, height=500, colors=None):
# Define default color cycle
if colors is None:
colors = [
"#000000", "#FFDD89", "#957244", "#F26223"
]
# Build Javascript
js = core.populate_template(
core.read_lib('js', 'chord_diagram'),
data=core.array_to_json(data),
width=width,
height=height,
colors=core.array_to_json(colors)
)
# Build CSS
css = core.populate_template(core.read_lib('css', 'chord_diagram'))
# Build graph HTML
return core.graph(js, css)
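# Illustrative usage sketch (not part of the original module; the sample data
# below is hypothetical): shows how the two chart builders above might be called.
def _example_charts():
    # line_chart takes parallel x/y sequences and returns embeddable graph HTML
    html_line = line_chart([0, 1, 2, 3], [1.0, 2.5, 2.0, 4.0])
    # chord_diagram takes a square matrix of flows between groups
    html_chord = chord_diagram(
        [[0, 5, 2],
         [5, 0, 3],
         [2, 3, 0]],
        width=600,
        height=400,
    )
    return html_line, html_chord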
``` |
{
"source": "jluttine/haskpy",
"score": 3
} |
#### File: haskpy/haskpy/conftest.py
```python
import sys
import hypothesis.strategies as st
from hypothesis import given
def is_pytest():
return "pytest" in sys.modules
def pytest_configure(config):
# Workaround for Hypothesis bug causing flaky tests if they use characters
# or text: https://github.com/HypothesisWorks/hypothesis/issues/2108
@given(st.text())
def foo(x):
pass
foo()
return
# PYTEST_RUNNING = False
# def pytest_configure(config):
# global PYTEST_RUNNING
# PYTEST_RUNNING = True
# return
# def pytest_unconfigure(config):
# global PYTEST_RUNNING
# PYTEST_RUNNING = False
# return
```
#### File: haskpy/typeclasses/_contravariant.py
```python
from hypothesis import given
from hypothesis import strategies as st
from .typeclass import Type
from haskpy.internal import class_function, abstract_class_function
from haskpy.testing import assert_output
from haskpy import testing
class Contravariant(Type):
"""Contravariant functor
Minimal complete definition:
- ``contramap`` method
"""
def contramap(self, f):
"""f b -> (a -> b) -> f a"""
raise NotImplementedError()
def contrareplace(self, x):
"""f b -> b -> f a"""
return self.contramap(lambda _: x)
#
# Sampling methods for property tests
#
@abstract_class_function
def sample_contravariant_type_constructor(cls):
pass
#
# Test typeclass laws
#
@class_function
@assert_output
def assert_contravariant_identity(cls, v):
from haskpy.utils import identity
return(
v,
v.contramap(identity),
)
@class_function
@given(st.data())
def test_contravariant_identity(cls, data):
# Draw types
a = data.draw(testing.sample_type())
t = data.draw(cls.sample_contravariant_type_constructor())
ta = t(a)
# Draw values
v = data.draw(ta)
cls.assert_contravariant_identity(v, data=data)
return
@class_function
@assert_output
def assert_contravariant_composition(cls, v, f, g):
return (
v.contramap(f).contramap(g),
v.contramap(lambda x: f(g(x))),
)
@class_function
@given(st.data())
def test_contravariant_composition(cls, data):
# Draw types
a = data.draw(testing.sample_type())
b = data.draw(testing.sample_eq_type())
c = data.draw(testing.sample_eq_type())
t = data.draw(cls.sample_contravariant_type_constructor())
ta = t(a)
# Draw values
v = data.draw(ta)
f = data.draw(testing.sample_function(b))
g = data.draw(testing.sample_function(c))
cls.assert_contravariant_composition(v, f, g, data=data)
return
#
# Test laws based on default implementations
#
@class_function
@assert_output
def assert_contravariant_contramap(cls, v, f):
from haskpy import contramap
return (
v.contramap(f),
contramap(f, v),
)
@class_function
@given(st.data())
def test_contravariant_contramap(cls, data):
# Draw types
a = data.draw(testing.sample_type())
b = data.draw(testing.sample_eq_type())
t = data.draw(cls.sample_contravariant_type_constructor())
ta = t(a)
# Draw values
v = data.draw(ta)
f = data.draw(testing.sample_function(b))
cls.assert_contravariant_contramap(v, f, data=data)
return
@class_function
@assert_output
def assert_contravariant_contrareplace(cls, v, x):
from haskpy import contrareplace
return (
Contravariant.contrareplace(v, x),
contrareplace(x, v),
v.contrareplace(x),
)
@class_function
@given(st.data())
def test_contravariant_contrareplace(cls, data):
# Draw types
a = data.draw(testing.sample_type())
b = data.draw(testing.sample_eq_type())
t = data.draw(cls.sample_contravariant_type_constructor())
ta = t(a)
# Draw values
v = data.draw(ta)
x = data.draw(b)
cls.assert_contravariant_contrareplace(v, x, data=data)
return
```
#### File: haskpy/typeclasses/equality.py
```python
import hypothesis.strategies as st
from hypothesis import given
from .typeclass import Type
from haskpy.internal import class_function
from haskpy import testing
from haskpy.types.function import function
class Eq(Type):
"""Equality and inequality comparison
Minimal complete definition:
..
(__eq__ | __ne__) & sample_type
Minimal complete definition for type constructors:
..
(__eq_generic__ | (__eq_test__ & (__eq__ | __ne__))) & sample_eq_type
"""
def __eq__(self, other):
"""Equality comparison: ``Eq a => a -> a -> bool``
Can be used as ``==`` operator.
The default implementation uses ``__ne__``.
"""
return not self.__ne__(other)
def __ne__(self, other):
"""Inequality comparison: ``Eq a => a -> a -> bool``
Can be used as ``!=`` operator.
The default implementation uses ``__eq__``.
"""
return not self.__eq__(other)
#
# Sampling functions for property tests
#
@class_function
def sample_eq_type(cls):
"""Sample Eq type
By default, assume that the type is always Eq. Subclasses should
override this when needed, for instance, if a type from a type
constructor is Eq only if its type argument is Eq (e.g., Maybe).
"""
return cls.sample_type()
#
# Test typeclass laws
#
@class_function
def assert_eq_reflexivity(cls, x):
assert (x == x) is True
return
@class_function
@given(st.data())
def test_eq_reflexivity(cls, data):
"""Test ``x == x = True``"""
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
cls.assert_eq_reflexivity(x)
return
@class_function
def assert_eq_symmetry(cls, x, y):
assert (x == y) == (y == x)
return
@class_function
@given(st.data())
def test_eq_symmetry(cls, data):
"""Test ``x == y = y == x``"""
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
y = data.draw(a)
cls.assert_eq_symmetry(x, y)
return
@class_function
def assert_eq_transitivity(cls, x, y, z):
cond = (x == y) and (y == z)
then = (x == z)
assert (cond and then) or (not cond)
return
@class_function
@given(st.data())
def test_eq_transitivity(cls, data):
"""Test if ``x == y && y == z = True``, then ``x == z = True``"""
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
y = data.draw(a)
z = data.draw(a)
cls.assert_eq_transitivity(x, y, z)
return
@class_function
def assert_eq_substitutivity(cls, x, y, f):
cond = (x == y)
then = (f(x) == f(y))
assert (cond and then) or (not cond)
return
@class_function
@given(st.data())
def test_eq_substitutivity(cls, data):
"""Test if ``x == y = True``, then ``f(x) == f(y) = True``"""
# Draw types
a = data.draw(cls.sample_eq_type())
b = data.draw(testing.sample_eq_type())
# Draw values
x = data.draw(a)
y = data.draw(a)
f = data.draw(testing.sample_function(b))
# Note: the only requirement for arbitrary functions is that the input
# variable has __eq__ implemented. And we have that for Eq type so this
# test can always be run.
cls.assert_eq_substitutivity(x, y, f)
return
@class_function
def assert_eq_negation(cls, x, y):
neq = (x != y)
eq = (x == y)
assert (neq == (not eq))
return
@class_function
@given(st.data())
def test_eq_negation(cls, data):
"""Test ``x != y = not (x == y)``"""
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
y = data.draw(a)
cls.assert_eq_negation(x, y)
return
#
# Test default implementations
#
@class_function
def assert_eq_eq(cls, x, y):
assert (x == y) == eq(x, y)
assert (x == y) == cls.__eq__(x, y)
return
@class_function
@given(st.data())
def test_eq_eq(cls, data):
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
y = data.draw(a)
cls.assert_eq_eq(x, y)
return
@class_function
def assert_eq_ne(cls, x, y):
from haskpy.functions import ne
assert (x != y) == ne(x, y)
assert (x != y) == cls.__ne__(x, y)
return
@class_function
@given(st.data())
def test_eq_ne(cls, data):
a = data.draw(cls.sample_eq_type())
x = data.draw(a)
y = data.draw(a)
cls.assert_eq_ne(x, y)
return
@function
def eq(x, y):
"""Equality: ``Eq a => a -> a -> Bool``
Note that one can use the ``==`` operator instead of this function. But
operators cannot be partially applied in Python, so for that use case this
function can be useful.
.. code-block:: python
>>> from haskpy import List, map
>>> map(eq(42), List(1, 2, 42, 666, 42))
List(False, False, True, False, True)
"""
return x == y
@function
def ne(x, y):
"""Inequality: ``Eq a => a -> a -> Bool``"""
return x != y
```
#### File: haskpy/types/either.py
```python
import attr
import hypothesis.strategies as st
from haskpy.typeclasses import Monad, Eq
from haskpy.internal import class_function, immutable
from haskpy.optics import prism
from haskpy.testing import eq_test
from haskpy import testing
from haskpy.types.function import function
@immutable
class Either(Monad, Eq):
match = attr.ib()
@class_function
def pure(cls, x):
return Right(x)
def map(self, f):
return self.match(
Left=lambda _: self,
Right=lambda x: Right(f(x)),
)
def apply_to(self, x):
return self.match(
Left=lambda _: self,
Right=lambda f: x.map(f),
)
def bind(self, f):
return self.match(
Left=lambda _: self,
Right=lambda x: f(x),
)
def __eq__(self, other):
return self.match(
Left=lambda x: other.match(
Left=lambda y: x == y,
Right=lambda _: False,
),
Right=lambda x: other.match(
Left=lambda _: False,
Right=lambda y: x == y,
),
)
def __eq_test__(self, other, data):
return self.match(
Left=lambda x: other.match(
Left=lambda y: eq_test(x, y, data=data),
Right=lambda _: False,
),
Right=lambda x: other.match(
Left=lambda _: False,
Right=lambda y: eq_test(x, y, data=data),
),
)
def __repr__(self):
return self.match(
Left=lambda x: "Left({0})".format(repr(x)),
Right=lambda x: "Right({0})".format(repr(x)),
)
@class_function
def sample_value(cls, a, b):
return st.one_of(a.map(Left), b.map(Right))
sample_type = testing.create_type_sampler(
testing.sample_type(),
testing.sample_type(),
)
sample_functor_type_constructor = testing.create_type_constructor_sampler(
testing.sample_type(),
)
# Some typeclass instances have constraints for the types inside
sample_eq_type = testing.create_type_sampler(
testing.sample_eq_type(),
testing.sample_eq_type(),
)
def Left(x):
return Either(lambda *, Left, Right: Left(x))
def Right(x):
return Either(lambda *, Left, Right: Right(x))
@function
def either(f, g, e):
"""(a -> c) -> (b -> c) -> Either a b -> c"""
return e.match(Left=f, Right=g)
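# Illustrative sketch (not in the original module; the values are made up, and it
# assumes haskpy's curried functions accept full application as elsewhere in the
# package): `either` collapses an Either by applying the first handler to Left
# values and the second handler to Right values.
def _example_either():
    assert either(len, lambda x: x + 1, Right(41)) == 42
    assert either(len, lambda x: x + 1, Left("boom")) == 4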
@function
def is_left(m):
return m.match(
Left=lambda _: True,
Right=lambda _: False,
)
@function
def is_right(m):
return m.match(
Left=lambda _: False,
Right=lambda _: True,
)
@function
def from_left(x, e):
return e.match(
Left=lambda y: y,
Right=lambda _: x,
)
@function
def from_right(x, e):
return e.match(
Left=lambda _: x,
Right=lambda y: y,
)
#
# Optics
#
left = prism(
lambda m: m.match(
Left=lambda x: Right(x),
Right=lambda _: Left(m),
),
lambda x: Left(x),
)
right = prism(
lambda m: m.match(
Left=lambda _: Left(m),
Right=lambda x: Right(x),
),
lambda x: Right(x),
)
```
#### File: types/tests/test_identity.py
```python
from haskpy import Identity, IdentityT, Function, List, Either
from haskpy.testing import make_test_class
# Test typeclass laws for Identity
TestIdentity = make_test_class(Identity)
def test_identity_map():
"""Make sure the originally given value isn't kept constant"""
assert Identity(42).map(lambda x: x + 1) == Identity(43)
return
TestIdentityT = make_test_class(IdentityT(Either))
TestIdentityT = make_test_class(IdentityT(List))
TestIdentityT = make_test_class(IdentityT(Function))
```
#### File: types/tests/test_maybe.py
```python
from haskpy import Maybe, Just, Nothing, MaybeT, List, Function, Compose, Either
from haskpy.testing import make_test_class
# Test typeclass laws for Maybe
TestMaybe = make_test_class(Maybe)
def test_maybe_match():
"""Make sure the given value is actually stored"""
assert Just(42).match(Nothing=lambda: 666, Just=lambda x: x) == 42
assert Nothing.match(Nothing=lambda: 666, Just=lambda x: x) == 666
return
def test_maybe_map():
"""Make sure the originally given value isn't kept constant"""
assert Just(42).map(lambda x: x + 1) == Just(43)
return
def test_maybe_foldl():
"""Make sure the folding is done as we expect"""
assert Just("foo").foldl(lambda x: lambda y: x + y, "bar") == "barfoo"
return
# Test typeclass laws for MaybeT monad transformer (using some example monad).
TestMaybeT = make_test_class(MaybeT(Either))
TestMaybeT = make_test_class(MaybeT(Function))
TestMaybeT = make_test_class(MaybeT(List))
def test_compose_vs_maybet():
"""Test the difference between MaybeT and Compose
This was an interesting example that showed how MaybeT differs from Compose
even for the Applicative instance. So, MaybeT isn't a monadic extension of
Compose, but rather it's... well... a monad transformer.
"""
MaybeList = MaybeT(List)
xs = MaybeList(List(Nothing, Just(42)))
ys = MaybeList(List(Just(1), Just(2)))
assert xs.apply_second(ys) == MaybeList(List(Nothing, Just(1), Just(2)))
MaybeList2 = Compose(List, Maybe)
xs2 = MaybeList2(List(Nothing, Just(42)))
ys2 = MaybeList2(List(Just(1), Just(2)))
assert xs2.apply_second(ys2) == MaybeList2(List(Nothing, Nothing, Just(1), Just(2)))
return
``` |
{
"source": "jluttine/junction-tree",
"score": 3
} |
#### File: junction-tree/junctiontree/construction.py
```python
import numpy as np
import heapq
from itertools import chain, combinations
import copy
def get_clique(tree, node_list, var_label):
'''Finds a single clique containing variable
:param tree: the tree structure (a list) of the junction tree
:param node_list: contains the nodes indexed by clique id for all cliques in tree
:param var_label: the variable label of variable being searched for
:return: a clique containing the variable or None if no such clique exists in tree
'''
ix = tree[0]
node = node_list[ix]
separators = tree[1:]
if var_label in node:
return ix, node
if not separators: # base case reached (leaf)
return None
for separator in separators:
separator_ix, c_tree = separator
separator_vars = node_list[separator_ix]
if var_label in separator_vars:
return separator_ix, separator_vars
clique_info = get_clique(c_tree, node_list, var_label)
if clique_info:
return clique_info
return None
def factors_to_undirected_graph(factors):
'''Represent factor graph as undirected graph
:param factors: list of factors
:return: undirected graph as dictionary with edges as keys and the factor from
which edge originates as values
'''
factor_edges = {}
for factor_ix, factor in enumerate(factors):
for ix, k1 in enumerate(factor):
for k2 in factor[ix+1:]:
factor_edges.setdefault( frozenset( (k1,k2) ), set() ).add(factor_ix)
return factor_edges
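# Illustrative sketch (hypothetical factors): each pairwise edge maps to the set
# of factor indices it came from.
def _example_factor_edges():
    edges = factors_to_undirected_graph([["a", "b"], ["b", "c"]])
    assert edges[frozenset(("a", "b"))] == {0}
    assert edges[frozenset(("b", "c"))] == {1}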
def initialize_triangulation_heap(var_sizes, edges):
'''Create heap used for graph triangulation
:param var_sizes: dictionary with variable label as keys and variable size as values
:param edges: list of pairs of variables representing factor graph edges
:return heap: heap with entry structure:
[
num edges added to triangulated graph by removal of variable,
induced cluster weight,
variable associated with first two elements
]
:return entry_finder: dictionary with variable label as key and reference to heap entry for variable
'''
heap, entry_finder = update_heap(var_sizes.keys(), edges, var_sizes)
return heap, entry_finder
def update_heap(remaining_vars, edges, var_sizes, heap=None, entry_finder=None):
'''Update heap entries
:param remaining_vars: list of variables remaining in the heap
:param edges: list of edges (pairs of variables )
:param var_sizes: dictionary of variables (variable label is key, size is value)
:param heap: heap to be updated (None if new heap is to be created)
:param entry_finder: entry_finder dictionary with references to heap elements
:return h: updated (or newly created) heap
:return entry_finder: dictionary with updated references to heap elements
'''
h = heap if heap else []
entry_finder = entry_finder if entry_finder else {}
for var in remaining_vars:
rem_neighbors = [(set(edge) - set([var])).pop()
for edge in edges if var in edge and len(set(remaining_vars).intersection(edge)) == 2]
# determine how many of var's remaining neighbors need to be connected
num_new_edges = sum(
[
frozenset((n1,n2)) not in edges
for i, n1 in enumerate(rem_neighbors)
for n2 in rem_neighbors[i+1:]
]
)
# weight of a cluster is the product of all key lengths in cluster
weight = var_sizes[var] * np.prod([var_sizes[n] for n in rem_neighbors])
entry = [num_new_edges, weight, var]
heapq.heappush(h, entry)
# invalidate previous entry if it exists
prev = entry_finder.get(var, None)
if prev:
# set entry to be removed
prev[2] = ""
entry_finder[var] = entry
return h, entry_finder
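# Illustrative sketch (hypothetical variable sizes): heap entries have the form
# [num_new_edges, induced_cluster_weight, variable], so the variable that is
# cheapest to eliminate sits at the top of the heap.
def _example_triangulation_heap():
    var_sizes = {"a": 2, "b": 3, "c": 2}
    edges = factors_to_undirected_graph([["a", "b"], ["b", "c"]])
    heap, entry_finder = initialize_triangulation_heap(var_sizes, edges)
    # "a" and "c" each have a single neighbor, so eliminating them adds no edges
    assert heap[0][0] == 0
    return heap, entry_finder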
def remove_next(heap, entry_finder, remaining_vars, var_sizes, edges):
'''Removes next entry from heap
:param heap: heap structure containing remaining factors and weights
:param entry_finder: dictionary with updated references to heap elements
:param remaining_vars: list of variables remaining in G'
:param var_sizes: dictionary of variables (variable label is key, size is value)
:param edges: list of edge pairs in original graph G
:return entry: the entry removed from the heap
:return heap: heap structure with updated entries after variable removal
:return entry_finder: dictionary with updated references to heap elements
:return remaining_vars: list of variables without most recently removed variable
'''
entry = (None, None, "")
while entry[2] == "":
entry = heapq.heappop(heap)
# remove entry from entry_finder
del entry_finder[entry[2]]
# remove key from remaining keys list
remaining_vars.remove(entry[2])
heap, entry_finder = update_heap(
remaining_vars,
edges,
var_sizes,
heap,
entry_finder
)
return entry, heap, entry_finder, remaining_vars
def find_triangulation(factors, var_sizes):
'''Triangulate given factor graph.
TODO: Provide different algorithms.
:param factors: list of factors where each factor is given as a list of variables the factor contains:
[vars1, ..., varsN]
:param var_sizes: dictionary of variables (variable label is key, size is value)
{
key1: size1,
...
keyM: sizeM
}
:return tri: list of edges added to triangulate the undirected graph
:return max_cliques: list of maximal cliques generated during triangulation process
:return factor_to_maxclique: list mapping each factor index to the maxclique which contains the factor
'''
def generate_subsets(factors):
'''For each factor, identify all factors that are subset of that factor
:param factors: list of factors (list of variables) representing the factor graph
:return: a dictionary with factor index as key and a list of the factor indices for which the factor
is a superset as value
'''
subsets = {}
for ix, f1 in enumerate(factors):
subset_of_ix = max(
enumerate(
[
-1 if not set(f1) <= set(f2) else len(set(f2) - set(f1))
for f2 in factors
]
),
key=lambda t: t[1]
)[0]
subsets.setdefault(subset_of_ix, []).append(ix)
return subsets
def find_origin_factors(factor_ixs, subsets, factor_to_maxclique):
'''Creates a list of original factors that contain an edge
:param factor_ixs: the original factors containing the edge
:param subsets: dictionary of factor id as key and factor id of subset factors as value
:param factor_to_maxclique: list mapping factor by id to assigned maxclique
:return: a list of the original factors of factor graph which contain the edge
'''
return list(
chain(
*[
# adding this factor id and factor ids of factors that are subsets
list(
set(
subsets.get(factor_ix, []) + [factor_ix]
)
)
for factor_ix in factor_ixs
if factor_to_maxclique[factor_ix] is None
]
)
)
def find_unconnected_neighbors(neighbors, edges):
'''Create a list of tuples representing edges between unconnected neighbors
:param neighbors: list of variables representing neighbors in a factor
:param edges: view of variables (frozensets representing a graph edge)
:return: list of (variable, variable) tuples for neighbor pairs not yet connected by an edge
'''
return [
(k1,k2)
for k1,k2 in combinations(neighbors, 2)
if frozenset((k1, k2)) not in edges
]
def find_maxclique(cluster, max_cliques):
'''Identifies the index of max clique which contains cluster of variables
:param cluster: a list of variables
:param max_cliques: list of variable lists (representing a max clique)
:return: the id of the clique for which the cluster is a subset, -1 otherwise
'''
search_results = [
clique_ix
for clique_ix, clique in enumerate(max_cliques)
if set(cluster) < set(clique)
]
return -1 if len(search_results) == 0 else search_results[0]
# NOTE: Only variables that have been used at least in one factor should be
# used. Ignore those variable sizes that are not in any factor. Perhaps this
# could be fixed elsewhere. Just added a quick fix here to filter variable
# sizes.
used_vars = list(
set(var for factor in factors for var in factor)
)
var_sizes = {
var: size
for (var, size) in var_sizes.items()
if var in used_vars
}
factor_edges = factors_to_undirected_graph(factors)
if len(factor_edges) == 0:
# no edges present in factor graph
return [], factors, list(range(len(factors)))
tri = []
max_cliques = []
factor_to_maxclique = [None]*len(factors)
subsets = generate_subsets(factors)
heap, entry_finder = initialize_triangulation_heap(var_sizes, factor_edges)
rem_vars = used_vars
while len(rem_vars) > 0:
entry, heap, entry_finder, rem_vars = remove_next(
heap,
entry_finder,
rem_vars,
var_sizes,
factor_edges
)
# var is the 3rd element in entry list
var = entry[2]
rem_neighbors = []
origin_factors = []
# find neighbors that are in remaining keys
for r_var in rem_vars:
edge_set = frozenset([var, r_var])
if edge_set in factor_edges:
rem_neighbors.append(r_var)
origin_factors.extend(find_origin_factors(factor_edges[edge_set], subsets, factor_to_maxclique))
if len(origin_factors) > 0:
# implies that list of origin factors not yet accounted for in existing maxcliques
new_edges = find_unconnected_neighbors(rem_neighbors, factor_edges.keys())
# connect all unconnected neighbors of key
factor_edges.update({frozenset(edge): set() for edge in new_edges})
tri.extend(new_edges)
# if possible, assign factor to maxclique which is either
# the factor itself or a factor which it is a subset of
new_cluster = rem_neighbors + [var]
maxclique_ix = find_maxclique(new_cluster, max_cliques)
# new maxclique discovered if maxclique == -1
max_cliques.extend( [] if maxclique_ix != -1 else [sorted(new_cluster)] )
maxclique_ix = maxclique_ix if maxclique_ix != -1 else len(max_cliques) - 1
for factor_ix in set(origin_factors):
factor_to_maxclique[factor_ix] = maxclique_ix
return tri, max_cliques, factor_to_maxclique
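# Illustrative usage sketch (hypothetical factor graph): a chain of factors is
# already triangulated, so no fill-in edges are expected here.
def _example_find_triangulation():
    factors = [["a", "b"], ["b", "c"], ["c", "d"]]
    var_sizes = {"a": 2, "b": 3, "c": 2, "d": 2}
    tri, max_cliques, factor_to_maxclique = find_triangulation(factors, var_sizes)
    assert tri == []
    return max_cliques, factor_to_maxclique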
def merge_trees(tree1, clique1_ix, tree2, clique2_ix, sepset_ix):
'''Merge two trees into one separated by sepset
:param tree1: tree structure (a list) containing clique_1
:param clique1_ix: clique_id for clique_1
:param tree2: tree structure (a list) containing clique_2
:param clique2_ix: clique_id for clique_2
:param sepset_ix: sepset id for the sepset to be inserted
:return tree: tree structure (a list) containing clique_1, clique_2, and sepset
'''
t2 = copy.deepcopy(tree2)
# combine tree2 (rooted by clique2) with sepset
sepset_group = (sepset_ix, change_root(t2, clique2_ix))
merged_tree = insert_sepset(tree1, clique1_ix, sepset_group)
return merged_tree
def change_root(tree, clique_ix, child=[], sep=[]):
'''Restructures tree so that clique becomes root
:param tree: tree to be altered
:param clique_ix: id of the clique that will become tree's root
:param child: child tree to be added to new root of tree
:param sep: separator connecting root to recursively constructed child tree
:return: tree with clique_ix as root
If clique_ix is already root of tree, original tree is returned.
If clique_ix not in tree, empty list is returned.
'''
if tree[0] == clique_ix:
if len(child) > 0:
tree.append((sep[0],child))
return tree
return list(
chain(
*[
change_root(
child_sepset[1],
clique_ix,
tree[:c_ix+1] + tree[c_ix+2:] + [(sep[0],child)] if len(child) else tree[:c_ix+1] + tree[c_ix+2:],
[child_sepset[0]]
)
for c_ix, child_sepset in enumerate(tree[1:])
]
)
)
def insert_sepset(tree, clique_ix, sepset_group):
'''Inserts sepset into tree as child of clique
:param tree: tree structure (a list) in which to insert sepset
:param clique_ix: clique id of the sepset's parent
:param sepset_group: sepset group being inserted
:return new_tree: tree structure with the sepset inserted as a child of clique
'''
return [tree[0]] + list(
chain(
*[
[(child_sepset[0], insert_sepset(child_sepset[1], clique_ix, sepset_group))]
for child_sepset in tree[1:]
],
[] if tree[0] != clique_ix else [(sepset_group)]
)
)
def generate_potential_pairs(tree):
'''Returns a list of tuples consisting of clique id and child separator ids
:param tree: tree structure in list format
:return: list of clique id/child sep id tuples
[
(clique_id0, child0_sep_id0),
(clique_id0, child1_sep_id0),
(clique_id1, child0_sep_id1),
...
(clique_idN, child(M-1)_sep_idN),
(clique_idN, childM_sep_idN)
]
'''
return list(bf_traverse(tree, func=yield_clique_pairs))
def yield_id(tree):
'''Yields id of tree's root
:param tree: tree structure in list format
'''
yield tree[0]
def bf_traverse(tree, clique_ix=None, func=yield_id):
'''Breadth-first search traversal with optional early termination
:param tree: tree structure in list format
:param clique_ix: clique id used to terminate traversal
:param func: function controlling component of tree output
Output: Depends on func argument. Default is list of clique ids
[id1, ..., idN] (or [id1, ..., cid])
'''
queue = [tree]
while queue:
tree = queue.pop(0)
yield from func(tree)
if tree[0] == clique_ix:
return
queue.extend([child for child in tree[1:]])
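# Illustrative sketch (hypothetical tree): trees are nested lists of the form
# [clique_id, (sep_id, subtree), ...]; breadth-first traversal yields the root,
# then its child separators and their subtrees level by level.
def _example_bf_traverse():
    tree = [0, (3, [1]), (4, [2])]
    assert list(bf_traverse(tree)) == [0, 3, 4, 1, 2]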
def yield_clique_pairs(tree):
'''Yields tuples of root clique id and sepset id
:param tree: tree structure in list format
'''
for child in tree[1:]:
yield (tree[0], child[0])
def get_clique_vars(clique_vars, clique_ix):
'''Get variables of the clique with id clique_ix
:param clique_vars: list of variables (maxclique + separators)
:param clique_ix: clique id to find
:return: list of variables in clique clique_ix (or None if clique_ix not in tree)
'''
return clique_vars[clique_ix] if len(clique_vars) > clique_ix else None
def df_traverse(tree, clique_ix=None, func=yield_id):
'''Depth-first traversal with optional early termination
:param tree: tree structure in list format
:param clique_ix: clique id used to terminate traversal
:param func: function controlling component of tree output
Output: Depends on func argument. Default is list of clique ids
[id1, ..., idN] (or [id1, ..., cid])
'''
stack = [tree]
while stack:
tree = stack.pop()
yield from func(tree)
if tree[0] == clique_ix:
return
stack.extend([child for child in reversed(tree[1:])])
def construct_junction_tree(cliques, var_sizes):
'''Construct junction tree from input cliques
:param cliques: a list of maximal cliques where each maximal clique is a list of variables it contains
:param var_sizes: a dictionary of (var label, var size) pairs
:return tree: a junction tree structure from the input cliques
:return separators: a list of separators in the order in which they appear in the tree.
Note: Empty separator sets indicate the presence of distinct unconnected trees in the structure
'''
trees = [[c_ix] for c_ix, clique in enumerate(cliques)]
# set of candidate sepsets
sepsets = list()
for i, X in enumerate(cliques):
for j, Y in enumerate(cliques[i+1:]):
sepset = tuple(set(X).intersection(Y))
sepsets.append((sepset, (i,j+i+1)))
separator_dict = {}
heap = build_sepset_heap(sepsets, cliques, var_sizes)
num_selected = 0
while num_selected < len(cliques) - 1:
entry = heapq.heappop(heap)
ss_ix = entry[2]
(cliq1_ix, cliq2_ix) = sepsets[ss_ix][1]
tree1, tree2 = None, None
for tree in trees:
# find tree (tree1) containing cliq1_ix
tree1 = tree1 if tree1 else (tree if find_subtree(tree,cliq1_ix) else None)
# find tree (tree2) containing cliq2_ix
tree2 = tree2 if tree2 else (tree if find_subtree(tree,cliq2_ix) else None)
if tree1 != tree2:
ss_tree_ix = len(cliques) + num_selected
# merge tree1 and tree2 into new_tree
new_tree = merge_trees(
tree1,
cliq1_ix,
tree2,
cliq2_ix,
ss_tree_ix
)
separator_dict[ss_tree_ix] = sepsets[ss_ix][0]
# insert new_tree into forest
trees.append(new_tree)
# remove tree1 and tree2 from forest
trees.remove(tree1)
trees.remove(tree2)
num_selected += 1
# trees list contains one tree which is the fully constructed tree
return trees[0], [list(separator_dict[ix]) for ix in sorted(separator_dict.keys())]
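# Illustrative usage sketch (hypothetical cliques and sizes): join two maximal
# cliques that share the variable "b" into a single junction tree.
def _example_construct_junction_tree():
    cliques = [["a", "b"], ["b", "c"]]
    var_sizes = {"a": 2, "b": 3, "c": 2}
    tree, separators = construct_junction_tree(cliques, var_sizes)
    # The only separator between the two cliques contains just "b"
    assert separators == [["b"]]
    return tree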
def build_sepset_heap(sepsets, cliques, var_sizes):
'''Build sepset heap to be used for building junction tree from cliques
:param sepsets: list of candidate sepsets, each a tuple of separator variables paired with the tuple
of clique ids which produce the sepset
:param cliques: list of cliques (represented by variable list)
:param var_sizes: dictionary of variable label as key and variable size as value
:return sepset_heap: heap of sepset entries
'''
heap = []
for i, (ss, (cliq1_ix, cliq2_ix)) in enumerate(sepsets):
mass = len(ss) + 0.001 # avoids division by zero if sepset empty
weight1 = np.prod([var_sizes[var] for var in cliques[cliq1_ix]])
weight2 = np.prod([var_sizes[var] for var in cliques[cliq2_ix]])
# invert mass to use minheap
entry = [1.0/mass, weight1 + weight2, i]
heapq.heappush(heap, entry)
return heap
def find_subtree(tree, clique_ix):
'''Evaluates if subtree rooted by clique exists in tree
:param tree: tree structure (a list) to search
:param clique_ix: id of the clique serving as root of subtree
:return tree_found: True if subtree rooted by clique_ix, False otherwise
'''
if tree[0] == clique_ix:
return True
elif len(tree) == 1:
return False
else:
for child_tree in tree[1:]:
if find_subtree(child_tree, clique_ix):
return True
return False
```
#### File: junction-tree/tests/test_computation.py
```python
from junctiontree import computation as comp
import numpy as np
from .util import assert_potentials_equal
def get_arrays_and_vars(tree, node_list, potentials):
"""Get all arrays and their variables as a flat list
Output: [array1, vars1, ..., arrayN, varsN]
"""
return list([potentials[tree[0]],node_list[tree[0]]]) + sum(
[
get_arrays_and_vars(child_tree, node_list, potentials)
for child_tree in tree[1:]
],
[]
)
def brute_force_sum_product(tree, node_list, potentials):
"""Compute brute force sum-product with einsum """
# Function to compute the sum-product with brute force einsum
arrays_vars = get_arrays_and_vars(tree, node_list, potentials)
f = lambda output_vars: np.einsum(*(arrays_vars + [output_vars]))
def __run(tree, node_list, p, f, res=[]):
res.append(f(node_list[tree[0]]))
for child_tree in tree[1:]:
__run(child_tree, node_list, p, f, res)
return res
return __run(tree, node_list, potentials, f)
def assert_sum_product(tree, node_order, potentials, variables):
""" Test shafer-shenoy vs brute force sum-product """
# node_order represents the order nodes are traversed
# in get_arrays_and_vars function
assert_potentials_equal(
brute_force_sum_product(
tree,
[variables[idx] for idx in node_order],
[potentials[idx] for idx in node_order]
),
comp.compute_beliefs(tree, potentials, variables)
)
def test_one_scalar_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(),
],
[[]] # no variables for scalar
)
def test_one_matrix_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(2, 3),
],
[
[3,5]
]
)
def test_one_child_node_with_all_variables_shared():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 2),
np.ones((3, 2)),
],
[
[3,5],
[5,3],
[5,3]
]
)
def test_one_child_node_with_one_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.ones((3,)),
],
[
[3,5],
[5,9],
[5]
]
)
def test_one_child_node_with_no_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2),
np.random.randn(3),
np.ones(()),
],
[
[3],
[9],
[]
]
)
def test_one_grand_child_node_with_no_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(4, 5),
np.ones((3,)),
np.ones((4,)),
],
[
[3, 5],
[5, 9],
[9, 1],
[5],
[9]
]
)
def test_one_grand_child_node_with_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(6, 3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[1, 5],
[5],
[5]
]
)
def test_two_children_with_no_variable_shared():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
],
[
[3, 5],
[5, 9],
[3, 1],
[5],
[3]
]
)
def test_two_child_with_shared_variable():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[5],
[5],
[5]
]
)
def test_two_children_with_3D_tensors():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3, 4),
np.random.randn(3, 4, 5),
np.random.randn(3, 6),
np.ones((3, 4)),
np.ones((3,)),
],
[
[3,5,7],
[5,7,9],
[5,1],
[5,7],
[5]
]
)
def test_divide_matrix_product():
# dividing messages from product when neighbor message is excluded
# this avoids re-doing einsum calculations to accomplish the same
# one full message product is calculated and messages are removed from the
# product by performing the division operation
potentials = [
np.random.randn(2, 3, 6),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
np.ones((6,)),
np.random.randn(4, 6)
]
variables = [
[3, 5, 7],
[5, 9],
[3, 1],
[5],
[3],
[7],
[2, 7]
]
msg1 = np.einsum(potentials[1], variables[1], variables[3])
msg2 = np.einsum(potentials[2], variables[2], variables[4])
msg3 = np.einsum(potentials[6], variables[6], variables[5])
msg_prod = np.einsum(msg1, variables[3], msg2, variables[4], msg3, variables[5], variables[0])
msg_prod_x6 = np.einsum(msg1, variables[3], msg2, variables[4], [3,5])
assert np.allclose(msg_prod_x6, np.divide(msg_prod, msg3[None, None, ...])[:,:,0])
mask = np.in1d(variables[0], variables[6])
exp_ix = np.full(msg_prod.ndim, None)
exp_ix[mask] = slice(None)
slice_ix = np.full(msg_prod.ndim, slice(None))
slice_ix[mask] = 0
np.divide(msg_prod, msg3[tuple(exp_ix)])[tuple(slice_ix)]
assert np.allclose(msg_prod_x6, np.divide(msg_prod, msg3[tuple(exp_ix)])[tuple(slice_ix)])
msg_prod_x1 = np.einsum(msg2, variables[4], msg3, variables[-2], [3,7])
assert np.allclose(msg_prod_x1, np.divide(msg_prod, msg1[None, ..., None])[:,0,:])
msg_prod_x2 = np.einsum(msg1, variables[3], msg3, variables[5], [5,7])
assert np.allclose(msg_prod_x2, np.divide(msg_prod, msg2[..., None, None])[0,:,:])
def test_apply_evidence_to_potentials():
potentials = [
np.random.randn(2, 3, 6),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
np.ones((6,)),
np.random.randn(4, 6)
]
variables = [
[3, 5, 7],
[5, 9],
[3, 1],
[5],
[3],
[7],
[2, 7]
]
evidence = {3:0, 9:2}
shrunken_potentials = comp.apply_evidence(potentials, variables, evidence)
assert np.allclose(potentials[0][0, :, :], shrunken_potentials[0])
assert np.allclose(potentials[1][:, 2], shrunken_potentials[1])
assert np.allclose(potentials[2][0, :], shrunken_potentials[2])
assert np.allclose(potentials[3], shrunken_potentials[3])
assert np.allclose(potentials[4][0], shrunken_potentials[4])
assert np.allclose(potentials[5], shrunken_potentials[5])
assert np.allclose(potentials[6], shrunken_potentials[6])
def test_evidence_shrinking():
# evidence shrinking can be incorporated by removing axis
# corresponding to observed variable
A = np.random.rand(3,4,2) # vars: a,b,c
a = [0]*3
a[2] = 1
b = [0]*4
b[2] = 1
c = [0]*2
c[0] = 1
# update potential A based on observing a=2
A_updated = comp.sum_product.einsum(A, [0,1,2], a, [0], [0,1,2])
# shrinking from evidence
# set the axis representing a (ax=0) to the value of a
A_updated_es = A_updated[2,:,:]
assert A_updated_es.shape == (4,2)
# imagine we have another potential sharing vars b and c
B = np.random.rand(4,2) # vars: b,c
B_updated = comp.sum_product.einsum(A_updated, [0,1,2], B, [1,2], [1,2])
B_updated_es = comp.sum_product.einsum(A_updated_es, [1,2], B, [1,2], [1,2])
# the result of the calculation should be the same regardless of if we use
# the updated potentials from A_updated (without evidence shrinking)
# or A_updated_es (with evidence shrinking)
np.testing.assert_allclose(
B_updated,
B_updated_es
)
# what happens if the only shared variables between potentials is
# the single variable in potential
C = np.random.rand(3) # vars: a
C_updated = comp.sum_product.einsum(C, [0], a, [0], [0])
C_updated_es = C_updated[2]
np.testing.assert_allclose(
comp.sum_product.einsum(A_updated, [0,1,2], C_updated, [0], []),
comp.sum_product.einsum(A_updated_es, [1,2], C_updated_es, [], [])
)
np.testing.assert_allclose(
comp.sum_product.einsum(A_updated, [0,1,2], C_updated, [0], [1,2]),
comp.sum_product.einsum(A_updated_es, [1,2], C_updated_es, [], [1,2])
)
``` |
{
"source": "jlutz777/FreeStore",
"score": 2
} |
#### File: alembic/versions/2eeb0ddd8f9_add_winter_seasonal_category.py
```python
revision = '2<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
shopping_categories = table(
'shopping_category',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.Unicode(75), nullable=False),
sa.Column('daily_limit', sa.Integer, nullable=True),
sa.Column('monthly_limit', sa.Integer, nullable=False),
sa.Column('family_wide', sa.Boolean, nullable=False),
sa.Column('order', sa.Integer, nullable=False),
sa.Column('min_age', sa.Integer, nullable=True),
sa.Column('max_age', sa.Integer, nullable=True),
sa.Column('disabled', sa.Boolean, nullable=False)
)
op.bulk_insert(
shopping_categories,
[
{'id': 12, 'name': 'Winter Seasonal',
'daily_limit': 1, 'monthly_limit': 4,
'family_wide': False,
'order': 9, 'disabled': False}
])
op.execute(
shopping_categories.update().
where(shopping_categories.c.name == op.inline_literal('Seasonal')).
values({'disabled': op.inline_literal(True)})
)
def downgrade():
pass
```
#### File: alembic/versions/3053811d54d4_create_customer_table.py
```python
revision = '3<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'customerfamily',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('email', sa.Unicode(100), nullable=False),
sa.Column('phone', sa.Unicode(40)),
sa.Column('address', sa.Unicode(100), nullable=False),
sa.Column('city', sa.Unicode(40), nullable=False),
sa.Column('state', sa.Unicode(40), nullable=False),
sa.Column('zip', sa.Unicode(20), nullable=False),
sa.Column('datecreated', sa.DateTime, nullable=False)
)
def downgrade():
op.drop_table('customerfamily')
```
#### File: alembic/versions/3838ebd18fd9_add_shopping_item_and_category_tables.py
```python
revision = '<KEY>'
down_revision = '4<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'shopping_category',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.Unicode(75), nullable=False),
sa.Column('daily_limit', sa.Integer, nullable=True)
)
op.create_table(
'shopping_item',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.Unicode(75), nullable=False),
sa.Column('category', sa.Integer, sa.ForeignKey('shopping_category.id'))
)
def downgrade():
op.drop_table('shopping_category')
op.drop_table('shopping_item')
```
#### File: FreeStore/models/customerfamily.py
```python
from datetime import datetime
from sqlalchemy import Integer, Unicode, DateTime, Boolean
from sqlalchemy import Column as col
from sqlalchemy.orm import relationship
import models.base as base
from models.dependent import Dependent
class CustomerFamily(base.Base):
"""Sqlalchemy deals model"""
__tablename__ = "customerfamily"
id = col(Integer, primary_key=True)
email = col('email', Unicode, default='')
phone = col('phone', Unicode, default='')
address = col('address', Unicode, default='')
city = col('city', Unicode, default='', nullable=False)
state = col('state', Unicode, default='', nullable=False)
zip = col('zip', Unicode, default='', nullable=False)
datecreated = col('datecreated', DateTime, nullable=False)
comments = col('comments', Unicode, default='')
checkoutComments = col('checkoutcomments', Unicode, default='')
adminComments = col('admincomments', Unicode, default='')
isCustomer = col('is_customer', Boolean, default=True, nullable=False)
isVolunteer = col('is_volunteer', Boolean, default=False, nullable=False)
depOrder = 'Dependent.isPrimary.desc()'
dependents = relationship("Dependent", backref="family", order_by=depOrder)
visits = relationship("Visit", backref="family", lazy="dynamic")
vTable = "VolunteerVisit"
volunteerVisits = relationship(vTable, backref="family", lazy="dynamic")
def __checkFirstName__(self, formDependent, form):
hasError = False
if formDependent['firstName'].data == '':
formError = 'First name is required'
formDependent['firstName'].errors.append(formError)
form.errors['dependent_firstname'] = 'required'
hasError = True
return hasError
def __checkLastName__(self, formDependent, form):
hasError = False
if formDependent['lastName'].data == '':
formErr = 'Last name is required'
formDependent['lastName'].errors.append(formErr)
form.errors['dependent_lastname'] = 'required'
hasError = True
return hasError
def __checkBirthDate__(self, formDependent, form):
hasError = False
# Only customers need a birthdate
if not form.isCustomer.data:
pass
elif formDependent['birthdate'].data is None:
formError = 'Birthday is required'
formDependent['birthdate'].errors.append(formError)
form.errors['dependent_birthdate'] = 'required'
hasError = True
elif formDependent['birthdate'].data < datetime(1900, 1, 1):
formError = 'Birthday must be after 1900'
formDependent['birthdate'].errors.append(formError)
form.errors['dependent_birthdate'] = 'required'
formDependent['birthdate'].data = None
hasError = True
return hasError
def __checkRelationship__(self, formDependent, form):
hasError = False
# Is optional
if not formDependent['relationship'].data:
pass
elif formDependent['relationship'].data < 1 or \
formDependent['relationship'].data > 5:
formError = 'Relationship is invalid'
formDependent['relationship'].errors.append(formError)
form.errors['dependent_relationship'] = 'required'
hasError = True
return hasError
def updatedFromRegistration(self, form):
pass
def fromForm(self, id, form):
if id is not None:
self.id = id
self.datecreated = form.datecreated.data
else:
self.datecreated = datetime.now()
self.email = form.email.data
self.phone = form.phone.data
self.address = form.address.data
self.city = form.city.data
self.state = form.state.data
self.zip = form.zip.data
self.comments = form.comments.data
self.adminComments = form.adminComments.data
self.isVolunteer = form.isVolunteer.data
self.isCustomer = form.isCustomer.data
for formDependent in form.dependents:
if not formDependent['isPrimary'].data and \
(formDependent['firstName'].data == '' and
formDependent['lastName'].data == ''):
continue
dependent = Dependent()
dependent.id = formDependent['id'].data
dependent.isPrimary = formDependent['isPrimary'].data
hasError = self.__checkFirstName__(formDependent, form)
dependent.firstName = formDependent['firstName'].data
if self.__checkLastName__(formDependent, form):
hasError = True
dependent.lastName = formDependent['lastName'].data
if self.__checkBirthDate__(formDependent, form):
hasError = True
dependent.birthdate = formDependent['birthdate'].data
if self.__checkRelationship__(formDependent, form):
hasError = True
dependent.relationship = formDependent['relationship'].data
if hasError:
raise Exception('Dependent data needed')
self.dependents.append(dependent)
def findMatch(self, form, db):
matchedFam = None
# A match is when the first name, last name, zip, and city all match
for formDependent in form.dependents:
if not formDependent['isPrimary'].data:
continue
deps = db.query(Dependent).filter(Dependent.isPrimary)\
.filter(Dependent.firstName==formDependent.firstName.data)\
.filter(Dependent.lastName==formDependent.lastName.data)
for dep in deps:
fam = dep.family
if fam is not None and fam.zip == form.zip.data and fam.city == form.city.data:
matchedFam = fam
break
return matchedFam
```
#### File: FreeStore/models/volunteervisit.py
```python
from sqlalchemy import Column, Integer, DateTime, ForeignKey
import models.base as base
from utils.utils import *
class VolunteerVisit(base.Base):
"""Sqlalchemy deals model"""
__tablename__ = "volunteervisits"
id = Column(Integer, primary_key=True)
checkin = Column('checkin', DateTime)
checkout = Column('checkout', DateTime)
family_id = Column('family', Integer, ForeignKey('customerfamily.id'))
def fromForm(self, form):
# Ensure if there is no id data, it gets marked as None so
# the db creates a new volunteer visit
if form.id.data is not None and form.id.data != '':
self.id = form.id.data
else:
self.id = None
self.checkin = local_time_to_utc_time(form.checkin.data)
self.checkout = local_time_to_utc_time(form.checkout.data)
self.family_id = form.family_id.data
```
#### File: FreeStore/reporting/utils.py
```python
from .reports import FamilyTotalOverTimeReport, IndividualsByAgeReport
from .reports import FamilyCheckoutsPerWeekReport, DependentCheckoutsPerWeekReport
from .reports import EmptyFamilyCheckoutsPerWeekReport, FamilyCheckInsPerWeekReport
from .reports import FamiliesPerZipReport, CheckoutFrequencyPerMonthReport
from .reports import VolunteersHoursWorkedReport, VolunteersPerDayReport
#from .reports import DependentsTotalOverTimeReport, ItemsPerCategoryPerMonthReport
availableReports = {}
availableReports[1] = FamilyTotalOverTimeReport
availableReports[2] = FamilyCheckoutsPerWeekReport
availableReports[3] = EmptyFamilyCheckoutsPerWeekReport
availableReports[4] = FamilyCheckInsPerWeekReport
availableReports[5] = DependentCheckoutsPerWeekReport
availableReports[6] = IndividualsByAgeReport
availableReports[7] = FamiliesPerZipReport
availableReports[8] = CheckoutFrequencyPerMonthReport
availableReports[9] = VolunteersHoursWorkedReport
availableReports[10] = VolunteersPerDayReport
#availableReports[11] = DependentsTotalOverTimeReport
#availableReports[12] = ItemsPerCategoryPerMonthReport
def determineAndCreateReport(report_num, startDate='', endDate=''):
"""Determine the report"""
return availableReports[report_num](startDate, endDate)
```
#### File: FreeStore/tests/functional_tests.py
```python
import sys
sys.path.append("..")
from webtest import TestApp
from nose.tools import with_setup
import run_freestore
app = TestApp(run_freestore.sessionApp)
def setup_func_user():
app.post('/login', {'username': 'user', 'password': '<PASSWORD>'})
def setup_func_admin():
app.post('/login', {'username': 'admin', 'password': '<PASSWORD>'})
def teardown_func():
app.get('/logout')
app.reset()
@with_setup(setup_func_admin, teardown_func)
def test_admin():
assert app.get('/admin').status == '200 OK'
assert app.get('/report').status == '200 OK'
assert app.get('/report/info/1').status == '200 OK'
assert app.get('/report/data/1').status == '200 OK'
#assert app.post('/create_role', {'role': 'stuff2', 'level': '5'}).status == '200 OK'
#assert app.post('/delete_role', {'role': 'stuff2'}).status == '200 OK'
@with_setup(setup_func_user, teardown_func)
def test_user_cannot_do_admin():
assert app.get('/admin').status == '302 Found'
assert app.get('/report').status == '302 Found'
assert app.get('/').status == '200 OK'
assert app.get('/currentVisits').status == '200 OK'
assert app.get('/report/info/1').status == '302 Found'
assert app.get('/report/data/1').status == '302 Found'
#assert app.post('/create_role', {'role': 'stuff', 'level': '5'}).status == '302 Found'
def test_unauthenticated():
assert app.get('/').status == '302 Found'
assert app.get('/login').status == '200 OK'
assert app.get('/admin').status == '302 Found'
assert app.get('/report').status == '302 Found'
```
#### File: FreeStore/utils/utils.py
```python
import pytz, datetime
from fuzzywuzzy import fuzz
datetime_fmt = "%m/%d/%Y %H:%M"
date_fmt = "%m/%d/%Y"
utc_tz = pytz.utc
local_tz = pytz.timezone('US/Eastern')
def formatted_str_date(my_date):
if my_date is None:
return None
str_date = my_date.strftime(date_fmt)
return str_date
def formatted_str_date_time(my_date):
if my_date is None:
return None
str_date = my_date.strftime(datetime_fmt)
return str_date
def local_time_to_utc_time(local_time):
if local_time is None:
return None
dt = local_time
if isinstance(dt, str):
dt = datetime.datetime.strptime(dt, datetime_fmt)
local_dt = local_tz.localize(dt)
return local_dt.astimezone(utc_tz).strftime(datetime_fmt)
def utc_time_to_local_time(utc_time):
if utc_time is None:
return None
dt = utc_time
if isinstance(dt, str):
dt = datetime.datetime.strptime(dt, datetime_fmt)
utc_dt = utc_tz.localize(dt)
return utc_dt.astimezone(local_tz).strftime(datetime_fmt)
def is_fuzzy_match(data1, data2, accuracy=80):
matchVal = fuzz.ratio(data1, data2)
return matchVal > accuracy
``` |
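A brief illustration of the helpers above; the timestamps are made up, and the exact offset depends on daylight saving time for US/Eastern:

```python
# Illustrative only: round-trip a local timestamp through UTC and back.
# "04/15/2021 12:00" falls in EDT (UTC-4), so the UTC form is "04/15/2021 16:00".
utc_str = local_time_to_utc_time("04/15/2021 12:00")   # -> "04/15/2021 16:00"
local_str = utc_time_to_local_time(utc_str)            # -> "04/15/2021 12:00"
assert local_str == "04/15/2021 12:00"

# Fuzzy matching tolerates small typos between names (ratio ~93 > default 80).
assert is_fuzzy_match("Jonathan Smith", "Jonathon Smith")
```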
{
"source": "jlutz777/SimpleAddress",
"score": 3
} |
#### File: SimpleAddress/libraries/utils.py
```python
from bson import json_util
from bson.objectid import ObjectId
import io
import csv
import json
from pymongo.cursor import Cursor
class JSONHelper:
"""
This class converts to and from JSON for you.
Given an object of pretty much any sort, it converts
to and from JSON. For a Mongo cursor, it converts
each document's _id to a string for you.
"""
def encode(self, o):
"""Return a json string of the object.
:param o: the object to JSON serialize
:returns: JSON of the object
:rtype: str
"""
if isinstance(o, Cursor):
results = []
for item in o:
idToStr(item)
results.append(item)
return json.dumps(results, default=json_util.default)
else:
return json.dumps(o, default=json_util.default)
def pullId(self, data):
"""Separate the id of the dict from the rest of the data.
:param data: the dict to separate id
:type data: dict
:returns: The id and the data, separated
:rtype: tuple
"""
thisId = data.get('_id', None)
if thisId:
del data['_id']
return strToId(thisId), data
else:
return thisId, data
def decode(self, o):
"""Return the json decoded data of an object.
:param o: the JSON string to deserialize
:type o: str
:returns: The ids and the objects, separated
:rtype: tuple of lists (or a single id/object pair when the input is one object)
"""
data = json.loads(o, object_hook=json_util.object_hook)
if type(data) is list:
ids = []
objs = []
for item in data:
thisId, thisObj = self.pullId(item)
ids.append(thisId)
objs.append(thisObj)
return ids, objs
else:
return self.pullId(data)
class CSVHelper:
"""
This class converts to and from CSV for you.
Given fields or a file, CSV conversion is done.
"""
def convertToCSV(self, o, orderedFields):
"""Return a string in csv form.
:param o: the object to convert to csv
:param orderedFields: the fields to convert
:type orderedFields: list of fields
:returns: the csv content, encoded as bytes
:rtype: bytes
"""
output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(orderedFields)
for item in o:
idToStr(item)
orderedValues = []
for field in orderedFields:
orderedValues.append(item.get(field.name, ''))
writer.writerow(orderedValues)
return str.encode(output.getvalue())
def convertFromCSV(self, fileName):
"""Return a list of dictionaries from a csv file.
:param fileName: the filename to parse
:type fileName: str
:returns: a list of dictionaries from the csv info
:rtype: list
"""
reader = csv.reader(open(fileName), delimiter=',', quotechar='"')
headers = next(reader)
convertedData = []
for row in reader:
thisRow = {}
for i in range(len(headers)):
thisRow[headers[i]] = row[i]
convertedData.append(thisRow)
return convertedData
def idToStr(row):
"""Take a dictionary of data and convert the _id column to a str.
:param row: the dictionary to convert
:type row: dict
:returns: (nothing) ... converts inline
"""
for key in row:
if key == '_id':
row[key] = str(row[key])
break
def strToId(thisStr):
"""Return an ObjectId from a str.
:param thisStr: the string to convert to ObjectId
:type thisStr: str
:returns: the converted string
:rtype: ObjectId
"""
return ObjectId(thisStr)
def fieldsFromFieldNameArray(fieldNames):
"""Return a list of fields based on field names.
Note that the field names follow a naming convention
in order to be properly converted. They must have
underscores where words are separated.
:param fieldNames: the names of the fields
:type fieldNames: list
:returns: Fields for the given field names
:rtype: list
"""
fields = []
for field in fieldNames:
if type(field) is not str:
fields.append(Field(field[0], field[1]))
elif field != "_id":
fields.append(Field(field))
return fields
class Field:
"""
Field class to easily convert inputs to fields.
They store information, parse the placeholder text,
and store field types if given.
"""
name = ''
placeholder = ''
fieldType = ''
def __init__(self, name, fieldType='', placeholder=''):
self.name = name
self.fieldType = fieldType
if placeholder == '':
self.parsePlaceholderFromName()
else:
self.placeholder = placeholder
def parsePlaceholderFromName(self):
"""Get the placeholder from the field name.
This uses a naming convention of underscores for
new words so it can be easily parsed.
:returns: the place holder text for the field
"""
if self.placeholder == '':
pieces = self.name.split("_")
nameWithSpaces = " ".join(pieces)
self.placeholder = nameWithSpaces.title()
return self.placeholder
def __repr__(self):
return self.placeholder
```
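A short, hypothetical illustration of the Field naming convention and the JSON helper above; the ObjectId hex string is made up:

```python
# Illustrative only: the underscore naming convention drives the placeholder.
assert Field("home_phone").placeholder == "Home Phone"

# A (name, type) tuple carries an explicit field type; "_id" entries are skipped.
fields = fieldsFromFieldNameArray(["first_name", ("send_christmas_card", "checkBox")])
assert [f.name for f in fields] == ["first_name", "send_christmas_card"]

# JSONHelper.decode separates the Mongo _id from the rest of the payload.
_id, data = JSONHelper().decode(
    '{"_id": {"$oid": "5f43a1d2e13823a9d38e0f10"}, "city": "Columbus"}')
assert data == {"city": "Columbus"}
```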
#### File: SimpleAddress/models/address.py
```python
from .dataobject import DataModel
from libraries.utils import fieldsFromFieldNameArray
class AddressModel(DataModel):
"""
This class does CRUD operations on an address.
It has all the requisite required fields and other
information needed to speak with the database.
"""
def __init__(self, mongoUrl, dbName, collectionName='simpleaddresses'):
super(AddressModel, self).__init__(mongoUrl, dbName, collectionName)
def getCreationFields(self):
"""Return list of fields for creating a new address.
:returns: creation fields
:rtype: list
"""
self.fields = ["first_name", "last_name", "spouse", "email_address",
"street_1", "street_2", "city", "state", "zip",
"country", "home_phone", "mobile_phone",
"relationship", "title", "children", "label_name",
("send_christmas_card", "checkBox")]
return super(AddressModel, self).getCreationFields()
def getChristmasFields(self):
"""Return list of fields needed for Christmas cards.
:returns: christmas fields
:rtype: list
"""
return fieldsFromFieldNameArray(["label_name", "street_1", "street_2",
"city", "state", "zip", "country"])
``` |
{
"source": "jlutzwpi/spellingChallenge",
"score": 3
} |
#### File: jlutzwpi/spellingChallenge/spelling.py
```python
import cv2
import enum
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from PIL import Image as im
from translate import Translator
from threading import Thread
from datetime import datetime
# only play the spanish word if found and only once
found = False
numFound = 0
time_found = datetime.now()
#play welcome message
os.system('mpg123 sounds/welcome.mp3')
#video stream class for multithreading
class vStream:
def __init__(self,src,width,height):
self._running = True
self.width=width
self.height=height
self.capture=cv2.VideoCapture(src)
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
success,self.frame=self.capture.read()
if success:
self.frame2=cv2.resize(self.frame,(self.width,self.height))
def getFrame(self):
return self.frame2
#kill the thread
def kill(self):
self.capture.release()
self._running = False
#play the spanish word if the letter is found
class spanishAudio:
isFound = False
fileName = ""
def __init__(self):
self._running = True
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
if self.isFound:
print("Found1")
cmd = 'mpg123 sounds/' + self.fileName
os.system(cmd)
self.isFound = False
def setFound(self,found, file_path):
print("Found2")
self.isFound=found
self.fileName=file_path
def kill(self):
self._running = False
# enumeration of objects to display on the screen
class Object(enum.Enum):
cat = 1
dog = 2
cow = 3
ball = 4
duck = 5
goat = 6
#increment to the next object
def inc(self):
v = self.value + 1
#if we reached the end, start over
if v > 6:
v = 1
return Object(v)
#return the missing letter and its position
#given that the kiddo is just learning letters, only using the first letter
#set up to have the missing letter be anywhere though
def letterPos(self):
l = 1
if self.value == 1:
#l = 1
val = "C"
if self.value == 2:
#l = 3
val = "D"
if self.value == 3:
#l = 2
val = "C"
if self.value == 4:
#l = 2
val = "B"
if self.value == 5:
#l = 4
val = "D"
if self.value == 6:
#l = 3
val = "G"
return (l,val)
# put cat letters on the screen
def drawCatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put duck letters on the screen
def drawDuckText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "U", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "C", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "K", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put goat letters on the screen
def drawGoatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "G", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (435, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put ball letters on the screen
def drawBallText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "B", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "L", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
#image = cv2.rectangle(image, (430, 175), (545, 305), (255, 0, 0), 3)
cv2.putText(image, "L", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put cow letters on the screen
def drawCowText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "C", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "W", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put dog letters on the screen
def drawDogText(image):
# show the letters and the one to fill in
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "G", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (440, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
#put the letters on the screen depending on which object it is
def addLetters(curObject, image):
if curObject.name == "cat":
image = drawCatText(image)
elif curObject.name == "dog":
image = drawDogText(image)
elif curObject.name == "cow":
image = drawCowText(image)
elif curObject.name == "ball":
image = drawBallText(image)
elif curObject.name == "duck":
image = drawDuckText(image)
elif curObject.name == "goat":
image = drawGoatText(image)
return image
# draw the object picture and letters to the screen
def drawScreen(filename, image, curObject):
game_pic = cv2.imread(filename, 1)
game_pic = cv2.resize(game_pic, (200, 150), interpolation=cv2.INTER_LINEAR)
added_image = cv2.addWeighted(
image[10:160, 200:400, :], 0.1, game_pic[0:150, 0:200, :], 0.9, 0)
# Change the region with the result
image[10:160, 200:400] = added_image
# add the letters for the given object to the screen
image = addLetters(curObject, image)
#mask the frame edges with gray panels so only the play area shows
image = cv2.rectangle(image, (0, 0), (100, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (0, 325), (640, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (540, 0), (640, 480), (185, 185, 185), -1)
return image
# get the input from the screen where the letter goes
def getLetter(image, location):
get_letter = []
#only doing the first letter, but can eventually have
#missing letter anywhere in the word
get_letter = image[180:298, 130:240]
#if location == 1:
# get_letter = image[180:298, 130:240]
#if location == 2:
# get_letter = image[180:298, 245:335]
#if location == 3:
# get_letter = image[180:298, 345:435]
#if location == 4:
# get_letter = image[180:298, 445:535]
get_letter = cv2.cvtColor(get_letter, cv2.COLOR_RGB2GRAY)
get_letter = cv2.resize(get_letter, (28, 28),
interpolation=cv2.INTER_LINEAR)
# invert the black and white colors
img = cv2.bitwise_not(get_letter)
# turn the background black
# if the pixel value is less than 160, that means it's background,
# so turn it all the way black
img[img < 160] = 0
#have dimensions match what goes into the model
img = np.expand_dims(img, -1)
img = np.expand_dims(img, axis=0)
img = np.array(img, dtype="float32")
# rescale image from 0..255 to 0...1
img /= 255.0
return img
#translate the object name into Spanish
def addSpanishWord(curObj, im):
translator= Translator(to_lang="es")
translation = translator.translate(curObj.name)
espanol = "En espanol: " + translation
cv2.putText(im, espanol, (50, 400),
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 8)
return im
# alphabet labels for the model
dataset_labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# load the model
TFLITE_MODEL = "model/model.tflite"
tflite_interpreter = tf.lite.Interpreter(model_path=TFLITE_MODEL)
input_details = tflite_interpreter.get_input_details()
output_details = tflite_interpreter.get_output_details()
tflite_interpreter.allocate_tensors()
# start with cat
cur_obj = Object.cat
#set width, height, and camera orientation
width = 640
height = 480
flip = 2
camSet = 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(
flip)+' ! video/x-raw, width='+str(width)+', height='+str(height)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
#start camera thread
cam1=vStream(camSet,width,height)
#start spanish audio thread
spanish = spanishAudio()
#cap = cv2.VideoCapture(camSet)
# main loop
#while cap.isOpened():
while True:
#success, image = cap.read()
try:
image = cam1.getFrame()
except:
print("Frame not found.")
# If loading a video, use 'break' instead of 'continue'.
continue
# add the picture to spell (we iterate through the enum)
filename = 'images/' + cur_obj.name + '.jpeg'
image = drawScreen(filename, image, cur_obj)
# get the missing letter and run it against the model
# but have to turn it to grayscale and resize to 28x28 and invert the colors
(loc,missing_letter) = cur_obj.letterPos()
img = getLetter(image, loc)
# Set image into input tensor
tflite_interpreter.set_tensor(input_details[0]['index'], img)
# Run inference
tflite_interpreter.invoke()
# Get prediction results
tflite_model_predictions = tflite_interpreter.get_tensor(
output_details[0]['index'])
tflite_pred_dataframe = pd.DataFrame(tflite_model_predictions)
tflite_pred_dataframe.columns = dataset_labels
# extract letter from dataframe
letter_col = tflite_pred_dataframe.iloc[0]
col_index = letter_col[letter_col > 0.7].index
if len(col_index) > 0:
the_letter = col_index[0]
if the_letter == missing_letter:
# we found the letter
found = True
#if the letter was found, play spanish translation
#put logic here to keep the words on the screen for 3 sec
#before going to next object
if found:
cv2.putText(image, "You found the letter!!!", (100, 40), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 0, 0), 2)
#translate to spanish and print to the screen
image = addSpanishWord(cur_obj, image)
#put a green rectangle around the letter
image = cv2.rectangle(image, (140, 180), (240, 310), (0, 255, 0), 3)
#if found for the first time, play the audio
if numFound == 0:
sound_str = cur_obj.name + '.mp3'
spanish.setFound(found,sound_str)
numFound += 1
time_found = datetime.now()
# increment to next image after 3 seconds
if (datetime.now() - time_found).total_seconds() > 3:
cur_obj = cur_obj.inc()
#reset found to false and numFound to 0
found = False
numFound = 0
cv2.imshow('Spelling Challenge!', image)
key = cv2.waitKey(1) & 0xFF
# test capturing the image area to go through model
if key == ord("c"):
# capture scaled/converted image
scaled = img * 255.0
scaled = np.reshape(scaled, (28, 28))
data = im.fromarray(scaled)
data = data.convert("L")
data.save('bw_img.png')
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
#kill the threads
cam1.kill()
cv2.destroyAllWindows()
#exit(1)
``` |
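A standalone sketch of the preprocessing that `getLetter` performs on the captured region, run here on a dummy frame instead of the camera feed; shapes and thresholds mirror the code above:

```python
# Illustrative only: prepare a 28x28 model input the same way getLetter() does.
import cv2
import numpy as np

frame = np.full((480, 640, 3), 255, dtype=np.uint8)   # stand-in for a camera frame
crop = frame[180:298, 130:240]                         # region inside the letter box
gray = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
small = cv2.resize(gray, (28, 28), interpolation=cv2.INTER_LINEAR)
inv = cv2.bitwise_not(small)                           # white letter on black
inv[inv < 160] = 0                                     # flatten the background
model_input = np.expand_dims(inv, -1)                  # (28, 28, 1)
model_input = np.expand_dims(model_input, axis=0).astype("float32") / 255.0
print(model_input.shape)                               # (1, 28, 28, 1)
```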
{
"source": "JLUVicent/DAEGC",
"score": 2
} |
#### File: DAEGC/DAEGC/daegc.py
```python
import argparse
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.optim import Adam
from torch_geometric.datasets import Planetoid
import utils
from model import GAT
from evaluation import eva
class DAEGC(nn.Module):
def __init__(self, num_features, hidden_size, embedding_size, alpha, num_clusters, v=1):
super(DAEGC, self).__init__()
self.num_clusters = num_clusters
self.v = v
# get pretrain model
self.gat = GAT(num_features, hidden_size, embedding_size, alpha)
self.gat.load_state_dict(torch.load(
args.pretrain_path, map_location='cpu'))
# cluster layer
self.cluster_layer = Parameter(
torch.Tensor(num_clusters, embedding_size))
torch.nn.init.xavier_normal_(self.cluster_layer.data)
def forward(self, x, adj, M):
A_pred, z = self.gat(x, adj, M)
q = self.get_Q(z)
return A_pred, z, q
def get_Q(self, z):
q = 1.0 / (1.0 + torch.sum(torch.pow(z.unsqueeze(1) -
self.cluster_layer, 2), 2) / self.v)
q = q.pow((self.v + 1.0) / 2.0)
q = (q.t() / torch.sum(q, 1)).t()
return q
def target_distribution(q):
weight = q**2 / q.sum(0)
return (weight.t() / weight.sum(1)).t()
def trainer(dataset):
model = DAEGC(num_features=args.input_dim, hidden_size=args.hidden_size,
embedding_size=args.embedding_size, alpha=args.alpha, num_clusters=args.n_clusters).to(device)
print(model)
optimizer = Adam(model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
# data process
dataset = utils.data_preprocessing(dataset)
adj = dataset.adj.to(device)
adj_label = dataset.adj_label.to(device)
M = utils.get_M(adj).to(device)
# data and label
data = torch.Tensor(dataset.x).to(device)
y = dataset.y.cpu().numpy()
with torch.no_grad():
_, z = model.gat(data, adj, M)
# get kmeans and pretrain cluster result
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(z.data.cpu().numpy())
model.cluster_layer.data = torch.tensor(kmeans.cluster_centers_).to(device)
eva(y, y_pred, 'pretrain')
for epoch in range(args.max_epoch):
model.train()
if epoch % args.update_interval == 0:
# update_interval
A_pred, z, Q = model(data, adj, M)
q = Q.detach().data.cpu().numpy().argmax(1)  # hard cluster assignments from Q
eva(y, q, epoch)
A_pred, z, q = model(data, adj, M)
p = target_distribution(Q.detach())
kl_loss = F.kl_div(q.log(), p, reduction='batchmean')
re_loss = F.binary_cross_entropy(A_pred.view(-1), adj_label.view(-1))
loss = 10 * kl_loss + re_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--name', type=str, default='Citeseer')
parser.add_argument('--epoch', type=int, default=30)
parser.add_argument('--max_epoch', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--n_clusters', default=6, type=int)
parser.add_argument('--update_interval', default=1, type=int) # [1,3,5]
parser.add_argument('--hidden_size', default=256, type=int)
parser.add_argument('--embedding_size', default=16, type=int)
parser.add_argument('--weight_decay', type=float, default=5e-3)
parser.add_argument('--alpha', type=float, default=0.2,
help='Alpha for the leaky_relu.')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print("use cuda: {}".format(args.cuda))
device = torch.device("cuda" if args.cuda else "cpu")
datasets = utils.get_dataset(args.name)
dataset = datasets[0]
# print(dataset)
if args.name == 'Citeseer':
args.lr = 0.0001
args.k = None
args.n_clusters = 6
elif args.name == 'Cora':
args.lr = 0.0001
args.k = None
args.n_clusters = 7
elif args.name == "Pubmed":
args.lr = 0.001
args.k = None
args.n_clusters = 3
else:
args.k = None
args.pretrain_path = f'./pretrain/predaegc_{args.name}_{args.epoch}.pkl'
args.input_dim = dataset.num_features
print(args)
print(dataset)
# trainer(dataset)
```
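A small numerical sketch of the sharpening step used in the training loop: `target_distribution` squares the soft assignments Q and renormalizes, pushing each row toward its dominant cluster (the values below are illustrative):

```python
# Illustrative only: sharpening a 2x2 soft-assignment matrix.
import torch

q = torch.tensor([[0.6, 0.4],
                  [0.3, 0.7]])
p = target_distribution(q)
# p is roughly [[0.733, 0.267], [0.183, 0.817]]: each row is pulled toward
# the cluster it already favored, which is what the KL term then matches.
print(p)
```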
#### File: DAEGC/DAEGC/utils.py
```python
import numpy as np
import torch
from sklearn.preprocessing import normalize
from torch_geometric.datasets import Planetoid
def get_dataset(dataset):
datasets = Planetoid('./dataset', dataset)
return datasets
def data_preprocessing(dataset):
dataset.adj = torch.sparse_coo_tensor(
dataset.edge_index, torch.ones(dataset.edge_index.shape[1]), torch.Size([dataset.x.shape[0], dataset.x.shape[0]])
).to_dense()
dataset.adj_label = dataset.adj
dataset.adj += torch.eye(dataset.x.shape[0])
dataset.adj = normalize(dataset.adj, norm="l1")
dataset.adj = torch.from_numpy(dataset.adj).to(dtype=torch.float)
return dataset
def get_M(adj):
adj_numpy = adj.cpu().numpy()
# t_order
t=2
tran_prob = normalize(adj_numpy, norm="l1", axis=0)
M_numpy = sum([np.linalg.matrix_power(tran_prob, i) for i in range(1, t + 1)]) / t
return torch.Tensor(M_numpy)
``` |
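A tiny illustration of `get_M`: for t=2 it averages the 1-step and 2-step column-normalized transition matrices of the graph (the adjacency below is made up):

```python
# Illustrative only: M for a 3-node path graph with self-loops.
import torch

adj = torch.tensor([[1., 1., 0.],
                    [1., 1., 1.],
                    [0., 1., 1.]])
M = get_M(adj)
print(M.shape)  # torch.Size([3, 3]); columns of the averaged matrix still sum to 1
```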
{
"source": "JLUZHAnalytica/Automatic-Health-Card",
"score": 3
} |
#### File: JLUZHAnalytica/Automatic-Health-Card/cas_service.py
```python
import requests
from lxml import etree
import re
import os
import pickle
class casService(object):
def __init__(self,svr_session):
self.cas_url = ""
self.svr_session = svr_session #service_session
self.session = requests.session() #cas session
# self.load_cascookies_from_file() # use an existing CAS cookie (if any)
self.headers = {
"Accept": "text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8",
"Accept-Language": "zh_CN",
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363",
}
def Login(self,serviceUrl = "",username = None,password = None):
response = self.svr_session.get(url=serviceUrl, headers = self.headers, allow_redirects=False)
if response.status_code == 200:
return True
self.cas_url = response.headers["Location"]
cas_url = response.headers["Location"]
cas_response = self.svr_session.get(cas_url, headers = self.headers, allow_redirects = False)
if cas_response.status_code == 200:  # login page
if username == None or password == None:
print("cas_cookie not valid")
username = input("plase input username:")
password = input("plase input password:")
loginhtml = etree.HTML(cas_response.text)
execution_value = loginhtml.xpath("//input[@name='execution']/@value")
# lt_value = loginhtml.xpath("//div[@id='bottom']/input[@name='lt']/@value")
auth_data = {
"_eventId" : "submit",
"execution" : execution_value[0],
"username" : username,
"password" : password,
"loginType" : 1,
"submit": "登 录"
}
auth_response = self.svr_session.post(self.cas_url,data = auth_data, headers = self.headers, allow_redirects = False)
if auth_response.status_code == 302:
url_with_ticket = auth_response.headers["Location"]
confirm_response = self.session.get(url = url_with_ticket, headers = self.headers, allow_redirects = True)
if confirm_response.status_code == 200:
print("logon on success")
# self.write_cascookies_to_file()
return requests.utils.dict_from_cookiejar(self.session.cookies)
else:
print("logon on failed")
else:
print('auth failed')
return False
# def load_cascookies_from_file(self):
# if os.path.exists("cas_cookies.dat"):
# with open("cas_cookies.dat", 'rb') as f:
# self.session.cookies.update(pickle.load(f))
# def write_cascookies_to_file(self):
# with open("cas_cookies.dat",'wb') as f:
# pickle.dump(self.session.cookies,f)
``` |
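A minimal usage sketch of the helper above; the service URL and credentials are placeholders:

```python
# Illustrative only: log an existing requests session into a CAS-protected service.
import requests

svc_session = requests.session()
cas = casService(svc_session)
# Login returns True if the session is already authenticated, a cookie dict
# after a fresh login, or False when authentication fails.
result = cas.Login(serviceUrl="https://example.edu/protected",
                   username="someuser", password="somepass")
```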
{
"source": "jlvahldiek/monai-deploy-app-sdk",
"score": 2
} |
#### File: core/domain/datapath.py
```python
from pathlib import Path
from typing import Dict, Optional, Union
from monai.deploy.exceptions import IOMappingError, ItemNotExistsError
from .domain import Domain
class DataPath(Domain):
def __init__(self, path: Union[str, Path], read_only: bool = False, metadata: Optional[Dict] = None):
"""Initializes a DataPath object.
Args:
path (Union[str, Path]): Path to the data file/directory.
read_only (bool): True if the file/directory path cannot be modified.
metadata (Optional[Dict]): A metadata dictionary.
"""
super().__init__(metadata=metadata)
self._path: Path = Path(path)
self._read_only: bool = read_only
@property
def path(self):
"""Returns the path of the data file/directory."""
return self._path
@path.setter
def path(self, val):
if self._read_only:
raise IOMappingError("This DataPath is read-only.")
self._path = Path(val)
def to_absolute(self):
"""Convert the internal representation of the path to an absolute path."""
if not self._path.is_absolute():
self._path = self._path.absolute()
class NamedDataPath(Domain):
"""A data path dictionary with name as key and data path as value.
This class is used to store data paths and the provided name of each data path is unique.
A data path for a name is accessible by calling the `get()` method with the name.
If only one data path is available and the name is not specified, the data path is returned.
"""
def __init__(self, paths: Dict[str, DataPath], metadata: Optional[Dict] = None):
super().__init__(metadata=metadata)
self._paths = paths
def get(self, name: Optional[str] = "") -> DataPath:
if name not in self._paths:
if name == "" and len(self._paths) == 1:
return next(iter(self._paths.values()))
else:
raise IOMappingError(
f"'{name}' is not a valid name. It should be one of ({', '.join(self._paths.keys())})."
)
else:
datapath = self._paths.get(name)
if not datapath:
raise ItemNotExistsError(f"A DataPath instance for '{name}' does not exist.")
return datapath
```
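A short sketch of how `NamedDataPath` resolves names; in the SDK it is normally built by the executor, but constructing it directly shows the lookup rules:

```python
# Illustrative only: name-based lookup, with the name optional for a single entry.
paths = NamedDataPath({"image": DataPath("/tmp/input", read_only=True)})
assert paths.get("image") is paths.get()   # one entry, so the name may be omitted
```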
#### File: core/domain/domain.py
```python
from abc import ABC
from typing import Dict, Optional
class Domain(ABC):
"""Domain Class."""
def __init__(self, metadata: Optional[Dict] = None):
"""Initialize a Domain object.
Args:
metadata (Optional[Dict]): A metadata dictionary.
"""
super().__init__()
if metadata is not None:
self._metadata = metadata
else:
self._metadata = {}
def metadata(self) -> Dict:
return self._metadata
```
#### File: deploy/core/execution_context.py
```python
from typing import Optional
from monai.deploy.core.domain.datapath import NamedDataPath
# To avoid "Cannot resolve forward reference" error
# : https://github.com/agronholm/sphinx-autodoc-typehints#dealing-with-circular-imports
from . import operator
from .datastores import Datastore, MemoryDatastore
from .io_context import InputContext, OutputContext
from .models import Model
class BaseExecutionContext:
"""A base execution context for the application.
BaseExecutionContext is responsible for storing the input and output data paths,
and the models.
Those pieces of information are used by the Operator (in `compute()` method) to perform the execution.
The input and output data paths from the application's context are available through
`context.input.get()` and `context.output.get()`.
"""
def __init__(
self,
datastore: Optional[Datastore],
input: NamedDataPath,
output: NamedDataPath,
models: Optional[Model] = None,
):
if datastore is None:
self._storage: Datastore = MemoryDatastore()
else:
self._storage = datastore
self._input = input
self._output = output
if models is None:
self._models = Model("") # set a null model
else:
self._models = models
@property
def storage(self) -> Datastore:
return self._storage
@property
def input(self) -> NamedDataPath:
return self._input
@property
def output(self) -> NamedDataPath:
return self._output
@property
def models(self) -> Model:
return self._models
class ExecutionContext(BaseExecutionContext):
"""An execution context for the operator."""
def __init__(self, context: BaseExecutionContext, op: "operator.Operator"):
super().__init__(context.storage, context.input, context.output, context.models)
self._context = context
self._op = op
self._input_context = InputContext(self)
self._output_context = OutputContext(self)
@property
def op(self):
return self._op
def get_execution_index(self):
"""Returns the execution index for the operator.
The execution index is incremented every time before the operator is executed.
For the first time, the execution index is set to 0.
Returns:
The execution index(int) for the operator.
"""
storage = self._context.storage
parent_node = f"/operators/{self.op.uid}"
key = f"{parent_node}/execution_index"
if storage.exists(key):
return storage.get(key)
else:
storage.put(key, 0)
return 0
def increase_execution_index(self):
"""Increases the execution index for the operator.
This index number would be increased once for each call to the operator
so that the operator can be executed multiple times.
"""
storage = self._context.storage
parent_node = f"/operators/{self.op.uid}"
key = f"{parent_node}/execution_index"
new_execution_index = self.get_execution_index() + 1
storage.put(key, new_execution_index)
return new_execution_index
@property
def input_context(self):
"""Returns the input context for the operator."""
return self._input_context
@property
def output_context(self):
"""Returns the output context for the operator."""
return self._output_context
```
#### File: core/executors/executor.py
```python
from abc import ABC, abstractmethod
from typing import Dict, Optional
# https://github.com/agronholm/sphinx-autodoc-typehints#dealing-with-circular-imports
from monai.deploy.core import application
from monai.deploy.core.datastores import Datastore, DatastoreFactory
class Executor(ABC):
"""This is the base class that enables execution of an application."""
def __init__(self, app: "application.Application", datastore: Optional[Datastore] = None, **kwargs: Dict):
"""Constructor of the class.
Given an application it invokes the compose method on the app, which
in turn creates the necessary operator and links them up.
Args:
app: An application that needs to be executed.
datastore: A data store that is used to store the data.
"""
self._app = app
if datastore:
self._datastore = datastore
else:
self._datastore = DatastoreFactory.create(DatastoreFactory.DEFAULT)
@property
def app(self) -> "application.Application":
"""Returns the application that is executed by the executor."""
return self._app
@property
def datastore(self) -> Datastore:
"""Returns the data store that is used to store the data."""
return self._datastore
@abstractmethod
def run(self):
"""Run the app.
It is called to execute an application.
This method needs to be implemented by specific concrete subclasses
of `Executor`.
"""
pass
```
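A minimal subclass sketch following the contract above; a real executor would walk the application's graph instead of this placeholder:

```python
# Illustrative only: the smallest concrete Executor.
class NoOpExecutor(Executor):
    def run(self):
        # A real implementation would iterate the app's graph and invoke each
        # operator's compute() with a fresh ExecutionContext.
        print(f"Would execute application: {self.app}")

# NoOpExecutor(my_app).run()   # my_app is a hypothetical Application instance
```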
#### File: core/graphs/graph.py
```python
from abc import ABC, abstractmethod
from typing import Dict, Generator, Optional, Set
from monai.deploy.core.operator import Operator
class Graph(ABC):
"""Abstract class for graph."""
@abstractmethod
def add_operator(self, op: Operator):
"""Add a node to the graph."""
pass
@abstractmethod
def add_flow(self, op_u: Operator, op_v: Operator, io_map: Dict[str, Set[str]]):
"""Add an edge to the graph.
Args:
op_u (Operator): A source operator.
op_v (Operator): A destination operator.
io_map (Dict[str, Set[str]]): A dictionary of mapping from the source operator's label to the destination
operator's label(s).
"""
pass
@abstractmethod
def get_io_map(self, op_u: Operator, op_v) -> Dict[str, Set[str]]:
"""Get a mapping from the source operator's output label to the destination operator's input label.
Args:
op_u (Operator): A source operator.
op_v (Operator): A destination operator.
Returns:
A dictionary of mapping from the source operator's output label to the destination operator's
input label(s).
"""
pass
@abstractmethod
def is_root(self, op: Operator) -> bool:
"""Check if the operator is a root operator.
Args:
op (Operator): A node in the graph.
Returns:
True if the operator is a root operator.
"""
pass
@abstractmethod
def is_leaf(self, op: Operator) -> bool:
"""Check if the operator is a leaf operator.
Args:
op (Operator): A node in the graph.
Returns:
True if the operator is a leaf operator.
"""
pass
@abstractmethod
def get_root_operators(self) -> Generator[Operator, None, None]:
"""Get all root operators.
Returns:
A generator of root operators.
"""
pass
@abstractmethod
def get_operators(self) -> Generator[Operator, None, None]:
"""Get all operators.
Returns:
A generator of operators.
"""
pass
@abstractmethod
def gen_worklist(self) -> Generator[Optional[Operator], None, None]:
"""Get worklist."""
pass
@abstractmethod
def gen_next_operators(self, op: Operator) -> Generator[Optional[Operator], None, None]:
"""Get next operators."""
pass
```
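For reference, the `io_map` argument documented above is a plain mapping from an upstream output label to one or more downstream input labels; a hypothetical example:

```python
# Illustrative only: route the upstream "image" output into two downstream inputs.
io_map = {"image": {"image", "mask_seed"}}
# graph.add_flow(loader_op, segmentation_op, io_map)   # hypothetical operators
```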
#### File: core/models/model.py
```python
import os.path
from pathlib import Path
from typing import Any, Dict, ItemsView, List, Tuple
from monai.deploy.exceptions import ItemNotExistsError, UnknownTypeError
# Store all supported model types in the order they should be checked
REGISTERED_MODELS = []
class Model:
"""Represents a model or a model repository.
This encapsulates model's name and path.
If this represents a model repository, the repository's name and path are accessed via 'name' and 'path' attributes.
If this represents a model, the model's name and path are accessed via 'name' and 'path' attributes.
If the model's path is not specified (`Model("")`), the model is considered a null model
and `bool(Model("")) == False`.
All models that this class represents can be retrieved by using `items()` method and a model with specific name
can be retrieved by `get()` method with a model name argument (If only one model is available, you can skip
specifying the model name).
Loaded model object can be accessed via 'predictor' attribute and the predictor can be called
using `__call__` method.
In the `Operator` class, A model is accessible via `context.models` attribute inside `compute` method.
Some subclasses (such as TorchModel) loads model file when `predictor` attribute is accessed so you can
call(`__call__`) the model directly.
>>> class MyOperator(Operator):
>>> def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
>>> model = context.models.get()
>>> result = model(op_input.get().asnumpy())
If you want to load a model file manually, please set 'predictor' attribute to a loaded model object.
>>> class MyOperator(Operator):
>>> def compute(self, op_input: InputContext, op_output: OutputContext, context: ExecutionContext):
>>> import torch
>>> model = context.models.get()
>>> model.predictor = torch.jit.load(model.path, map_location="cpu").eval()
>>> result = model(op_input.get().asnumpy())
Supported model types can be registered using static 'register' method.
"""
model_type: str = "generic"
def __init__(self, path: str, name: str = ""):
"""Constructor of a model.
If name is not provided, the model name is taken from the path.
`_predictor` is set to None and it is expected to be set by the child class when needed.
`_items` is set to a dictionary having itself ({self.name: self}) and it is expected to be cleared
by the child class if the path represents a model repository.
Args:
path (str): A path to a model.
name (str): A name of the model.
"""
self._path = path
if name:
self._name = name
else:
self._name = Path(path).stem
self._predictor = None
# Add self to the list of models
self._items: Dict[str, Model] = {self.name: self}
@property
def predictor(self):
"""Return a predictor of the model.
Returns:
A predictor of the model.
"""
return self._predictor
@predictor.setter
def predictor(self, predictor: Any):
"""Set a predictor of the model.
Args:
predictor: A predictor of the model.
"""
self._predictor = predictor
@property
def path(self):
"""Return a path to the model."""
return self._path
@property
def name(self):
"""Return a name of the model."""
return self._name
@classmethod
def class_name(cls):
"""Return a name of the model class."""
return cls.__name__
@staticmethod
def register(cls_list):
"""Register a list of model classes."""
global REGISTERED_MODELS
REGISTERED_MODELS = cls_list
@staticmethod
def registered_models():
"""Return a list of registered model classes."""
return REGISTERED_MODELS
@classmethod
def accept(cls, path: str) -> Tuple[bool, str]:
"""Check if the path is a type of this model class.
Args:
path (str): A path to a model.
Returns:
(True, <model_type>) if the path is a type of this model class, (False, "") otherwise.
"""
if not os.path.exists(path):
return False, ""
return True, cls.model_type
def get(self, name: str = "") -> "Model":
"""Return a model object by name.
If there is only one model in the repository or the model path, model object can be returned without specifying
name.
If there are more than one models in the repository, the model object can be returned by name whose name
matches the provided name.
Args:
name (str): A name of the model.
Returns:
A model object is returned, matching the provided name if given.
"""
if name:
item = self._items.get(name)
if item:
return item
else:
raise ItemNotExistsError(f"A model with '{name}' does not exist.")
else:
item_count = len(self._items)
if item_count == 1:
return next(iter(self._items.values()))
elif item_count > 1:
raise UnknownTypeError(
f"There are more than one model. It should be one of ({', '.join(self._items.keys())})."
)
else:
return self
def get_model_list(self) -> List[Dict[str, str]]:
"""Return a list of models in the repository.
If this model represents a model repository, then a list of model objects (name and path) is returned.
Otherwise, a single model object list is returned.
Returns:
A list of models (name, path dictionary) in the repository.
"""
model_list = []
model_items = self.items()
for _, m in model_items:
model_list.append({"name": m.name, "path": os.path.abspath(m.path)})
return model_list
def items(self) -> ItemsView[str, "Model"]:
"""Return an ItemsView of models that this Model instance has.
If this model represents a model repository, then an ItemsView of submodel objects is returned.
Otherwise, an ItemsView of a single model object (self) is returned.
Returns:
An ItemView of models: `<model name>: <model object>`.
"""
return self._items.items()
def __call__(self, *args, **kwargs) -> Any:
"""Return a call of predictor of the model.
Args:
*args: A list of positional arguments.
**kwargs: A dictionary of keyword arguments.
Returns:
A call of predictor of the model.
Exceptions:
ItemNotExistsError: If the predictor(model) is not set.
"""
if self.predictor:
return self.predictor(*args, **kwargs)
else:
raise ItemNotExistsError("A predictor of the model is not set.")
def __bool__(self):
"""Return True if the model path is specified."""
return bool(self.path)
```
#### File: core/models/triton_model.py
```python
from pathlib import Path
from .model import Model
class TritonModel(Model):
"""Represents Triton models in the model repository.
Triton Inference Server models are stored in a directory structure like this
(https://github.com/triton-inference-server/server/blob/main/docs/model_repository.md):
::
<model-repository-path>/
<model-name>/
[config.pbtxt]
[<output-labels-file> ...]
<version>/
<model-definition-file>
<version>/
<model-definition-file>
...
<model-name>/
[config.pbtxt]
[<output-labels-file> ...]
<version>/
<model-definition-file>
<version>/
<model-definition-file>
...
...
This class checks if the given path meets the folder structure of Triton:
1) The path should be a folder path.
2) The directory should contain only sub folders (model folders).
3) Each model folder must contain a config.pbtxt file.
a. A config.pbtxt file may contain model name.
In that case, model's name should match with the folder name.
4) Each model folder must include one or more folders having a positive integer value as name.
a. Each such folder must contain a folder or file whose file name (without extension) is 'model'.
It currently doesn't identify which model version would be selected.
Model items identified would have a folder path, not a specific model file path.
"""
model_type: str = "triton"
def __init__(self, path: str, name: str = ""):
"""Initializes a TritonModel.
This assumes that the given path is a valid Triton model repository.
Args:
path (str): A Path to the model repository.
name (str): A name of the model.
"""
super().__init__(path, name)
# Clear existing model item and fill model items
self._items.clear()
model_path: Path = Path(path)
for model_folder in model_path.iterdir():
if model_folder.is_dir():
self._items[model_folder.name] = Model(str(model_folder), model_folder.name)
@classmethod
def accept(cls, path: str):
model_path: Path = Path(path)
# 1) The path should be a folder path.
if not model_path.is_dir():
return False, None
# 2) The directory should contain only sub folders (model folders).
if not all((p.is_dir() for p in model_path.iterdir())):
return False, None
is_triton_model_repository = True
for model_folder in model_path.iterdir():
# 3) Each model folder must contain a config.pbtxt file.
if not (model_folder / "config.pbtxt").exists():
return False, None
# TODO(gigony): We do not check if the config.pbtxt file contains model name for now (3-1).
# We assume that the model name is the same as the folder name.
# 4) Each model folder must include one or more folders having a positive integer value as name.
found_model = False
for version_folder in model_folder.iterdir():
version_folder_name = version_folder.name
if version_folder.is_dir() and version_folder_name.isnumeric() and int(version_folder_name) > 0:
# 4-1) Each such folder must contain a folder or file whose file name (without extension)
# is 'model'.
# TODO(gigony): check config.pbtxt file to see actual model file if specified.
if any(version_folder.glob("model.*")):
found_model = True
else:
return False, None
if not found_model:
is_triton_model_repository = False
break
if is_triton_model_repository:
return True, cls.model_type
return False, None
```
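A brief usage sketch of the checks above; the repository path is hypothetical, and `accept` simply returns `(False, None)` when it does not exist:

```python
# Illustrative only: probe a local folder and, if valid, list its models.
ok, model_type = TritonModel.accept("/models/triton_repo")
if ok:
    repo = TritonModel("/models/triton_repo")
    print([name for name, _ in repo.items()])
```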
#### File: deploy/utils/argparse_types.py
```python
import argparse
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
def valid_dir_path(path: str) -> Path:
"""Helper type checking and type converting method for ArgumentParser.add_argument
to convert string input to pathlib.Path if the given path exists and it is a directory path.
If directory does not exist, create the directory and convert string input to pathlib.Path.
Args:
path: string input path
Returns:
If path exists and is a directory, return absolute path as a pathlib.Path object.
If path exists and is not a directory, raises argparse.ArgumentTypeError.
If path doesn't exist, create the directory and return absolute path as a pathlib.Path object.
"""
dir_path = Path(path).absolute()
if dir_path.exists():
if dir_path.is_dir():
return dir_path
else:
raise argparse.ArgumentTypeError(f"Expected directory path: '{dir_path}' is not a directory")
# create directory
dir_path.mkdir(parents=True)
return dir_path
def valid_existing_dir_path(path: str) -> Path:
"""Helper type checking and type converting method for ArgumentParser.add_argument
to convert string input to pathlib.Path if the given path exists and it is a directory path.
Args:
path: string input path
Returns:
If path exists and is a directory, return absolute path as a pathlib.Path object.
If path doesn't exist or it is not a directory, raises argparse.ArgumentTypeError.
"""
dir_path = Path(path).absolute()
if dir_path.exists() and dir_path.is_dir():
return dir_path
raise argparse.ArgumentTypeError(f"No such directory: '{dir_path}'")
def valid_existing_path(path: str) -> Path:
"""Helper type checking and type converting method for ArgumentParser.add_argument
to convert string input to pathlib.Path if the given file/folder path exists.
Args:
path: string input path
Returns:
If path exists, return absolute path as a pathlib.Path object.
If path doesn't exist, raises argparse.ArgumentTypeError.
"""
file_path = Path(path).absolute()
if file_path.exists():
return file_path
raise argparse.ArgumentTypeError(f"No such file/folder: '{file_path}'")
```
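These helpers are meant to be passed as the `type` argument of `ArgumentParser.add_argument`; a short, illustrative sketch:

```python
# Illustrative only: wire the path validators into an argument parser.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--input", type=valid_existing_path, help="existing file or folder")
parser.add_argument("--output", type=valid_dir_path, help="output folder (created if missing)")
args = parser.parse_args(["--input", __file__, "--output", "/tmp/monailabel-out"])
```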
#### File: tests/unit/test_runner.py
```python
import argparse
from contextlib import contextmanager
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import pytest
from pytest_lazyfixture import lazy_fixture
class ContainsString(str):
def __eq__(self, other):
return self in other
class DoesntContainsString(str):
def __eq__(self, other):
return self not in other
@contextmanager
def not_raises(exception):
try:
yield
except exception as err:
raise pytest.fail(f"DID RAISE {exception}") from err
@pytest.mark.parametrize("return_value", [0, 125])
@patch("monai.deploy.runner.runner.run_cmd")
@patch("tempfile.TemporaryDirectory")
def test_fetch_map_manifest(
tempdir, mock_run_cmd, return_value, sample_map_name, faux_app_manifest, faux_pkg_manifest, mock_manifest_export_dir
):
from monai.deploy.runner import runner
tempdir.return_value.__enter__.return_value = mock_manifest_export_dir
mock_run_cmd.return_value = return_value
expected_app_manifest = {}
expected_pkg_manifest = {}
if return_value == 0:
expected_app_manifest = faux_app_manifest
expected_pkg_manifest = faux_pkg_manifest
actual_app_manifest, actual_pkg_manifest, returncode = runner.fetch_map_manifest(sample_map_name)
assert returncode == return_value
TestCase().assertDictEqual(actual_app_manifest, expected_app_manifest)
TestCase().assertDictEqual(actual_pkg_manifest, expected_pkg_manifest)
mock_run_cmd.assert_called_once_with(ContainsString(sample_map_name))
mock_run_cmd.assert_called_once_with(ContainsString(mock_manifest_export_dir))
@pytest.mark.parametrize(
"return_value, input_path, output_path, quiet",
[
(0, lazy_fixture("faux_folder"), Path("output/"), False),
(0, lazy_fixture("faux_folder"), Path("output/"), True),
(125, lazy_fixture("faux_folder"), Path("output/"), False),
],
)
@patch("monai.deploy.runner.runner.run_cmd")
def test_run_app_without_gpu_request(
mock_run_cmd, return_value, input_path, output_path, quiet, sample_map_name, faux_app_manifest, faux_pkg_manifest
):
from monai.deploy.runner import runner
mock_run_cmd.return_value = return_value
app_manifest = faux_app_manifest
expected_container_input = Path(app_manifest["input"]["path"])
expected_container_output = Path(app_manifest["output"]["path"])
expected_container_input /= app_manifest["working-directory"]
expected_container_output /= app_manifest["working-directory"]
returncode = runner.run_app(sample_map_name, input_path, output_path, app_manifest, faux_pkg_manifest, quiet)
assert returncode == return_value
mock_run_cmd.assert_called_once_with(ContainsString("docker run"))
mock_run_cmd.assert_called_once_with(ContainsString(sample_map_name))
mock_run_cmd.assert_called_once_with(ContainsString(input_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_input))
mock_run_cmd.assert_called_once_with(ContainsString(output_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_output))
mock_run_cmd.assert_called_once_with(ContainsString("STDERR"))
if quiet:
mock_run_cmd.assert_called_once_with(DoesntContainsString("STDOUT"))
else:
mock_run_cmd.assert_called_once_with(ContainsString("STDOUT"))
@pytest.mark.parametrize(
"return_value, input_path, output_path, quiet",
[
(0, lazy_fixture("faux_folder"), Path("output/"), False),
(0, lazy_fixture("faux_folder"), Path("output/"), True),
(125, lazy_fixture("faux_folder"), Path("output/"), False),
],
)
@patch("monai.deploy.runner.runner.run_cmd")
def test_run_app_with_gpu_request(
mock_run_cmd,
return_value,
input_path,
output_path,
quiet,
sample_map_name,
faux_app_manifest,
faux_pkg_manifest_with_gpu,
):
from monai.deploy.runner import runner
mock_run_cmd.return_value = return_value
app_manifest = faux_app_manifest
expected_container_input = Path(app_manifest["input"]["path"])
expected_container_output = Path(app_manifest["output"]["path"])
expected_container_input /= app_manifest["working-directory"]
expected_container_output /= app_manifest["working-directory"]
returncode = runner.run_app(
sample_map_name, input_path, output_path, app_manifest, faux_pkg_manifest_with_gpu, quiet
)
assert returncode == return_value
mock_run_cmd.assert_called_once_with(ContainsString("nvidia-docker run"))
mock_run_cmd.assert_called_once_with(ContainsString(sample_map_name))
mock_run_cmd.assert_called_once_with(ContainsString(input_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_input))
mock_run_cmd.assert_called_once_with(ContainsString(output_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_output))
mock_run_cmd.assert_called_once_with(ContainsString("STDERR"))
if quiet:
mock_run_cmd.assert_called_once_with(DoesntContainsString("STDOUT"))
else:
mock_run_cmd.assert_called_once_with(ContainsString("STDOUT"))
@pytest.mark.parametrize(
"return_value, input_path, output_path, quiet",
[
(0, lazy_fixture("faux_folder_with_space"), Path("output with space/"), False),
(0, lazy_fixture("faux_folder_with_space"), Path("output with space/"), True),
(125, lazy_fixture("faux_folder_with_space"), Path("output with space/"), False),
],
)
@patch("monai.deploy.runner.runner.run_cmd")
def test_run_app_for_input_output_path_with_space(
mock_run_cmd, return_value, input_path, output_path, quiet, sample_map_name, faux_app_manifest, faux_pkg_manifest
):
from monai.deploy.runner import runner
mock_run_cmd.return_value = return_value
app_manifest = faux_app_manifest
expected_container_input = Path(app_manifest["input"]["path"])
expected_container_output = Path(app_manifest["output"]["path"])
expected_container_input /= app_manifest["working-directory"]
expected_container_output /= app_manifest["working-directory"]
returncode = runner.run_app(sample_map_name, input_path, output_path, app_manifest, faux_pkg_manifest, quiet)
input_path_with_quotes = f'"{input_path.absolute()}"'
output_path_with_quotes = f'"{output_path.absolute()}"'
assert returncode == return_value
mock_run_cmd.assert_called_once_with(ContainsString(sample_map_name))
mock_run_cmd.assert_called_once_with(ContainsString(input_path_with_quotes))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_input))
mock_run_cmd.assert_called_once_with(ContainsString(output_path_with_quotes))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_output))
mock_run_cmd.assert_called_once_with(ContainsString("STDERR"))
if quiet:
mock_run_cmd.assert_called_once_with(DoesntContainsString("STDOUT"))
else:
mock_run_cmd.assert_called_once_with(ContainsString("STDOUT"))
@pytest.mark.parametrize(
"return_value, input_path, output_path, quiet",
[
(0, lazy_fixture("faux_folder"), Path("output/"), False),
(0, lazy_fixture("faux_folder"), Path("output/"), True),
(125, lazy_fixture("faux_folder"), Path("output/"), False),
],
)
@patch("monai.deploy.runner.runner.run_cmd")
def test_run_app_for_absolute_paths_in_app_manifest(
mock_run_cmd,
return_value,
input_path,
output_path,
quiet,
sample_map_name,
faux_app_manifest_with_absolute_path,
faux_pkg_manifest,
):
from monai.deploy.runner import runner
mock_run_cmd.return_value = return_value
app_manifest = faux_app_manifest_with_absolute_path
expected_container_input = Path(app_manifest["input"]["path"])
expected_container_output = Path(app_manifest["output"]["path"])
returncode = runner.run_app(sample_map_name, input_path, output_path, app_manifest, faux_pkg_manifest, quiet)
assert returncode == return_value
mock_run_cmd.assert_called_once_with(ContainsString(sample_map_name))
mock_run_cmd.assert_called_once_with(ContainsString(input_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_input))
mock_run_cmd.assert_called_once_with(DoesntContainsString(app_manifest["working-directory"]))
mock_run_cmd.assert_called_once_with(ContainsString(output_path))
mock_run_cmd.assert_called_once_with(ContainsString(expected_container_output))
mock_run_cmd.assert_called_once_with(DoesntContainsString(app_manifest["working-directory"]))
mock_run_cmd.assert_called_once_with(ContainsString("STDERR"))
if quiet:
mock_run_cmd.assert_called_once_with(DoesntContainsString("STDOUT"))
else:
mock_run_cmd.assert_called_once_with(ContainsString("STDOUT"))
@pytest.mark.parametrize(
"which_return, verify_image_return, expected_return_value",
[(True, True, True), (False, True, False), (True, False, False), (False, False, False)],
)
@patch("shutil.which")
@patch("monai.deploy.runner.runner.verify_image")
def test_dependency_verification(
mock_verify_image, mock_which, which_return, verify_image_return, expected_return_value, sample_map_name
):
from monai.deploy.runner import runner
mock_which.return_value = which_return
mock_verify_image.return_value = verify_image_return
actual_return_value = runner.dependency_verification(sample_map_name)
if which_return:
mock_verify_image.assert_called_once_with(sample_map_name)
assert expected_return_value == actual_return_value
@pytest.mark.parametrize(
"dependency_verification_return, fetch_map_manifest_return, run_app_return",
[(True, (lazy_fixture("faux_app_manifest"), lazy_fixture("faux_pkg_manifest"), 0), 0)],
)
@pytest.mark.parametrize(
"parsed_args",
[argparse.Namespace(map=lazy_fixture("sample_map_name"), input="input", output="output", quiet=False)],
)
@patch("monai.deploy.runner.runner.run_app")
@patch("monai.deploy.runner.runner.pkg_specific_dependency_verification")
@patch("monai.deploy.runner.runner.fetch_map_manifest")
@patch("monai.deploy.runner.runner.dependency_verification")
def test_main(
mock_dependency_verification,
mock_fetch_map_manifest,
mock_pkg_specific_dependency_verification,
mock_run_app,
dependency_verification_return,
fetch_map_manifest_return,
run_app_return,
parsed_args,
):
from monai.deploy.runner import runner
mock_dependency_verification.return_value = dependency_verification_return
mock_fetch_map_manifest.return_value = fetch_map_manifest_return
mock_pkg_specific_dependency_verification.return_value = True
mock_run_app.return_value = run_app_return
with not_raises(SystemExit) as _:
runner.main(parsed_args)
@pytest.mark.parametrize(
"dependency_verification_return, fetch_map_manifest_return, pkg_specific_dependency_verification_return, run_app_return",
[
(True, (lazy_fixture("faux_app_manifest"), lazy_fixture("faux_pkg_manifest"), 0), False, 0),
(True, (lazy_fixture("faux_app_manifest"), lazy_fixture("faux_pkg_manifest"), 0), True, 125),
(True, ({}, {}, 125), True, 0),
(False, ({}, {}, 125), True, 125),
(False, (lazy_fixture("faux_app_manifest"), lazy_fixture("faux_pkg_manifest"), 0), True, 0),
(False, (lazy_fixture("faux_app_manifest"), lazy_fixture("faux_pkg_manifest"), 0), True, 125),
],
)
@pytest.mark.parametrize(
"parsed_args",
[argparse.Namespace(map=lazy_fixture("sample_map_name"), input="input", output="output", quiet=False)],
)
@patch("monai.deploy.runner.runner.run_app")
@patch("monai.deploy.runner.runner.pkg_specific_dependency_verification")
@patch("monai.deploy.runner.runner.fetch_map_manifest")
@patch("monai.deploy.runner.runner.dependency_verification")
def test_main_error_conditions(
mock_dependency_verification,
mock_fetch_map_manifest,
mock_pkg_specific_dependency_verification,
mock_run_app,
dependency_verification_return,
fetch_map_manifest_return,
pkg_specific_dependency_verification_return,
run_app_return,
parsed_args,
):
from monai.deploy.runner import runner
mock_dependency_verification.return_value = dependency_verification_return
mock_fetch_map_manifest.return_value = fetch_map_manifest_return
mock_pkg_specific_dependency_verification.return_value = pkg_specific_dependency_verification_return
mock_run_app.return_value = run_app_return
with pytest.raises(SystemExit) as wrapped_error:
runner.main(parsed_args)
assert wrapped_error.type == SystemExit
``` |
{
"source": "jlvahldiek/MONAILabel",
"score": 2
} |
#### File: MONAILabel/monailabel/app.py
```python
import os
import pathlib
from fastapi import FastAPI
from fastapi.middleware import Middleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
from monailabel.config import settings
from monailabel.endpoints import (
activelearning,
batch_infer,
datastore,
infer,
info,
logs,
ohif,
proxy,
scoring,
session,
train,
wsi_infer,
)
from monailabel.interfaces.utils.app import app_instance, clear_cache
app = FastAPI(
title=settings.MONAI_LABEL_PROJECT_NAME,
openapi_url="/openapi.json",
docs_url=None,
redoc_url="/docs",
middleware=[
Middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.MONAI_LABEL_CORS_ORIGINS]
if settings.MONAI_LABEL_CORS_ORIGINS
else ["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
],
)
static_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "endpoints", "static")
project_root_absolute = pathlib.Path(__file__).parent.parent.resolve()
app.mount(
"/static",
StaticFiles(directory=os.path.join(project_root_absolute, "monailabel", "endpoints", "static")),
name="static",
)
app.include_router(info.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(infer.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(wsi_infer.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(batch_infer.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(train.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(activelearning.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(scoring.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(datastore.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(logs.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(ohif.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(proxy.router, prefix=settings.MONAI_LABEL_API_STR)
app.include_router(session.router, prefix=settings.MONAI_LABEL_API_STR)
@app.get("/", include_in_schema=False)
async def custom_swagger_ui_html():
html = get_swagger_ui_html(openapi_url=app.openapi_url, title=app.title + " - APIs")
body = html.body.decode("utf-8")
body = body.replace("showExtensions: true,", "showExtensions: true, defaultModelsExpandDepth: -1,")
return HTMLResponse(body)
@app.get("/favicon.ico", include_in_schema=False)
async def favicon():
return FileResponse(os.path.join(static_dir, "favicon.ico"), media_type="image/x-icon")
@app.post("/reload", include_in_schema=False)
def reload():
clear_cache()
return {}
@app.on_event("startup")
async def startup_event():
instance = app_instance()
instance.server_mode(True)
instance.on_init_complete()
```
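A minimal sketch of serving the app above locally; it assumes `uvicorn` is installed and that `app_instance()` can resolve a configured MONAI Label app (neither is shown in this file).
```python
# Hypothetical local launch of the FastAPI app defined above (assumes uvicorn is installed
# and that a MONAI Label app is configured for app_instance() to load).
import uvicorn

from monailabel.app import app

if __name__ == "__main__":
    # Host and port are illustrative defaults, not values taken from the project settings.
    uvicorn.run(app, host="127.0.0.1", port=8000)
```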
#### File: monailabel/client/client.py
```python
import cgi
import http.client
import json
import logging
import mimetypes
import os
import ssl
import tempfile
from urllib.parse import quote_plus, urlparse
import requests
logger = logging.getLogger(__name__)
class MONAILabelClient:
"""
Basic MONAILabel Client to invoke infer/train APIs over http/https
"""
def __init__(self, server_url, tmpdir=None, client_id=None):
"""
:param server_url: Server URL for MONAILabel (e.g. http://127.0.0.1:8000)
:param tmpdir: Temp directory to save temporary files. If None then it uses tempfile.tempdir
:param client_id: Client ID that will be added for all basic requests
"""
self._server_url = server_url.rstrip("/").strip()
self._tmpdir = tmpdir if tmpdir else tempfile.tempdir if tempfile.tempdir else "/tmp"
self._client_id = client_id
def _update_client_id(self, params):
if params:
params["client_id"] = self._client_id
else:
params = {"client_id": self._client_id}
return params
def get_server_url(self):
"""
Return server url
:return: the url for monailabel server
"""
return self._server_url
def set_server_url(self, server_url):
"""
Set url for monailabel server
:param server_url: server url for monailabel
"""
self._server_url = server_url.rstrip("/").strip()
def info(self):
"""
Invoke /info/ request over MONAILabel Server
:return: json response
"""
selector = "/info/"
status, response, _ = MONAILabelUtils.http_method("GET", self._server_url, selector)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR, f"Status: {status}; Response: {response}", status, response
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def next_sample(self, strategy, params):
"""
Get Next sample
:param strategy: Name of strategy to be used for fetching next sample
:param params: Additional JSON params as part of strategy request
:return: json response which contains information about next image selected for annotation
"""
params = self._update_client_id(params)
selector = f"/activelearning/{MONAILabelUtils.urllib_quote_plus(strategy)}"
status, response, _ = MONAILabelUtils.http_method("POST", self._server_url, selector, params)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR, f"Status: {status}; Response: {response}", status, response
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def create_session(self, image_in, params=None):
"""
Create New Session
:param image_in: filepath for image to be sent to server as part of session creation
        :param params: additional JSON params as part of session request
:return: json response which contains session id and other details
"""
selector = "/session/"
params = self._update_client_id(params)
status, response, _ = MONAILabelUtils.http_upload("PUT", self._server_url, selector, params, [image_in])
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR, f"Status: {status}; Response: {response}", status, response
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def get_session(self, session_id):
"""
Get Session
:param session_id: Session Id
:return: json response which contains more details about the session
"""
selector = f"/session/{MONAILabelUtils.urllib_quote_plus(session_id)}"
status, response, _ = MONAILabelUtils.http_method("GET", self._server_url, selector)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR, f"Status: {status}; Response: {response}", status, response
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def remove_session(self, session_id):
"""
Remove any existing Session
:param session_id: Session Id
:return: json response
"""
selector = f"/session/{MONAILabelUtils.urllib_quote_plus(session_id)}"
status, response, _ = MONAILabelUtils.http_method("DELETE", self._server_url, selector)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR, f"Status: {status}; Response: {response}", status, response
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def upload_image(self, image_in, image_id=None, params=None):
"""
Upload New Image to MONAILabel Datastore
:param image_in: Image File Path
        :param image_id: Force Image ID; if not provided then the server will auto-generate a new Image ID
:param params: Additional JSON params
:return: json response which contains image id and other details
"""
selector = f"/datastore/?image={MONAILabelUtils.urllib_quote_plus(image_id)}"
files = {"file": image_in}
params = self._update_client_id(params)
fields = {"params": json.dumps(params) if params else "{}"}
status, response, _ = MONAILabelUtils.http_multipart("PUT", self._server_url, selector, fields, files)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {response}",
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def save_label(self, image_id, label_in, tag="", params=None):
"""
Save/Submit Label
        :param image_id: Image Id for which label needs to be saved/submitted
:param label_in: Label File path which shall be saved/submitted
:param tag: Save label against tag in datastore
:param params: Additional JSON params for the request
:return: json response
"""
selector = f"/datastore/label?image={MONAILabelUtils.urllib_quote_plus(image_id)}"
if tag:
selector += f"&tag={MONAILabelUtils.urllib_quote_plus(tag)}"
params = self._update_client_id(params)
fields = {
"params": json.dumps(params),
}
files = {"label": label_in}
status, response, _ = MONAILabelUtils.http_multipart("PUT", self._server_url, selector, fields, files)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {response}",
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def infer(self, model, image_id, params, label_in=None, file=None, session_id=None):
"""
Run Infer
:param model: Name of Model
:param image_id: Image Id
:param params: Additional configs/json params as part of Infer request
:param label_in: File path for label mask which is needed to run Inference (e.g. In case of Scribbles)
:param file: File path for Image (use raw image instead of image_id)
:param session_id: Session ID (use existing session id instead of image_id)
:return: response_file (label mask), response_body (json result/output params)
"""
selector = "/infer/{}?image={}".format(
MONAILabelUtils.urllib_quote_plus(model),
MONAILabelUtils.urllib_quote_plus(image_id),
)
if session_id:
selector += f"&session_id={MONAILabelUtils.urllib_quote_plus(session_id)}"
params = self._update_client_id(params)
fields = {"params": json.dumps(params) if params else "{}"}
files = {"label": label_in} if label_in else {}
files.update({"file": file} if file and not session_id else {})
status, form, files = MONAILabelUtils.http_multipart("POST", self._server_url, selector, fields, files)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {form}",
)
form = json.loads(form) if isinstance(form, str) else form
params = form.get("params") if files else form
params = json.loads(params) if isinstance(params, str) else params
image_out = MONAILabelUtils.save_result(files, self._tmpdir)
return image_out, params
def wsi_infer(self, model, image_id, body=None, output="dsa", session_id=None):
"""
Run WSI Infer in case of Pathology App
:param model: Name of Model
:param image_id: Image Id
:param body: Additional configs/json params as part of Infer request
:param output: Output File format (dsa|asap|json)
:param session_id: Session ID (use existing session id instead of image_id)
:return: response_file (None), response_body
"""
selector = "/infer/wsi/{}?image={}".format(
MONAILabelUtils.urllib_quote_plus(model),
MONAILabelUtils.urllib_quote_plus(image_id),
)
if session_id:
selector += f"&session_id={MONAILabelUtils.urllib_quote_plus(session_id)}"
if output:
selector += f"&output={MONAILabelUtils.urllib_quote_plus(output)}"
body = self._update_client_id(body if body else {})
status, form, _ = MONAILabelUtils.http_method("POST", self._server_url, selector, body)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {form}",
)
return None, form
def train_start(self, model, params):
"""
Run Train Task
:param model: Name of Model
:param params: Additional configs/json params as part of Train request
:return: json response
"""
params = self._update_client_id(params)
selector = "/train/"
if model:
selector += MONAILabelUtils.urllib_quote_plus(model)
status, response, _ = MONAILabelUtils.http_method("POST", self._server_url, selector, params)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {response}",
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def train_stop(self):
"""
Stop any running Train Task(s)
:return: json response
"""
selector = "/train/"
status, response, _ = MONAILabelUtils.http_method("DELETE", self._server_url, selector)
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {response}",
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
def train_status(self, check_if_running=False):
"""
Check Train Task Status
:param check_if_running: Fast mode. Only check if training is Running
        :return: boolean if check_if_running is enabled; else json response that contains full details
"""
selector = "/train/"
if check_if_running:
selector += "?check_if_running=true"
status, response, _ = MONAILabelUtils.http_method("GET", self._server_url, selector)
if check_if_running:
return status == 200
if status != 200:
raise MONAILabelClientException(
MONAILabelError.SERVER_ERROR,
f"Status: {status}; Response: {response}",
)
response = response.decode("utf-8") if isinstance(response, bytes) else response
logging.debug(f"Response: {response}")
return json.loads(response)
class MONAILabelError:
"""
Type of Inference Model
Attributes:
SERVER_ERROR - Server Error
SESSION_EXPIRED - Session Expired
UNKNOWN - Unknown Error
"""
SERVER_ERROR = 1
SESSION_EXPIRED = 2
UNKNOWN = 3
class MONAILabelClientException(Exception):
"""
MONAILabel Client Exception
"""
__slots__ = ["error", "msg"]
def __init__(self, error, msg, status_code=None, response=None):
"""
:param error: Error code represented by MONAILabelError
:param msg: Error message
:param status_code: HTTP Response code
:param response: HTTP Response
"""
self.error = error
self.msg = msg
self.status_code = status_code
self.response = response
class MONAILabelUtils:
@staticmethod
def http_method(method, server_url, selector, body=None):
logging.debug(f"{method} {server_url}{selector}")
parsed = urlparse(server_url)
path = parsed.path.rstrip("/")
selector = path + "/" + selector.lstrip("/")
logging.debug(f"URI Path: {selector}")
parsed = urlparse(server_url)
if parsed.scheme == "https":
logger.debug("Using HTTPS mode")
# noinspection PyProtectedMember
conn = http.client.HTTPSConnection(parsed.hostname, parsed.port, context=ssl._create_unverified_context())
else:
conn = http.client.HTTPConnection(parsed.hostname, parsed.port)
headers = {}
if body:
if isinstance(body, dict):
body = json.dumps(body)
content_type = "application/json"
else:
content_type = "text/plain"
headers = {"content-type": content_type, "content-length": str(len(body))}
conn.request(method, selector, body=body, headers=headers)
return MONAILabelUtils.send_response(conn)
@staticmethod
def http_upload(method, server_url, selector, fields, files):
logging.debug(f"{method} {server_url}{selector}")
url = server_url.rstrip("/") + "/" + selector.lstrip("/")
logging.debug(f"URL: {url}")
files = [("files", (os.path.basename(f), open(f, "rb"))) for f in files]
response = requests.post(url, files=files) if method == "POST" else requests.put(url, files=files, data=fields)
return response.status_code, response.text, None
@staticmethod
def http_multipart(method, server_url, selector, fields, files):
logging.debug(f"{method} {server_url}{selector}")
content_type, body = MONAILabelUtils.encode_multipart_formdata(fields, files)
headers = {"content-type": content_type, "content-length": str(len(body))}
parsed = urlparse(server_url)
path = parsed.path.rstrip("/")
selector = path + "/" + selector.lstrip("/")
logging.debug(f"URI Path: {selector}")
conn = http.client.HTTPConnection(parsed.hostname, parsed.port)
conn.request(method, selector, body, headers)
return MONAILabelUtils.send_response(conn, content_type)
@staticmethod
def send_response(conn, content_type="application/json"):
response = conn.getresponse()
logging.debug(f"HTTP Response Code: {response.status}")
logging.debug(f"HTTP Response Message: {response.reason}")
logging.debug(f"HTTP Response Headers: {response.getheaders()}")
response_content_type = response.getheader("content-type", content_type)
logging.debug(f"HTTP Response Content-Type: {response_content_type}")
if "multipart" in response_content_type:
if response.status == 200:
form, files = MONAILabelUtils.parse_multipart(response.fp if response.fp else response, response.msg)
logging.debug(f"Response FORM: {form}")
logging.debug(f"Response FILES: {files.keys()}")
return response.status, form, files
else:
return response.status, response.read(), None
logging.debug("Reading status/content from simple response!")
return response.status, response.read(), None
@staticmethod
def save_result(files, tmpdir):
logging.info(f"Files: {files.keys()} => {tmpdir}")
for name in files:
data = files[name]
result_file = os.path.join(tmpdir, name)
logging.info(f"Saving {name} to {result_file}; Size: {len(data)}")
dir_path = os.path.dirname(os.path.realpath(result_file))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(result_file, "wb") as f:
if isinstance(data, bytes):
f.write(data)
else:
f.write(data.encode("utf-8"))
# Currently only one file per response supported
return result_file
@staticmethod
def encode_multipart_formdata(fields, files):
limit = "----------lImIt_of_THE_fIle_eW_$"
lines = []
for (key, value) in fields.items():
lines.append("--" + limit)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append("")
lines.append(value)
for (key, filename) in files.items():
if isinstance(filename, tuple):
filename, data = filename
else:
with open(filename, mode="rb") as f:
data = f.read()
lines.append("--" + limit)
lines.append(f'Content-Disposition: form-data; name="{key}"; filename="{filename}"')
lines.append("Content-Type: %s" % MONAILabelUtils.get_content_type(filename))
lines.append("")
lines.append(data)
lines.append("--" + limit + "--")
lines.append("")
body = bytearray()
for line in lines:
body.extend(line if isinstance(line, bytes) else line.encode("utf-8"))
body.extend(b"\r\n")
content_type = "multipart/form-data; boundary=%s" % limit
return content_type, body
@staticmethod
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or "application/octet-stream"
@staticmethod
def parse_multipart(fp, headers):
fs = cgi.FieldStorage(
fp=fp,
environ={"REQUEST_METHOD": "POST"},
headers=headers,
keep_blank_values=True,
)
form = {}
files = {}
if hasattr(fs, "list") and isinstance(fs.list, list):
for f in fs.list:
logger.debug(f"FILE-NAME: {f.filename}; NAME: {f.name}; SIZE: {len(f.value)}")
if f.filename:
files[f.filename] = f.value
else:
form[f.name] = f.value
return form, files
@staticmethod
def urllib_quote_plus(s):
return quote_plus(s)
```
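A short, illustrative use of `MONAILabelClient`; the server URL, model name, and image id below are placeholders and assume a MONAI Label server is already running with that image in its datastore.
```python
# Illustrative client calls against a running MONAI Label server
# (URL, model name, and image id are placeholders, not values from this repository).
from monailabel.client.client import MONAILabelClient

client = MONAILabelClient(server_url="http://127.0.0.1:8000", client_id="editor-demo")

info = client.info()  # GET /info/ -> app name, models, datastore summary
print(info.get("name"), list(info.get("models", {}).keys()))

# Run inference on an image that already exists in the server datastore.
label_file, result_params = client.infer(model="segmentation", image_id="spleen_10", params={})
print("label mask written to:", label_file)
```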
#### File: monailabel/datastore/dsa.py
```python
import hashlib
import logging
import os
import pathlib
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List
import girder_client
import numpy as np
from PIL import Image
from monailabel.interfaces.datastore import Datastore, DefaultLabelTag
logger = logging.getLogger(__name__)
class DSADatastore(Datastore):
def __init__(self, api_url, folder=None, api_key=None, annotation_groups=None, asset_store_path="", cache_path=""):
self.api_url = api_url
self.api_key = api_key
self.folders = folder.split(",") if folder else []
self.folders = {f.strip() for f in self.folders}
self.annotation_groups = [a.lower() if a else a for a in annotation_groups] if annotation_groups else []
self.asset_store_path = asset_store_path
uri_hash = hashlib.md5(api_url.encode("utf-8")).hexdigest()
self.cache_path = (
os.path.join(cache_path, uri_hash)
if cache_path
else os.path.join(pathlib.Path.home(), ".cache", "monailabel", uri_hash)
)
logger.info(f"DSA:: Api Url: {api_url}")
logger.info(f"DSA:: Api Key: {'*' * len(api_key) if api_key else ''}")
logger.info(f"DSA:: Folder (Images): {folder}")
logger.info(f"DSA:: Annotation Groups: {annotation_groups}")
logger.info(f"DSA:: Local Asset Store Path: {asset_store_path}")
self.gc = girder_client.GirderClient(apiUrl=api_url)
if api_key:
self.gc.authenticate(apiKey=api_key)
def name(self) -> str:
return "DSA Datastore"
def set_name(self, name: str):
pass
def description(self) -> str:
return "Digital Slide Archive"
def set_description(self, description: str):
pass
def datalist(self) -> List[Dict[str, Any]]:
return [
{
"api_url": self.api_url,
"image": image_id,
"label": image_id,
"groups": self.annotation_groups,
}
for image_id in self.get_labeled_images()
]
def get_labels_by_image_id(self, image_id: str) -> Dict[str, str]:
return {DefaultLabelTag.FINAL.name: image_id}
def get_label_by_image_id(self, image_id: str, tag: str) -> str:
return image_id
def get_image(self, image_id: str, params=None) -> Any:
try:
name = self.get_image_info(image_id)["name"]
except girder_client.HttpError:
image_id, name = self._name_to_id(image_id)
location = params.get("location", [0, 0])
size = params.get("size", [0, 0])
        if sum(location) <= 0 and sum(size) <= 0:  # whole slide image
dest = os.path.join(self.cache_path, name)
if not os.path.exists(dest):
logger.info(f"Downloading: {image_id} => {name} => {dest}")
self.gc.downloadItem(itemId=image_id, dest=self.cache_path)
return dest
parameters = {
"left": location[0],
"top": location[1],
"regionWidth": size[0],
"regionHeight": size[1],
"units": "base_pixels",
"encoding": "PNG",
}
resp = self.gc.get(f"item/{image_id}/tiles/region", parameters=parameters, jsonResp=False)
img = Image.open(BytesIO(resp.content)).convert("RGB")
return np.asarray(img, dtype=np.uint8)
def _name_to_id(self, name):
folders = self.folders if self.folders else self._get_all_folders()
for folder in folders:
data = self.gc.get("item", parameters={"folderId": folder, "limit": 0})
for d in data:
                if d.get("largeImage") and (d["name"] == name or Path(d["name"]).stem == name):
return d["_id"], d["name"]
return name
def get_image_uri(self, image_id: str) -> str:
try:
name = self.get_image_info(image_id)["name"]
except girder_client.HttpError:
image_id, name = self._name_to_id(image_id)
if self.asset_store_path:
data = self.gc.get(f"item/{image_id}/files", parameters={"limit": 0})
assets = [d["assetstoreId"] for d in data]
for asset in assets:
files = self.gc.get(f"assetstore/{asset}/files", parameters={"limit": 0})
for f in files:
if f["itemId"] == image_id:
return str(os.path.join(self.asset_store_path, f["path"]))
else:
cached = os.path.join(self.cache_path, name)
if os.path.exists(cached):
return str(cached)
return f"{self.api_url}/item/{image_id}"
def get_label(self, label_id: str, label_tag: str, params=None) -> Any:
return self.gc.get(f"annotation/item/{label_id}")
def get_label_uri(self, label_id: str, label_tag: str) -> str:
return f"{self.api_url}/annotation/item/{label_id}"
def get_image_info(self, image_id: str) -> Dict[str, Any]:
return self.gc.getItem(image_id) # type: ignore
def get_label_info(self, label_id: str, label_tag: str) -> Dict[str, Any]:
return {}
def _get_annotated_images(self):
data = self.gc.get("annotation", parameters={"limit": 0})
images = []
for d in data:
if not self.annotation_groups:
images.append(d["itemId"])
continue
# get annotations and find if any matching groups exist
matched = [
g for g in d["groups"] if g in self.annotation_groups or (g and g.lower() in self.annotation_groups)
]
if matched:
images.append(d["itemId"])
return images
def get_labeled_images(self) -> List[str]:
images = self.list_images()
annotated = self._get_annotated_images()
return [image for image in images if image in annotated]
def get_unlabeled_images(self) -> List[str]:
images = self.list_images()
labeled = self.get_labeled_images()
return [image for image in images if image not in labeled]
def _get_all_folders(self):
folders = []
for collection in self.gc.listCollection():
for folder in self.gc.listFolder(parentId=collection["_id"], parentFolderType="collection"):
folders.append(folder["_id"])
return folders
def list_images(self) -> List[str]:
images = []
folders = self.folders if self.folders else self._get_all_folders()
for folder in folders:
for item in self.gc.get("item", parameters={"folderId": folder, "limit": 0}):
if item.get("largeImage"):
images.append(item["_id"])
return images
def refresh(self) -> None:
pass
def add_image(self, image_id: str, image_filename: str, image_info: Dict[str, Any]) -> str:
raise NotImplementedError
def remove_image(self, image_id: str) -> None:
raise NotImplementedError
def save_label(self, image_id: str, label_filename: str, label_tag: str, label_info: Dict[str, Any]) -> str:
raise NotImplementedError
def remove_label(self, label_id: str, label_tag: str) -> None:
raise NotImplementedError
def update_image_info(self, image_id: str, info: Dict[str, Any]) -> None:
raise NotImplementedError
def update_label_info(self, label_id: str, label_tag: str, info: Dict[str, Any]) -> None:
raise NotImplementedError
def status(self) -> Dict[str, Any]:
return {
"total": len(self.list_images()),
"completed": len(self.get_labeled_images()),
}
def json(self):
return self.datalist()
def main():
import json
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] [%(process)s] [%(threadName)s] [%(levelname)s] (%(name)s:%(lineno)d) - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
api_url = "http://0.0.0.0:8080/api/v1"
folder = None
annotation_groups = None
asset_store_path = None
api_key = None # "<KEY>"
# api_url = "https://demo.kitware.com/histomicstk/api/v1"
# folder = "5bbdeba3e629140048d017bb"
# annotation_groups = ["mostly_tumor"]
# asset_store_path = None
# api_key = None
ds = DSADatastore(api_url, folder, api_key, annotation_groups, asset_store_path)
images = ds.list_images()
print(f"Images: {images}")
labeled_images = ds.get_labeled_images()
print(f"Labeled Images: {labeled_images}")
unlabeled_images = ds.get_unlabeled_images()
print(f"UnLabeled Images: {unlabeled_images}")
image_id = images[0]
print(f"Image Info: {json.dumps(ds.get_image_info(image_id), indent=2)}")
print(f"Image URI: {ds.get_image_uri(image_id)}")
print(f"Image URI (name): {ds.get_image_uri('TCGA-02-0010-01Z-00-DX4.07de2e55-a8fe-40ee-9e98-bcb78050b9f7')}")
print(f"Labels: {ds.get_labels_by_image_id(image_id)}")
if labeled_images:
label_id = labeled_images[0]
label_tag = "FINAL"
print(f"Label Info: {json.dumps(ds.get_label_info(label_id, label_tag), indent=2)}")
print(f"Label URI: {ds.get_label_uri(label_id, label_tag)}")
print(f"Dataset for Training: \n{json.dumps(ds.datalist(), indent=2)}")
img = ds.get_image(
"TCGA-02-0010-01Z-00-DX4.07de2e55-a8fe-40ee-9e98-bcb78050b9f7",
params={
"location": (6090, 15863),
"size": (1071, 714),
},
)
print(f"Fetched Region: {img.shape}")
if __name__ == "__main__":
main()
```
#### File: monailabel/endpoints/session.py
```python
import logging
import os
import shutil
import tempfile
from typing import List
from fastapi import APIRouter, File, HTTPException, UploadFile
from fastapi.background import BackgroundTasks
from fastapi.responses import FileResponse
from monailabel.interfaces.app import MONAILabelApp
from monailabel.interfaces.utils.app import app_instance
from monailabel.utils.others.generic import get_basename, get_mime_type, remove_file
from monailabel.utils.sessions import Sessions
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/session",
tags=["Session"],
responses={404: {"description": "Not found"}},
)
def get_session(session_id: str, update_ts: bool = False, image: bool = False):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
session_info = sessions.get_session(session_id, update_ts=update_ts)
if session_info:
if image:
return FileResponse(
session_info.image,
media_type=get_mime_type(session_info.image),
filename=get_basename(session_info.image),
)
return session_info.to_json()
raise HTTPException(status_code=404, detail=f"Session ({session_id}) Not Found")
def create_session(
background_tasks: BackgroundTasks,
uncompress: bool = False,
expiry: int = 0,
files: List[UploadFile] = File(...),
):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
logger.info(f"Uncompress: {uncompress}; Expiry: {expiry}")
logger.info(f"Request Files: {files}")
received_dir = tempfile.NamedTemporaryFile().name
os.makedirs(received_dir, exist_ok=True)
input_image = ""
total_files = 0
for f in files:
basename = get_basename(f.filename) if f.filename else tempfile.NamedTemporaryFile().name
input_image = os.path.join(received_dir, basename)
with open(input_image, "wb") as fb:
shutil.copyfileobj(f.file, fb)
total_files += 1
logger.info(f"{total_files} => {f} => {input_image}")
if total_files > 1:
logger.info(f"Input has multiple files; Saving ALL into: {received_dir}")
input_image = received_dir
session_id, session_info = sessions.add_session(input_image, expiry, uncompress)
background_tasks.add_task(remove_file, received_dir)
if total_files == 0:
raise HTTPException(status_code=404, detail="Image(s) Not Found")
logger.info(f"Session ID: {session_id}; Info: {session_info.to_str()}")
return {"session_id": session_id, "session_info": session_info.to_json()}
def remove_session(session_id: str):
instance: MONAILabelApp = app_instance()
sessions: Sessions = instance.sessions()
if sessions is None:
logger.error("Session Feature is Not Enabled")
raise HTTPException(status_code=406, detail="Session Feature is Not Enabled")
session_info = sessions.get_session(session_id)
if session_info:
sessions.remove_session(session_id)
return session_info.to_json()
raise HTTPException(status_code=404, detail="Session Not Found")
@router.get("/{session_id}", summary="Get Session ID")
async def api_get_session(session_id: str, update_ts: bool = False, image: bool = False):
return get_session(session_id, update_ts, image)
@router.put("/", summary="Create new session with Image")
async def api_create_session(
background_tasks: BackgroundTasks,
uncompress: bool = False,
expiry: int = 0,
files: List[UploadFile] = File(...),
):
return create_session(background_tasks, uncompress, expiry, files)
@router.delete("/{session_id}", summary="Delete Session")
async def api_remove_session(session_id: str):
return remove_session(session_id)
```
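The endpoints above can be exercised with plain `requests`; the sketch below assumes the default (empty) API prefix, and the server URL and image path are placeholders.
```python
# Hypothetical round trip against the /session endpoints
# (server URL and image path are placeholders; assumes the default empty API prefix).
import requests

server = "http://127.0.0.1:8000"

with open("/tmp/sample_image.nii.gz", "rb") as f:
    resp = requests.put(f"{server}/session/", params={"expiry": 600}, files=[("files", f)])
resp.raise_for_status()
session_id = resp.json()["session_id"]

print(requests.get(f"{server}/session/{session_id}").json())  # session info
requests.delete(f"{server}/session/{session_id}")  # clean up when done
```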
#### File: tasks/train/handler.py
```python
import datetime
import filecmp
import json
import logging
import os
import shutil
import time
from typing import Any, Dict
import torch
from monai.engines.workflow import Engine, Events
logger = logging.getLogger(__name__)
def prepare_stats(start_ts, trainer, evaluator):
def tensor_to_list(d):
r = dict()
for dk, dv in d.items():
r[dk] = dv.tolist() if torch.is_tensor(dv) else dv
return r
stats: Dict[str, Any] = dict()
stats.update(trainer.get_train_stats())
stats["epoch"] = trainer.state.epoch
stats["start_ts"] = int(start_ts)
if trainer.state.epoch == trainer.state.max_epochs:
stats["total_time"] = str(datetime.timedelta(seconds=int(time.time() - start_ts)))
else:
stats["current_time"] = str(datetime.timedelta(seconds=int(time.time() - start_ts)))
for k, v in {"train": trainer, "eval": evaluator}.items():
if not v:
continue
stats["best_metric"] = v.state.best_metric
stats[k] = {
"metrics": tensor_to_list(v.state.metrics),
"key_metric_name": v.state.key_metric_name,
"best_metric": v.state.best_metric,
"best_metric_epoch": v.state.best_metric_epoch,
}
return stats
class PublishStatsAndModel:
def __init__(self, stats_path, publish_path, key_metric_filename, start_ts, run_id, output_dir, trainer, evaluator):
self._stats_path = stats_path
self._publish_path = publish_path
self._key_metric_filename = key_metric_filename
self.start_ts = start_ts
self.run_id = run_id
self.output_dir = output_dir
self.trainer = trainer
self.evaluator = evaluator
def iteration_completed(self):
filename = datetime.datetime.now().strftime(f"stats_{self.run_id}.json")
filename = os.path.join(self.output_dir, filename)
stats = prepare_stats(self.start_ts, self.trainer, self.evaluator)
with open(filename, "w") as f:
json.dump(stats, f, indent=2)
if self._stats_path:
shutil.copy(filename, self._stats_path)
publish_path = self._publish_path
if publish_path:
final_model = os.path.join(self.output_dir, self._key_metric_filename)
if os.path.exists(final_model):
if not os.path.exists(publish_path) or not filecmp.cmp(publish_path, final_model):
shutil.copy(final_model, publish_path)
logger.info(f"New Model published: {final_model} => {publish_path}")
return stats
def attach(self, engine: Engine) -> None:
if not engine.has_event_handler(self.iteration_completed, Events.EPOCH_COMPLETED):
engine.add_event_handler(Events.EPOCH_COMPLETED, self.iteration_completed)
def __call__(self, engine: Engine) -> None:
self.iteration_completed()
```
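A sketch of wiring `PublishStatsAndModel` into a training run; `trainer` and `evaluator` stand in for already-constructed MONAI engines, and every path below is a placeholder.
```python
# Illustrative wiring of the handler above into an existing training run.
# Assumptions: `trainer` and `evaluator` are already-built MONAI engines
# (e.g. SupervisedTrainer / SupervisedEvaluator); all paths are placeholders.
import time

stats_handler = PublishStatsAndModel(
    stats_path="/workspace/model/train_stats.json",
    publish_path="/workspace/model/model.pt",
    key_metric_filename="checkpoint_key_metric.pt",
    start_ts=time.time(),
    run_id="run_0001",
    output_dir="/workspace/model/run_0001",
    trainer=trainer,
    evaluator=evaluator,
)
stats_handler.attach(trainer)  # writes stats and republishes the best model after every epoch
```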
#### File: lib/configs/nuclick.py
```python
import json
import logging
import os
from distutils.util import strtobool
from typing import Any, Dict, Optional, Union
import lib.infers
import lib.trainers
from monai.networks.nets import BasicUNet
from monailabel.interfaces.config import TaskConfig
from monailabel.interfaces.tasks.infer import InferTask
from monailabel.interfaces.tasks.train import TrainTask
from monailabel.utils.others.generic import download_file
logger = logging.getLogger(__name__)
class NuClick(TaskConfig):
def init(self, name: str, model_dir: str, conf: Dict[str, str], planner: Any, **kwargs):
super().init(name, model_dir, conf, planner, **kwargs)
# Labels
self.labels = ["Nuclei"]
self.label_colors = {"Nuclei": (0, 255, 255)}
# Model Files
self.path = [
os.path.join(self.model_dir, f"pretrained_{name}.pt"), # pretrained
os.path.join(self.model_dir, f"{name}.pt"), # published
]
# Download PreTrained Model
if strtobool(self.conf.get("use_pretrained_model", "true")):
url = f"{self.PRE_TRAINED_PATH}/pathology_nuclick_bunet.pt"
download_file(url, self.path[0])
# Network
self.network = BasicUNet(
spatial_dims=2,
in_channels=5,
out_channels=1,
features=(32, 64, 128, 256, 512, 32),
)
def infer(self) -> Union[InferTask, Dict[str, InferTask]]:
task: InferTask = lib.infers.NuClick(
path=self.path,
network=self.network,
labels=self.labels,
preload=strtobool(self.conf.get("preload", "false")),
roi_size=json.loads(self.conf.get("roi_size", "[512, 512]")),
)
return task
def trainer(self) -> Optional[TrainTask]:
output_dir = os.path.join(self.model_dir, self.name)
task: TrainTask = lib.trainers.NuClick(
model_dir=output_dir,
network=self.network,
load_path=self.path[0],
publish_path=self.path[1],
labels=self.labels,
description="Train Nuclei DeepEdit Model",
train_save_interval=1,
config={
"max_epochs": 10,
"train_batch_size": 64,
"dataset_max_region": (10240, 10240),
"dataset_limit": 0,
"dataset_randomize": True,
},
)
return task
```
#### File: unit/scribbles/test_transforms_infer.py
```python
import os
import unittest
import numpy as np
from monai.transforms import LoadImage
from monai.utils import set_determinism
from parameterized import parameterized
from monailabel.scribbles.infer import HistogramBasedGraphCut
from monailabel.scribbles.transforms import (
AddBackgroundScribblesFromROId,
ApplyGraphCutOptimisationd,
InteractiveSegmentationTransform,
MakeISegUnaryd,
MakeLikelihoodFromScribblesHistogramd,
SoftenProbSoftmax,
WriteLogits,
)
from monailabel.transform.writer import Writer
set_determinism(seed=123)
def generate_synthetic_binary_segmentation(height, width, num_circles=10, r_min=10, r_max=100, random_state=None):
# function based on:
# https://github.com/Project-MONAI/MONAI/blob/dev/monai/data/synthetic.py
if r_min > r_max:
raise ValueError("r_min cannot be greater than r_max")
min_size = min(height, width)
if 2 * r_max > min_size:
raise ValueError("2 * r_max cannot be greater than min side")
rs: np.random.RandomState = np.random.random.__self__ if random_state is None else random_state
mask = np.zeros((height, width), dtype=bool)
for _ in range(num_circles):
x = rs.randint(r_max, width - r_max)
y = rs.randint(r_max, height - r_max)
r = rs.randint(r_min, r_max)
spy, spx = np.ogrid[-x : width - x, -y : height - y]
circle = (spx * spx + spy * spy) <= r * r
mask[circle] = True
return mask
def add_salt_and_pepper_noise(data, p=0.05):
if p <= 0:
return data
original_dtype = data.dtype
random_image_data = np.random.choice([0, 1], p=[p, 1 - p], size=data.shape)
return (data.astype(np.float32) * random_image_data).astype(original_dtype)
def generate_label_with_noise(height, width, label_key="label", noisy_key="noisy", pred_key="pred", num_slices=1):
label = generate_synthetic_binary_segmentation(height, width, num_circles=10, r_min=10, r_max=50)
noisy_invert = ~add_salt_and_pepper_noise(
generate_synthetic_binary_segmentation(height, width, num_circles=10, r_min=10, r_max=50), p=0.7
)
noisy = label & noisy_invert
label = np.expand_dims(label, axis=0).astype(np.float32)
noisy = np.expand_dims(noisy, axis=0).astype(np.float32)
if num_slices >= 1:
if num_slices != 1:
label = np.expand_dims(label, axis=0)
noisy = np.expand_dims(noisy, axis=0)
tmp_list = []
for _ in range(num_slices):
tmp_list.append(label)
label = np.concatenate(tmp_list, axis=1)
tmp_list = []
for _ in range(num_slices):
tmp_list.append(noisy)
noisy = np.concatenate(tmp_list, axis=1)
else:
raise ValueError(f"unrecognised num_slices selected [{num_slices}]")
pred = label
label = np.concatenate([1 - label, label], axis=0)
return {label_key: label, noisy_key: noisy, pred_key: pred}
HEIGHT = 128
WIDTH = 128
NUM_SLICES = 32
# generate 2d noisy data
two_dim_data = generate_label_with_noise(
height=HEIGHT, width=WIDTH, label_key="prob", noisy_key="image", pred_key="target", num_slices=1
)
# generate 3d noisy data
three_dim_data = generate_label_with_noise(
height=HEIGHT, width=WIDTH, label_key="prob", noisy_key="image", pred_key="target", num_slices=NUM_SLICES
)
TEST_CASE_OPTIM_TX = [
# 2D case
(
{"unary": "prob", "pairwise": "image"},
{"prob": two_dim_data["prob"], "image": two_dim_data["image"]},
{"target": two_dim_data["target"]},
(1, HEIGHT, WIDTH),
),
# 3D case
(
{"unary": "prob", "pairwise": "image"},
{"prob": three_dim_data["prob"], "image": three_dim_data["image"]},
{"target": three_dim_data["target"]},
(1, NUM_SLICES, HEIGHT, WIDTH),
),
]
TEST_CASE_ISEG_OPTIM_TX = [
# 2D case
(
{
"image": "image",
"logits": "prob",
"scribbles": "scribbles",
"scribbles_bg_label": 2,
"scribbles_fg_label": 3,
},
{"image": two_dim_data["image"], "prob": two_dim_data["prob"], "scribbles": two_dim_data["prob"][[1], ...] + 2},
{"target": two_dim_data["target"]},
(1, HEIGHT, WIDTH),
),
# 3D case
(
{
"image": "image",
"logits": "prob",
"scribbles": "scribbles",
"scribbles_bg_label": 2,
"scribbles_fg_label": 3,
},
{
"image": three_dim_data["image"],
"prob": three_dim_data["prob"],
"scribbles": three_dim_data["prob"][[1], ...] + 2,
},
{"target": three_dim_data["target"]},
(1, NUM_SLICES, HEIGHT, WIDTH),
),
]
TEST_CASE_MAKE_ISEG_UNARY_TX = [
# 2D case
(
{
"image": "image",
"logits": "prob",
"scribbles": "scribbles",
"scribbles_bg_label": 2,
"scribbles_fg_label": 3,
},
{"image": two_dim_data["image"], "prob": two_dim_data["prob"], "scribbles": two_dim_data["prob"][[1], ...] + 2},
{"target": two_dim_data["prob"]},
(2, HEIGHT, WIDTH),
),
# 3D case
(
{
"image": "image",
"logits": "prob",
"scribbles": "scribbles",
"scribbles_bg_label": 2,
"scribbles_fg_label": 3,
},
{
"image": three_dim_data["image"],
"prob": three_dim_data["prob"],
"scribbles": three_dim_data["prob"][[1], ...] + 2,
},
{"target": three_dim_data["prob"]},
(2, NUM_SLICES, HEIGHT, WIDTH),
),
]
TEST_CASE_MAKE_LIKE_HIST_TX = [
# 2D case
(
{"image": "image", "scribbles": "scribbles", "scribbles_bg_label": 2, "scribbles_fg_label": 3},
{"image": two_dim_data["target"], "scribbles": two_dim_data["prob"][[1], ...] + 2},
{"target": two_dim_data["prob"]},
(2, HEIGHT, WIDTH),
),
# 3D case
(
{"image": "image", "scribbles": "scribbles", "scribbles_bg_label": 2, "scribbles_fg_label": 3},
{"image": three_dim_data["target"], "scribbles": three_dim_data["prob"][[1], ...] + 2},
{"target": three_dim_data["prob"]},
(2, NUM_SLICES, HEIGHT, WIDTH),
),
]
TEST_CASE_ADD_BG_ROI = [
(
{"scribbles": "scribbles", "roi_key": "roi", "scribbles_bg_label": 2, "scribbles_fg_label": 3},
{
"scribbles": np.zeros((1, NUM_SLICES, HEIGHT, WIDTH), dtype=np.float32),
"roi": [
NUM_SLICES // 2 - 4,
NUM_SLICES // 2 + 4,
HEIGHT // 2 - 8,
HEIGHT // 2 + 8,
WIDTH // 2 - 16,
WIDTH // 2 + 16,
],
},
(1, NUM_SLICES, HEIGHT, WIDTH),
),
]
TEST_CASE_HISTOGRAM_GRAPHCUT = [
# 2D case
(
{
"image": np.squeeze(two_dim_data["image"]),
"label": np.squeeze(two_dim_data["prob"][[1], ...] + 2),
"image_meta_dict": {"affine": np.identity(4)},
"label_meta_dict": {"affine": np.identity(4)},
},
(1, HEIGHT, WIDTH),
),
# 3D case
(
{
"image": np.squeeze(three_dim_data["image"]),
"label": np.squeeze(three_dim_data["prob"][[1], ...] + 2),
"image_meta_dict": {"affine": np.identity(5)},
"label_meta_dict": {"affine": np.identity(5)},
},
(NUM_SLICES, HEIGHT, WIDTH),
),
]
class TestScribblesTransforms(unittest.TestCase):
@parameterized.expand(TEST_CASE_ADD_BG_ROI)
def test_add_bg_roi_transform(self, input_param, test_input, expected_shape):
# float32 test
result = AddBackgroundScribblesFromROId(**input_param)(test_input)
input_data = test_input["scribbles"].copy()
mask = input_data.astype(bool)
mask[
:,
test_input["roi"][0] : test_input["roi"][1],
test_input["roi"][2] : test_input["roi"][3],
test_input["roi"][4] : test_input["roi"][5],
] = True
input_data[~mask] = input_param["scribbles_bg_label"]
np.testing.assert_equal(input_data, result["scribbles"])
self.assertTupleEqual(expected_shape, result["scribbles"].shape)
self.assertTupleEqual(test_input["scribbles"].shape, result["scribbles"].shape)
# int32 test
        test_input["scribbles"] = test_input["scribbles"].astype(np.int32)
result = AddBackgroundScribblesFromROId(**input_param)(test_input)
input_data = test_input["scribbles"].copy()
mask = input_data.astype(bool)
mask[
:,
test_input["roi"][0] : test_input["roi"][1],
test_input["roi"][2] : test_input["roi"][3],
test_input["roi"][4] : test_input["roi"][5],
] = True
input_data[~mask] = input_param["scribbles_bg_label"]
np.testing.assert_equal(input_data, result["scribbles"])
self.assertTupleEqual(expected_shape, result["scribbles"].shape)
self.assertTupleEqual(test_input["scribbles"].shape, result["scribbles"].shape)
@parameterized.expand(TEST_CASE_OPTIM_TX)
def test_optimisation_transforms(self, input_param, test_input, output, expected_shape):
input_param.update({"post_proc_label": "pred"})
for current_tx in [ApplyGraphCutOptimisationd]:
result = current_tx(**input_param)(test_input)
# removed assert_equal as this is non-deterministic func
# np.testing.assert_equal(output["target"], result["pred"])
self.assertTupleEqual(expected_shape, result["pred"].shape)
with self.assertRaises(ValueError):
test_input["prob"] = np.random.rand(3, 128, 128, 128)
result = ApplyGraphCutOptimisationd(**input_param)(test_input)
@parameterized.expand(TEST_CASE_MAKE_ISEG_UNARY_TX)
def test_make_iseg_unary_transform(self, input_param, test_input, output, expected_shape):
input_param.update({"unary": "pred"})
result = MakeISegUnaryd(**input_param)(test_input)
# make expected unary output
expected_result = output["target"].copy()
eps = np.finfo(expected_result.dtype).eps
expected_result[expected_result == 0] = eps
expected_result[expected_result == 1] = 1 - eps
# compare
np.testing.assert_equal(expected_result, result["pred"])
self.assertTupleEqual(expected_shape, result["pred"].shape)
with self.assertRaises(ValueError):
test_input["prob"] = np.random.rand(3, 128, 128, 128)
result = MakeISegUnaryd(**input_param)(test_input)
@parameterized.expand(TEST_CASE_MAKE_LIKE_HIST_TX)
def test_make_likelihood_histogram(self, input_param, test_input, output, expected_shape):
input_param.update({"post_proc_label": "pred"})
result = MakeLikelihoodFromScribblesHistogramd(**input_param)(test_input)
# make expected output
expected_result = np.argmax(output["target"].copy(), axis=0)
# compare
np.testing.assert_equal(expected_result, np.argmax(result["pred"], axis=0))
self.assertTupleEqual(expected_shape, result["pred"].shape)
@parameterized.expand(TEST_CASE_ISEG_OPTIM_TX)
def test_writelogits(self, input_param, test_input, output, expected_shape):
test_input.update({"image_path": "./image.nii.gz"})
result = WriteLogits(key="image")(test_input)
self.assertEqual(os.path.exists(result["result"]["image"]), True)
def test_interactive_seg_transforms(self):
class MyInteractiveSeg(InteractiveSegmentationTransform):
def __init__(self, meta_key_postfix):
super().__init__(meta_key_postfix)
def __call__(self, data):
return data
iseg_tx = MyInteractiveSeg(meta_key_postfix="meta_dict")
data = {"image": [0, 1, 2, 3, 4, 5]}
self.assertEqual(iseg_tx._fetch_data(data, "image"), data["image"])
image_np = np.random.rand(2, 128, 128)
outimage_np = iseg_tx._normalise_logits(image_np, axis=0)
self.assertEqual(np.sum(outimage_np, axis=0).mean(), 1.0)
data.update({"image_meta_dict": {"affine": [0, 1, 2, 3, 4, 5]}})
data = iseg_tx._copy_affine(data, "image", "label")
self.assertIn("label_meta_dict", data.keys())
def test_soften_prob_softmax_transforms(self):
soften_tx = SoftenProbSoftmax(logits="logits", meta_key_postfix="meta_dict", prob="prob")
data = {"logits": np.random.rand(2, 128, 128, 128)}
output_data = soften_tx(data)
        # minimum should be close to 0.1
self.assertAlmostEqual(round(output_data["prob"].min(), 1), 0.1)
# maximum should be close to 0.9
self.assertAlmostEqual(round(output_data["prob"].max(), 1), 0.9)
# shape should be same as input
self.assertEqual(output_data["prob"].shape, data["logits"].shape)
class TestScribblesInferers(unittest.TestCase):
@parameterized.expand(TEST_CASE_HISTOGRAM_GRAPHCUT)
def test_histogram_graphcut_inferer(self, test_input, expected_shape):
test_input.update({"image_path": "fakepath.nii"})
# save data to file and update test dictionary
image_file, data = Writer(label="image", nibabel=True)(test_input)
scribbles_file, data = Writer(label="label", nibabel=True)(test_input)
# add paths to file, remove any associated meta_dict
test_input["image"] = image_file
test_input["label"] = scribbles_file
test_input.pop("image_meta_dict", None)
test_input.pop("label_meta_dict", None)
# run scribbles inferer and load results
result_file, _ = HistogramBasedGraphCut()(test_input)
result = LoadImage()(result_file)[0]
# can only check output shape due to non-deterministic results
self.assertTupleEqual(expected_shape, result.shape)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jlvdb/the-wizz",
"score": 3
} |
#### File: the-wizz/the_wizz/core_utils.py
```python
from astropy.cosmology import WMAP5
from astropy.io import fits
import h5py
import numpy as np
def file_checker_loader(input_file_name):
"""Utility function for checking the existence of a file and loading the
file with the proper format. Currently checks for FITS files.
----------------------------------------------------------------------------
Args:
sample_file_name: name of file on disk to load
Returns:
open file object data
"""
try:
file_handle = open(input_file_name)
file_handle.close()
except IOError:
print("IOError: File %s not found. the-wizz is exiting." %
input_file_name)
raise IOError("File not found.")
if input_file_name.endswith('fit') or input_file_name.endswith('fits') or \
input_file_name.endswith('gz') or input_file_name.endswith('cat'):
hdu_list = fits.open(input_file_name)
data = hdu_list[1].data
return data
elif input_file_name.endswith('hdf5') or input_file_name.endswith('dat'):
hdf5_file = h5py.File(input_file_name, 'r')
return hdf5_file
else:
print("File type not currently supported. Try again later. "
"the-wizz is exiting.")
raise IOError
return None
def create_hdf5_file(hdf5_file_name, args):
# TODO:
# Decide if I want to use libver latest or not. Could be more stable
# if we use the "earliest" version. Will have to speed test saving
# and loading of the pairs.
"""Convenience function for creating an HDF5 file with attributes set in
input_flags. Saves the current input flags to the group input_flags for
later reference
----------------------------------------------------------------------------
Args:
hdf5_file_name: string name of the HDF5 file to create
args: argparse ArgumentParser.parse_args object from input_flags
Returns:
open HDF5 file object
"""
hdf5_file = h5py.File(hdf5_file_name, 'w-', libver='latest')
if args is not None:
flag_grp = hdf5_file.create_group('input_flags')
for arg in vars(args):
kwargs = {}
if type(arg) is str:
kwargs["dtype"] = h5py.special_dtype(vlen=str)
if getattr(args, arg) is None:
flag_grp.attrs.create(
arg, 'None', dtype=h5py.special_dtype(vlen=str))
else:
flag_grp.attrs.create(arg, getattr(args, arg), **kwargs)
return hdf5_file
def create_ascii_file(ascii_file_name, args):
"""Convenience function for creating an output ascii file. This method
writes the current state of the input_flags arguments to the header of the
file and returns an open Python file handle object. The method will over
write any file it is given so use with caution.
----------------------------------------------------------------------------
Args:
        ascii_file_name: string name of the file to write to
args: argparse ArgumentParser.parse_args object from input_flags
Returns:
open Python file object
"""
ascii_file = open(ascii_file_name, 'w')
ascii_file.writelines('# input_flags:\n')
for arg in vars(args):
ascii_file.writelines('#\t%s : %s\n' % (arg, getattr(args, arg)))
return ascii_file
```
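For illustration, the helpers above could be combined as follows; the flags and file names are made up, and real runs would pass the namespace produced by the-wizz's own argument parser.
```python
# Hypothetical use of the helpers above (flags and file names are illustrative only).
import argparse

args = argparse.Namespace(reference_sample_file="reference.fits", z_min="0.01", z_max="3.0")

pair_file = create_hdf5_file("output_pairs.hdf5", args)  # stores the flags under /input_flags
pair_file.close()

data = file_checker_loader("output_pairs.hdf5")  # dispatches on the file extension
print(dict(data["input_flags"].attrs))
data.close()
```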
#### File: the-wizz/utility_programs/combine_region_pickles.py
```python
import argparse
import numpy as np
import pickle
"""
This program allows for the combination of different the-wizz, pdf_maker runs
after the fact using the region pickle files output from the code. This can be
useful for combining different recoveries from different spectroscoic pointings
in a way that is internally consistent.
"""
def load_from_pickle(file_name_list):
region_dict = {'n_regions' : 0,
'redshift' : np.array([]),
'n_reference' : np.array([]),
'unknown' : np.array([]),
'rand' : np.array([]),
'area' : np.array([]),
'resolution' : np.array([])}
for file_idx, file_name in enumerate(file_name_list):
        pkl_file = open(file_name, 'rb')
region_density_dict = pickle.load(pkl_file)
pkl_file.close()
region_dict['n_regions'] += region_density_dict['n_regions']
if file_idx == 0:
region_dict['redshift'] = region_density_dict['redshift']
region_dict['n_reference'] = region_density_dict['n_reference']
region_dict['unknown'] = region_density_dict['unknown']
region_dict['rand'] = region_density_dict['rand']
region_dict['area'] = region_density_dict['area']
region_dict['resolution'] = region_density_dict['resolution']
else:
region_dict['redshift'] = np.concatenate(
(region_dict['redshift'], region_density_dict['redshift']),
axis = 1)
region_dict['n_reference'] = np.concatenate(
(region_dict['n_reference'], region_density_dict['n_reference']),
axis = 1)
region_dict['unknown'] = np.concatenate(
(region_dict['unknown'], region_density_dict['unknown']),
axis = 1)
region_dict['rand'] = np.concatenate(
(region_dict['rand'], region_density_dict['rand']),
axis = 1)
region_dict['area'] = np.concatenate(
(region_dict['area'], region_density_dict['area']),
axis = 1)
region_dict['resolution'] = np.concatenate(
(region_dict['resolution'], region_density_dict['resolution']),
axis = 1)
return region_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_region_pickle_files', required=True,
type=str, help='Comma separated list of pickle '
'files containing the over-densities output by '
'pdf_maker.py. These files should contain the same '
'redshift binning.')
parser.add_argument('--input_special_region_pickle_files', default=None,
type=str, help='Comma separated list of pickle '
'files containing the over-densities output by '
'pdf_maker.py. These files are distinct from above '
'as they contain specific regions that one always '
'wants to include. This could be for instance a region '
'of higher redshift but smaller area that you want to '
'include in the recovery but cannot combine in the '
'maker.')
parser.add_argument('--output_pdf_file', required=True,
type=str, help='Name of ascii file to write '
'resultant pdf to.')
parser.add_argument('--n_bootstrap', default=1000, type=int,
help='Argument specifying the number of bootstrap '
                        'resamplings of the recovery to compute errors.')
parser.add_argument('--output_bootstraps_file', default=None, type=str,
help='This is an optional argument specifying an '
'ascii file to write the individual bootstrap pdfs to.')
parser.add_argument('--bootstrap_samples', default=None, type=str,
help='This is an optional argument specifying an '
'ascii file containing specified bootstrap samplings '
                        'to run. These should be row-wise specifications of '
'regions from the input pair hdf5 file. Overrides '
'the number set in n_bootstrap.')
args = parser.parse_args()
### Load the input pickles.
file_name_list = args.input_region_pickle_files.split(',')
region_dict = load_from_pickle(file_name_list)
    ### If we want to treat a set of data specially in the bootstrapping process,
### we load it here.
if args.input_special_region_pickle_files is not None:
file_name_list = args.input_special_region_pickle_files.split(',')
region_special_list = [load_from_pickle([file_name])
for file_name in file_name_list]
### Create the array of indices for the regions we will bootstrap over.
if args.bootstrap_samples is None:
bootstrap_samples = np.random.randint(region_dict['n_regions'],
size=(args.n_bootstrap,
region_dict['n_regions']))
### Create the bootstraps for the "special" sample and concatenate them
### to the end of the bootstrap samples.
if args.input_special_region_pickle_files is not None:
for region_special_dict in region_special_list:
bootstrap_samples = np.concatenate(
(bootstrap_samples,
np.random.randint(
region_special_dict['n_regions'],
size=(args.n_bootstrap,
region_special_dict['n_regions']))),
axis=1)
### If requested, the code can load a set of fixed bootstraps from disc.
    ### If using a "special" sample make sure the bootstraps are formatted as
### above with the region ids appended to the end of the "normal" regions.
else:
bootstrap_samples = np.loadtxt(args.bootstrap_samples,
dtype=np.int_)
args.n_bootstrap = bootstrap_samples.shape[0]
### Create empty array for storage of the bootstraps.
density_bootstrap_array = np.empty((region_dict['redshift'].shape[0],
args.n_bootstrap))
### Computing mean redshift per bin.
redshift_array = np.sum(region_dict['redshift'], axis=1)
n_reference_array = np.sum(region_dict['n_reference'], axis=1)
if args.input_special_region_pickle_files is not None:
for region_special_dict in region_special_list:
redshift_array += np.sum(region_special_dict['redshift'], axis=1)
n_reference_array += np.sum(region_special_dict['n_reference'], axis=1)
redshift_array /= n_reference_array
### Start the actual bootstrap process.
for boot_idx, boot_reg_ids in enumerate(bootstrap_samples):
tmp_boot_reg_ids = boot_reg_ids[:region_dict['n_regions']]
boot_unknown_array = np.sum(region_dict['unknown'][:,tmp_boot_reg_ids],
axis=1)
boot_rand_array = np.sum(region_dict['rand'][:,tmp_boot_reg_ids],
axis=1)
### Compute the bootstrap average for the "special" samples.
if args.input_special_region_pickle_files is not None:
n_special_region = 0
for region_special_dict in region_special_list:
tmp_boot_reg_ids = boot_reg_ids[
region_dict['n_regions'] + n_special_region:
region_dict['n_regions'] + n_special_region +
region_special_dict['n_regions']]
n_special_region += region_special_dict['n_regions']
boot_unknown_array += np.sum(
region_special_dict['unknown'][:,tmp_boot_reg_ids],
axis=1)
boot_rand_array += np.sum(
region_special_dict['rand'][:,tmp_boot_reg_ids], axis=1)
### Compute the over density for the current bootstrap.
density_bootstrap_array[:, boot_idx] = (boot_unknown_array /
boot_rand_array - 1.0)
### Compute the mean and standard deviation using nan safe means and
### variances.
density_array = np.nanmean(density_bootstrap_array, axis=1)
density_err_array = np.nanstd(density_bootstrap_array, axis=1)
### Create the output ascii header we will use to store the information on
### this run.
output_header = 'input_flags:\n'
for arg in vars(args):
output_header += '\t%s : %s\n' % (arg, getattr(args, arg))
### If requested output the individual bootstraps to a file.
if args.output_bootstraps_file is not None:
np.savetxt(args.output_bootstraps_file, density_bootstrap_array,
header=output_header)
### Add the column names to the header.
output_header += ("type1 = z_mean\n"
"type2 = phi(z)\n"
"type3 = phi_err(z)\n")
### Write the output.
np.savetxt(args.output_pdf_file,
np.array([redshift_array, density_array,
density_err_array]).transpose(),
header=output_header)
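    ### Illustrative example (not part of the original source): given the flags
    ### defined above, the script could be invoked roughly like
    ###   python <this_script>.py \
    ###       --input_region_pickle_files region1.pkl,region2.pkl \
    ###       --output_pdf_file recovery_pdf.ascii \
    ###       --n_bootstrap 1000
    ### where the script name and file names are placeholders.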
``` |
{
"source": "jlvdb/yet_another_wizz",
"score": 3
} |
#### File: yet_another_wizz/yaw_tools/statistics.py
```python
import numpy as np
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d
def mean(z, n):
p = n / np.trapz(n, x=z)
return np.trapz(z * p, x=z)
def median(z, n):
integrated = cumtrapz(n, x=z, initial=0.0)
P = interp1d(integrated / integrated[-1], z)
return P(0.5)
def KullbackLeibler(P, Q, x):
assert(len(P) == len(x))
assert(len(Q) == len(x))
assert(np.all(P >= 0.0) & np.all(Q >= 0.0))
D_KL = 0.0
for p, q in zip(P / np.trapz(P, x), Q / np.trapz(Q, x)):
if q > 0.0:
D_KL += p * np.log(p / q)
# else D_KL_i = 0.0
return D_KL
def KolmogorovSmirnov(P, Q, x):
assert(len(P) == len(x))
assert(len(Q) == len(x))
assert(np.all(P >= 0.0) & np.all(Q >= 0.0))
P_cdf = cumtrapz(P, x, initial=0.0)
Q_cdf = cumtrapz(Q, x, initial=0.0)
D_KS = np.max(P_cdf / P_cdf[-1] - Q_cdf / Q_cdf[-1])
return D_KS
def ChiSquare(P, Q, x):
chisq = np.sum((P - Q)**2)
return chisq
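# Illustrative usage sketch (not part of the original source): compare two
# normalised Gaussian redshift distributions on a common grid.
if __name__ == "__main__":
    z = np.linspace(0.0, 2.0, 200)
    n1 = np.exp(-0.5 * ((z - 0.8) / 0.1)**2)
    n2 = np.exp(-0.5 * ((z - 0.9) / 0.1)**2)
    print("mean:", mean(z, n1), "median:", median(z, n1))
    print("D_KL:", KullbackLeibler(n1, n2, z))
    print("D_KS:", KolmogorovSmirnov(n1, n2, z))
    print("chi^2:", ChiSquare(n1, n2, z))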
``` |
{
"source": "jlvoiseux/apm-integration-testing",
"score": 2
} |
#### File: tests/unit/test_control.py
```python
import toxiproxy
from pytest import mark
from unittest import mock
from flask import url_for
import dyno.app.api.control as ctl
@mock.patch('toxiproxy.server.Toxiproxy.update_api_consumer')
def test_fetch_proxy_update_consumer(consumer_patch, toxi_default_environment):
"""
GIVEN an environment with TOXI_HOST or TOXI_PORT set
WHEN the _fetch_proxy() helper function is called
THEN the proxy api consumer is updated
"""
ctl._fetch_proxy()
consumer_patch.assert_called_once()
@mark.parametrize('toxi_env', ['TOXI_HOST', 'TOXI_PORT'])
@mock.patch('toxiproxy.server.Toxiproxy.update_api_consumer')
def test_fetch_proxy_no_update_consumer(consumer_patch, toxi_default_environment, toxi_env, monkeypatch):
"""
GIVEN an environment without both TOXI_HOST and TOXI_PORT set
WHEN the _fetch_proxy() helper function is called
THEN the proxy api consumer is *not* updated
"""
monkeypatch.delenv(toxi_env)
ctl._fetch_proxy()
consumer_patch.assert_not_called()
@mark.parametrize('toxi_code', ctl.toxic_map.keys())
def test_decode_toxi(toxi_code):
"""
    GIVEN a shortened toxic code
    WHEN the code is given to the _decode_toxic() function
    THEN it receives back a dictionary with the code
"""
assert ctl._decode_toxic(toxi_code)
@mark.parametrize('toxi_cfg', ctl.toxic_map.values())
def test_encode_toxi(toxi_cfg):
"""
GIVEN a toxi configuration
WHEN that configuration is passed to the _encode_toxic() function
THEN the code for that configuration is returned
"""
assert ctl._encode_toxic(toxi_cfg['type'], toxi_cfg['attr'])
def test_get_app(fetch_proxy_mock, client):
"""
GIVEN an HTTP client
WHEN that client requests the /app endpoint
THEN the client receives a dictionary containing the app proxy config
"""
with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
res = client.get(url_for('api.fetch_app'), query_string={'name': 'fake_proxy'})
assert res.json == {
'enabled': True,
'listen': 8080,
'name': 'opbeans-proxy',
'toxics': {},
'upstream': 'fake_upstream'
}
def test_get_apps(fetch_proxy_mock, client):
"""
GIVEN an HTTP client
WHEN that client requests the /apps endpoint
THEN the client receives a dictionary containing a list of configured apps
"""
with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
res = client.get(url_for('api.fetch_all_apps'), query_string={'name': 'fake_proxy'})
assert res.json == {'proxies': ['fake_proxy']}
def test_get_apps_full(fetch_proxy_mock, client):
"""
GIVEN an HTTP client
WHEN that client requests the /apps endpoint with the `full` argument supplied
THEN the client receives a dictionary back with all apps and their configurations
"""
with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
res = client.get(
url_for('api.fetch_all_apps'),
query_string={'name': 'fake_proxy', 'full': True}
)
assert res.json == {'proxies': [{'listen': 8080, 'name': 'opbeans-proxy'}]}
def test_enable(client):
"""
GIVEN an HTTP client
WHEN that client requests the /enable endpoint to enable a given proxy
THEN the toxiproxy API is instructed to enable the proxy
"""
t_ = mock.Mock(spec=toxiproxy.Toxiproxy, name='toxi_mock')
enable_mock = mock.Mock(spec=toxiproxy.proxy.Proxy, name='enable_mock')
t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=enable_mock), 'get_proxy')
with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
with mock.patch('toxiproxy.proxy.Proxy', enable_mock):
client.get(url_for('api.enable_proxy'))
enable_mock.enable.assert_called()
def test_disable(client):
"""
GIVEN an HTTP client
WHEN that client requests the /disable endpoint to enable a given proxy
THEN the toxiproxy API is instructed to disable the proxy
"""
t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
disable_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=disable_mock), 'get_proxy')
with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
with mock.patch('toxiproxy.proxy.Proxy', disable_mock):
client.get(url_for('api.disable_proxy'))
disable_mock.disable.assert_called()
@mark.parametrize('toxi_code', ctl.toxic_map.keys())
def test_slide(toxi_code, client):
"""
GIVEN an HTTP client
WHEN that client hits the /slider endpoint to adjust values for a proxy
THEN the proxy values are adjusted
"""
t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
proxy_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=proxy_mock), 'get_proxy')
with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
with mock.patch('toxiproxy.proxy.Proxy', proxy_mock):
client.post(url_for('api.slide'), json={'tox_code': toxi_code, 'val': 100})
proxy_mock.add_toxic.assert_called()
@mark.parametrize('toxi_code', ctl.toxic_map.keys())
def test_slide_exception_side_effect(toxi_code, client):
"""
GIVEN an HTTP client
    WHEN that client hits the /slider endpoint and adding the toxic raises an exception
    THEN the failing toxic is cleaned up via destroy_toxic
"""
t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
proxy_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
proxy_mock.toxics = mock.Mock(return_value=['fake_proxy_1', 'fake_proxy_2'])
proxy_mock.add_toxic = mock.Mock(side_effect=Exception, name='sider')
t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=proxy_mock), 'get_proxy')
with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
with mock.patch('toxiproxy.proxy.Proxy', proxy_mock):
client.post(url_for('api.slide'), json={'tox_code': toxi_code, 'val': 100})
proxy_mock.add_toxic.assert_called()
proxy_mock.destroy_toxic.assert_called()
@mark.parametrize('val', range(1,101, 10))
@mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]}))
def test_normalize(val):
"""
GIVEN values between 1-100
WHEN the value is sent to be normalized
THEN the correct normalized value is returned
"""
got = ctl._normalize_value('Fr', val)
want = (101 - val) / 10
assert got == want
@mark.parametrize('val', range(1,10))
@mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]}))
def test_denormalize(val):
"""
    GIVEN values between 1-9
    WHEN the value is sent to be denormalized
    THEN the correct denormalized value is returned
"""
got = ctl._denormalize_value('Fr', val)
want = 100 - (val * 10)
assert got == want
```
#### File: python/flask/app.py
```python
import elasticapm
from flask import Flask
from elasticapm.contrib.flask import ElasticAPM
import logging
import os
app = Flask(__name__)
app.debug = False
app.config['ELASTIC_APM'] = {
'DEBUG': True, # this is not a log level, but rather how agent interacts with Flask's debug mode
'SERVER_URL': os.environ['APM_SERVER_URL'],
'SERVICE_NAME': os.environ['FLASK_SERVICE_NAME'],
'TRANSACTION_SEND_FREQ': 1, # 1.x
'FLUSH_INTERVAL': 1, # 2.x
'MAX_EVENT_QUEUE_LENGTH': 1, # 1.x
'MAX_QUEUE_SIZE': 1, # 2.x
'API_REQUEST_TIME': '50ms', # 4.x
'SECRET_TOKEN': os.getenv('APM_SERVER_SECRET_TOKEN', '<PASSWORD>'),
'TRANSACTIONS_IGNORE_PATTERNS': ['.*healthcheck'],
'LOG_LEVEL': 'warning',
}
apm = ElasticAPM(app, logging=False)
@app.route('/')
def index():
return 'OK'
@app.route('/healthcheck')
def healthcheck():
return 'OK'
@app.route('/foo')
def foo_route():
return foo()
@elasticapm.capture_span()
def foo():
return "foo"
@app.route('/bar')
def bar_route():
return bar()
@elasticapm.capture_span()
def bar():
extra()
return "bar"
@elasticapm.capture_span()
def extra():
return "extra"
@app.route('/oof')
def oof_route():
raise Exception('oof')
# Create a logging handler and attach it before starting the server;
# app.run() blocks, so anything placed after it would never execute.
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=int(os.environ['FLASK_PORT']))
```
#### File: tests/agent/test_ruby.py
```python
import pytest
from tests import utils
from tests.agent.concurrent_requests import Concurrent
@pytest.mark.version
@pytest.mark.rails
def test_req_rails(rails):
utils.check_agent_transaction(
rails.foo, rails.apm_server.elasticsearch)
@pytest.mark.version
@pytest.mark.rails
def test_rails_error(rails):
utils.check_agent_error(
rails.oof, rails.apm_server.elasticsearch)
@pytest.mark.version
@pytest.mark.rails
def test_conc_req_rails(es, apm_server, rails):
foo = Concurrent.Endpoint(rails.foo.url,
rails.app_name,
["ApplicationController#foo"],
"ApplicationController#foo",
events_no=1000)
Concurrent(es, [foo], iters=1).run()
@pytest.mark.version
@pytest.mark.rails
def test_conc_req_rails_foobar(es, apm_server, rails):
foo = Concurrent.Endpoint(rails.foo.url,
rails.app_name,
["ApplicationController#foo"],
"ApplicationController#foo")
bar = Concurrent.Endpoint(rails.bar.url,
rails.app_name,
["ApplicationController#bar", "app.extra"],
"ApplicationController#bar",
events_no=820)
Concurrent(es, [foo, bar], iters=1).run()
``` |
{
"source": "jlvoltan/Tchau-Papeleta-de-Faltas",
"score": 3
} |
#### File: Tchau-Papeleta-de-Faltas/Modulo_Servidor/criador_de_inteligencia.py
```python
import cv2
import os
import numpy as np
codigo_turma=0 # used when validating the folder name
# Recognition based solely on LBPH
lbph = cv2.face.LBPHFaceRecognizer_create(radius=1, neighbors=8, grid_x=12, grid_y=12,threshold=50) #1.7976931348623157e+100
def getFotoMatricula(pasta): # the parameter is the class code, which matches the folder name
lista_face = []
lista_matricula = []
    caminhos = [os.path.join(pasta, foto) for foto in os.listdir(pasta)]
for caminhoFoto in caminhos:
imagem_face = cv2.cvtColor(cv2.imread(caminhoFoto), cv2.COLOR_BGR2GRAY)
matricula = int(caminhoFoto.split('.')[1])
lista_face.append(imagem_face)
lista_matricula.append(matricula)
return np.array(lista_matricula), lista_face
# Ask the user for the name of the class whose faces should be learned
while(codigo_turma == 0):
turma = input("Digite o nome da turma que deseja realizar o aprendizado: ")
    nome_da_turma_cinza = turma + 'cinza' # the grayscale photos live in the folder named <turma> + 'cinza'
if(os.path.isdir(nome_da_turma_cinza)):
codigo_turma = 1
else:
print("O nome da turma informado deve ser o mesmo nome da pasta, onde se encontram as fotos a serem convertidas!\n")
print("Por exemplo, digite turma1, caso as fotos convertidas estejam na pasta turma1cinza \n")
matriculas, faces = getFotoMatricula(nome_da_turma_cinza)
print("Imagens e matrículas da " + nome_da_turma_cinza + " carregadas com sucesso")
print("Aprendendo...")
lbph.train(faces, matriculas)
lbph.write('aprendiLBPH' + nome_da_turma_cinza + '.yml')
print("treinamento da turma " + turma + " concluído.")
``` |
{
"source": "jlvvlj/souji",
"score": 3
} |
#### File: jlvvlj/souji/souji.py
```python
def process_tweet(tweet):
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
import string
import re
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
# remove stock market tickers like $GE
tweet = re.sub(r'\$\w*', '', tweet)
# remove old style retweet text "RT"
tweet = re.sub(r'^RT[\s]+', '', tweet)
# remove hyperlinks
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
# remove hashtags
# only removing the hash # sign from the word
tweet = re.sub(r'#', '', tweet)
# tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
reduce_len=True)
tweet_tokens = tokenizer.tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
# tweets_clean.append(word)
stem_word = stemmer.stem(word) # stemming word
tweets_clean.append(stem_word)
return tweets_clean
def build_frequency_list(tweets, labels):
import numpy
labels = numpy.squeeze(labels).tolist()
freqs = {}
for each_label, each_tweet in zip(labels, tweets):
for each_word in process_tweet(each_tweet):
labeled_word = (each_word, each_label)
if labeled_word in freqs:
freqs[labeled_word] += 1
else:
freqs[labeled_word] = 1
return freqs
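# Illustrative usage sketch (not part of the original source); it assumes the
# NLTK 'stopwords' corpus has been downloaded, e.g. via nltk.download('stopwords').
if __name__ == '__main__':
    sample_tweets = ["I love this movie! https://example.com #happy",
                     "RT @someone: worst service ever..."]
    sample_labels = [1, 0]
    print(process_tweet(sample_tweets[0]))  # cleaned, stemmed tokens
    print(build_frequency_list(sample_tweets, sample_labels))  # (token, label) counts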
``` |
{
"source": "jlward/cribbage",
"score": 3
} |
#### File: jlward/cribbage/main.py
```python
import argparse
from collections import defaultdict
from game import Game
from player import Dumbass, LessDumbass
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--num-games',
default=1,
type=int,
)
args = parser.parse_args()
print('Running game')
winners = defaultdict(int)
for _ in range(args.num_games):
jason = LessDumbass(name='Jason')
zack = Dumbass(name='Zack')
game = Game(players=[jason, zack])
player = game.run()
winners[player.name] += 1
print(winners)
if __name__ == '__main__':
main()
```
#### File: jlward/cribbage/round.py
```python
import random
from deck import Deck
from score import ScoreHand, ScorePegging
class Round:
def __init__(self, players):
self.players = players
self.deck = Deck()
self.crib = []
self.cards_played = []
def discard_to_crib(self, player):
self.crib.extend(player.discard_to_crib())
@property
def players_have_cards(self):
for player in self.players:
if player.hand_count:
return True
return False
def get_cut_card(self):
self.cut_card = random.choice(self.deck.cards)
if self.cut_card.number == 11:
self.players[0].add_points(2)
self.deck.cards.remove(self.cut_card)
def play_cards(self):
while self.players_have_cards:
count = 0
while count <= 31:
card_played = False
for player in self.players:
card = player.play_card(current_count=count)
if card is None:
continue
count += card.value
card_played = True
print(' ', player.name, card, count)
self.cards_played.append(card)
self.check_for_pegging(player)
if not card_played:
break
def check_for_pegging(self, player):
score = ScorePegging(self.cards_played)
player.add_points(score.score())
def score_hands(self):
for player in self.players:
player.score_hand(self.cut_card)
def count_crib(self):
score = ScoreHand(
cards=self.crib,
cut_card=self.cut_card,
is_crib=True,
)
self.players[0].add_points(score.score_hand())
def start(self):
print('Starting round')
print(' Dealing cards')
for player in self.players:
self.deck.deal_to_player(player)
for player in self.players:
self.discard_to_crib(player)
print(' ', self.players)
self.get_cut_card()
self.play_cards()
self.score_hands()
self.count_crib()
```
#### File: jlward/cribbage/score.py
```python
import itertools
from collections import Counter
from more_itertools import consecutive_groups
class ScoreHand:
def __init__(self, cards, cut_card, is_crib=False):
self.cards = cards
self.cut_card = cut_card
self.is_crib = is_crib
def check_for_straight(self):
cards = self.cards + [self.cut_card]
numbers = set(card.number for card in cards)
numbers = sorted(list(numbers))
biggest_run = max(
[list(group) for group in consecutive_groups(numbers)],
key=len,
)
len_biggest_run = len(biggest_run)
if len_biggest_run < 3:
return 0
counts = Counter(card.number for card in cards)
pair_counts = 0
for number, count in counts.items():
if number not in biggest_run:
continue
if count < 2:
continue
pair_counts += count
if pair_counts == 0:
return len_biggest_run
return len_biggest_run * pair_counts
def check_for_pairs(self):
cards = self.cards + [self.cut_card]
points = 0
counts = Counter(card.number for card in cards)
for value in counts.values():
points += value * (value - 1)
return points
def check_for_15s(self):
cards = self.cards + [self.cut_card]
fifteens = [
seq
for i in range(1, len(cards) + 1)
for seq in itertools.combinations(cards, i)
if sum(card.value for card in seq) == 15
]
return len(fifteens) * 2
def check_for_flush(self):
if self.is_crib:
suits = set([card.suit for card in self.cards + [self.cut_card]])
if len(suits) == 1:
return 5
return 0
suits = [card.suit for card in self.cards]
points = 0
if len(set(suits)) != 1:
return points
points = 4
if self.cut_card.suit == self.cards[0].suit:
points += 1
return points
def check_for_nobs(self):
jacks = [card for card in self.cards if card.number == 11]
for jack in jacks:
if jack.suit == self.cut_card.suit:
return 1
return 0
def score_hand(self):
points = 0
points_from_run = self.check_for_straight()
points += points_from_run
points_from_pairs = self.check_for_pairs()
points += points_from_pairs
points_from_15s = self.check_for_15s()
points += points_from_15s
points_from_flush = self.check_for_flush()
points += points_from_flush
points_from_nobs = self.check_for_nobs()
points += points_from_nobs
return points
class ScorePegging:
def __init__(self, cards_played):
self.cards_played = cards_played
def check_for_magic_numbers(self):
count = sum(card.value for card in self.cards_played)
if count == 15:
return True
if count == 31:
return True
return False
def check_for_pair_points(self):
last_card = self.cards_played[-1]
num_pairs = 1
for card in self.cards_played[-2::-1]:
if card == last_card:
num_pairs += 1
else:
break
return num_pairs * (num_pairs - 1)
def _check_for_straight(self, numbers):
current = numbers[0]
for number in numbers[1:]:
if current + 1 != number:
return False
current = number
return True
def check_for_straight_points(self):
if len(self.cards_played) < 3:
return 0
last_cards = []
longest_straight = 0
for card in self.cards_played[::-1]:
last_cards.append(card.number)
if len(last_cards) < 3:
continue
last_cards.sort()
if not self._check_for_straight(last_cards):
continue
longest_straight = len(last_cards)
return longest_straight
def score(self):
points = 0
if self.check_for_magic_numbers():
points += 2
pair_points = self.check_for_pair_points()
if pair_points:
points += pair_points
straight_points = self.check_for_straight_points()
if straight_points:
points += straight_points
return points
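if __name__ == '__main__':
    # Illustrative check (not part of the original source): score a 7-8-9-Jack
    # hand against a 6 cut card. Card is assumed to take (number, suit) keyword
    # arguments, as in the unit tests.
    from card import Card
    hand = [Card(number=7, suit='a'), Card(number=8, suit='b'),
            Card(number=9, suit='c'), Card(number=11, suit='d')]
    cut = Card(number=6, suit='a')
    print(ScoreHand(hand, cut).score_hand())  # prints the total hand score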
```
#### File: cribbage/tests/test_score.py
```python
from unittest import TestCase
from card import Card
from score import ScoreHand, ScorePegging
class ScoreHandTestCase(TestCase):
def test_flush_not_crib(self):
cards = [
Card(number=1, suit='a'),
Card(number=2, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.check_for_flush(), 5)
cut_card.suit = 'b'
self.assertEqual(score.check_for_flush(), 4)
cards[0].suit = 'b'
cut_card.suit = 'a'
self.assertEqual(score.check_for_flush(), 0)
def test_flush_crib(self):
cards = [
Card(number=1, suit='a'),
Card(number=2, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card, is_crib=True)
self.assertEqual(score.check_for_flush(), 5)
cut_card.suit = 'b'
self.assertEqual(score.check_for_flush(), 0)
cards[0].suit = 'b'
cut_card.suit = 'a'
self.assertEqual(score.check_for_flush(), 0)
def test_check_for_straight(self):
cards = [
Card(number=1, suit='a'),
Card(number=2, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.check_for_straight(), 5)
cards[2].number = 9
self.assertEqual(score.check_for_straight(), 0)
cards[2].number = 3
cards[3].number = 5
self.assertEqual(score.check_for_straight(), 3)
cards[3].number = 3
self.assertEqual(score.check_for_straight(), 6)
def test_check_for_pairs(self):
cards = [
Card(number=1, suit='a'),
Card(number=2, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.check_for_pairs(), 0)
cut_card.number = 1
self.assertEqual(score.check_for_pairs(), 2)
cards[1].number = 1
self.assertEqual(score.check_for_pairs(), 6)
cards[3].number = 3
self.assertEqual(score.check_for_pairs(), 8)
def test_check_for_15s(self):
cards = [
Card(number=1, suit='a'),
Card(number=1, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.check_for_15s(), 0)
cards[1].number = 2
self.assertEqual(score.check_for_15s(), 2)
for card in cards:
card.number = 5
cut_card.number = 12
self.assertEqual(score.check_for_15s(), 16)
def test_check_for_nobs(self):
cards = [
Card(number=1, suit='a'),
Card(number=1, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.check_for_nobs(), 0)
cards[0].number = 11
self.assertEqual(score.check_for_nobs(), 1)
cut_card.suit = 'b'
self.assertEqual(score.check_for_nobs(), 0)
def test_score_hand(self):
cards = [
Card(number=1, suit='a'),
Card(number=1, suit='a'),
Card(number=3, suit='a'),
Card(number=4, suit='a'),
]
cut_card = Card(
number=5,
suit='a',
)
score = ScoreHand(cards, cut_card)
self.assertEqual(score.score_hand(), 10)
class ScorePeggingTestCase(TestCase):
def assert_round(self, cards):
stack = []
for number, expected_points in cards:
stack.append(Card(number=number, suit='a'))
score = ScorePegging(stack)
self.assertEqual(score.score(), expected_points)
def test_magic_numbers_15(self):
cards = [
Card(number=10, suit='a'),
Card(number=5, suit='a'),
]
score = ScorePegging(cards)
self.assertEqual(score.check_for_magic_numbers(), True)
cards[0].number = 12
self.assertEqual(score.check_for_magic_numbers(), True)
cards[0].number = 4
self.assertEqual(score.check_for_magic_numbers(), False)
def test_magic_numbers_31(self):
cards = [
Card(number=10, suit='a'),
Card(number=11, suit='a'),
Card(number=12, suit='a'),
Card(number=1, suit='a'),
]
score = ScorePegging(cards)
self.assertEqual(score.check_for_magic_numbers(), True)
cards[0].number = 8
self.assertEqual(score.check_for_magic_numbers(), False)
def test_check_for_pair_points(self):
cards = [
Card(number=10, suit='a'),
Card(number=11, suit='a'),
Card(number=12, suit='a'),
Card(number=1, suit='a'),
]
score = ScorePegging(cards)
self.assertEqual(score.check_for_pair_points(), 0)
cards[3].number = 12
self.assertEqual(score.check_for_pair_points(), 2)
cards[1].number = 12
self.assertEqual(score.check_for_pair_points(), 6)
cards[0].number = 12
self.assertEqual(score.check_for_pair_points(), 12)
def test_check_for_straight_random_outside(self):
cards = [
Card(number=10, suit='a'),
Card(number=12, suit='a'),
]
score = ScorePegging(cards)
self.assertEqual(score.check_for_straight_points(), 0)
cards.append(
Card(number=11, suit='a'),
)
self.assertEqual(score.check_for_straight_points(), 3)
cards.append(
Card(number=9, suit='a'),
)
self.assertEqual(score.check_for_straight_points(), 4)
cards[1].number = 2
self.assertEqual(score.check_for_straight_points(), 0)
def test_check_for_straight_inside_with_other_straights(self):
cards = [
Card(number=1, suit='a'),
Card(number=2, suit='a'),
Card(number=3, suit='a'),
Card(number=5, suit='a'),
Card(number=6, suit='a'),
Card(number=7, suit='a'),
]
score = ScorePegging(cards)
self.assertEqual(score.check_for_straight_points(), 3)
cards.append(
Card(number=4, suit='a'),
)
self.assertEqual(score.check_for_straight_points(), 7)
def test_score_magic_numbers(self):
cards = [
(10, 0),
(5, 2),
(10, 0),
(6, 2),
]
self.assert_round(cards)
def test_score_pairs(self):
cards = [
(10, 0),
(10, 2),
(10, 6),
]
self.assert_round(cards)
    def test_score_straights(self):
cards = [
(1, 0),
(2, 0),
(3, 3),
(5, 0),
(6, 0),
(4, 6),
]
self.assert_round(cards)
def test_score_fifteen_with_pair(self):
cards = [
(5, 0),
(5, 2),
(5, 8),
]
self.assert_round(cards)
def test_score_fifteen_with_straight(self):
cards = [
(4, 0),
(5, 0),
(6, 5),
]
self.assert_round(cards)
``` |
{
"source": "Jl-wei/feedback-classification",
"score": 3
} |
#### File: Jl-wei/feedback-classification/trainer.py
```python
import random
import numpy as np
import time
import torch
from utilities import format_time, get_device, flat_accuracy
import logging
device = get_device()
def train_model(
model,
train_dataloader,
validation_dataloader,
optimizer,
scheduler,
seed=42,
epochs=5,
):
"""
This training code is based on the `run_glue.py` script here:
https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
"""
# Set the seed value all over the place to make this reproducible.
seed_val = seed
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
model = model.to(device)
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):
# Perform one full pass over the training set.
logging.info("")
logging.info("======== Epoch {:} / {:} ========".format(epoch_i + 1, epochs))
logging.info("Training...")
print("")
print("======== Epoch {:} / {:} ========".format(epoch_i + 1, epochs))
print("Training...")
# Measure how long the training epoch takes.
t0 = time.time()
# Reset the total loss for this epoch.
total_train_loss = 0
        # Put the model into training mode. Don't be misled--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
            # Progress update every 5 batches.
if step % 5 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Report progress.
logging.info(
" Batch {:>5,} of {:>5,}. Elapsed: {:}.".format(
step, len(train_dataloader), elapsed
)
)
print(
" Batch {:>5,} of {:>5,}. Elapsed: {:}.".format(
step, len(train_dataloader), elapsed
)
)
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Always clear any previously calculated gradients before performing a
# backward pass. PyTorch doesn't do this automatically because
# accumulating the gradients is "convenient while training RNNs".
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
# Perform a forward pass (evaluate the model on this training batch).
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# It returns different numbers of parameters depending on what arguments
            # are given and what flags are set. For our usage here, it returns
# the loss (because we provided labels) and the "logits"--the model
# outputs prior to activation.
output = model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_train_loss += output.loss.item()
# Perform a backward pass to calculate the gradients.
output.loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = format_time(time.time() - t0)
logging.info("")
logging.info(" Average training loss: {0:.2f}".format(avg_train_loss))
        logging.info("    Training epoch took: {:}".format(training_time))
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
        print("    Training epoch took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
logging.info("")
logging.info("Running Validation...")
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using
# the `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Tell pytorch not to bother with constructing the compute graph during
# the forward pass, since this is only needed for backprop (training).
with torch.no_grad():
# Forward pass, calculate logit predictions.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# Get the "logits" output by the model. The "logits" are the output
# values prior to applying an activation function like the softmax.
output = model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
# Accumulate the validation loss.
total_eval_loss += output.loss.item()
# Move logits and labels to CPU
logits = output.logits.detach().cpu().numpy()
label_ids = b_labels.to("cpu").numpy()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
logging.info(" Accuracy: {0:.2f}".format(avg_val_accuracy))
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = format_time(time.time() - t0)
logging.info(" Validation Loss: {0:.2f}".format(avg_val_loss))
logging.info(" Validation took: {:}".format(validation_time))
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
# Record all statistics from this epoch.
training_stats.append(
{
"epoch": epoch_i + 1,
"Training Loss": avg_train_loss,
"Valid. Loss": avg_val_loss,
"Valid. Accur.": avg_val_accuracy,
"Training Time": training_time,
"Validation Time": validation_time,
}
)
logging.info("")
logging.info("Training complete!")
print("")
print("Training complete!")
logging.info(
"Total training took {:} (h:mm:ss)".format(format_time(time.time() - total_t0))
)
print(
"Total training took {:} (h:mm:ss)".format(format_time(time.time() - total_t0))
)
return model, training_stats
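# Illustrative sketch (not part of the original source) of how train_model might
# be driven; the model name and hyper-parameters below are assumptions.
#
#   from torch.optim import AdamW
#   from transformers import BertForSequenceClassification, get_linear_schedule_with_warmup
#
#   model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
#   optimizer = AdamW(model.parameters(), lr=2e-5)
#   total_steps = len(train_dataloader) * 5
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,
#                                               num_training_steps=total_steps)
#   model, stats = train_model(model, train_dataloader, validation_dataloader,
#                              optimizer, scheduler, epochs=5)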
def train_elmo(classifier, train_batch, optimizer_, criterion_ce, gpu=False, clip=5.0):
classifier.train()
classifier.zero_grad()
# source, tags, lengths = train_batch
source, tags = train_batch
if gpu:
source = source.to("cuda")
tags = tags.to("cuda")
# output: batch x nclasses
# output = classifier(source, lengths)
output = classifier(source)
c_loss = criterion_ce(output, tags)
c_loss.backward()
# `clip_grad_norm` to prevent exploding gradient in RNNs / LSTMs
torch.nn.utils.clip_grad_norm_(classifier.parameters(), clip)
optimizer_.step()
total_loss = c_loss.item()
# probs = F.softmax(output, dim=-1)
# max_vals, max_indices = torch.max(probs, -1)
# accuracy = torch.mean(max_indices.eq(tags).float()).item()
return total_loss
``` |
{
"source": "jlweston/ha-graphapi",
"score": 3
} |
#### File: api/auth/models.py
```python
from datetime import datetime, timedelta, timezone
from typing import Optional
from pydantic import BaseModel, Field
def utc_now():
return datetime.now(timezone.utc)
class OAuth2TokenResponse(BaseModel):
token_type: str
expires_in: int
scope: str
access_token: str
refresh_token: str
issued: datetime = Field(default_factory=utc_now)
def is_valid(self) -> bool:
return (self.issued + timedelta(seconds=self.expires_in)) > utc_now()
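# Illustrative usage sketch (not part of the original source); the field values
# below are placeholders.
#
#   token = OAuth2TokenResponse(token_type="Bearer", expires_in=3600,
#                               scope="Presence.Read", access_token="...",
#                               refresh_token="...")
#   token.is_valid()  # True until `expires_in` seconds after `issued`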
```
#### File: provider/presence/__init__.py
```python
from aiohttp.client import ClientResponse
from ..baseprovider import BaseProvider
from .models import PresenceResponse
class PresenceProvider(BaseProvider):
BASE_URL = "https://graph.microsoft.com/beta"
async def get_presence(self) -> PresenceResponse:
"""
Get presence info for the current user
Returns:
:class:`PresenceResponse`: Presence Response
"""
url = f"{self.BASE_URL}/me/presence"
resp = await self.client.session.get(url)
resp.raise_for_status()
return PresenceResponse.parse_raw(await resp.text())
async def get_presence_by_id(self, target_id: str) -> PresenceResponse:
"""
        Get user presence by user ID
Args:
target_id: User ID to get presence for
Returns:
:class:`PresenceResponse`: Presence Response
"""
# https://graph.microsoft.com/beta/users/{user-id}/presence
url = f"{self.BASE_URL}/users/{target_id}/presence"
resp = await self.client.session.get(url)
resp.raise_for_status()
return PresenceResponse.parse_raw(await resp.text())
``` |
{
"source": "jlwgong/hangman",
"score": 4
} |
#### File: hangman/Python code/isPrime.py
```python
def isPrime(number):
# this will tell us if the number is prime, set to True automatically
# We will set to False if the number is divisible by any number less than it
number_is_prime = True
# loop over all numbers less than the input number
for i in range(2, number):
# calculate the remainder
remainder = number % i
# if the remainder is 0, then the number is not prime by definition!
if remainder == 0:
number_is_prime = False
# return result to the user
return number_is_prime
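if __name__ == '__main__':
    # Illustrative check, not part of the original source.
    print(isPrime(7))  # True: no number in 2..6 divides 7
    print(isPrime(8))  # False: 8 % 2 == 0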
```
#### File: hangman/Python code/tictactoe.py
```python
def get_board(moves):
line1 = [moves["A1"], "|", moves["A2"], "|", moves["A3"]]
line2 = ["-", "-", "-", "-", "-"]
line3 = [moves["B1"], "|", moves["B2"], "|", moves["B3"]]
line4 = ["-", "-", "-", "-", "-"]
line5 = [moves["C1"], "|", moves["C2"], "|", moves["C3"]]
board = [line1, line2, line3, line4, line5]
return board
def print_board(moves):
board = get_board(moves)
for line in board:
print_line = ""
for i in line:
print_line += i
print(print_line)
return None
def get_player_move(moves, letter):
valid_moves = ["A1", "A2", "A3", "B1", "B2", "B3", "C1", "C2", "C3"]
# Player 1 Move
invalid_move = True
while invalid_move:
message = "Enter " + letter + " Move: "
        player_move = input(message)
#check if a valid move
if player_move in valid_moves:
# check if spot has already been played
if moves[player_move] != " ":
print("Invalid move, try again!")
else:
invalid_move = False
moves[player_move] = letter
print_board(moves)
else:
print("Invalid move, try again!")
def check_win(moves):
win = False
board = get_board(moves)
# to check:
# across x3
# down x3
# diag x2
for i in range(0,5,2):
across = []
for j in range(0,5,2):
across.append(board[i][j])
# check for horizontal win
if (across[0] == across[1]) and (across[1] == across[2]) and (across[2] != ' '):
win = True
print("Horizontal win detected!")
print(across[0], "wins!")
return win
# check for vertical win
if (board[0][i] == board[2][i]) and (board[2][i] == board[4][i]) and (board[2][i] != ' '):
win = True
print("Vertical win detected!")
print(board[0][i], "wins!")
return win
#check for diagonal win
if (board[0][0] == board[2][2]) and (board[2][2] == board[4][4]) and (board[2][2] != ' '):
win = True
print("Diagonal win detected!")
print(board[2][2], "wins!")
return win
elif (board[4][0] == board[2][2]) and (board[2][2] == board[0][4]) and (board[2][2] != ' '):
win = True
print("Diagonal win detected!")
print(board[2][2], "wins!")
return win
return win
# --------------------------------------------------------
# ------------------ MAIN GAMEPLAY -------------------
# --------------------------------------------------------
# Initialize Board, print moves for user
print(" ")
line1 = ["A1", "|", "A2", "|", "A3"]
line2 = ["-", "-", "-", "-", "-"]
line3 = ["B1", "|", "B2", "|", "B3"]
line4 = ["-", "-", "-", "-", "-"]
line5 = ["C1", "|", "C2", "|", "C3"]
board = [line1, line2, line3, line4, line5]
for line in board:
print_line = ""
for i in line:
print_line += i
print(print_line)
print(" ")
# Initialize Game
positions = ["A1", "A2", "A3", "B1", "B2", "B3", "C1", "C2", "C3"]
moves = {}
for i in range(0,9):
moves[positions[i]] = " "
print_board(moves)
game_done = False
# Start Game
letter_count = 0
while game_done != True:
if letter_count % 2 == 0:
get_player_move(moves, "X")
else:
get_player_move(moves, "O")
game_done = check_win(moves)
null_count = 0
for i in moves.values():
if i == " ":
null_count += 1
if null_count == 0:
game_done = True
letter_count += 1
``` |
{
"source": "jlwollinger-zz/river_level_prediction",
"score": 3
} |
#### File: river_level_prediction/training/load_defaut_image.py
```python
def load():
import json
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import numpy as np
json = json.load(open('C:\\Users\\Wollinger\\Desktop\\TCC\\river_annotation\\ds\\ann\\0.88_215.jpg.json'))
objects = json['objects']
points = objects[0]['points']
exterior = points['exterior']
polygons = []
shape = (1920, 1080)
for i in range(len(exterior)):
polygons.append(tuple(exterior[i]))
img = Image.new('L', shape, 0)
ImageDraw.Draw(img).polygon(polygons, outline=1, fill=1)
mask = np.array(img)
arr = mask.astype('bool')
for i in range(len(mask)):
for j in range(len(mask[i])):
if(mask[i][j] == 0):
arr[i][j] = False
else:
arr[i][j] = True
return arr.reshape((1080,1920,1))
``` |
{
"source": "JLWoodcock/MKL",
"score": 3
} |
#### File: 03_conv_nets/solutions/edge_detection.py
```python
image = tf.placeholder(tf.float32, [None, None, None, 1])
kernel = tf.placeholder(tf.float32, [3, 3])
def conv(x, k):
k = tf.reshape(k, shape=[3, 3, 1, 1])
return tf.nn.conv2d(x, k, strides=[1, 1, 1, 1],
padding='SAME')
output_image = conv(image, kernel)
kernel_data = np.array([
[0.0, 0.2, 0.0],
[0.0, -0.2, 0.0],
[0.0, 0.0, 0.0],
])
# kernel_data = np.array([
# [ 0.1, 0.2, 0.1],
# [ 0.0, 0.0, 0.0],
# [-0.1, -0.2, -0.1],
# ])
print(kernel_data)
with tf.Session() as sess:
feed_dict={image:[grey_sample_image],
kernel: kernel_data}
conv_img = sess.run(output_image, feed_dict=feed_dict)
print("Resulting image shape:", conv_img.shape)
show(conv_img[0])
# We only showcase a vertical edge detection here.
# Many other kernels work, for example differences
# of centered gaussians (sometimes called mexican-hat
# connectivity)
```
#### File: 03_conv_nets/solutions/strides_padding.py
```python
image = tf.placeholder(tf.float32, [None, None, None, 3])
kernel = tf.placeholder(tf.float32, [3, 3, 3])
def conv(x, k):
k = tf.reshape(k, shape=[3, 3, 3, 1])
return tf.nn.depthwise_conv2d(x, k, strides=[1,2,2,1],
padding='SAME')
def conv_valid(x, k):
k = tf.reshape(k, shape=[3, 3, 3, 1])
return tf.nn.depthwise_conv2d(x, k, strides=[1,2,2,1],
padding='VALID')
output_image = conv(image, kernel)
output_image_valid = conv_valid(image, kernel)
kernel_data = np.zeros(shape=(3, 3, 3)).astype(np.float32)
# identity kernel: ones only in the center of the filter
kernel_data[1, 1, :] = 1
print('Identity 3x3x3 kernel:')
print(np.transpose(kernel_data, (2, 0, 1)))
with tf.Session() as sess:
feed_dict = {image: [sample_image], kernel: kernel_data}
conv_img, conv_img_valid = sess.run([output_image, output_image_valid],
feed_dict=feed_dict)
print("Shape of result with SAME padding:", conv_img.shape)
print("Shape of result with VALID padding:", conv_img_valid.shape)
show(conv_img[0])
# We observe that the stride divided the size of the image by 2
# In the case of 'VALID' padding mode, no padding is added, so
# the size of the ouput image is actually 1 less because of the
# kernel size
``` |
{
"source": "jl-wynen/code-quality-checks",
"score": 3
} |
#### File: jl-wynen/code-quality-checks/code_quality.py
```python
import argparse
from pathlib import Path
import re
import subprocess
import sys
CPP_EXTENSIONS = ('.cpp', '.cc', '.cxx', '.hpp', '.hh', '.hxx', '.h')
PY_EXTENSIONS = ('.py',)
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--py', action='store_true', default=False,
help='Enable all Python checks')
parser.add_argument('--yapf', nargs='?', default=None, const='yapf',
help='Reformat Python files')
parser.add_argument('--flake8', nargs='?', default=None, const='flake8',
help='Check Python files with flake8')
parser.add_argument('--cpp', action='store_true', default=False,
help='Enable all C++ checks')
parser.add_argument('--clang-format', nargs='?', default=None, const='clang-format',
help='Reformat C++ code')
parser.add_argument('--ref', default='main',
help='Name / hash of the reference branch / commit')
parser.add_argument('--prefix', metavar='NUM', default=0,
help='Strip this number of directories from file paths')
args = parser.parse_args()
if not any((args.py, args.yapf, args.flake8, args.cpp)):
print('WARNING no checkers are enabled.')
if args.py:
if not args.yapf:
args.yapf = 'yapf'
if not args.flake8:
args.flake8 = 'flake8'
if args.cpp:
if not args.clang_format:
args.clang_format = 'clang-format'
return args
def call_pipe(cmd, cwd=None):
return subprocess.run(cmd, cwd=cwd, check=True, capture_output=True).stdout.decode('utf-8').strip()
def find_repo_root():
try:
return call_pipe(['git', 'rev-parse', '--show-toplevel'])
except subprocess.CalledProcessError:
print('Failed to determine git root directory. Is this a git repository?')
sys.exit(1)
def get_diff(repo_root, ref):
current_branch = call_pipe(['git', 'branch', '--show-current'], cwd=repo_root)
base_commit = call_pipe(['git', 'merge-base', ref, current_branch], cwd=repo_root)
return call_pipe(['git', 'diff', '-U0', '--no-color', '--relative', base_commit], cwd=repo_root)
def parse_diff(diff, n_path_strip):
filename_regex = re.compile(rf'^\+\+\+ (.*?/){{{n_path_strip}}}(\S*)')
lineno_regex = re.compile(r'^@@.*?\+(\d+)(,(\d+))?')
lines = dict()
current_file = None
for line in diff.splitlines():
match = filename_regex.match(line)
if match:
current_file = Path(match[2])
if current_file is None:
continue # did not find a file yet or file name is empty
match = lineno_regex.match(line)
if match:
start_line = int(match[1])
n_lines = int(match[3]) if match[3] else 1
if n_lines == 0:
continue
end_line = start_line + n_lines
lines.setdefault(current_file, []).append(slice(start_line, end_line, 1))
return lines
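# Illustrative example (not part of the original source): for a diff containing
#   +++ b/src/foo.py
#   @@ -10,0 +11,2 @@
# parse_diff(diff, 1) strips the leading "b/" and maps Path('src/foo.py') to
# [slice(11, 13, 1)], i.e. the two added lines 11-12.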
def run_formatter(cmd, modified_lines, extensions, line_separator, cwd):
for fname, lines in filter(lambda t: t[0].suffix in extensions, modified_lines.items()):
subprocess.check_call([cmd, str(fname), '-i', *[f'--lines={l.start}{line_separator}{l.stop}' for l in lines]], cwd=cwd)
def run_flake8(cmd, modified_lines, cwd):
for fname in filter(lambda fn: fn.suffix in PY_EXTENSIONS, modified_lines):
subprocess.run([cmd, str(fname)], cwd=cwd)
def main():
args = parse_args()
repo_root = find_repo_root()
diff = get_diff(repo_root, args.ref)
modified_lines = parse_diff(diff, args.prefix)
if args.clang_format:
run_formatter(args.clang_format, modified_lines, CPP_EXTENSIONS, ':', repo_root)
if args.yapf:
run_formatter(args.yapf, modified_lines, PY_EXTENSIONS, '-', repo_root)
if args.flake8:
run_flake8(args.flake8, modified_lines, repo_root)
if __name__ == '__main__':
main()
``` |
{
"source": "jl-wynen/code_stuffs",
"score": 3
} |
#### File: code_stuffs/python/cached_property.py
```python
class cached_property(object):
"""
Descriptor that converts a method with a single self argument
into a property cached on the instance.
It also has a hook to allow for another property setter to
invalidated the cache, cf the `Square` class below for
an example.
"""
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = self.encode_name(func.__name__)
def __get__(self, instance, type=None):
if instance is None:
return self
if self.name not in instance.__dict__:
instance.__dict__[self.name] = self.func(instance)
return instance.__dict__[self.name]
def __set__(self, instance, value):
raise AttributeError("attribute is read-only")
@classmethod
def encode_name(cls, name):
return "_p_cached_{}".format(name)
@classmethod
def clear_cached(cls, instance, *names):
for name in names:
cached = cls.encode_name(name)
if cached in instance.__dict__:
del instance.__dict__[cached]
@classmethod
def invalidate(cls, *names):
def _invalidate(setter):
def _setter(instance, value):
cls.clear_cached(instance, *names)
return setter(instance, value)
_setter.__name__ = setter.__name__
_setter.__doc__ = getattr(setter, '__doc__')
return _setter
return _invalidate
class Square(object):
def __init__(self, size):
self._size = size
@cached_property
def area(self):
print("calc area")
return self.size * self.size
@property
def size(self):
return self._size
@size.setter
@cached_property.invalidate("area")
def size(self, size):
self._size = size
s = Square(3)
print(s.area)
print(s.area)
s.size = 2
print(s.area)
```
#### File: code_stuffs/python/text_banner.py
```python
import math
import matplotlib.pyplot as plt
from matplotlib.transforms import IdentityTransform, Affine2D
from matplotlib.textpath import TextPath
from matplotlib.patches import PathPatch
from matplotlib.offsetbox import AnnotationBbox, AuxTransformBox
import numpy as np
def rotated_scale(width, height, angle, target_width, target_height):
"""Compute scale factors for rotated rectangle."""
# dimensions of rotated width
wxp = np.abs(width*np.cos(angle))
wyp = np.abs(width*np.sin(angle))
# dimensions of rotated height
hxp = np.abs(height*np.sin(angle))
hyp = np.abs(height*np.cos(angle))
# get scales
return target_width/(wxp+hxp), target_height/(wyp+hyp)
def text_banner(axes, text, facecolor="red", edgecolor="darkred", linewidth=1,
alpha=0.3, angleadjust=True, zorder=0):
"""
    Paint text across a whole axes.
For height > width, angleadjust should be False.
"""
# draw the text into a patch
textpath = TextPath((0, 0), text, size=20, transform=axes.transAxes)
tp_bbox = textpath.get_extents()
patch = PathPatch(textpath, fc=facecolor, ec=edgecolor, lw=linewidth, alpha=alpha,
transform=IdentityTransform(), zorder=11)
# get angle and scale to transform text to axes coordinates
ax_bbox = axes.get_window_extent()
angle = math.atan2(ax_bbox.height, ax_bbox.width) * \
(ax_bbox.height/ax_bbox.width if angleadjust else 1)
scale = min(*rotated_scale(tp_bbox.width, tp_bbox.height, angle,
ax_bbox.width, ax_bbox.height))*0.95
# paint the patch into the axes
offsetbox = AuxTransformBox(Affine2D().rotate(angle).scale(scale))
offsetbox.add_artist(patch)
artist = AnnotationBbox(offsetbox, (0.5, 0.5),
xycoords='axes fraction',
frameon=False)
artist.set_zorder(zorder)
axes.add_artist(artist)
# showcase it
fig = plt.figure(figsize=(11,5))
ax = fig.add_subplot(121)
hist, bin_edges = np.histogram(np.random.randn(1000), 20)
ax.plot((bin_edges[:-1]+bin_edges[1:])/2, hist)
plt.tight_layout()
text_banner(ax, "PRELIMINARY", angleadjust=False)
ax = fig.add_subplot(122)
hist, bin_edges = np.histogram(np.random.randn(1000), 20)
ax.plot((bin_edges[:-1]+bin_edges[1:])/2, hist)
plt.tight_layout()
text_banner(ax, "So Solls Sein!", angleadjust=False,
facecolor="blue", edgecolor="none")
plt.show()
``` |
{
"source": "jl-wynen/exact-hubbard",
"score": 3
} |
#### File: exact-hubbard/ana/correlators.py
```python
import numpy as np
import matplotlib.pyplot as plt
def linestyle(i):
linestyles = ["-", "--", "-.", ":"]
return linestyles[i]
def get_irreps(kappa):
"""
Compute the lattice irreps.
"""
# Square
# hopping = np.array([[0, 1, 0, 1],
# [1, 0, 1, 0],
# [0, 1, 0, 1],
# [1, 0, 1, 0]])
# # Triangle
# hopping = np.array([[0, 1, 1],
# [1, 0, 1],
# [1, 1, 0]])
# tetrahedron
hopping = np.array([[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 0]])
return np.linalg.eigh(hopping * kappa)[1]
def project_to_irreps(corrs, params):
"""
Project correlators from position space to the irrep basis.
"""
irreps = get_irreps(params["kappa"])
return np.einsum("ij,jkt,kl->ilt", irreps.T, corrs, irreps)
def load_correlators(fname):
"""
Load correlators and meta data stored in a file.
"""
with open(fname, "r") as f:
assert f.readline() == "#~ correlator\n"
assert f.readline() == "# nx nt\n"
nx, nt = map(int, f.readline().split(" "))
assert f.readline() == "# U kappa beta\n"
U, kappa, beta = map(float, f.readline().split(" "))
corrs = np.loadtxt(fname, skiprows=5).reshape(nx, nx, nt)
return corrs, dict(U=U, kappa=kappa, beta=beta)
def plot_all_in_one(corrs, params):
"""
Plot all correlators in one plot.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(r"$\kappa \tau$")
ax.set_ylabel(r"$C(\tau)$")
x = np.linspace(0, params["beta"], corrs.shape[2], endpoint=True) * params["kappa"]
for i, j in np.ndindex(corrs.shape[:2]):
ax.plot(x, corrs[i, j], c=f"C{i}", ls=linestyle(j))
fig.tight_layout()
def plot_grid(corrs, params):
"""
Plot a grid of all correlators.
"""
fig = plt.figure(figsize=(11, 10))
fig.suptitle(rf"$U/\kappa = {params['U']/params['kappa']} \qquad \kappa \beta = {params['kappa']*params['beta']}$")
x = np.linspace(0, params["beta"], corrs.shape[2], endpoint=True) * params["kappa"]
for i, j in np.ndindex(corrs.shape[:2]):
ax = fig.add_subplot(corrs.shape[0], corrs.shape[1], i*corrs.shape[1] + j + 1)
ax.set_xlabel(r"$\kappa \tau$")
ax.set_ylabel(rf"$C_{{{i},{j}}}(\tau)$")
ax.plot(x, corrs[i, j])
ax.set_yscale("log")
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
def main():
corrs, params = load_correlators("../correlators.dat")
corrs = project_to_irreps(corrs, params)
plot_grid(corrs, params)
plt.show()
if __name__ == '__main__':
main()
```
#### File: exact-hubbard/ana/spectrum.py
```python
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
def collect_degenerates(spectrum):
q_to_e = dict()
for charge, energy in spectrum:
if charge in q_to_e:
q_to_e[charge].append(energy)
else:
q_to_e[charge] = [energy]
q_to_e = {charge: Counter(energies) for charge, energies in q_to_e.items()}
charges, energies = zip(*sorted(q_to_e.items(), key=lambda t: t[0]))
return charges, energies
def main():
spectrum = np.loadtxt("spectrum.dat")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("Q")
ax.set_ylabel(r"$(E_Q^\alpha - E_\Omega) / \kappa$")
spectrum[:, 1] = np.round(spectrum[:, 1] - np.min(spectrum[:, 1]), 5)
for charge, energies in zip(*collect_degenerates(spectrum)):
for energy, count in energies.items():
ax.plot((charge-0.33, charge+0.33), [energy]*2,
linewidth=2, c="C0")
ax.text(charge, energy+0.1, f"({count})", horizontalalignment="center")
fig.tight_layout()
plt.show()
if __name__ == '__main__':
main()
``` |
{
"source": "jl-wynen/h5shell",
"score": 3
} |
#### File: h5sh/commands/history.py
```python
from . import command
class history(command.Command):
"""Command to show input history."""
def __init__(self):
super(history, self).__init__()
self._parser = command.Command.Parser(prog="history",
description="Show the input history")
def __call__(self, args, wd, h5mngr, term):
"""Execute the history command."""
if not self._parse_args(args, term):
return
term.print(term.history.dump())
``` |
{
"source": "jl-wynen/imgen",
"score": 4
} |
#### File: imgen/imgen/colour.py
```python
import numpy as np
def luminance(colours):
"""
Compute luminance of an array of colours.
"""
return (np.max(colours[..., :3], axis=2) + np.min(colours[..., :3], axis=2)) / 2
def _float2int(f):
return int(f * 255)
def _int2float(i):
return i / 255.0
class Colour:
DTYPE = np.float32
__slots__ = ("colour", )
def __init__(self, colour):
self.colour = np.full((4,), 1.0, dtype=self.DTYPE)
self.colour[:len(colour)] = colour
@classmethod
def RGB(cls, R, G, B, A=255):
return cls((_int2float(R), _int2float(G), _int2float(B), _int2float(A)))
@classmethod
def rgb(cls, r, g, b, a=1.0):
return cls((r, g, b, a))
@classmethod
def HSL(cls, H, S, L, a=1.0):
c = (1 - abs(2*L-1))*S
H = H/60
x = c*(1 - abs(H%2 - 1))
if H == 0:
aux = np.array((0, 0, 0))
elif 0 <= H < 1:
aux = np.array((c, x, 0))
elif 1 <= H < 2:
aux = np.array((x, c, 0))
elif 2 <= H < 3:
aux = np.array((0, c, x))
elif 3 <= H < 4:
aux = np.array((0, x, c))
elif 4 <= H < 5:
aux = np.array((x, 0, c))
else:
aux = np.array((c, 0, x))
aux += L - c/2
return cls.rgb(*aux, a)
def __len__(self):
return 4
def __getitem__(self, idx):
return self.colour[idx]
def interpolate(self, other, fraction):
assert 0 <= fraction <= 1
return Colour.rgb(self.r*(1-fraction) + other.r*fraction,
self.g*(1-fraction) + other.g*fraction,
self.b*(1-fraction) + other.b*fraction,
self.a*(1-fraction) + other.a*fraction)
@property
def r(self):
return self.colour[0]
@r.setter
def r(self, value):
self.colour[0] = value
@property
def R(self):
return _float2int(self.colour[0])
@R.setter
def R(self, value):
self.colour[0] = _int2float(value)
@property
def g(self):
return self.colour[1]
@g.setter
def g(self, value):
self.colour[1] = value
@property
def G(self):
return _float2int(self.colour[1])
@G.setter
def G(self, value):
self.colour[1] = _int2float(value)
@property
def b(self):
return self.colour[2]
@b.setter
def b(self, value):
self.colour[2] = value
@property
def B(self):
return _float2int(self.colour[2])
@B.setter
def B(self, value):
self.colour[2] = _int2float(value)
@property
def a(self):
return self.colour[3]
@a.setter
def a(self, value):
self.colour[3] = value
@property
def A(self):
return _float2int(self.colour[3])
@A.setter
def A(self, value):
self.colour[3] = _int2float(value)
@property
def H(self):
M = np.max(self.colour[:3])
m = np.min(self.colour[:3])
c = M - m
if c == 0:
h = 0
elif M == self.r:
h = 60*(self.g-self.b)/c
elif M == self.g:
h = 60*(2 + (self.b-self.r)/c)
else:
h = 60*(4 + (self.r-self.g)/c)
while h < 0:
h += 360
return h
@H.setter
def H(self, value):
self.colour = self.HSL(value, self.S, self.L, self.a).colour
@property
def S(self):
M = np.max(self.colour[:3])
if M == 0:
return 0
m = np.min(self.colour[:3])
if m == 1:
return 0
return (M-m) / (1 - abs(M+m-1))
@S.setter
def S(self, value):
self.colour = self.HSL(self.H, value, self.L, self.a).colour
@property
def L(self):
M = np.max(self.colour[:3])
m = np.min(self.colour[:3])
return (M + m)/2
@L.setter
def L(self, value):
self.colour = self.HSL(self.H, self.S, value, self.a).colour
@classmethod
def black(cls):
return cls((0.0, 0.0, 0.0, 1.0))
@classmethod
def blue(cls):
return cls.RGB(0, 84, 159, 255)
@classmethod
def bordeaux(cls):
return cls.RGB(161, 15, 53, 255)
@classmethod
def dark(cls):
return cls.RGB(45, 38, 50, 255)
@classmethod
def darkachrom(cls):
return cls.RGB(22, 22, 22, 255)
@classmethod
def green(cls):
return cls.RGB(87, 171, 39, 255)
@classmethod
def orange(cls):
return cls.RGB(224, 71, 40, 255)
@classmethod
def petrol(cls):
return cls.RGB(0, 97, 101, 255)
@classmethod
def red(cls):
return cls.RGB(204, 7, 30, 255)
@classmethod
def violet(cls):
return cls.RGB(97, 33, 88, 255)
@classmethod
def yellow(cls):
return cls.RGB(255, 237, 0, 255)
@classmethod
def white(cls):
return cls((1.0, 1.0, 1.0, 1.0))
@classmethod
def transparent(cls):
return cls((0.0, 0.0, 0.0, 0.0))
## Dictionary mapping predefined colour names to Colour objects (only includes saturated colours).
COLOURS = {name: getattr(Colour, name)()
for name in ("blue", "bordeaux", "green", "yellow",
"orange", "petrol", "red", "violet")}
```
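As a small illustration of the colour model implemented above, the following sketch exercises the HSL constructor, the integer component properties and `interpolate`. It is a minimal sketch, not part of the original file; the import path `imgen.colour` is taken from the file header and assumed to be importable.
```python
from imgen.colour import Colour, COLOURS  # assumes the package layout shown in the file header

c = Colour.HSL(210, 0.8, 0.4)                   # hue 210 deg, saturation 0.8, lightness 0.4
print(c.R, c.G, c.B, c.A)                       # the same colour as 0-255 integers
print(round(float(c.H)), round(float(c.S), 2))  # H and S recovered from the stored RGB values

mid = COLOURS["blue"].interpolate(COLOURS["red"], 0.5)  # linear blend in RGBA space
print(mid.r, mid.g, mid.b, mid.a)
```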
#### File: imgen/imgen/filters.py
```python
import scipy.ndimage as ndi
from .colour import luminance
def bloom(img, threshold=0.5, blurs=(2, 5)):
bright_pixels = img.pixels.copy()
bright_pixels[..., 3] = 0 # drop alpha (not part of bloom)
bright_pixels[luminance(bright_pixels) < threshold] = 0
for blur in blurs:
img.pixels += ndi.gaussian_filter(bright_pixels, (blur, blur, 0), mode="nearest")
``` |
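A rough sketch of how `bloom` might be applied: it only needs an object with a `pixels` attribute holding RGBA floats, so a `SimpleNamespace` stand-in is used here purely for illustration and is not part of the imgen API.
```python
import numpy as np
from types import SimpleNamespace
from imgen.filters import bloom  # assumes the package layout shown in the file header

rng = np.random.default_rng(0)
# stand-in for an imgen image: bloom only touches img.pixels, an (H, W, RGBA) float array
img = SimpleNamespace(pixels=rng.uniform(0.0, 1.0, (64, 64, 4)).astype(np.float32))
bloom(img, threshold=0.6, blurs=(2, 5))  # adds blurred copies of the bright pixels in place
print(img.pixels.max())
```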
{
"source": "jl-wynen/mantid-data",
"score": 2
} |
#### File: mantid-data/tests/test_mantiddata.py
```python
import mantiddata
import pytest
@pytest.fixture
def inventory():
return mantiddata.create()
def test_PG3_characterization_2011_08_31_HR_txt_is_correct(inventory):
expected = '''#S 1 characterization runs
#L frequency(Hz) center_wavelength(angstrom) bank_num vanadium_run empty_run vanadium_back d_min(angstrom) d_max(angstrom)
60 0.533 1 4866 0 5226 0.10 2.20 00000.00 16666.67
60 1.066 2 4867 0 5227 0.30 3.20 8333.33 25000.00
60 1.333 3 4868 0 5228 0.43 3.80 12500.00 29166.67
60 1.599 4 4869 0 5229 0.57 4.25 16666.67 33333.33
60 2.665 5 4870 0 5230 1.15 6.50 33333.33 50000.00
60 3.731 6 4871 0 5232 1.70 8.50 50000.00 66666.67
60 4.797 7 4872 0 5233 2.00 10.30 66666.67 83333.67
30 1.066 1 4873 0 5234 0.10 4.20 00000.00 33333.33
30 3.198 2 4874 0 5235 1.15 8.25 33333.33 66666.67
30 5.330 3 4891 0 5236 2.00 12.50 66666.67 100000.0
10 3.198 1 4920 0 5315 0.10 12.50 00000.0 100000.0
'''
with open(inventory.fetch('PG3_characterization_2011_08_31-HR.txt'), 'r') as f:
content = f.read()
assert content == expected
``` |
{
"source": "jl-wynen/pentinsula",
"score": 3
} |
#### File: pentinsula/pentinsula/timeseries.py
```python
from enum import Flag, auto
from pathlib import Path
from typing import Optional, Union
import numpy as np
from .chunkbuffer import ChunkBuffer
from .h5utils import open_or_pass_dataset
from .types import File, Dataset, Shape, DType
class BufferPolicy(Flag):
"""
Flag indicating what happens when a TimeSeries changes chunks.
"""
NOTHING = auto()
READ = auto()
WRITE = auto()
READ_WRITE = READ | WRITE
class TimeSeries:
"""
Build or consume a time series of items in an HDF5 dataset.
In this context a 'time series' is understood to be a sequence of 'items' that gets
constructed one item after the other.
Items can be an array of any shape and are stacked into the time series along a new
leading dimension.
This means that the underlying HDF5 dataset has shape (time, *item.shape).
This class provides methods both for building such a series and for reading it.
It uses a chunked dataset and buffers a complete chunk in memory to reduce the
number of file operations.
The memory required for this is entirely determined by the chunk size of the dataset.
It is possible to manually process the series by using the methods TimeSeries.select
and TimeSeries.item to pick a time and access the corresponding item.
This requires the user to pay attention to the buffering.
For easier usage, there are also high level iterators that either read items from a
time series successively (TimeSeries.read_iter) or write them, either overwriting
or extending a series (TimeSeries.write_iter).
"""
def __init__(self,
file_or_buffer: Union[File, ChunkBuffer],
dataset: Optional[Dataset] = None,
buffer_length: Optional[int] = None,
shape: Shape = (),
dtype: Optional[DType] = None,
maxshape: Optional[Shape] = None):
"""
Construct a TimeSeries with underlying buffer in memory.
Does not verify if a suitable dataset exists in the file or create one.
Time 0 is selected.
:param file_or_buffer: Either a file name / file handle or an existing ChunkBuffer.
In case of a file, a new buffer is created, and the arguments
dataset and buffer_length are required.
Otherwise, the existing buffer with all its properties is used.
:param dataset: Name or dataset object for initialising a new buffer.
:param buffer_length: Number of times stored in the buffer, i.e. first rank of a chunk.
:param shape: Shape of *items*, that is the total shape of a chunk is (buffer_length,) + shape.
:param dtype: Datatype of the buffer.
:param maxshape: Maximum shape of the dataset. Must satisfy len(maxshape) = 1 + len(shape).
"""
if isinstance(file_or_buffer, ChunkBuffer):
self._buffer = file_or_buffer
else:
if dataset is None or buffer_length is None:
raise ValueError("dataset and buffer_length must be provided when "
"file_or_buffer indicates a file.")
self._buffer = ChunkBuffer(file_or_buffer, dataset, shape=(buffer_length,) + shape,
dtype=dtype, maxshape=maxshape)
self._buffer_time_index = 0 # into the buffer, not total time
@classmethod
def load(cls, file: File, dataset: Dataset, time_index: int):
"""
Load an existing time series from file.
:param file: The file containing the dataset.
:param dataset: The dataset to load. Must be chunked along its first dimension.
:param time_index: This time is selected and the corresponding chunk is loaded.
Must be >= 0.
:return: A newly constructed TimeSeries.
"""
with open_or_pass_dataset(file, dataset, None, "r") as dataset:
series = cls(file, dataset, dataset.chunks[0], shape=dataset.shape[1:],
dtype=dataset.dtype, maxshape=dataset.maxshape)
series.read(time_index, file=dataset.file, dataset=dataset)
return series
@classmethod
def pick_up(cls, file: File, dataset: Dataset):
"""
Extend an existing time series.
Selects the time *after* the last stored time.
This means that the content of the current item is undefined.
:param file: The file containing the dataset.
:param dataset: The dataset to load. Must be chunked along its first dimension.
:return: A newly constructed TimeSeries.
"""
with open_or_pass_dataset(file, dataset, None, "r") as dataset:
series = cls(file, dataset, dataset.chunks[0], shape=dataset.shape[1:],
dtype=dataset.dtype, maxshape=dataset.maxshape)
if dataset.shape[0] % dataset.chunks[0] == 0:
# First element of chunk, nothing to read.
series.select(dataset.shape[0], BufferPolicy.NOTHING)
else:
# Item at shape[0] does not exist, read the one before that and advance.
series.read(dataset.shape[0] - 1, file=dataset.file, dataset=dataset)
series.advance(BufferPolicy.NOTHING)
return series
@property
def item(self) -> np.ndarray:
"""
A view of the currently selected item.
This is always a numpy.ndarray even for scalar items to allow modifications
through the returned object.
Note that this only accesses the buffer in memory, you need to call
read / write to synchronise with the file.
"""
if len(self.shape) == 0:
# Return an array for scalar items to allow assignment.
return self._buffer.data.reshape(-1, 1)[self._buffer_time_index]
return self._buffer.data[self._buffer_time_index]
@property
def time_index(self) -> int:
"""
The current time index (immutable).
"""
return self._buffer.chunk_index[0] * self._buffer.shape[0] + self._buffer_time_index
@property
def buffer_length(self) -> int:
"""
Number of times in a buffer.
"""
return self._buffer.shape[0]
@property
def shape(self) -> Shape:
"""
The shape of items. Does not include the time dimension.
"""
return self._buffer.shape[1:]
@property
def ndim(self) -> int:
"""
The number of dimensions (ranks) of items.
Does not include the time dimension.
"""
return self._buffer.ndim - 1
@property
def dtype(self) -> DType:
"""
The datatype of the dataset.
"""
return self._buffer.dtype
@property
def maxtime(self) -> int:
"""
The maximum time that can be stored in the dataset.
May be None.
"""
return self._buffer.maxshape[0]
@property
def filename(self) -> Path:
"""
The name of the HDF5 file.
"""
return self._buffer.filename
@property
def dataset_name(self) -> Path:
"""
The full path of the dataset inside of the HDF5 file.
"""
return self._buffer.dataset_name
def select(self, time_index: int,
on_chunk_change: BufferPolicy = BufferPolicy.NOTHING,
file: Optional[File] = None,
dataset: Optional[Dataset] = None):
"""
Change the stored time index.
This function switches chunks as necessary but only reads from / writes to the file
if the argument on_chunk_change is set accordingly.
:param time_index: New time index.
:param on_chunk_change: Controls what happen if the chunk is changed.
Data for the new time index is read from the file only if the READ
flag is set, otherwise, the buffer in memory is unchanged.
If the WRITE bit is set, the current buffer is written to file
before changing the time index, otherwise, the file is not modified.
:param file: Indicates the file to read from. If given, it must match the filename stored in the buffer.
:param dataset: Indicates the dataset to read from.
"""
if time_index < 0:
raise IndexError("Time index must be positive.")
if self.maxtime is not None and time_index >= self.maxtime:
raise IndexError(f"Time index out of bounds, index {time_index}"
f"larger than maxtime {self.maxtime}")
time_chunk = time_index // self._buffer.shape[0]
if time_chunk != self._buffer.chunk_index[0]:
# need to change buffered chunk
if on_chunk_change & BufferPolicy.WRITE:
# save current
self._buffer.write(must_exist=False, file=file, dataset=dataset)
self._buffer.select((time_chunk,) + self._buffer.chunk_index[1:])
if on_chunk_change & BufferPolicy.READ:
# read new
self._buffer.read(file=file, dataset=dataset)
self._buffer_time_index = time_index % self._buffer.shape[0]
def advance(self, on_buffer_change: BufferPolicy = BufferPolicy.NOTHING,
file: Optional[File] = None,
dataset: Optional[Dataset] = None):
"""
Move to the next time index.
See TimeSeries.select for more information.
"""
self.select(self.time_index + 1, on_buffer_change, file=file, dataset=dataset)
def read(self, time_index: Optional[int] = None,
file: Optional[File] = None,
dataset: Optional[Dataset] = None):
"""
Read a chunk from file.
The time must exist in the dataset in the HDF5 file.
All stored metadata is checked against the file and an error is raised if there is a mismatch.
An existing file or dataset handle to a currently open connection can be passed in as arguments
to avoid opening the file on every call to this function.
A call to this function ensures only that the item for the given time index is read.
Whether or not other items are read depends on the details of chunking and should not be relied upon.
:param time_index: Time index to load.
If None, the currently selected time index is used.
:param file: Indicates the file to read from. If given, it must match the filename stored in the buffer.
:param dataset: Indicates the dataset to read from.
"""
if time_index is not None:
self.select(time_index, BufferPolicy.NOTHING)
fill_level = self._buffer.read(file=file, dataset=dataset)
if self._buffer_time_index >= fill_level[0]:
raise RuntimeError(f"Cannot read data for time index {self.time_index}. The dataset only contains items "
f"up to time {self._buffer.chunk_index[0] * self._buffer.shape[0] + fill_level[0] - 1}.")
def write(self, file: Optional[File] = None, dataset: Optional[File] = None):
"""
Write the buffer up to the *currently selected time* to the file.
Only the current time index is relevant for determining what is written.
For example, given a time series with buffer_length = 10, the code
series.select(3)
series.item[...] = 3
series.select(2)
series.item[...] = 2
series.write()
only writes times 0, 1, 2 to the file.
The data stored in the first assignment to item is *not* written to the file!
All stored metadata is checked against the file and an error is raised if there is a mismatch.
An existing file or dataset handle to a currently open connection can be passed in as arguments
to avoid opening the file on every call to this function.
Note that in contrast to ChunkBuffer.write, the dataset is always resized to be big enough to
include the current time and the fill level is determined automatically.
:param file: Indicates the file to write to. If given, it must match the filename stored in the buffer.
:param dataset: Indicates the dataset to write to.
"""
self._buffer.write(must_exist=False,
fill_level=(self._buffer_time_index + 1,) + self.shape,
file=file,
dataset=dataset)
def create_dataset(self, file: Optional[File] = None, filemode: str = "a", write: bool = True):
"""
Create a new dataset in the file big enough to contain the currently selected time.
:param file: If given, use this file handle to access the HDF5 file, otherwise use the stored filename.
:param filemode: Open-mode of the file, see documentation of h5py.File.
:param write: If True, write the buffer to the dataset.
Only the selected chunk is written, the content of the other chunks is undefined.
If False, no data is written, the contents of the dataset are undefined.
"""
self._buffer.create_dataset(file, filemode, write,
fill_level=(self._buffer_time_index + 1,) + self.shape)
def read_iter(self, times: slice = slice(None), file: Optional[File] = None, dataset: Optional[Dataset] = None):
"""
Return an iterator to read items successively from the file.
This iterator starts at the given time, iterates up to the given maximum time or last time
in the dataset, and reads data from the file as needed.
Note that no data is written to the file by this iterator.
It is not safe to modify the yielded items; use TimeSeries.write_iter for writing.
:param times: Slice to indicate which times to iterator over.
Each element can be None, meaning:
- times.start is None: Start at the currently selected time.
- times.stop is None: Iterate to the end of the dataset.
- times.step is None: Equivalent to times.step = 1.
:param file: Indicates the file to read from. If given, it must match the filename stored in the buffer.
:param dataset: Indicates the dataset to read from.
:return: An iterator yielding tuples of time indices and items.
"""
file = self._buffer.filename if file is None else file
dataset = self._buffer.dataset_name if dataset is None else dataset
with open_or_pass_dataset(file, dataset, None, "r") as dataset:
ntimes = dataset.shape[0]
if times.stop is not None and times.stop > ntimes:
raise ValueError(f"Number of times {times.stop} out of bounds, "
f"the dataset only contains {ntimes} time points.")
start, stop, step = times.indices(ntimes)
if start is None:
start = self.time_index
for time_index in range(start, stop, step):
self.select(time_index, BufferPolicy.READ, file=file, dataset=dataset)
yield time_index, self.item
def write_iter(self, flush: bool = True, file: Optional[File] = None, dataset: Optional[Dataset] = None):
"""
Return an iterator to write items successively to the file.
This iterator starts at the currently selected time and iterates up to
the maximum time of the dataset or, if that is None, iterates indefinitely.
Chunks are written as needed.
If the last chunk is not filled completely, it is only written if flush = True.
It is safe to break out of a loop over this iterator.
Note, however, that the last chunk is only written if flush = True.
Note that no data is read from the file by this iterator.
The items retain their value unless overwritten by the user.
Use TimeSeries.read_iter for reading.
:param flush: If True, the last chunk is written to file when the iterator stops.
Otherwise, it is not written.
:param file: Indicates the file to write to. If given, it must match the filename stored in the buffer.
:param dataset: Indicates the dataset to read from.
:return: An iterator yielding tuples of time indices and items.
"""
# Like builtin range but allows for infinite loops with stop=None.
def range_(start, stop):
if stop is None:
idx = start
while True:
yield idx
idx += 1
else:
yield from range(start, stop)
try:
yield self.time_index, self.item
for time_index in range_(self.time_index + 1, self._buffer.maxshape[0]):
self.advance(BufferPolicy.WRITE, file=file, dataset=dataset)
yield time_index, self.item
finally:
if flush:
# Note on optimisation:
# In the last advance, the time index was incremented and the current item was not written.
# This line cannot lead to writing the same dataset twice.
self.write(file, dataset)
```
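To make the buffering behaviour described in the docstrings concrete, here is a minimal usage sketch. It is an illustration under assumptions, not part of the library: the import path follows the file shown above, and the file name, dataset name, buffer length and item shape are arbitrary choices.
```python
import numpy as np
from pentinsula.timeseries import TimeSeries  # module shown above

# Build a series of 3-vectors, buffering 8 items (one chunk) in memory at a time.
series = TimeSeries("trajectory.h5", "positions", buffer_length=8, shape=(3,), dtype=np.float64)
series.create_dataset(filemode="w", write=False)  # only allocate; the iterator writes the data
for t, item in series.write_iter():               # flushes a chunk to file whenever the buffer fills
    item[...] = np.array([t, t ** 2, t ** 3], dtype=np.float64)
    if t == 19:
        break                                     # flush=True (default) writes the final partial chunk

# Consume it again; read_iter loads chunks from the file as the iteration crosses chunk boundaries.
reader = TimeSeries.load("trajectory.h5", "positions", time_index=0)
for t, item in reader.read_iter():
    print(t, item)
```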
#### File: pentinsula/tests/test_chunkbuffer.py
```python
from io import BytesIO
from itertools import chain, product
from pathlib import Path
import random
from tempfile import TemporaryDirectory
import unittest
import h5py as h5
import numpy as np
from pentinsula import ChunkBuffer
from pentinsula.chunkbuffer import _chunk_slices
try:
from .utils import random_string, capture_variables, random_int_tuple, product_range, repeat
except ImportError:
from utils import random_string, capture_variables, random_int_tuple, product_range, repeat
N_REPEAT_TEST_CASE = 5
class TestChunkBuffer(unittest.TestCase):
@repeat(N_REPEAT_TEST_CASE)
def test_construction(self):
# valid arguments, individual shape, dtype
for dtype, maxshape in product((int, float, np.float32, np.int32, None), (None, (None,))):
filename = random_string(random.randint(1, 10))
dataset_name = random_string(random.randint(1, 10))
shape = random_int_tuple(1, 10, 4)
maxshape = maxshape if maxshape is None else maxshape * len(shape)
buffer = ChunkBuffer(filename, dataset_name,
shape=shape, dtype=dtype,
maxshape=maxshape)
self.assertEqual(buffer.filename, Path(filename))
self.assertEqual(buffer.dataset_name.relative_to("/"), Path(dataset_name))
self.assertEqual(buffer.shape, shape)
self.assertEqual(buffer.data.shape, shape)
self.assertEqual(buffer.dtype, dtype if dtype else np.float64)
self.assertEqual(buffer.data.dtype, dtype if dtype else np.float64)
self.assertEqual(buffer.maxshape, (None,) * len(shape))
# valid arguments, from array
for dtype in (int, float, np.float32, np.int32, None):
shape = random_int_tuple(1, 10, 4)
array = np.random.uniform(-10, 10, shape).astype(dtype)
buffer = ChunkBuffer(random_string(random.randint(1, 10)), random_string(random.randint(1, 10)),
data=array)
self.assertEqual(buffer.shape, shape)
self.assertEqual(buffer.dtype, dtype if dtype else np.float64)
np.testing.assert_allclose(array, buffer.data)
# valid arguments, from array with reshaping
in_shape = (10, 4)
for target_shape in ((20, 2), (40,), (5, 8)):
array = np.random.uniform(-10, 10, in_shape)
buffer = ChunkBuffer(random_string(random.randint(1, 10)), random_string(random.randint(1, 10)),
data=array, shape=target_shape)
self.assertEqual(buffer.shape, target_shape)
# invalid reshaping
array = np.random.uniform(-10, 10, (4, 10))
with self.assertRaises(ValueError):
ChunkBuffer("test.h5", "test", data=array, shape=(3,))
# invalid maxshape
with self.assertRaises(ValueError):
ChunkBuffer("test.h5", "test", shape=(1, 2), maxshape=(1,))
with self.assertRaises(ValueError):
ChunkBuffer("test.h5", "test", shape=(1, 2), maxshape=(1, 2, 3))
@repeat(N_REPEAT_TEST_CASE)
def test_load(self):
for ndim in range(1, 4):
chunk_shape = random_int_tuple(1, 10, ndim)
nchunks = random_int_tuple(1, 4, ndim)
total_shape = tuple(n * c for n, c in zip(chunk_shape, nchunks))
array = np.random.uniform(-10, 10, total_shape)
stream = BytesIO()
with h5.File(stream, "w") as h5f:
h5f.create_dataset("data", data=array, chunks=chunk_shape)
# valid, load all chunks, positive indices
for chunk_index in product_range(nchunks):
buffer = ChunkBuffer.load(stream, "data", chunk_index)
np.testing.assert_allclose(buffer.data, array[_chunk_slices(chunk_index, chunk_shape)],
err_msg=capture_variables(ndim=ndim,
chunk_shape=chunk_shape,
nchunks=nchunks,
chunk_index=chunk_index))
# negative index
neg_index = (-1,) * ndim
pos_index = tuple(n - 1 for n in nchunks)
buffer = ChunkBuffer.load(stream, "data", neg_index)
np.testing.assert_allclose(buffer.data, array[_chunk_slices(pos_index, chunk_shape)],
err_msg=capture_variables(ndim=ndim,
chunk_shape=chunk_shape,
nchunks=nchunks,
chunk_index=neg_index))
# invalid, load non-existent chunk
# outside of maxshape, discoverable through maxshape
with self.assertRaises(IndexError):
ChunkBuffer.load(stream, "data", nchunks)
# outside of maxshape, not discoverable through maxshape
with self.assertRaises(IndexError):
ChunkBuffer.load(stream, "data", (nchunks[0] + 1,) + nchunks[1:])
# within maxshape but not stored
with h5.File(stream, "w") as h5f:
h5f.create_dataset("partially_filled", shape=total_shape, chunks=chunk_shape,
maxshape=tuple(n * 2 for n in total_shape))
with self.assertRaises(IndexError):
ChunkBuffer.load(stream, "partially_filled", (nchunks[0] + 1,) + nchunks[1:])
# invalid, contiguous dataset
stream = BytesIO()
with h5.File(stream, "w") as h5f:
h5f.create_dataset("data", data=np.random.uniform(-10, 10, (5, 3)))
with self.assertRaises(RuntimeError):
ChunkBuffer.load(stream, "data", (0, 0))
@repeat(N_REPEAT_TEST_CASE)
def test_dataset_creation(self):
for ndim in range(1, 4):
max_nchunks = random_int_tuple(1, 4, ndim)
for chunk_index in product_range(max_nchunks):
chunk_shape = random_int_tuple(1, 10, ndim)
for fill_level in chain((None,), product_range((1,) * ndim, chunk_shape)):
if fill_level is None:
total_shape = tuple(n * (i + 1)
for n, i in zip(chunk_shape, chunk_index))
else:
total_shape = tuple(n * i + fl
for n, i, fl in zip(chunk_shape, chunk_index, fill_level))
chunk_data = np.random.uniform(-10, 10, chunk_shape).astype(random.choice((float, int)))
stream = BytesIO()
buffer = ChunkBuffer(stream, "data", data=chunk_data, maxshape=(None,) * ndim)
buffer.select(chunk_index)
buffer.create_dataset(stream if random.random() < 0.5 else None, filemode="w",
write=True, fill_level=fill_level)
with h5.File(stream, "r") as h5f:
dataset = h5f["data"]
self.assertEqual(dataset.shape, total_shape)
self.assertEqual(dataset.chunks, chunk_shape)
self.assertEqual(dataset.dtype, chunk_data.dtype)
self.assertEqual(dataset.maxshape, buffer.maxshape)
fill_slices = tuple(map(slice, fill_level)) if fill_level is not None else ...
np.testing.assert_allclose(ChunkBuffer.load(h5f, "data", chunk_index).data[fill_slices],
chunk_data[fill_slices])
@repeat(N_REPEAT_TEST_CASE)
def test_select(self):
for ndim in range(1, 5):
chunk_shape = random_int_tuple(1, 10, ndim)
nchunks = random_int_tuple(1, 4, ndim)
maxshape = tuple(f * n if random.random() < 0.25 else None
for f, n in zip(nchunks, chunk_shape))
buffer = ChunkBuffer("file", "data", shape=chunk_shape, maxshape=maxshape)
# valid calls
for chunk_index in product_range(nchunks):
buffer.select(chunk_index)
self.assertEqual(buffer.chunk_index, chunk_index)
def random_chunk_index():
return tuple(map(lambda n: random.randint(0, n - 1), nchunks))
# invalid number of dimensions
too_many_dims = random_chunk_index() + (0,)
with self.assertRaises(IndexError):
buffer.select(too_many_dims)
too_few_dims = random_chunk_index()[:-1]
with self.assertRaises(IndexError):
buffer.select(too_few_dims)
# index out of bounds
for dim in range(ndim):
chunk_index = random_chunk_index()
negative = chunk_index[:dim] + (random.randint(-10, -1),) + chunk_index[dim + 1:]
with self.assertRaises(IndexError):
buffer.select(negative)
if maxshape[dim] is not None:
too_large = chunk_index[:dim] + (nchunks[dim] + random.randint(1, 10),) + chunk_index[dim + 1:]
with self.assertRaises(IndexError):
buffer.select(too_large)
@repeat(N_REPEAT_TEST_CASE)
def test_read(self):
for ndim in range(1, 4):
chunk_shape = random_int_tuple(1, 10, ndim)
nchunks = random_int_tuple(1, 4, ndim)
for fill_level in chain((None,), product_range((1,) * ndim, chunk_shape)):
if fill_level is None:
total_shape = tuple(n * c for n, c in zip(chunk_shape, nchunks))
else:
total_shape = tuple(n * (c - 1) + fl
for n, c, fl in zip(chunk_shape, nchunks, fill_level))
array = np.random.uniform(-10, 10, total_shape).astype(random.choice((int, float)))
stream = BytesIO()
with h5.File(stream, "w") as h5f:
h5f.create_dataset("data", data=array, chunks=chunk_shape, maxshape=(None,) * ndim)
def validate_fill_level(chunk_index, actual_fill_level):
target_fill_level = chunk_shape if fill_level is None else fill_level
for idx, n, length, actual, target in zip(chunk_index, nchunks, chunk_shape,
actual_fill_level, target_fill_level):
if idx == n - 1:
self.assertEqual(actual, target)
else:
self.assertEqual(actual, length)
# valid
buffer = ChunkBuffer(stream, "data", shape=chunk_shape, dtype=array.dtype)
for chunk_index in product_range(nchunks):
# separate select / read
buffer.select(chunk_index)
read_fill_level = buffer.read()
validate_fill_level(chunk_index, read_fill_level)
fill_slices = tuple(map(slice, fill_level)) if fill_level is not None else ...
np.testing.assert_allclose(buffer.data[fill_slices],
array[_chunk_slices(chunk_index, chunk_shape)][fill_slices])
# read with index arg
buffer.data[...] = np.random.uniform(-20, 20, chunk_shape).astype(buffer.dtype)
read_fill_level = buffer.read(chunk_index)
validate_fill_level(chunk_index, read_fill_level)
np.testing.assert_allclose(buffer.data[fill_slices],
array[_chunk_slices(chunk_index, chunk_shape)][fill_slices])
# index out of bounds
with self.assertRaises(IndexError):
buffer.read(nchunks)
# dataset does not exist
buffer = ChunkBuffer(stream, "wrong_name", shape=chunk_shape, dtype=array.dtype)
with self.assertRaises(KeyError):
buffer.read()
# invalid chunk shape
buffer = ChunkBuffer(stream, "data", shape=tuple(random.randint(1, 10) + n for n in chunk_shape))
with self.assertRaises(RuntimeError):
buffer.read()
# invalid datatype
buffer = ChunkBuffer(stream, "data", shape=chunk_shape, dtype=np.float32)
with self.assertRaises(RuntimeError):
buffer.read()
# invalid maxshape
buffer = ChunkBuffer(stream, "data", shape=chunk_shape, dtype=array.dtype, maxshape=chunk_shape)
with self.assertRaises(RuntimeError):
buffer.read()
@repeat(N_REPEAT_TEST_CASE)
def test_write_overwrite(self):
for ndim in range(1, 4):
chunk_shape = random_int_tuple(1, 10, ndim)
nchunks = random_int_tuple(1, 4, ndim)
total_shape = tuple(n * c for n, c in zip(chunk_shape, nchunks))
stream = BytesIO()
chunk = np.random.uniform(-10, 10, chunk_shape).astype(random.choice((int, float)))
file_content = np.random.uniform(-10, 10, total_shape).astype(chunk.dtype)
with h5.File(stream, "w") as h5f:
h5f.create_dataset("data", data=file_content, chunks=chunk_shape, maxshape=(None,) * ndim)
buffer = ChunkBuffer(stream, "data", data=chunk)
# valid indices
for chunk_index in product_range(nchunks):
with h5.File(stream, "a") as h5f:
h5f["data"][...] = file_content
buffer.select(chunk_index)
buffer.write(must_exist=True)
desired_file_content = file_content.copy()
desired_file_content[_chunk_slices(chunk_index, chunk_shape)] = chunk
with h5.File(stream, "r") as h5f:
np.testing.assert_allclose(h5f["data"][()], desired_file_content)
# index out of bounds
for dim in range(ndim):
chunk_index = tuple(map(lambda n: random.randint(0, n - 1), nchunks))
chunk_index = chunk_index[:dim] + (nchunks[dim] + random.randint(1, 10),) + chunk_index[dim + 1:]
buffer.select(chunk_index)
with self.assertRaises(RuntimeError):
buffer.write(must_exist=True)
@repeat(N_REPEAT_TEST_CASE)
def test_write_extend(self):
for ndim in range(1, 4):
chunk_shape = random_int_tuple(1, 10, ndim)
nchunks = random_int_tuple(1, 5, ndim)
chunks = []
stream = BytesIO()
with h5.File(stream, "w") as h5f:
h5f.create_dataset("data", shape=chunk_shape, dtype=float,
chunks=chunk_shape, maxshape=(None,) * ndim)
buffer = ChunkBuffer(stream, "data", shape=chunk_shape, dtype=float)
for chunk_index in product_range(nchunks):
chunks.append((_chunk_slices(chunk_index, chunk_shape), np.random.uniform(-10, 10, chunk_shape)))
buffer.select(chunk_index)
buffer.data[...] = chunks[-1][1]
buffer.write(must_exist=False)
with h5.File(stream, "r") as f:
dataset = f["data"]
for chunk_slice, expected in chunks:
np.testing.assert_allclose(dataset[chunk_slice], expected)
def test_real_files(self):
with TemporaryDirectory() as tempdir:
filename = Path(tempdir) / "test_file.h5"
chunk_shape = (1, 2, 3)
array = np.random.uniform(-10, 10, chunk_shape)
buffer = ChunkBuffer(filename, "data", data=array)
buffer.create_dataset(filemode="w")
self.assertTrue(filename.exists())
with h5.File(filename, "r") as h5f:
np.testing.assert_allclose(h5f["data"][()], array)
# extend dataset with stored filename
array = np.random.uniform(-10, 10, chunk_shape)
buffer.select((1, 0, 0))
buffer.data[...] = array
buffer.write(must_exist=False)
with h5.File(filename, "r") as h5f:
np.testing.assert_allclose(h5f["data"][1:, :, :], array)
# extend dataset with passed in filename
array = np.random.uniform(-10, 10, chunk_shape)
buffer.select((1, 1, 0))
buffer.data[...] = array
buffer.write(must_exist=False, file=filename)
with h5.File(filename, "r") as h5f:
np.testing.assert_allclose(h5f["data"][1:, 2:, :], array)
# extend dataset with passed in dataset
array = np.random.uniform(-10, 10, chunk_shape)
buffer.select((1, 0, 1))
buffer.data[...] = array
with h5.File(filename, "r+") as h5f:
dataset = h5f["data"]
buffer.write(must_exist=False, dataset=dataset)
np.testing.assert_allclose(dataset[1:, :2, 3:], array)
# wrong filename
with self.assertRaises(ValueError):
buffer.write(must_exist=False, file="wrong_file.h5")
# wrong dataset
with h5.File(filename, "a") as h5f:
wrong_dataset = h5f.create_dataset("wrong_data", (1,))
with self.assertRaises(ValueError):
buffer.write(must_exist=False, dataset=wrong_dataset)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jl-wynen/perihelion-precession",
"score": 4
} |
#### File: jl-wynen/perihelion-precession/background.py
```python
from itertools import chain
import numpy as np
import sim
# image dimensions
SCREEN_WIDTH = 16
SCREEN_HEIGHT = 16
WORLD_WIDTH = 16
WORLD_HEIGHT = 16
# grid-lines
HLINES, VLINES = sim.make_grid((-WORLD_WIDTH/2, -WORLD_HEIGHT/2),
(WORLD_WIDTH/2, WORLD_HEIGHT/2),
nlines=(14, 14), resolution=(50, 50))
BACKGROUND_COLOUR = "aiphidarkachrom!50!black"
GRID_COLOUR = "white!45!aiphidarkachrom"
TRAJECTORY_COLOUR = "white!30!aiphidarkachrom"
MERCURY_COLOUR = "aiphired!45!aiphidarkachrom"
SUN_COLOUR = "aiphiyellow!35!aiphidarkachrom"
def draw_grid(img, lines, centre, rs):
# maximum radius at which a line is shown
max_radius = (SCREEN_WIDTH+SCREEN_HEIGHT)/2 / 3.3
for line in lines:
line = sim.flamm_projection(line, centre, rs, np.array((WORLD_WIDTH, WORLD_HEIGHT)))
for start, end in sim.neighbours(line):
radius = np.linalg.norm((start+end)/2 - centre)
# fraction of GRID_COLOUR to use for this segment
frac = 100 - min(radius / max_radius, 1) * 100
img.line([start, end], draw=f'{GRID_COLOUR}!{frac}!{BACKGROUND_COLOUR}', lw=1)
def draw_trajectory(img, trajectory):
T = len(trajectory)
for t, (start, end) in enumerate(sim.neighbours(trajectory)):
img.line([start, end], draw=f"{TRAJECTORY_COLOUR}!{t/T*100}!darkachrom", lw=4)
def evolve(mercury, nsteps, params):
trajectory = [mercury.x]
for _ in range(nsteps):
mercury = sim.advance(mercury, **params)
trajectory.append(mercury.x)
return mercury, trajectory
def main():
img = sim.tikz.Tikz(sim.Transform((-WORLD_WIDTH/2, -WORLD_HEIGHT/2),
(WORLD_WIDTH/2, WORLD_HEIGHT/2),
(0, 0),
(0+SCREEN_WIDTH, 0+SCREEN_HEIGHT)))
mercury = sim.CBody.mercury()
sun = sim.CBody.sun()
integrator_params = {"length": 2.0 * np.linalg.norm(mercury.v) / mercury.acc / 6,
"nsteps": 10,
"alpha": 5e6,
"beta": 0.0}
mercury, trajectory = evolve(mercury, 153*3, integrator_params)
draw_grid(img, chain(HLINES, VLINES), np.array((0, 0)), 0.02)
draw_trajectory(img, trajectory)
img.circle(sun.x, 1, fill=SUN_COLOUR)
img.circle(mercury.x, 0.4, fill=MERCURY_COLOUR)
sim.tikz.render(img, "background.pdf", "background.tex")
if __name__ == "__main__":
main()
``` |
{
"source": "jl-wynen/pipelines",
"score": 2
} |
#### File: pipelines/tools/build_cpp.py
```python
import os
import argparse
import shutil
import subprocess
import multiprocessing
import sys
parser = argparse.ArgumentParser(description='Build C++ library and run tests')
parser.add_argument('--prefix', default='install')
parser.add_argument('--source_dir', default='.')
parser.add_argument('--build_dir', default='build')
def run_command(cmd, shell):
"""
Run a command (supplied as a list) using subprocess.check_call
"""
os.write(1, "{}\n".format(' '.join(cmd)).encode())
return subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=shell)
def main(prefix='install', build_dir='build', source_dir='.'):
"""
Platform-independent function to run cmake, build, install and C++ tests.
"""
# Get the platform name: 'linux', 'darwin' (osx), or 'win32'.
platform = sys.platform
# Set up absolute directory paths
source_dir = os.path.abspath(source_dir)
prefix = os.path.abspath(prefix)
build_dir = os.path.abspath(build_dir)
# Default options
shell = False
parallel_flag = '-j{}'.format(multiprocessing.cpu_count())
build_config = ''
# Some flags use a syntax with a space separator instead of '='
use_space = ['-G', '-A']
# Default cmake flags
cmake_flags = {
'-G': 'Ninja',
'-DPYTHON_EXECUTABLE': shutil.which("python"),
'-DCMAKE_INSTALL_PREFIX': prefix,
'-DWITH_CTEST': 'OFF',
'-DCMAKE_INTERPROCEDURAL_OPTIMIZATION': 'ON'
}
if platform == 'darwin':
cmake_flags.update({'-DCMAKE_INTERPROCEDURAL_OPTIMIZATION': 'OFF'})
osxversion = os.environ.get('OSX_VERSION')
if osxversion is not None:
cmake_flags.update({
'-DCMAKE_OSX_DEPLOYMENT_TARGET':
osxversion,
'-DCMAKE_OSX_SYSROOT':
os.path.join('/Applications', 'Xcode.app', 'Contents',
'Developer', 'Platforms', 'MacOSX.platform',
'Developer', 'SDKs',
'MacOSX{}.sdk'.format(osxversion))
})
if platform == 'win32':
cmake_flags.update({'-G': 'Visual Studio 16 2019', '-A': 'x64'})
shell = True
build_config = 'Release'
# Additional flags for --build commands
build_flags = [parallel_flag]
if len(build_config) > 0:
build_flags += ['--config', build_config]
# Parse cmake flags
flags_list = []
for key, value in cmake_flags.items():
if key in use_space:
flags_list += [key, value]
else:
flags_list.append('{}={}'.format(key, value))
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
# Run cmake
run_command(['cmake'] + flags_list + [source_dir], shell=shell)
# Show cmake settings
run_command(['cmake', '-B', '.', '-S', source_dir, '-LA'], shell=shell)
# Compile C++ tests and python library
for target in ['pipelines-test', 'install']:
run_command(['cmake', '--build', '.', '--target', target] +
build_flags,
shell=shell)
# Run C++ tests
run_command([os.path.join('.', build_config, 'pipelines-test')],
shell=shell)
if __name__ == '__main__':
args = parser.parse_args()
main(prefix=args.prefix,
build_dir=args.build_dir,
source_dir=args.source_dir)
``` |
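The script is normally driven through argparse, but `main` can also be called directly. A minimal sketch of both uses follows; the programmatic import assumes the `tools` directory is on `sys.path`, which is not guaranteed by the repository layout.
```python
# command-line use, e.g. from CI:
#   python tools/build_cpp.py --prefix=install --build_dir=build --source_dir=.

# programmatic use, assuming tools/ is on sys.path:
from build_cpp import main

main(prefix="install", build_dir="build", source_dir=".")
```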
{
"source": "jl-wynen/test-loaddifcal-ci",
"score": 2
} |
#### File: jl-wynen/test-loaddifcal-ci/test.py
```python
from mantid.simpleapi import DeleteWorkspace, LoadDiffCal
from mantid.api import AnalysisDataService
import pooch
def get_file():
return pooch.create(
path=pooch.os_cache('test-loaddiffcal'),
base_url='https://public.esss.dk/groups/scipp/ess/powgen/1/',
registry={
'PG3_FERNS_d4832_2011_08_24.cal': 'md5:c181221ebef9fcf30114954268c7a6b6'
}).fetch('PG3_FERNS_d4832_2011_08_24.cal')
def main():
ws = LoadDiffCal(Filename=str(get_file()),
InstrumentFilename='POWGEN_Definition_2011-02-25.xml',
WorkspaceName='ws')
print('loaded')
for name in AnalysisDataService.Instance().getObjectNames():
if name.startswith('ws'):
DeleteWorkspace(name)
print('deleted')
if __name__ == '__main__':
main()
``` |
{
"source": "jl-wynen/Workshops",
"score": 2
} |
#### File: Workshops/DRAM-hypothesis-demo-2022-03/h5io.py
```python
from io import BytesIO
from hypothesis import given, settings
from scipp.testing import strategies as scst
import scipp as sc
@given(scst.dataarrays())
@settings(max_examples=300)
def test_data_array_hdf5(da):
f = BytesIO()
da.to_hdf5(filename=f)
f.seek(0)
loaded = sc.io.open_hdf5(filename=f)
assert sc.utils.isnear(loaded, da, equal_nan=True)
```
#### File: Workshops/ISIS-scipp-demo-2022-04/prepare_exercise_data_goes.py
```python
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import pooch
import scipp as sc
from common import parse_datetimes
DATA_DIR = Path(__file__).parent / "data"
def flare_list_files():
registry = pooch.create(
path=DATA_DIR / "pooch",
base_url="https://hesperia.gsfc.nasa.gov/goes/goes_event_listings/",
registry={
"goes_xray_event_list_1975.txt": "md5:3b86a114ff8b89f022099e48a45490f1",
"goes_xray_event_list_1976.txt": "md5:686996b33fa10843349511534cede792",
"goes_xray_event_list_1977.txt": "md5:59af12be270031c75061f05f61e439cd",
"goes_xray_event_list_1978.txt": "md5:0f54db2c616667f8f098b77ff94c2dc7",
"goes_xray_event_list_1979.txt": "md5:f1fc69b5184298b0d161f0e2db517393",
"goes_xray_event_list_1980.txt": "md5:a0b1989cb085765fb7e05d661e5872ce",
"goes_xray_event_list_1981.txt": "md5:1d803bf83d34c8e98ec0c54e0aa7306f",
"goes_xray_event_list_1982.txt": "md5:e15b06083fade2699394836b690a7950",
"goes_xray_event_list_1983.txt": "md5:61c3456bf89aafe48cd33b4339c3908a",
"goes_xray_event_list_1984.txt": "md5:674ad932b5c4d404d32332617b1b4def",
"goes_xray_event_list_1985.txt": "md5:5bbdf63229e44e4aed03b21a90bb5058",
"goes_xray_event_list_1986.txt": "md5:198387ed43bc3564ca6f9387e6874591",
"goes_xray_event_list_1987.txt": "md5:d20e16b27eff7e7afb8e3e045a05a32d",
"goes_xray_event_list_1988.txt": "md5:990e8c2b2ddc9c41ca458ce75d115323",
"goes_xray_event_list_1989.txt": "md5:d1b36a802c9f4213b9e7862b4e0a7d70",
"goes_xray_event_list_1990.txt": "md5:fb73a5462c172cee115927901be45bf1",
"goes_xray_event_list_1991.txt": "md5:1b858943914240e13815a7d0fdeba25e",
"goes_xray_event_list_1992.txt": "md5:eb1702f6494e917a586379884e821cab",
"goes_xray_event_list_1993.txt": "md5:bb56c16c3d4112647af913907405982c",
"goes_xray_event_list_1994.txt": "md5:b30d744720cf03faa10c4c517bfe9b1f",
"goes_xray_event_list_1995.txt": "md5:a52e6dacdf7daebd587affaf50e34262",
"goes_xray_event_list_1996.txt": "md5:072d8fbb1e904528b9794dd54f703eba",
"goes_xray_event_list_1997.txt": "md5:9ab0b933143569b221b2b880c2ad0934",
"goes_xray_event_list_1998.txt": "md5:1823f627ada9d74e099dd0e4eecd8be9",
"goes_xray_event_list_1999.txt": "md5:6eb71345ef67e88e9cda3a0f3f846f18",
"goes_xray_event_list_2000.txt": "md5:06a7bb098139afdc8b98cce169e0ff13",
"goes_xray_event_list_2001.txt": "md5:f0468af28f06b0697ea72bcc9ad58115",
"goes_xray_event_list_2002.txt": "md5:a7cbface94c9b579774abe04b37e404d",
"goes_xray_event_list_2003.txt": "md5:e23c6ed9c83ad338bb214b059f484294",
"goes_xray_event_list_2004.txt": "md5:05a35e02e8263a6074f67e3bfad33f4a",
"goes_xray_event_list_2005.txt": "md5:a905049364b0d74b9653b6c02117f967",
"goes_xray_event_list_2006.txt": "md5:15dea113fa50c27691ddb3146bb67fde",
"goes_xray_event_list_2007.txt": "md5:f4dc9b1582d37b0b234444c2c7d0a250",
"goes_xray_event_list_2008.txt": "md5:52d7be510eeb98e54289b4c394a6bd86",
"goes_xray_event_list_2009.txt": "md5:433ae27934de04872b309384861e4715",
"goes_xray_event_list_2010.txt": "md5:fd36e382b14cf83782039ea8e5dab48f",
"goes_xray_event_list_2011.txt": "md5:ccaff65573afd1bf79d4ee69fa522d34",
"goes_xray_event_list_2012.txt": "md5:863df2313cd108fb12cc32e80c7c1f7a",
"goes_xray_event_list_2013.txt": "md5:719d8a73de96295cf123fcc80020d7ad",
"goes_xray_event_list_2014.txt": "md5:e2ffcf5386a702eeadd6519cd7ac28b2",
"goes_xray_event_list_2015.txt": "md5:fcbfd4aa81cf8e6fb72b6eddcab15d4d",
"goes_xray_event_list_2016.txt": "md5:8104041d10d3a2e6db74de0daecdc8ab",
"goes_xray_event_list_2017.txt": "md5:7e560e6e106d26cba50592bcc1eb8080",
"goes_xray_event_list_2018.txt": "md5:2f1e7ef54202ac8948de38151c9a7e60",
"goes_xray_event_list_2019.txt": "md5:33433e75f48f080f70c8268a58b3b44a",
"goes_xray_event_list_2020.txt": "md5:b6364df2b0fd837fe07100999b1dd1da",
"goes_xray_event_list_2021.txt": "md5:4fe373bc7896457f300955d687c107a7",
},
)
return [registry.fetch(name) for name in registry.registry]
def parse_position(s) -> Tuple[float, float]:
y_sign = +1 if s[0] == "N" else -1
x_sign = +1 if s[3] == "E" else -1
y = y_sign * float(s[1:3])
x = x_sign * float(s[4:6])
return x, y
@dataclass
class Entry:
peak_time: np.datetime64
duration: int
class_: str
x: float
y: float
region: int
@classmethod
def parse(cls, s) -> Optional[Entry]:
fields = [c for c in s.strip().split(" ") if c]
if len(fields) != 7:
return None
times = parse_datetimes(*fields[0:4])
pos = parse_position(fields[5])
return cls(
peak_time=times["peak_time"],
duration=times["duration"],
class_=fields[4],
x=pos[0],
y=pos[1],
region=int(fields[6]),
)
def load_txt_file(fname):
peak_time = []
duration = []
class_ = []
x_pos = []
y_pos = []
region = []
with open(fname, "r") as f:
for _ in range(6):
f.readline()
while line := f.readline().strip():
if (entry := Entry.parse(line)) is None:
continue
peak_time.append(entry.peak_time)
duration.append(entry.duration)
class_.append(entry.class_)
x_pos.append(entry.x)
y_pos.append(entry.y)
region.append(entry.region)
return sc.DataArray(
sc.ones(sizes={"event": len(peak_time)}, unit="count"),
coords={
"time": sc.array(dims=["event"], values=peak_time, unit="s"),
"duration": sc.array(dims=["event"], values=duration, unit="s"),
"x": sc.array(dims=["event"], values=x_pos, unit="asec"),
"y": sc.array(dims=["event"], values=y_pos, unit="asec"),
},
attrs={
"class": sc.array(dims=["event"], values=class_),
"region": sc.array(dims=["event"], values=region),
},
)
def main():
data = [load_txt_file(fname) for fname in flare_list_files()]
full = sc.concat(data, dim="event")
full.to_hdf5(DATA_DIR / "goes_flares.h5")
if __name__ == "__main__":
main()
``` |
{
"source": "jlwysf/onduty",
"score": 2
} |
#### File: site-packages/annoying/exceptions.py
```python
class Redirect(Exception):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
```
#### File: billiard/py2/reduction.py
```python
from __future__ import absolute_import
import os
import sys
import socket
import threading
from pickle import Pickler
from .. import current_process
from .._ext import _billiard, win32
from ..util import register_after_fork, debug, sub_debug
is_win32 = sys.platform == 'win32'
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3
if not(is_win32 or is_pypy or is_py3k or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
close = win32.CloseHandle if sys.platform == 'win32' else os.close
__all__ = []
# globals set later
_listener = None
_lock = None
_cache = set()
#
# ForkingPickler
#
class ForkingPickler(Pickler): # noqa
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m): # noqa
if m.__self__ is None:
return getattr, (m.__self__.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
#
# Platform specific definitions
#
if sys.platform == 'win32':
# XXX Should this subprocess import be here?
import _subprocess # noqa
def send_handle(conn, handle, destination_pid):
from ..forking import duplicate
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid): # noqa
_billiard.sendfd(conn.fileno(), handle)
def recv_handle(conn): # noqa
return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
from ..connection import Listener
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from ..util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
sub_warning('thread for sharing handles raised exception',
exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
from ..forking import Popen, duplicate
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
from ..connection import Client
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.Connection(
handle, readable=readable, writable=writable
)
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.PipeConnection(
handle, readable=readable, writable=writable
)
```
#### File: django_tables2/columns/checkboxcolumn.py
```python
from __future__ import absolute_import, unicode_literals
from django.utils.safestring import mark_safe
from django_tables2.utils import AttributeDict
import warnings
from .base import Column, library
@library.register
class CheckBoxColumn(Column):
"""
A subclass of `.Column` that renders as a checkbox form input.
This column allows a user to *select* a set of rows. The selection
information can then be used to apply some operation (e.g. "delete") onto
the set of objects that correspond to the selected rows.
The value that is extracted from the :term:`table data` for this column is
used as the value for the checkbox, i.e. ``<input type="checkbox"
value="..." />``
This class implements some sensible defaults:
- HTML input's ``name`` attribute is the :term:`column name` (can override
via *attrs* argument).
- *orderable* defaults to `False`.
.. note::
You'd expect that you could select multiple checkboxes in the rendered
table and then *do something* with that. This functionality isn't
implemented. If you want something to actually happen, you'll need to
implement that yourself.
In addition to *attrs* keys supported by `.Column`, the following are
available:
- *input* -- ``<input>`` elements in both ``<td>`` and ``<th>``.
- *th__input* -- Replaces *input* attrs in header cells.
- *td__input* -- Replaces *input* attrs in body cells.
"""
def __init__(self, attrs=None, **extra):
# For backwards compatibility, passing in a normal dict effectively
# should assign attributes to the `<input>` tag.
valid = set(("input", "th__input", "td__input", "th", "td", "cell"))
if attrs and not set(attrs) & set(valid):
# if none of the keys in attrs are actually valid, assume it's some
# old code that should be interpreted as {"td__input": ...}
warnings.warn('attrs keys must be one of %s, interpreting as {"td__input": %s}'
% (', '.join(valid), attrs), DeprecationWarning)
attrs = {"td__input": attrs}
# This is done for backwards compatibility too; there used to be a
# ``header_attrs`` argument, but this has been deprecated. We'll
# maintain it for a while by translating it into ``head.checkbox``.
if "header_attrs" in extra:
warnings.warn('header_attrs argument is deprecated, '
'use attrs={"th__input": ...} instead',
DeprecationWarning)
attrs.setdefault('th__input', {}).update(extra.pop('header_attrs'))
kwargs = {'orderable': False, 'attrs': attrs}
kwargs.update(extra)
super(CheckBoxColumn, self).__init__(**kwargs)
@property
def header(self):
default = {'type': 'checkbox'}
general = self.attrs.get('input')
specific = self.attrs.get('th__input')
attrs = AttributeDict(default, **(specific or general or {}))
return mark_safe('<input %s/>' % attrs.as_html())
def render(self, value, bound_column): # pylint: disable=W0221
default = {
'type': 'checkbox',
'name': bound_column.name,
'value': value
}
general = self.attrs.get('input')
specific = self.attrs.get('td__input')
attrs = AttributeDict(default, **(specific or general or {}))
return mark_safe('<input %s/>' % attrs.as_html())
```
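A brief usage sketch for the column above; the table and field names are invented for the illustration, and the `th__input`/`td__input` keys follow the attrs keys listed in the docstring.
```python
import django_tables2 as tables

class AccountTable(tables.Table):
    # one checkbox per row, carrying the row's primary key as its value
    selection = tables.CheckBoxColumn(accessor="pk",
                                      attrs={"th__input": {"id": "select-all"},
                                             "td__input": {"class": "row-select"}})
    name = tables.Column()
```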
#### File: django_tables2/columns/urlcolumn.py
```python
from __future__ import absolute_import, unicode_literals
from django.db import models
from .base import library
from .linkcolumn import BaseLinkColumn
@library.register
class URLColumn(BaseLinkColumn):
"""
Renders URL values as hyperlinks.
Example::
>>> class CompaniesTable(tables.Table):
... www = tables.URLColumn()
...
>>> table = CompaniesTable([{"www": "http://google.com"}])
>>> table.rows[0]["www"]
u'<a href="http://google.com">http://google.com</a>'
Additional attributes for the ``<a>`` tag can be specified via
``attrs['a']``.
"""
def render(self, value):
return self.render_link(value, value)
@classmethod
def from_field(cls, field):
if isinstance(field, models.URLField):
return cls(verbose_name=field.verbose_name)
```
#### File: site-packages/django_tables2_simplefilter/views.py
```python
from django_tables2 import SingleTableView
def F(field, verbose_name, values_list):
for f in values_list:
if (type(f[0]) != type('') and type(f[0]) != type(u'')) or \
(type(f[1]) != type('') and type(f[1]) != type(u'')):
raise Exception('Filter values list should have string values for both option name and value')
return dict(field=field, verbose_name=verbose_name, values_list=values_list)
class FilteredSingleTableView(SingleTableView):
"""
Add filtering options to SingleTableView. Define list of filters in the Table
subclass (not in Table.Meta). Likely not secure.
List of filters should be list of values returned by the F function. The
values list passed into F should be a tuple of verbose value and actual
value. For example,
import django_tables2 as tables
class MyTable(tables.Table):
...
filters = (F('field1','Filter name',values_list=(('True','1'),('False','0'))),
F('field2','Another filter',values_list=[ (str(x), x.name) for x in SomeModel.objects.all()]))
In your template, include django_tables2_simplefilter/filter_selection.html,
passing in the filters variable. For example,
{% include "django_tables2_simplefilter/filter_selection.html" with filters=filters only %}
CSS classes are: filter-form, filter-item, filter-name, filter-selection,
filter-actions, filter-submit, filter-reset
"""
def get_queryset(self):
q = super(FilteredSingleTableView, self).get_queryset()
if hasattr(self.table_class, 'filters'):
h = {}
for f in self.table_class.filters:
field = f['field']
if field in self.request.GET and self.request.GET[field]:
h[field] = self.request.GET[field]
q = q.filter(**h)
return q
def get_context_data(self, **kwargs):
c = super(FilteredSingleTableView, self).get_context_data(**kwargs)
if hasattr(self.table_class, 'filters'):
h = []
for f in self.table_class.filters:
v = {}
field = f['field']
v = dict(**f)
v['selected'] = None
if field in self.request.GET and self.request.GET[field] != "":
v['selected'] = self.request.GET[field]
h.append(v)
c['filters'] = {}
c['filters']['filters'] = h
# now add base params for sorting, pagination, etc.
table = self.get_table()
base_params = [table.prefixed_order_by_field,
table.prefixed_page_field,
table.prefixed_per_page_field]
base_param_values = []
for p in base_params:
if p in self.request.GET and self.request.GET[p] != "":
base_param_values.append((p, self.request.GET[p]))
c['filters']['base_params'] = base_param_values
return c
```
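Putting the pieces together might look roughly like the sketch below; the model, field and template names are placeholders, and `F` is imported from the same module as the view.
```python
import django_tables2 as tables
from django_tables2_simplefilter.views import F, FilteredSingleTableView
from myapp.models import Item  # placeholder model for the sketch

class ItemTable(tables.Table):
    name = tables.Column()
    active = tables.Column()
    # each filter: field name, verbose name, and (label, value) string pairs
    filters = (F("active", "Active", values_list=(("Yes", "1"), ("No", "0"))),)

class ItemListView(FilteredSingleTableView):
    model = Item
    table_class = ItemTable
    template_name = "myapp/item_list.html"
```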
#### File: site-packages/hgext/record.py
```python
from mercurial.i18n import _
from mercurial import cmdutil, commands, extensions
from mercurial import util
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
@command("record",
# same options as commit + white space diff options
[c for c in commands.table['^commit|ci'][1][:]
if c[1] != "interactive"] + commands.diffwsopts,
_('hg record [OPTION]... [FILE]...'))
def record(ui, repo, *pats, **opts):
'''interactively select changes to commit
If a list of files is omitted, all changes reported by :hg:`status`
will be candidates for recording.
See :hg:`help dates` for a list of formats valid for -d/--date.
You will be prompted for whether to record changes to each
modified file, and for files with multiple changes, for each
change to use. For each query, the following responses are
possible::
y - record this change
n - skip this change
e - edit this change manually
s - skip remaining changes to this file
f - record remaining changes to this file
d - done, skip remaining changes and files
a - record all changes to all remaining files
q - quit, recording no changes
? - display help
This command is not available when committing a merge.'''
opts["interactive"] = True
commands.commit(ui, repo, *pats, **opts)
def qrefresh(origfn, ui, repo, *pats, **opts):
if not opts['interactive']:
return origfn(ui, repo, *pats, **opts)
mq = extensions.find('mq')
def committomq(ui, repo, *pats, **opts):
# At this point the working copy contains only changes that
# were accepted. All other changes were reverted.
# We can't pass *pats here since qrefresh will undo all other
# changed files in the patch that aren't in pats.
mq.refresh(ui, repo, **opts)
# backup all changed files
cmdutil.dorecord(ui, repo, committomq, 'qrefresh', True,
cmdutil.recordfilter, *pats, **opts)
# This command registration is replaced during uisetup().
@command('qrecord',
[],
_('hg qrecord [OPTION]... PATCH [FILE]...'),
inferrepo=True)
def qrecord(ui, repo, patch, *pats, **opts):
'''interactively record a new patch
See :hg:`help qnew` & :hg:`help record` for more information and
usage.
'''
try:
mq = extensions.find('mq')
except KeyError:
raise util.Abort(_("'mq' extension not loaded"))
repo.mq.checkpatchname(patch)
def committomq(ui, repo, *pats, **opts):
opts['checkname'] = False
mq.new(ui, repo, patch, *pats, **opts)
cmdutil.dorecord(ui, repo, committomq, 'qnew', False,
cmdutil.recordfilter, *pats, **opts)
def qnew(origfn, ui, repo, patch, *args, **opts):
if opts['interactive']:
return qrecord(ui, repo, patch, *args, **opts)
return origfn(ui, repo, patch, *args, **opts)
def uisetup(ui):
try:
mq = extensions.find('mq')
except KeyError:
return
cmdtable["qrecord"] = \
(qrecord,
# same options as qnew, but copy them so we don't get
# -i/--interactive for qrecord and add white space diff options
mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
_('hg qrecord [OPTION]... PATCH [FILE]...'))
_wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
_wrapcmd('qrefresh', mq.cmdtable, qrefresh,
_("interactively select changes to refresh"))
def _wrapcmd(cmd, table, wrapfn, msg):
entry = extensions.wrapcommand(table, cmd, wrapfn)
entry[1].append(('i', 'interactive', None, msg))
```
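`uisetup()` above registers `qrecord` and then grafts an `-i/--interactive` flag onto `qnew` and `qrefresh` through `_wrapcmd()`. The same wrapping pattern works for any command; a minimal sketch, assuming a Mercurial installation is available (the wrapped behaviour and the extra `--banner` flag are invented for illustration):

```python
from mercurial import commands, extensions

def uisetup(ui):
    def wrappedstatus(origfn, ui, repo, *pats, **opts):
        # Delegate to the original command, exactly like qnew()/qrefresh()
        # above delegate when --interactive was not given.
        if opts.get('banner'):
            ui.write("status follows\n")
        return origfn(ui, repo, *pats, **opts)

    entry = extensions.wrapcommand(commands.table, 'status', wrappedstatus)
    # Append a new flag to the wrapped command's option table, as _wrapcmd()
    # does with ('i', 'interactive', None, msg).
    entry[1].append(('', 'banner', None, 'print a banner before the status'))
```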
#### File: site-packages/hgext/share.py
```python
from mercurial.i18n import _
from mercurial import cmdutil, hg, util, extensions, bookmarks
from mercurial.hg import repository, parseurl
import errno
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
@command('share',
[('U', 'noupdate', None, _('do not create a working directory')),
('B', 'bookmarks', None, _('also share bookmarks'))],
_('[-U] [-B] SOURCE [DEST]'),
norepo=True)
def share(ui, source, dest=None, noupdate=False, bookmarks=False):
"""create a new shared repository
Initialize a new repository and working directory that shares its
history (and optionally bookmarks) with another repository.
.. note::
using rollback or extensions that destroy/modify history (mq,
rebase, etc.) can cause considerable confusion with shared
clones. In particular, if two shared clones are both updated to
the same changeset, and one of them destroys that changeset
with rollback, the other clone will suddenly stop working: all
operations will fail with "abort: working directory has unknown
parent". The only known workaround is to use debugsetparents on
the broken clone to reset it to a changeset that still exists.
"""
return hg.share(ui, source, dest, not noupdate, bookmarks)
@command('unshare', [], '')
def unshare(ui, repo):
"""convert a shared repository to a normal one
Copy the store data to the repo and remove the sharedpath data.
"""
if not repo.shared():
raise util.Abort(_("this is not a shared repo"))
destlock = lock = None
lock = repo.lock()
try:
# we use locks here because if we race with commit, we
# can end up with extra data in the cloned revlogs that's
# not pointed to by changesets, thus causing verify to
# fail
destlock = hg.copystore(ui, repo, repo.path)
sharefile = repo.join('sharedpath')
util.rename(sharefile, sharefile + '.old')
repo.requirements.discard('sharedpath')
repo._writerequirements()
finally:
destlock and destlock.release()
lock and lock.release()
# update store, spath, sopener and sjoin of repo
repo.unfiltered().__init__(repo.baseui, repo.root)
def extsetup(ui):
extensions.wrapfunction(bookmarks.bmstore, 'getbkfile', getbkfile)
extensions.wrapfunction(bookmarks.bmstore, 'recordchange', recordchange)
extensions.wrapfunction(bookmarks.bmstore, 'write', write)
def _hassharedbookmarks(repo):
"""Returns whether this repo has shared bookmarks"""
try:
shared = repo.vfs.read('shared').splitlines()
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return False
return 'bookmarks' in shared
def _getsrcrepo(repo):
"""
Returns the source repository object for a given shared repository.
If repo is not a shared repository, return None.
"""
if repo.sharedpath == repo.path:
return None
# the sharedpath always ends in the .hg; we want the path to the repo
source = repo.vfs.split(repo.sharedpath)[0]
srcurl, branches = parseurl(source)
return repository(repo.ui, srcurl)
def getbkfile(orig, self, repo):
if _hassharedbookmarks(repo):
srcrepo = _getsrcrepo(repo)
if srcrepo is not None:
repo = srcrepo
return orig(self, repo)
def recordchange(orig, self, tr):
# Continue with write to local bookmarks file as usual
orig(self, tr)
if _hassharedbookmarks(self._repo):
srcrepo = _getsrcrepo(self._repo)
if srcrepo is not None:
category = 'share-bookmarks'
tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
def write(orig, self):
# First write local bookmarks file in case we ever unshare
orig(self)
if _hassharedbookmarks(self._repo):
srcrepo = _getsrcrepo(self._repo)
if srcrepo is not None:
self._writerepo(srcrepo)
```
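`extsetup()` above relies on `extensions.wrapfunction()` to intercept the bookmark store's read/write paths and redirect them to the share source. A minimal sketch of the same pattern, assuming a Mercurial installation and using `util.ellipsis` only as a convenient, real target function:

```python
from mercurial import extensions, util

def tracedellipsis(orig, text, maxlength=400):
    # The wrapper receives the original callable first, just as getbkfile(),
    # recordchange() and write() above receive 'orig'.
    result = orig(text, maxlength)
    return result

def extsetup(ui):
    extensions.wrapfunction(util, 'ellipsis', tracedellipsis)
```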
#### File: site-packages/mercurial/dispatch.py
```python
from i18n import _
import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re
import difflib
import util, commands, hg, fancyopts, extensions, hook, error
import cmdutil, encoding
import ui as uimod
class request(object):
def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
ferr=None):
self.args = args
self.ui = ui
self.repo = repo
# input/output/error streams
self.fin = fin
self.fout = fout
self.ferr = ferr
def run():
"run the command in sys.argv"
sys.exit((dispatch(request(sys.argv[1:])) or 0) & 255)
def _getsimilar(symbols, value):
sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
# The cutoff for similarity here is pretty arbitrary. It should
# probably be investigated and tweaked.
return [s for s in symbols if sim(s) > 0.6]
def _formatparse(write, inst):
similar = []
if isinstance(inst, error.UnknownIdentifier):
# make sure to check fileset first, as revset can invoke fileset
similar = _getsimilar(inst.symbols, inst.function)
if len(inst.args) > 1:
write(_("hg: parse error at %s: %s\n") %
(inst.args[1], inst.args[0]))
if (inst.args[0][0] == ' '):
write(_("unexpected leading whitespace\n"))
else:
write(_("hg: parse error: %s\n") % inst.args[0])
if similar:
if len(similar) == 1:
write(_("(did you mean %r?)\n") % similar[0])
else:
ss = ", ".join(sorted(similar))
write(_("(did you mean one of %s?)\n") % ss)
def dispatch(req):
"run the command specified in req.args"
if req.ferr:
ferr = req.ferr
elif req.ui:
ferr = req.ui.ferr
else:
ferr = sys.stderr
try:
if not req.ui:
req.ui = uimod.ui()
if '--traceback' in req.args:
req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
# set ui streams from the request
if req.fin:
req.ui.fin = req.fin
if req.fout:
req.ui.fout = req.fout
if req.ferr:
req.ui.ferr = req.ferr
except util.Abort, inst:
ferr.write(_("abort: %s\n") % inst)
if inst.hint:
ferr.write(_("(%s)\n") % inst.hint)
return -1
except error.ParseError, inst:
_formatparse(ferr.write, inst)
return -1
msg = ' '.join(' ' in a and repr(a) or a for a in req.args)
starttime = time.time()
ret = None
try:
ret = _runcatch(req)
return ret
finally:
duration = time.time() - starttime
req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n",
msg, ret or 0, duration)
def _runcatch(req):
def catchterm(*args):
raise error.SignalInterrupt
ui = req.ui
try:
for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
num = getattr(signal, name, None)
if num:
signal.signal(num, catchterm)
except ValueError:
pass # happens if called in a thread
try:
try:
debugger = 'pdb'
debugtrace = {
'pdb' : pdb.set_trace
}
debugmortem = {
'pdb' : pdb.post_mortem
}
# read --config before doing anything else
# (e.g. to change trust settings for reading .hg/hgrc)
cfgs = _parseconfig(req.ui, _earlygetopt(['--config'], req.args))
if req.repo:
# copy configs that were passed on the cmdline (--config) to
# the repo ui
for sec, name, val in cfgs:
req.repo.ui.setconfig(sec, name, val, source='--config')
# if we are in HGPLAIN mode, then disable custom debugging
debugger = ui.config("ui", "debugger")
debugmod = pdb
if not debugger or ui.plain():
debugger = 'pdb'
elif '--debugger' in req.args:
# This import can be slow for fancy debuggers, so only
# do it when absolutely necessary, i.e. when actual
# debugging has been requested
try:
debugmod = __import__(debugger)
except ImportError:
pass # Leave debugmod = pdb
debugtrace[debugger] = debugmod.set_trace
debugmortem[debugger] = debugmod.post_mortem
# enter the debugger before command execution
if '--debugger' in req.args:
ui.warn(_("entering debugger - "
"type c to continue starting hg or h for help\n"))
if (debugger != 'pdb' and
debugtrace[debugger] == debugtrace['pdb']):
ui.warn(_("%s debugger specified "
"but its module was not found\n") % debugger)
debugtrace[debugger]()
try:
return _dispatch(req)
finally:
ui.flush()
except: # re-raises
# enter the debugger when we hit an exception
if '--debugger' in req.args:
traceback.print_exc()
debugmortem[debugger](sys.exc_info()[2])
ui.traceback()
raise
# Global exception handling, alphabetically
# Mercurial-specific first, followed by built-in and library exceptions
except error.AmbiguousCommand, inst:
ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
(inst.args[0], " ".join(inst.args[1])))
except error.ParseError, inst:
_formatparse(ui.warn, inst)
return -1
except error.LockHeld, inst:
if inst.errno == errno.ETIMEDOUT:
reason = _('timed out waiting for lock held by %s') % inst.locker
else:
reason = _('lock held by %s') % inst.locker
ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
except error.LockUnavailable, inst:
ui.warn(_("abort: could not lock %s: %s\n") %
(inst.desc or inst.filename, inst.strerror))
except error.CommandError, inst:
if inst.args[0]:
ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
commands.help_(ui, inst.args[0], full=False, command=True)
else:
ui.warn(_("hg: %s\n") % inst.args[1])
commands.help_(ui, 'shortlist')
except error.OutOfBandError, inst:
ui.warn(_("abort: remote error:\n"))
ui.warn(''.join(inst.args))
except error.RepoError, inst:
ui.warn(_("abort: %s!\n") % inst)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except error.ResponseError, inst:
ui.warn(_("abort: %s") % inst.args[0])
if not isinstance(inst.args[1], basestring):
ui.warn(" %r\n" % (inst.args[1],))
elif not inst.args[1]:
ui.warn(_(" empty string\n"))
else:
ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
except error.CensoredNodeError, inst:
ui.warn(_("abort: file censored %s!\n") % inst)
except error.RevlogError, inst:
ui.warn(_("abort: %s!\n") % inst)
except error.SignalInterrupt:
ui.warn(_("killed!\n"))
except error.UnknownCommand, inst:
ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
try:
# check if the command is in a disabled extension
# (but don't check for extensions themselves)
commands.help_(ui, inst.args[0], unknowncmd=True)
except error.UnknownCommand:
suggested = False
if len(inst.args) == 2:
sim = _getsimilar(inst.args[1], inst.args[0])
if sim:
ui.warn(_('(did you mean one of %s?)\n') %
', '.join(sorted(sim)))
suggested = True
if not suggested:
commands.help_(ui, 'shortlist')
except error.InterventionRequired, inst:
ui.warn("%s\n" % inst)
return 1
except util.Abort, inst:
ui.warn(_("abort: %s\n") % inst)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except ImportError, inst:
ui.warn(_("abort: %s!\n") % inst)
m = str(inst).split()[-1]
if m in "mpatch bdiff".split():
ui.warn(_("(did you forget to compile extensions?)\n"))
elif m in "zlib".split():
ui.warn(_("(is your Python install correct?)\n"))
except IOError, inst:
if util.safehasattr(inst, "code"):
ui.warn(_("abort: %s\n") % inst)
elif util.safehasattr(inst, "reason"):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
except (AttributeError, IndexError):
# it might be anything, for example a string
reason = inst.reason
if isinstance(reason, unicode):
# SSLError of Python 2.7.9 contains a unicode
reason = reason.encode(encoding.encoding, 'replace')
ui.warn(_("abort: error: %s\n") % reason)
elif (util.safehasattr(inst, "args")
and inst.args and inst.args[0] == errno.EPIPE):
if ui.debugflag:
ui.warn(_("broken pipe\n"))
elif getattr(inst, "strerror", None):
if getattr(inst, "filename", None):
ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
else:
raise
except OSError, inst:
if getattr(inst, "filename", None) is not None:
ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
except KeyboardInterrupt:
try:
ui.warn(_("interrupted!\n"))
except IOError, inst:
if inst.errno == errno.EPIPE:
if ui.debugflag:
ui.warn(_("\nbroken pipe\n"))
else:
raise
except MemoryError:
ui.warn(_("abort: out of memory\n"))
except SystemExit, inst:
# Commands shouldn't sys.exit directly, but give a return code.
# Just in case, catch this and pass the exit code to the caller.
return inst.code
except socket.error, inst:
ui.warn(_("abort: %s\n") % inst.args[-1])
except: # re-raises
myver = util.version()
# For compatibility checking, we discard the portion of the hg
# version after the + on the assumption that if a "normal
# user" is running a build with a + in it the packager
# probably built from fairly close to a tag and anyone with a
# 'make local' copy of hg (where the version number can be out
# of date) will be clueful enough to notice the implausible
# version number and try updating.
compare = myver.split('+')[0]
ct = tuplever(compare)
worst = None, ct, ''
for name, mod in extensions.extensions():
testedwith = getattr(mod, 'testedwith', '')
report = getattr(mod, 'buglink', _('the extension author.'))
if not testedwith.strip():
# We found an untested extension. It's likely the culprit.
worst = name, 'unknown', report
break
# Never blame on extensions bundled with Mercurial.
if testedwith == 'internal':
continue
tested = [tuplever(t) for t in testedwith.split()]
if ct in tested:
continue
lower = [t for t in tested if t < ct]
nearest = max(lower or tested)
if worst[0] is None or nearest < worst[1]:
worst = name, nearest, report
if worst[0] is not None:
name, testedwith, report = worst
if not isinstance(testedwith, str):
testedwith = '.'.join([str(c) for c in testedwith])
warning = (_('** Unknown exception encountered with '
'possibly-broken third-party extension %s\n'
'** which supports versions %s of Mercurial.\n'
'** Please disable %s and try your action again.\n'
'** If that fixes the bug please report it to %s\n')
% (name, testedwith, name, report))
else:
warning = (_("** unknown exception encountered, "
"please report by visiting\n") +
_("** http://mercurial.selenic.com/wiki/BugTracker\n"))
warning += ((_("** Python %s\n") % sys.version.replace('\n', '')) +
(_("** Mercurial Distributed SCM (version %s)\n") % myver) +
(_("** Extensions loaded: %s\n") %
", ".join([x[0] for x in extensions.extensions()])))
ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc())
ui.warn(warning)
raise
return -1
def tuplever(v):
try:
# Assertion: tuplever is only used for extension compatibility
# checking. Otherwise, the discarding of extra version fields is
# incorrect.
return tuple([int(i) for i in v.split('.')[0:2]])
except ValueError:
return tuple()
def aliasargs(fn, givenargs):
args = getattr(fn, 'args', [])
if args:
cmd = ' '.join(map(util.shellquote, args))
nums = []
def replacer(m):
num = int(m.group(1)) - 1
nums.append(num)
if num < len(givenargs):
return givenargs[num]
raise util.Abort(_('too few arguments for command alias'))
cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
givenargs = [x for i, x in enumerate(givenargs)
if i not in nums]
args = shlex.split(cmd)
return args + givenargs
def aliasinterpolate(name, args, cmd):
'''interpolate args into cmd for shell aliases
This also handles $0, $@ and "$@".
'''
# util.interpolate can't deal with "$@" (with quotes) because it's only
# built to match prefix + patterns.
replacemap = dict(('$%d' % (i + 1), arg) for i, arg in enumerate(args))
replacemap['$0'] = name
replacemap['$$'] = '$'
replacemap['$@'] = ' '.join(args)
# Typical Unix shells interpolate "$@" (with quotes) as all the positional
# parameters, separated out into words. Emulate the same behavior here by
# quoting the arguments individually. POSIX shells will then typically
# tokenize each argument into exactly one word.
replacemap['"$@"'] = ' '.join(util.shellquote(arg) for arg in args)
# escape '\$' for regex
regex = '|'.join(replacemap.keys()).replace('$', r'\$')
r = re.compile(regex)
return r.sub(lambda x: replacemap[x.group()], cmd)
class cmdalias(object):
def __init__(self, name, definition, cmdtable):
self.name = self.cmd = name
self.cmdname = ''
self.definition = definition
self.fn = None
self.args = []
self.opts = []
self.help = ''
self.norepo = True
self.optionalrepo = False
self.badalias = None
self.unknowncmd = False
try:
aliases, entry = cmdutil.findcmd(self.name, cmdtable)
for alias, e in cmdtable.iteritems():
if e is entry:
self.cmd = alias
break
self.shadows = True
except error.UnknownCommand:
self.shadows = False
if not self.definition:
self.badalias = _("no definition for alias '%s'") % self.name
return
if self.definition.startswith('!'):
self.shell = True
def fn(ui, *args):
env = {'HG_ARGS': ' '.join((self.name,) + args)}
def _checkvar(m):
if m.groups()[0] == '$':
return m.group()
elif int(m.groups()[0]) <= len(args):
return m.group()
else:
ui.debug("No argument found for substitution "
"of %i variable in alias '%s' definition."
% (int(m.groups()[0]), self.name))
return ''
cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
cmd = aliasinterpolate(self.name, args, cmd)
return ui.system(cmd, environ=env)
self.fn = fn
return
try:
args = shlex.split(self.definition)
except ValueError, inst:
self.badalias = (_("error in definition for alias '%s': %s")
% (self.name, inst))
return
self.cmdname = cmd = args.pop(0)
args = map(util.expandpath, args)
for invalidarg in ("--cwd", "-R", "--repository", "--repo", "--config"):
if _earlygetopt([invalidarg], args):
self.badalias = (_("error in definition for alias '%s': %s may "
"only be given on the command line")
% (self.name, invalidarg))
return
try:
tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
if len(tableentry) > 2:
self.fn, self.opts, self.help = tableentry
else:
self.fn, self.opts = tableentry
self.args = aliasargs(self.fn, args)
if cmd not in commands.norepo.split(' '):
self.norepo = False
if cmd in commands.optionalrepo.split(' '):
self.optionalrepo = True
if self.help.startswith("hg " + cmd):
# drop prefix in old-style help lines so hg shows the alias
self.help = self.help[4 + len(cmd):]
self.__doc__ = self.fn.__doc__
except error.UnknownCommand:
self.badalias = (_("alias '%s' resolves to unknown command '%s'")
% (self.name, cmd))
self.unknowncmd = True
except error.AmbiguousCommand:
self.badalias = (_("alias '%s' resolves to ambiguous command '%s'")
% (self.name, cmd))
def __call__(self, ui, *args, **opts):
if self.badalias:
hint = None
if self.unknowncmd:
try:
# check if the command is in a disabled extension
cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
hint = _("'%s' is provided by '%s' extension") % (cmd, ext)
except error.UnknownCommand:
pass
raise util.Abort(self.badalias, hint=hint)
if self.shadows:
ui.debug("alias '%s' shadows command '%s'\n" %
(self.name, self.cmdname))
if util.safehasattr(self, 'shell'):
return self.fn(ui, *args, **opts)
else:
try:
return util.checksignature(self.fn)(ui, *args, **opts)
except error.SignatureError:
args = ' '.join([self.cmdname] + self.args)
ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
raise
def addaliases(ui, cmdtable):
# aliases are processed after extensions have been loaded, so they
# may use extension commands. Aliases can also use other alias definitions,
# but only if they have been defined prior to the current definition.
for alias, definition in ui.configitems('alias'):
aliasdef = cmdalias(alias, definition, cmdtable)
try:
olddef = cmdtable[aliasdef.cmd][0]
if olddef.definition == aliasdef.definition:
continue
except (KeyError, AttributeError):
# definition might not exist or it might not be a cmdalias
pass
cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help)
if aliasdef.norepo:
commands.norepo += ' %s' % alias
if aliasdef.optionalrepo:
commands.optionalrepo += ' %s' % alias
def _parse(ui, args):
options = {}
cmdoptions = {}
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except fancyopts.getopt.GetoptError, inst:
raise error.CommandError(None, inst)
if args:
cmd, args = args[0], args[1:]
aliases, entry = cmdutil.findcmd(cmd, commands.table,
ui.configbool("ui", "strict"))
cmd = aliases[0]
args = aliasargs(entry[0], args)
defaults = ui.config("defaults", cmd)
if defaults:
args = map(util.expandpath, shlex.split(defaults)) + args
c = list(entry[1])
else:
cmd = None
c = []
# combine global options into local
for o in commands.globalopts:
c.append((o[0], o[1], options[o[1]], o[3]))
try:
args = fancyopts.fancyopts(args, c, cmdoptions, True)
except fancyopts.getopt.GetoptError, inst:
raise error.CommandError(cmd, inst)
# separate global options back out
for o in commands.globalopts:
n = o[1]
options[n] = cmdoptions[n]
del cmdoptions[n]
return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
def _parseconfig(ui, config):
"""parse the --config options from the command line"""
configs = []
for cfg in config:
try:
name, value = cfg.split('=', 1)
section, name = name.split('.', 1)
if not section or not name:
raise IndexError
ui.setconfig(section, name, value, '--config')
configs.append((section, name, value))
except (IndexError, ValueError):
raise util.Abort(_('malformed --config option: %r '
'(use --config section.name=value)') % cfg)
return configs
def _earlygetopt(aliases, args):
"""Return list of values for an option (or aliases).
The values are listed in the order they appear in args.
The options and values are removed from args.
>>> args = ['x', '--cwd', 'foo', 'y']
>>> _earlygetopt(['--cwd'], args), args
(['foo'], ['x', 'y'])
>>> args = ['x', '--cwd=bar', 'y']
>>> _earlygetopt(['--cwd'], args), args
(['bar'], ['x', 'y'])
>>> args = ['x', '-R', 'foo', 'y']
>>> _earlygetopt(['-R'], args), args
(['foo'], ['x', 'y'])
>>> args = ['x', '-Rbar', 'y']
>>> _earlygetopt(['-R'], args), args
(['bar'], ['x', 'y'])
"""
try:
argcount = args.index("--")
except ValueError:
argcount = len(args)
shortopts = [opt for opt in aliases if len(opt) == 2]
values = []
pos = 0
while pos < argcount:
fullarg = arg = args[pos]
equals = arg.find('=')
if equals > -1:
arg = arg[:equals]
if arg in aliases:
del args[pos]
if equals > -1:
values.append(fullarg[equals + 1:])
argcount -= 1
else:
if pos + 1 >= argcount:
# ignore and let getopt report an error if there is no value
break
values.append(args.pop(pos))
argcount -= 2
elif arg[:2] in shortopts:
# short option can have no following space, e.g. hg log -Rfoo
values.append(args.pop(pos)[2:])
argcount -= 1
else:
pos += 1
return values
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
# run pre-hook, and abort if it fails
hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
pats=cmdpats, opts=cmdoptions)
ret = _runcommand(ui, options, cmd, d)
# run post-hook, passing command result
hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
result=ret, pats=cmdpats, opts=cmdoptions)
return ret
def _getlocal(ui, rpath):
"""Return (path, local ui object) for the given target path.
Takes paths in [cwd]/.hg/hgrc into account.
"""
try:
wd = os.getcwd()
except OSError, e:
raise util.Abort(_("error getting current working directory: %s") %
e.strerror)
path = cmdutil.findrepo(wd) or ""
if not path:
lui = ui
else:
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
if rpath and rpath[-1]:
path = lui.expandpath(rpath[-1])
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
return path, lui
def _checkshellalias(lui, ui, args, precheck=True):
"""Return the function to run the shell alias, if it is required
'precheck' is whether this function is invoked before adding
aliases or not.
"""
options = {}
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except fancyopts.getopt.GetoptError:
return
if not args:
return
if precheck:
strict = True
norepo = commands.norepo
optionalrepo = commands.optionalrepo
def restorecommands():
commands.norepo = norepo
commands.optionalrepo = optionalrepo
cmdtable = commands.table.copy()
addaliases(lui, cmdtable)
else:
strict = False
def restorecommands():
pass
cmdtable = commands.table
cmd = args[0]
try:
aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
except (error.AmbiguousCommand, error.UnknownCommand):
restorecommands()
return
cmd = aliases[0]
fn = entry[0]
if cmd and util.safehasattr(fn, 'shell'):
d = lambda: fn(ui, *args[1:])
return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
[], {})
restorecommands()
_loaded = set()
def _dispatch(req):
args = req.args
ui = req.ui
# check for cwd
cwd = _earlygetopt(['--cwd'], args)
if cwd:
os.chdir(cwd[-1])
rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
path, lui = _getlocal(ui, rpath)
# Now that we're operating in the right directory/repository with
# the right config settings, check for shell aliases
shellaliasfn = _checkshellalias(lui, ui, args)
if shellaliasfn:
return shellaliasfn()
# Configure extensions in phases: uisetup, extsetup, cmdtable, and
# reposetup. Programs like TortoiseHg will call _dispatch several
# times so we keep track of configured extensions in _loaded.
extensions.loadall(lui)
exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
# Propagate any changes to lui.__class__ by extensions
ui.__class__ = lui.__class__
# (uisetup and extsetup are handled in extensions.loadall)
for name, module in exts:
cmdtable = getattr(module, 'cmdtable', {})
overrides = [cmd for cmd in cmdtable if cmd in commands.table]
if overrides:
ui.warn(_("extension '%s' overrides commands: %s\n")
% (name, " ".join(overrides)))
commands.table.update(cmdtable)
_loaded.add(name)
# (reposetup is handled in hg.repository)
addaliases(lui, commands.table)
if not lui.configbool("ui", "strict"):
# All aliases and commands are completely defined, now.
# Check abbreviation/ambiguity of shell alias again, because shell
# alias may cause failure of "_parse" (see issue4355)
shellaliasfn = _checkshellalias(lui, ui, args, precheck=False)
if shellaliasfn:
return shellaliasfn()
# check for fallback encoding
fallback = lui.config('ui', 'fallbackencoding')
if fallback:
encoding.fallbackencoding = fallback
fullargs = args
cmd, func, args, options, cmdoptions = _parse(lui, args)
if options["config"]:
raise util.Abort(_("option --config may not be abbreviated!"))
if options["cwd"]:
raise util.Abort(_("option --cwd may not be abbreviated!"))
if options["repository"]:
raise util.Abort(_(
"option -R has to be separated from other options (e.g. not -qR) "
"and --repository may only be abbreviated as --repo!"))
if options["encoding"]:
encoding.encoding = options["encoding"]
if options["encodingmode"]:
encoding.encodingmode = options["encodingmode"]
if options["time"]:
def get_times():
t = os.times()
if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
t = (t[0], t[1], t[2], t[3], time.clock())
return t
s = get_times()
def print_time():
t = get_times()
ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
(t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
atexit.register(print_time)
uis = set([ui, lui])
if req.repo:
uis.add(req.repo.ui)
if options['verbose'] or options['debug'] or options['quiet']:
for opt in ('verbose', 'debug', 'quiet'):
val = str(bool(options[opt]))
for ui_ in uis:
ui_.setconfig('ui', opt, val, '--' + opt)
if options['traceback']:
for ui_ in uis:
ui_.setconfig('ui', 'traceback', 'on', '--traceback')
if options['noninteractive']:
for ui_ in uis:
ui_.setconfig('ui', 'interactive', 'off', '-y')
if cmdoptions.get('insecure', False):
for ui_ in uis:
ui_.setconfig('web', 'cacerts', '!', '--insecure')
if options['version']:
return commands.version_(ui)
if options['help']:
return commands.help_(ui, cmd, command=True)
elif not cmd:
return commands.help_(ui, 'shortlist')
repo = None
cmdpats = args[:]
if cmd not in commands.norepo.split():
# use the repo from the request only if we don't have -R
if not rpath and not cwd:
repo = req.repo
if repo:
# set the descriptors of the repo ui to those of ui
repo.ui.fin = ui.fin
repo.ui.fout = ui.fout
repo.ui.ferr = ui.ferr
else:
try:
repo = hg.repository(ui, path=path)
if not repo.local():
raise util.Abort(_("repository '%s' is not local") % path)
repo.ui.setconfig("bundle", "mainreporoot", repo.root, 'repo')
except error.RequirementError:
raise
except error.RepoError:
if cmd not in commands.optionalrepo.split():
if (cmd in commands.inferrepo.split() and
args and not path): # try to infer -R from command args
repos = map(cmdutil.findrepo, args)
guess = repos[0]
if guess and repos.count(guess) == len(repos):
req.args = ['--repository', guess] + fullargs
return _dispatch(req)
if not path:
raise error.RepoError(_("no repository found in '%s'"
" (.hg not found)")
% os.getcwd())
raise
if repo:
ui = repo.ui
if options['hidden']:
repo = repo.unfiltered()
args.insert(0, repo)
elif rpath:
ui.warn(_("warning: --repository ignored\n"))
msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
ui.log("command", '%s\n', msg)
d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
try:
return runcommand(lui, repo, cmd, fullargs, ui, options, d,
cmdpats, cmdoptions)
finally:
if repo and repo != req.repo:
repo.close()
def lsprofile(ui, func, fp):
format = ui.config('profiling', 'format', default='text')
field = ui.config('profiling', 'sort', default='inlinetime')
limit = ui.configint('profiling', 'limit', default=30)
climit = ui.configint('profiling', 'nested', default=5)
if format not in ['text', 'kcachegrind']:
ui.warn(_("unrecognized profiling format '%s'"
" - Ignored\n") % format)
format = 'text'
try:
from mercurial import lsprof
except ImportError:
raise util.Abort(_(
'lsprof not available - install from '
'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
p = lsprof.Profiler()
p.enable(subcalls=True)
try:
return func()
finally:
p.disable()
if format == 'kcachegrind':
import lsprofcalltree
calltree = lsprofcalltree.KCacheGrind(p)
calltree.output(fp)
else:
# format == 'text'
stats = lsprof.Stats(p.getstats())
stats.sort(field)
stats.pprint(limit=limit, file=fp, climit=climit)
def statprofile(ui, func, fp):
try:
import statprof
except ImportError:
raise util.Abort(_(
'statprof not available - install using "easy_install statprof"'))
freq = ui.configint('profiling', 'freq', default=1000)
if freq > 0:
statprof.reset(freq)
else:
ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
statprof.start()
try:
return func()
finally:
statprof.stop()
statprof.display(fp)
def _runcommand(ui, options, cmd, cmdfunc):
def checkargs():
try:
return cmdfunc()
except error.SignatureError:
raise error.CommandError(cmd, _("invalid arguments"))
if options['profile']:
profiler = os.getenv('HGPROF')
if profiler is None:
profiler = ui.config('profiling', 'type', default='ls')
if profiler not in ('ls', 'stat'):
ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
profiler = 'ls'
output = ui.config('profiling', 'output')
if output:
path = ui.expandpath(output)
fp = open(path, 'wb')
else:
fp = sys.stderr
try:
if profiler == 'ls':
return lsprofile(ui, checkargs, fp)
else:
return statprofile(ui, checkargs, fp)
finally:
if output:
fp.close()
else:
return checkargs()
```
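`run()` above shows that the whole command-line front end is just `dispatch(request(args))`, which makes it easy to drive Mercurial in-process. A minimal sketch, assuming a Mercurial installation (the repository path is hypothetical):

```python
import sys
from mercurial import dispatch

# Equivalent to running "hg -R /path/to/repo log -l 1" from a shell; the
# return value is the exit status, just as in run() above.
req = dispatch.request(['-R', '/path/to/repo', 'log', '-l', '1'])
status = dispatch.dispatch(req) or 0
sys.exit(status)
```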
#### File: site-packages/mercurial/error.py
```python
class RevlogError(Exception):
pass
class FilteredIndexError(IndexError):
pass
class LookupError(RevlogError, KeyError):
def __init__(self, name, index, message):
self.name = name
self.index = index
# this can't be called 'message' because at least some installs of
# Python 2.6+ complain about the 'message' property being deprecated
self.lookupmessage = message
if isinstance(name, str) and len(name) == 20:
from node import short
name = short(name)
RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
def __str__(self):
return RevlogError.__str__(self)
class FilteredLookupError(LookupError):
pass
class ManifestLookupError(LookupError):
pass
class CommandError(Exception):
"""Exception raised on errors in parsing the command line."""
class InterventionRequired(Exception):
"""Exception raised when a command requires human intervention."""
class Abort(Exception):
"""Raised if a command needs to print an error and exit."""
def __init__(self, *args, **kw):
Exception.__init__(self, *args)
self.hint = kw.get('hint')
class HookAbort(Abort):
"""raised when a validation hook fails, aborting an operation
Exists to allow more specialized catching."""
pass
class ConfigError(Abort):
"""Exception raised when parsing config files"""
class OutOfBandError(Exception):
"""Exception raised when a remote repo reports failure"""
class ParseError(Exception):
"""Raised when parsing config files and {rev,file}sets (msg[, pos])"""
class UnknownIdentifier(ParseError):
"""Exception raised when a {rev,file}set references an unknown identifier"""
def __init__(self, function, symbols):
from i18n import _
ParseError.__init__(self, _("unknown identifier: %s") % function)
self.function = function
self.symbols = symbols
class RepoError(Exception):
def __init__(self, *args, **kw):
Exception.__init__(self, *args)
self.hint = kw.get('hint')
class RepoLookupError(RepoError):
pass
class FilteredRepoLookupError(RepoLookupError):
pass
class CapabilityError(RepoError):
pass
class RequirementError(RepoError):
"""Exception raised if .hg/requires has an unknown entry."""
pass
class LockError(IOError):
def __init__(self, errno, strerror, filename, desc):
IOError.__init__(self, errno, strerror, filename)
self.desc = desc
class LockHeld(LockError):
def __init__(self, errno, filename, desc, locker):
LockError.__init__(self, errno, 'Lock held', filename, desc)
self.locker = locker
class LockUnavailable(LockError):
pass
class ResponseError(Exception):
"""Raised to print an error with part of output and exit."""
class UnknownCommand(Exception):
"""Exception raised if command is not in the command table."""
class AmbiguousCommand(Exception):
"""Exception raised if command shortcut matches more than one command."""
# derived from KeyboardInterrupt to simplify some breakout code
class SignalInterrupt(KeyboardInterrupt):
"""Exception raised on SIGTERM and SIGHUP."""
class SignatureError(Exception):
pass
class PushRaced(RuntimeError):
"""An exception raised during unbundling that indicate a push race"""
# bundle2 related errors
class BundleValueError(ValueError):
"""error raised when bundle2 cannot be processed"""
class UnsupportedPartError(BundleValueError):
def __init__(self, parttype=None, params=()):
self.parttype = parttype
self.params = params
if self.parttype is None:
msg = 'Stream Parameter'
else:
msg = parttype
if self.params:
msg = '%s - %s' % (msg, ', '.join(self.params))
ValueError.__init__(self, msg)
class ReadOnlyPartError(RuntimeError):
"""error raised when code tries to alter a part being generated"""
pass
class CensoredNodeError(RevlogError):
"""error raised when content verification fails on a censored node
Also contains the tombstone data substituted for the uncensored data.
"""
def __init__(self, filename, node, tombstone):
from node import short
RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
self.tombstone = tombstone
class CensoredBaseError(RevlogError):
"""error raised when a delta is rejected because its base is censored
A delta based on a censored revision must be formed as single patch
operation which replaces the entire base with new content. This ensures
the delta may be applied by clones which have not censored the base.
"""
```
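Most of the classes above follow the `Abort(*args, hint=...)` convention: `_runcatch()` in dispatch.py prints the message as `abort: ...` and the hint on a following `(...)` line. A minimal sketch of raising such an error from extension code, assuming a Mercurial installation (the message text is invented):

```python
from mercurial import error

def requireconfig(ui, section, name):
    value = ui.config(section, name)
    if not value:
        # Printed by dispatch._runcatch() as:
        #   abort: config option 'section.name' is not set
        #   (set it with --config section.name=VALUE)
        raise error.Abort("config option '%s.%s' is not set" % (section, name),
                          hint="set it with --config %s.%s=VALUE" % (section, name))
    return value
```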
#### File: site-packages/mercurial/pathutil.py
```python
import os, errno, stat
import encoding
import util
from i18n import _
def _lowerclean(s):
return encoding.hfsignoreclean(s.lower())
class pathauditor(object):
'''ensure that a filesystem path contains no banned components.
the following properties of a path are checked:
- ends with a directory separator
- under top-level .hg
- starts at the root of a windows drive
- contains ".."
- traverses a symlink (e.g. a/symlink_here/b)
- inside a nested repository (a callback can be used to approve
some nested repositories, e.g., subrepositories)
'''
def __init__(self, root, callback=None):
self.audited = set()
self.auditeddir = set()
self.root = root
self.callback = callback
if os.path.lexists(root) and not util.checkcase(root):
self.normcase = util.normcase
else:
self.normcase = lambda x: x
def __call__(self, path):
'''Check the relative path.
path may contain a pattern (e.g. foodir/**.txt)'''
path = util.localpath(path)
normpath = self.normcase(path)
if normpath in self.audited:
return
# AIX ignores "/" at end of path, others raise EISDIR.
if util.endswithsep(path):
raise util.Abort(_("path ends in directory separator: %s") % path)
parts = util.splitpath(path)
if (os.path.splitdrive(path)[0]
or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
or os.pardir in parts):
raise util.Abort(_("path contains illegal component: %s") % path)
# Windows shortname aliases
for p in parts:
if "~" in p:
first, last = p.split("~", 1)
if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
raise util.Abort(_("path contains illegal component: %s")
% path)
if '.hg' in _lowerclean(path):
lparts = [_lowerclean(p.lower()) for p in parts]
for p in '.hg', '.hg.':
if p in lparts[1:]:
pos = lparts.index(p)
base = os.path.join(*parts[:pos])
raise util.Abort(_("path '%s' is inside nested repo %r")
% (path, base))
normparts = util.splitpath(normpath)
assert len(parts) == len(normparts)
parts.pop()
normparts.pop()
prefixes = []
while parts:
prefix = os.sep.join(parts)
normprefix = os.sep.join(normparts)
if normprefix in self.auditeddir:
break
curpath = os.path.join(self.root, prefix)
try:
st = os.lstat(curpath)
except OSError, err:
# EINVAL can be raised as invalid path syntax under win32.
# They must be ignored so that patterns can still be checked.
if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
raise
else:
if stat.S_ISLNK(st.st_mode):
raise util.Abort(
_('path %r traverses symbolic link %r')
% (path, prefix))
elif (stat.S_ISDIR(st.st_mode) and
os.path.isdir(os.path.join(curpath, '.hg'))):
if not self.callback or not self.callback(curpath):
raise util.Abort(_("path '%s' is inside nested "
"repo %r")
% (path, prefix))
prefixes.append(normprefix)
parts.pop()
normparts.pop()
self.audited.add(normpath)
# only add prefixes to the cache after checking everything: we don't
# want to add "foo/bar/baz" before checking if there's a "foo/.hg"
self.auditeddir.update(prefixes)
def check(self, path):
try:
self(path)
return True
except (OSError, util.Abort):
return False
def canonpath(root, cwd, myname, auditor=None):
'''return the canonical path of myname, given cwd and root'''
if util.endswithsep(root):
rootsep = root
else:
rootsep = root + os.sep
name = myname
if not os.path.isabs(name):
name = os.path.join(root, cwd, name)
name = os.path.normpath(name)
if auditor is None:
auditor = pathauditor(root)
if name != rootsep and name.startswith(rootsep):
name = name[len(rootsep):]
auditor(name)
return util.pconvert(name)
elif name == root:
return ''
else:
# Determine whether `name' is in the hierarchy at or beneath `root',
# by iterating name=dirname(name) until that causes no change (can't
# check name == '/', because that doesn't work on windows). The list
# `rel' holds the reversed list of components making up the relative
# file name we want.
rel = []
while True:
try:
s = util.samefile(name, root)
except OSError:
s = False
if s:
if not rel:
# name was actually the same as root (maybe a symlink)
return ''
rel.reverse()
name = os.path.join(*rel)
auditor(name)
return util.pconvert(name)
dirname, basename = util.split(name)
rel.append(basename)
if dirname == name:
break
name = dirname
raise util.Abort(_("%s not under root '%s'") % (myname, root))
def normasprefix(path):
'''normalize the specified path as path prefix
Returned value can be used safely for "p.startswith(prefix)",
"p[len(prefix):]", and so on.
For efficiency, this expects "path" argument to be already
normalized by "os.path.normpath", "os.path.realpath", and so on.
See also issue3033 for detail about need of this function.
>>> normasprefix('/foo/bar').replace(os.sep, '/')
'/foo/bar/'
>>> normasprefix('/').replace(os.sep, '/')
'/'
'''
d, p = os.path.splitdrive(path)
if len(p) != len(os.sep):
return path + os.sep
else:
return path
```
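A minimal sketch of how the auditor above is typically used, assuming a Mercurial installation (the repository path is hypothetical):

```python
from mercurial import pathutil

audit = pathutil.pathauditor('/path/to/repo')
audit('docs/readme.txt')          # silently accepted: stays inside the root
print(audit.check('../escape'))   # False: ".." is rejected via util.Abort
print(audit.check('.hg/hgrc'))    # False: top-level .hg is off limits
```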
#### File: site-packages/mercurial/revset.py
```python
import re
import parser, util, error, hbisect, phases
import node
import heapq
import match as matchmod
from i18n import _
import encoding
import obsolete as obsmod
import pathutil
import repoview
def _revancestors(repo, revs, followfirst):
"""Like revlog.ancestors(), but supports followfirst."""
if followfirst:
cut = 1
else:
cut = None
cl = repo.changelog
def iterate():
revqueue, revsnode = None, None
h = []
revs.sort(reverse=True)
revqueue = util.deque(revs)
if revqueue:
revsnode = revqueue.popleft()
heapq.heappush(h, -revsnode)
seen = set()
while h:
current = -heapq.heappop(h)
if current not in seen:
if revsnode and current == revsnode:
if revqueue:
revsnode = revqueue.popleft()
heapq.heappush(h, -revsnode)
seen.add(current)
yield current
for parent in cl.parentrevs(current)[:cut]:
if parent != node.nullrev:
heapq.heappush(h, -parent)
return generatorset(iterate(), iterasc=False)
def _revdescendants(repo, revs, followfirst):
"""Like revlog.descendants() but supports followfirst."""
if followfirst:
cut = 1
else:
cut = None
def iterate():
cl = repo.changelog
first = min(revs)
nullrev = node.nullrev
if first == nullrev:
# Are there nodes with a null first parent and a non-null
# second one? Maybe. Do we care? Probably not.
for i in cl:
yield i
else:
seen = set(revs)
for i in cl.revs(first + 1):
for x in cl.parentrevs(i)[:cut]:
if x != nullrev and x in seen:
seen.add(i)
yield i
break
return generatorset(iterate(), iterasc=True)
def _revsbetween(repo, roots, heads):
"""Return all paths between roots and heads, inclusive of both endpoint
sets."""
if not roots:
return baseset()
parentrevs = repo.changelog.parentrevs
visit = list(heads)
reachable = set()
seen = {}
minroot = min(roots)
roots = set(roots)
# open-code the post-order traversal due to the tiny size of
# sys.getrecursionlimit()
while visit:
rev = visit.pop()
if rev in roots:
reachable.add(rev)
parents = parentrevs(rev)
seen[rev] = parents
for parent in parents:
if parent >= minroot and parent not in seen:
visit.append(parent)
if not reachable:
return baseset()
for rev in sorted(seen):
for parent in seen[rev]:
if parent in reachable:
reachable.add(rev)
return baseset(sorted(reachable))
elements = {
"(": (21, ("group", 1, ")"), ("func", 1, ")")),
"##": (20, None, ("_concat", 20)),
"~": (18, None, ("ancestor", 18)),
"^": (18, None, ("parent", 18), ("parentpost", 18)),
"-": (5, ("negate", 19), ("minus", 5)),
"::": (17, ("dagrangepre", 17), ("dagrange", 17),
("dagrangepost", 17)),
"..": (17, ("dagrangepre", 17), ("dagrange", 17),
("dagrangepost", 17)),
":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
"not": (10, ("not", 10)),
"!": (10, ("not", 10)),
"and": (5, None, ("and", 5)),
"&": (5, None, ("and", 5)),
"%": (5, None, ("only", 5), ("onlypost", 5)),
"or": (4, None, ("or", 4)),
"|": (4, None, ("or", 4)),
"+": (4, None, ("or", 4)),
",": (2, None, ("list", 2)),
")": (0, None, None),
"symbol": (0, ("symbol",), None),
"string": (0, ("string",), None),
"end": (0, None, None),
}
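# Reading the operator table above: each entry maps a token to
# (binding strength, prefix action, infix action[, suffix action]); higher
# numbers bind tighter.  For example "::" (17) binds tighter than "and"/"&"
# (5), which bind tighter than "or"/"|"/"+" (4), so a revset such as
# "a::b and c" groups as "(a::b) and c" rather than "a::(b and c)".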
keywords = set(['and', 'or', 'not'])
# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
if c.isalnum() or c in '._@' or ord(c) > 127)
# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
if c.isalnum() or c in '-._/@' or ord(c) > 127)
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
'''
Parse a revset statement into a stream of tokens
``syminitletters`` is the set of valid characters for the initial
letter of symbols.
By default, character ``c`` is recognized as valid for initial
letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
``symletters`` is the set of valid characters for non-initial
letters of symbols.
By default, character ``c`` is recognized as valid for non-initial
letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
Check that @ is a valid unquoted token character (issue3686):
>>> list(tokenize("@::"))
[('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
'''
if syminitletters is None:
syminitletters = _syminitletters
if symletters is None:
symletters = _symletters
pos, l = 0, len(program)
while pos < l:
c = program[pos]
if c.isspace(): # skip inter-token whitespace
pass
elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
yield ('::', None, pos)
pos += 1 # skip ahead
elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
yield ('..', None, pos)
pos += 1 # skip ahead
elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
yield ('##', None, pos)
pos += 1 # skip ahead
elif c in "():,-|&+!~^%": # handle simple operators
yield (c, None, pos)
elif (c in '"\'' or c == 'r' and
program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
if c == 'r':
pos += 1
c = program[pos]
decode = lambda x: x
else:
decode = lambda x: x.decode('string-escape')
pos += 1
s = pos
while pos < l: # find closing quote
d = program[pos]
if d == '\\': # skip over escaped characters
pos += 2
continue
if d == c:
yield ('string', decode(program[s:pos]), s)
break
pos += 1
else:
raise error.ParseError(_("unterminated string"), s)
# gather up a symbol/keyword
elif c in syminitletters:
s = pos
pos += 1
while pos < l: # find end of symbol
d = program[pos]
if d not in symletters:
break
if d == '.' and program[pos - 1] == '.': # special case for ..
pos -= 1
break
pos += 1
sym = program[s:pos]
if sym in keywords: # operator keywords
yield (sym, None, s)
elif '-' in sym:
# some jerk gave us foo-bar-baz, try to check if it's a symbol
if lookup and lookup(sym):
# looks like a real symbol
yield ('symbol', sym, s)
else:
# looks like an expression
parts = sym.split('-')
for p in parts[:-1]:
if p: # possible consecutive -
yield ('symbol', p, s)
s += len(p)
yield ('-', None, pos)
s += 1
if parts[-1]: # possible trailing -
yield ('symbol', parts[-1], s)
else:
yield ('symbol', sym, s)
pos -= 1
else:
raise error.ParseError(_("syntax error in revset '%s'") %
program, pos)
pos += 1
yield ('end', None, pos)
def parseerrordetail(inst):
"""Compose error message from specified ParseError object
"""
if len(inst.args) > 1:
return _('at %s: %s') % (inst.args[1], inst.args[0])
else:
return inst.args[0]
# helpers
def getstring(x, err):
if x and (x[0] == 'string' or x[0] == 'symbol'):
return x[1]
raise error.ParseError(err)
def getlist(x):
if not x:
return []
if x[0] == 'list':
return getlist(x[1]) + [x[2]]
return [x]
def getargs(x, min, max, err):
l = getlist(x)
if len(l) < min or (max >= 0 and len(l) > max):
raise error.ParseError(err)
return l
def isvalidsymbol(tree):
"""Examine whether specified ``tree`` is valid ``symbol`` or not
"""
return tree[0] == 'symbol' and len(tree) > 1
def getsymbol(tree):
"""Get symbol name from valid ``symbol`` in ``tree``
This assumes that ``tree`` is already examined by ``isvalidsymbol``.
"""
return tree[1]
def isvalidfunc(tree):
"""Examine whether specified ``tree`` is valid ``func`` or not
"""
return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
def getfuncname(tree):
"""Get function name from valid ``func`` in ``tree``
This assumes that ``tree`` is already examined by ``isvalidfunc``.
"""
return getsymbol(tree[1])
def getfuncargs(tree):
"""Get list of function arguments from valid ``func`` in ``tree``
This assumes that ``tree`` is already examined by ``isvalidfunc``.
"""
if len(tree) > 2:
return getlist(tree[2])
else:
return []
def getset(repo, subset, x):
if not x:
raise error.ParseError(_("missing argument"))
s = methods[x[0]](repo, subset, *x[1:])
if util.safehasattr(s, 'isascending'):
return s
return baseset(s)
def _getrevsource(repo, r):
extra = repo[r].extra()
for label in ('source', 'transplant_source', 'rebase_source'):
if label in extra:
try:
return repo[extra[label]].rev()
except error.RepoLookupError:
pass
return None
# operator methods
def stringset(repo, subset, x):
x = repo[x].rev()
if (x in subset
or x == node.nullrev and isinstance(subset, fullreposet)):
return baseset([x])
return baseset()
def symbolset(repo, subset, x):
if x in symbols:
raise error.ParseError(_("can't use %s here") % x)
return stringset(repo, subset, x)
def rangeset(repo, subset, x, y):
m = getset(repo, fullreposet(repo), x)
n = getset(repo, fullreposet(repo), y)
if not m or not n:
return baseset()
m, n = m.first(), n.last()
if m < n:
r = spanset(repo, m, n + 1)
else:
r = spanset(repo, m, n - 1)
return r & subset
def dagrange(repo, subset, x, y):
r = fullreposet(repo)
xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
return xs & subset
def andset(repo, subset, x, y):
return getset(repo, getset(repo, subset, x), y)
def orset(repo, subset, x, y):
xl = getset(repo, subset, x)
yl = getset(repo, subset - xl, y)
return xl + yl
def notset(repo, subset, x):
return subset - getset(repo, subset, x)
def listset(repo, subset, a, b):
raise error.ParseError(_("can't use a list in this context"))
def func(repo, subset, a, b):
if a[0] == 'symbol' and a[1] in symbols:
return symbols[a[1]](repo, subset, b)
raise error.UnknownIdentifier(a[1], symbols.keys())
# functions
def adds(repo, subset, x):
"""``adds(pattern)``
Changesets that add a file matching pattern.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file or a
directory.
"""
# i18n: "adds" is a keyword
pat = getstring(x, _("adds requires a pattern"))
return checkstatus(repo, subset, pat, 1)
def ancestor(repo, subset, x):
"""``ancestor(*changeset)``
A greatest common ancestor of the changesets.
Accepts 0 or more changesets.
Will return empty list when passed no args.
Greatest common ancestor of a single changeset is that changeset.
"""
# i18n: "ancestor" is a keyword
l = getlist(x)
rl = fullreposet(repo)
anc = None
# (getset(repo, rl, i) for i in l) generates a list of lists
for revs in (getset(repo, rl, i) for i in l):
for r in revs:
if anc is None:
anc = repo[r]
else:
anc = anc.ancestor(repo[r])
if anc is not None and anc.rev() in subset:
return baseset([anc.rev()])
return baseset()
def _ancestors(repo, subset, x, followfirst=False):
heads = getset(repo, fullreposet(repo), x)
if not heads:
return baseset()
s = _revancestors(repo, heads, followfirst)
return subset & s
def ancestors(repo, subset, x):
"""``ancestors(set)``
Changesets that are ancestors of a changeset in set.
"""
return _ancestors(repo, subset, x)
def _firstancestors(repo, subset, x):
# ``_firstancestors(set)``
# Like ``ancestors(set)`` but follows only the first parents.
return _ancestors(repo, subset, x, followfirst=True)
def ancestorspec(repo, subset, x, n):
"""``set~n``
Changesets that are the Nth ancestor (first parents only) of a changeset
in set.
"""
try:
n = int(n[1])
except (TypeError, ValueError):
raise error.ParseError(_("~ expects a number"))
ps = set()
cl = repo.changelog
for r in getset(repo, fullreposet(repo), x):
for i in range(n):
r = cl.parentrevs(r)[0]
ps.add(r)
return subset & ps
def author(repo, subset, x):
"""``author(string)``
Alias for ``user(string)``.
"""
# i18n: "author" is a keyword
n = encoding.lower(getstring(x, _("author requires a string")))
kind, pattern, matcher = _substringmatcher(n)
return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
def bisect(repo, subset, x):
"""``bisect(string)``
Changesets marked in the specified bisect status:
- ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
- ``goods``, ``bads`` : csets topologically good/bad
- ``range`` : csets taking part in the bisection
- ``pruned`` : csets that are goods, bads or skipped
- ``untested`` : csets whose fate is yet unknown
- ``ignored`` : csets ignored due to DAG topology
- ``current`` : the cset currently being bisected
"""
# i18n: "bisect" is a keyword
status = getstring(x, _("bisect requires a string")).lower()
state = set(hbisect.get(repo, status))
return subset & state
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
return bisect(repo, subset, x)
def bookmark(repo, subset, x):
"""``bookmark([name])``
The named bookmark or all bookmarks.
If `name` starts with `re:`, the remainder of the name is treated as
a regular expression. To match a bookmark that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "bookmark" is a keyword
args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
if args:
bm = getstring(args[0],
# i18n: "bookmark" is a keyword
_('the argument to bookmark must be a string'))
kind, pattern, matcher = _stringmatcher(bm)
bms = set()
if kind == 'literal':
bmrev = repo._bookmarks.get(pattern, None)
if not bmrev:
raise error.RepoLookupError(_("bookmark '%s' does not exist")
% bm)
bms.add(repo[bmrev].rev())
else:
matchrevs = set()
for name, bmrev in repo._bookmarks.iteritems():
if matcher(name):
matchrevs.add(bmrev)
if not matchrevs:
raise error.RepoLookupError(_("no bookmarks exist"
" that match '%s'") % pattern)
for bmrev in matchrevs:
bms.add(repo[bmrev].rev())
else:
bms = set([repo[r].rev()
for r in repo._bookmarks.values()])
bms -= set([node.nullrev])
return subset & bms
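# Example usage of the predicate above from the command line (bookmark names
# are hypothetical):
#   hg log -r 'bookmark()'                  # changesets with any bookmark
#   hg log -r 'bookmark(stable)'            # one literal bookmark
#   hg log -r 'bookmark("re:feature/.*")'   # regular-expression match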
def branch(repo, subset, x):
"""``branch(string or set)``
All changesets belonging to the given branch or the branches of the given
changesets.
If `string` starts with `re:`, the remainder of the name is treated as
a regular expression. To match a branch that actually starts with `re:`,
use the prefix `literal:`.
"""
getbi = repo.revbranchcache().branchinfo
try:
b = getstring(x, '')
except error.ParseError:
# not a string, but another revspec, e.g. tip()
pass
else:
kind, pattern, matcher = _stringmatcher(b)
if kind == 'literal':
# note: falls through to the revspec case if no branch with
# this name exists
if pattern in repo.branchmap():
return subset.filter(lambda r: matcher(getbi(r)[0]))
else:
return subset.filter(lambda r: matcher(getbi(r)[0]))
s = getset(repo, fullreposet(repo), x)
b = set()
for r in s:
b.add(getbi(r)[0])
c = s.__contains__
return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
def bumped(repo, subset, x):
"""``bumped()``
Mutable changesets marked as successors of public changesets.
Only non-public and non-obsolete changesets can be `bumped`.
"""
# i18n: "bumped" is a keyword
getargs(x, 0, 0, _("bumped takes no arguments"))
bumped = obsmod.getrevs(repo, 'bumped')
return subset & bumped
def bundle(repo, subset, x):
"""``bundle()``
Changesets in the bundle.
Bundle must be specified by the -R option."""
try:
bundlerevs = repo.changelog.bundlerevs
except AttributeError:
raise util.Abort(_("no bundle provided - specify with -R"))
return subset & bundlerevs
def checkstatus(repo, subset, pat, field):
hasset = matchmod.patkind(pat) == 'set'
mcache = [None]
def matches(x):
c = repo[x]
if not mcache[0] or hasset:
mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
m = mcache[0]
fname = None
if not m.anypats() and len(m.files()) == 1:
fname = m.files()[0]
if fname is not None:
if fname not in c.files():
return False
else:
for f in c.files():
if m(f):
break
else:
return False
files = repo.status(c.p1().node(), c.node())[field]
if fname is not None:
if fname in files:
return True
else:
for f in files:
if m(f):
return True
return subset.filter(matches)
def _children(repo, narrow, parentset):
cs = set()
if not parentset:
return baseset(cs)
pr = repo.changelog.parentrevs
minrev = min(parentset)
for r in narrow:
if r <= minrev:
continue
for p in pr(r):
if p in parentset:
cs.add(r)
return baseset(cs)
def children(repo, subset, x):
"""``children(set)``
Child changesets of changesets in set.
"""
s = getset(repo, fullreposet(repo), x)
cs = _children(repo, subset, s)
return subset & cs
def closed(repo, subset, x):
"""``closed()``
Changeset is closed.
"""
# i18n: "closed" is a keyword
getargs(x, 0, 0, _("closed takes no arguments"))
return subset.filter(lambda r: repo[r].closesbranch())
def contains(repo, subset, x):
"""``contains(pattern)``
The revision's manifest contains a file matching pattern (but might not
modify it). See :hg:`help patterns` for information about file patterns.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file exactly
for efficiency.
"""
# i18n: "contains" is a keyword
pat = getstring(x, _("contains requires a pattern"))
def matches(x):
if not matchmod.patkind(pat):
pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
if pats in repo[x]:
return True
else:
c = repo[x]
m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
for f in c.manifest():
if m(f):
return True
return False
return subset.filter(matches)
def converted(repo, subset, x):
"""``converted([id])``
Changesets converted from the given identifier in the old repository if
present, or all converted changesets if no identifier is specified.
"""
# There is exactly no chance of resolving the revision, so do a simple
# string compare and hope for the best
rev = None
# i18n: "converted" is a keyword
l = getargs(x, 0, 1, _('converted takes one or no arguments'))
if l:
# i18n: "converted" is a keyword
rev = getstring(l[0], _('converted requires a revision'))
def _matchvalue(r):
source = repo[r].extra().get('convert_revision', None)
return source is not None and (rev is None or source.startswith(rev))
return subset.filter(lambda r: _matchvalue(r))
def date(repo, subset, x):
"""``date(interval)``
Changesets within the interval, see :hg:`help dates`.
"""
# i18n: "date" is a keyword
ds = getstring(x, _("date requires a string"))
dm = util.matchdate(ds)
return subset.filter(lambda x: dm(repo[x].date()[0]))
def desc(repo, subset, x):
"""``desc(string)``
Search commit message for string. The match is case-insensitive.
"""
# i18n: "desc" is a keyword
ds = encoding.lower(getstring(x, _("desc requires a string")))
def matches(x):
c = repo[x]
return ds in encoding.lower(c.description())
return subset.filter(matches)
def _descendants(repo, subset, x, followfirst=False):
roots = getset(repo, fullreposet(repo), x)
if not roots:
return baseset()
s = _revdescendants(repo, roots, followfirst)
# Both sets need to be ascending in order to lazily return the union
# in the correct order.
base = subset & roots
desc = subset & s
result = base + desc
if subset.isascending():
result.sort()
elif subset.isdescending():
result.sort(reverse=True)
else:
result = subset & result
return result
def descendants(repo, subset, x):
"""``descendants(set)``
Changesets which are descendants of changesets in set.
"""
return _descendants(repo, subset, x)
def _firstdescendants(repo, subset, x):
# ``_firstdescendants(set)``
# Like ``descendants(set)`` but follows only the first parents.
return _descendants(repo, subset, x, followfirst=True)
def destination(repo, subset, x):
"""``destination([set])``
Changesets that were created by a graft, transplant or rebase operation,
with the given revisions specified as the source. Omitting the optional set
is the same as passing all().
"""
if x is not None:
sources = getset(repo, fullreposet(repo), x)
else:
sources = fullreposet(repo)
dests = set()
# subset contains all of the possible destinations that can be returned, so
# iterate over them and see if their source(s) were provided in the arg set.
# Even if the immediate src of r is not in the arg set, src's source (or
# further back) may be. Scanning back further than the immediate src allows
# transitive transplants and rebases to yield the same results as transitive
# grafts.
for r in subset:
src = _getrevsource(repo, r)
lineage = None
while src is not None:
if lineage is None:
lineage = list()
lineage.append(r)
# The visited lineage is a match if the current source is in the arg
# set. Since every candidate dest is visited by way of iterating
# subset, any dests further back in the lineage will be tested by a
# different iteration over subset. Likewise, if the src was already
# selected, the current lineage can be selected without going back
# further.
if src in sources or src in dests:
dests.update(lineage)
break
r = src
src = _getrevsource(repo, r)
return subset.filter(dests.__contains__)
def divergent(repo, subset, x):
"""``divergent()``
Final successors of changesets with an alternative set of final successors.
"""
# i18n: "divergent" is a keyword
getargs(x, 0, 0, _("divergent takes no arguments"))
divergent = obsmod.getrevs(repo, 'divergent')
return subset & divergent
def draft(repo, subset, x):
"""``draft()``
Changeset in draft phase."""
# i18n: "draft" is a keyword
getargs(x, 0, 0, _("draft takes no arguments"))
phase = repo._phasecache.phase
target = phases.draft
condition = lambda r: phase(repo, r) == target
return subset.filter(condition, cache=False)
def extinct(repo, subset, x):
"""``extinct()``
Obsolete changesets with obsolete descendants only.
"""
# i18n: "extinct" is a keyword
getargs(x, 0, 0, _("extinct takes no arguments"))
extincts = obsmod.getrevs(repo, 'extinct')
return subset & extincts
def extra(repo, subset, x):
"""``extra(label, [value])``
Changesets with the given label in the extra metadata, with the given
optional value.
If `value` starts with `re:`, the remainder of the value is treated as
a regular expression. To match a value that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "extra" is a keyword
l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
# i18n: "extra" is a keyword
label = getstring(l[0], _('first argument to extra must be a string'))
value = None
if len(l) > 1:
# i18n: "extra" is a keyword
value = getstring(l[1], _('second argument to extra must be a string'))
kind, value, matcher = _stringmatcher(value)
def _matchvalue(r):
extra = repo[r].extra()
return label in extra and (value is None or matcher(extra[label]))
return subset.filter(lambda r: _matchvalue(r))
def filelog(repo, subset, x):
"""``filelog(pattern)``
Changesets connected to the specified filelog.
For performance reasons, visits only revisions mentioned in the file-level
filelog, rather than filtering through all changesets (much faster, but
doesn't include deletes or duplicate changes). For a slower, more accurate
result, use ``file()``.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file exactly
for efficiency.
If some linkrev points to revisions filtered by the current repoview, we'll
work around it to return a non-filtered value.
"""
# i18n: "filelog" is a keyword
pat = getstring(x, _("filelog requires a pattern"))
s = set()
cl = repo.changelog
if not matchmod.patkind(pat):
f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
files = [f]
else:
m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
files = (f for f in repo[None] if m(f))
for f in files:
backrevref = {} # final value for: filerev -> changerev
lowestchild = {} # lowest known filerev child of a filerev
delayed = [] # filerev with filtered linkrev, for post-processing
lowesthead = None # cache for manifest content of all head revisions
fl = repo.file(f)
for fr in list(fl):
rev = fl.linkrev(fr)
if rev not in cl:
# changerev pointed in linkrev is filtered
# record it for post processing.
delayed.append((fr, rev))
continue
for p in fl.parentrevs(fr):
if 0 <= p and p not in lowestchild:
lowestchild[p] = fr
backrevref[fr] = rev
s.add(rev)
# Post-processing of all filerevs we skipped because they were
# filtered. If such filerevs have known and unfiltered children, this
# means they have an unfiltered appearance out there. We'll use linkrev
# adjustment to find one of these appearances. The lowest known child
# will be used as a starting point because it is the best upper-bound we
# have.
#
# This approach will fail when an unfiltered but linkrev-shadowed
# appearance exists in a head changeset without unfiltered filerev
# children anywhere.
while delayed:
            # must be a descending iteration, to gradually fill in lowest-child
            # information that may be of use to the next item.
fr, rev = delayed.pop()
lkr = rev
child = lowestchild.get(fr)
if child is None:
# search for existence of this file revision in a head revision.
# There are three possibilities:
# - the revision exists in a head and we can find an
# introduction from there,
# - the revision does not exist in a head because it has been
# changed since its introduction: we would have found a child
# and be in the other 'else' clause,
# - all versions of the revision are hidden.
if lowesthead is None:
lowesthead = {}
for h in repo.heads():
fnode = repo[h].manifest().get(f)
if fnode is not None:
lowesthead[fl.rev(fnode)] = h
headrev = lowesthead.get(fr)
if headrev is None:
# content is nowhere unfiltered
continue
rev = repo[headrev][f].introrev()
else:
# the lowest known child is a good upper bound
childcrev = backrevref[child]
# XXX this does not guarantee returning the lowest
# introduction of this revision, but this gives a
# result which is a good start and will fit in most
# cases. We probably need to fix the multiple
# introductions case properly (report each
# introduction, even for identical file revisions)
# once and for all at some point anyway.
for p in repo[childcrev][f].parents():
if p.filerev() == fr:
rev = p.rev()
break
if rev == lkr: # no shadowed entry found
# XXX This should never happen unless some manifest points
# to biggish file revisions (like a revision that uses a
# parent that never appears in the manifest ancestors)
continue
# Fill the data for the next iteration.
for p in fl.parentrevs(fr):
if 0 <= p and p not in lowestchild:
lowestchild[p] = fr
backrevref[fr] = rev
s.add(rev)
return subset & s
def first(repo, subset, x):
"""``first(set, [n])``
An alias for limit().
"""
return limit(repo, subset, x)
def _follow(repo, subset, x, name, followfirst=False):
l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
c = repo['.']
if l:
x = getstring(l[0], _("%s expected a filename") % name)
if x in c:
cx = c[x]
s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
# include the revision responsible for the most recent version
s.add(cx.introrev())
else:
return baseset()
else:
s = _revancestors(repo, baseset([c.rev()]), followfirst)
return subset & s
def follow(repo, subset, x):
"""``follow([file])``
An alias for ``::.`` (ancestors of the working directory's first parent).
If a filename is specified, the history of the given file is followed,
including copies.
"""
return _follow(repo, subset, x, 'follow')
def _followfirst(repo, subset, x):
# ``followfirst([file])``
# Like ``follow([file])`` but follows only the first parent of
# every revision or file revision.
return _follow(repo, subset, x, '_followfirst', followfirst=True)
def getall(repo, subset, x):
"""``all()``
All changesets, the same as ``0:tip``.
"""
# i18n: "all" is a keyword
getargs(x, 0, 0, _("all takes no arguments"))
return subset & spanset(repo) # drop "null" if any
def grep(repo, subset, x):
"""``grep(regex)``
Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
to ensure special escape characters are handled correctly. Unlike
``keyword(string)``, the match is case-sensitive.
"""
try:
# i18n: "grep" is a keyword
gr = re.compile(getstring(x, _("grep requires a string")))
except re.error, e:
raise error.ParseError(_('invalid match pattern: %s') % e)
def matches(x):
c = repo[x]
for e in c.files() + [c.user(), c.description()]:
if gr.search(e):
return True
return False
return subset.filter(matches)
def _matchfiles(repo, subset, x):
# _matchfiles takes a revset list of prefixed arguments:
#
# [p:foo, i:bar, x:baz]
#
# builds a match object from them and filters subset. Allowed
# prefixes are 'p:' for regular patterns, 'i:' for include
# patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
# a revision identifier, or the empty string to reference the
# working directory, from which the match object is
# initialized. Use 'd:' to set the default matching mode, default
# to 'glob'. At most one 'r:' and 'd:' argument can be passed.
# i18n: "_matchfiles" is a keyword
l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
pats, inc, exc = [], [], []
rev, default = None, None
for arg in l:
# i18n: "_matchfiles" is a keyword
s = getstring(arg, _("_matchfiles requires string arguments"))
prefix, value = s[:2], s[2:]
if prefix == 'p:':
pats.append(value)
elif prefix == 'i:':
inc.append(value)
elif prefix == 'x:':
exc.append(value)
elif prefix == 'r:':
if rev is not None:
# i18n: "_matchfiles" is a keyword
raise error.ParseError(_('_matchfiles expected at most one '
'revision'))
if value != '': # empty means working directory; leave rev as None
rev = value
elif prefix == 'd:':
if default is not None:
# i18n: "_matchfiles" is a keyword
raise error.ParseError(_('_matchfiles expected at most one '
'default mode'))
default = value
else:
# i18n: "_matchfiles" is a keyword
raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
if not default:
default = 'glob'
m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
exclude=exc, ctx=repo[rev], default=default)
def matches(x):
for f in repo[x].files():
if m(f):
return True
return False
return subset.filter(matches)
def hasfile(repo, subset, x):
"""``file(pattern)``
Changesets affecting files matched by pattern.
For a faster but less accurate result, consider using ``filelog()``
instead.
This predicate uses ``glob:`` as the default kind of pattern.
"""
# i18n: "file" is a keyword
pat = getstring(x, _("file requires a pattern"))
return _matchfiles(repo, subset, ('string', 'p:' + pat))
def head(repo, subset, x):
"""``head()``
Changeset is a named branch head.
"""
# i18n: "head" is a keyword
getargs(x, 0, 0, _("head takes no arguments"))
hs = set()
for b, ls in repo.branchmap().iteritems():
hs.update(repo[h].rev() for h in ls)
return baseset(hs).filter(subset.__contains__)
def heads(repo, subset, x):
"""``heads(set)``
Members of set with no children in set.
"""
s = getset(repo, subset, x)
ps = parents(repo, subset, x)
return s - ps
def hidden(repo, subset, x):
"""``hidden()``
Hidden changesets.
"""
# i18n: "hidden" is a keyword
getargs(x, 0, 0, _("hidden takes no arguments"))
hiddenrevs = repoview.filterrevs(repo, 'visible')
return subset & hiddenrevs
def keyword(repo, subset, x):
"""``keyword(string)``
Search commit message, user name, and names of changed files for
string. The match is case-insensitive.
"""
# i18n: "keyword" is a keyword
kw = encoding.lower(getstring(x, _("keyword requires a string")))
def matches(r):
c = repo[r]
return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
c.description()])
return subset.filter(matches)
def limit(repo, subset, x):
"""``limit(set, [n])``
First n members of set, defaulting to 1.
"""
# i18n: "limit" is a keyword
l = getargs(x, 1, 2, _("limit requires one or two arguments"))
try:
lim = 1
if len(l) == 2:
# i18n: "limit" is a keyword
lim = int(getstring(l[1], _("limit requires a number")))
except (TypeError, ValueError):
# i18n: "limit" is a keyword
raise error.ParseError(_("limit expects a number"))
ss = subset
os = getset(repo, fullreposet(repo), l[0])
result = []
it = iter(os)
for x in xrange(lim):
try:
y = it.next()
if y in ss:
result.append(y)
except (StopIteration):
break
return baseset(result)
def last(repo, subset, x):
"""``last(set, [n])``
Last n members of set, defaulting to 1.
"""
# i18n: "last" is a keyword
l = getargs(x, 1, 2, _("last requires one or two arguments"))
try:
lim = 1
if len(l) == 2:
# i18n: "last" is a keyword
lim = int(getstring(l[1], _("last requires a number")))
except (TypeError, ValueError):
# i18n: "last" is a keyword
raise error.ParseError(_("last expects a number"))
ss = subset
os = getset(repo, fullreposet(repo), l[0])
os.reverse()
result = []
it = iter(os)
for x in xrange(lim):
try:
y = it.next()
if y in ss:
result.append(y)
except (StopIteration):
break
return baseset(result)
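# Illustrative sketch (not from the original file): limit() and last() differ
# only in which end of the ordered set they take elements from, e.g.
#   repo.revs("limit(branch(default), 3)")   # first three members
#   repo.revs("last(branch(default), 3)")    # last three members
# assuming ``repo`` is a localrepository instance.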
def maxrev(repo, subset, x):
"""``max(set)``
Changeset with highest revision number in set.
"""
os = getset(repo, fullreposet(repo), x)
if os:
m = os.max()
if m in subset:
return baseset([m])
return baseset()
def merge(repo, subset, x):
"""``merge()``
Changeset is a merge changeset.
"""
# i18n: "merge" is a keyword
getargs(x, 0, 0, _("merge takes no arguments"))
cl = repo.changelog
return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
def branchpoint(repo, subset, x):
"""``branchpoint()``
Changesets with more than one child.
"""
# i18n: "branchpoint" is a keyword
getargs(x, 0, 0, _("branchpoint takes no arguments"))
cl = repo.changelog
if not subset:
return baseset()
baserev = min(subset)
parentscount = [0]*(len(repo) - baserev)
for r in cl.revs(start=baserev + 1):
for p in cl.parentrevs(r):
if p >= baserev:
parentscount[p - baserev] += 1
return subset.filter(lambda r: parentscount[r - baserev] > 1)
def minrev(repo, subset, x):
"""``min(set)``
Changeset with lowest revision number in set.
"""
os = getset(repo, fullreposet(repo), x)
if os:
m = os.min()
if m in subset:
return baseset([m])
return baseset()
def modifies(repo, subset, x):
"""``modifies(pattern)``
Changesets modifying files matched by pattern.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file or a
directory.
"""
# i18n: "modifies" is a keyword
pat = getstring(x, _("modifies requires a pattern"))
return checkstatus(repo, subset, pat, 0)
def named(repo, subset, x):
"""``named(namespace)``
The changesets in a given namespace.
If `namespace` starts with `re:`, the remainder of the string is treated as
a regular expression. To match a namespace that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "named" is a keyword
args = getargs(x, 1, 1, _('named requires a namespace argument'))
ns = getstring(args[0],
# i18n: "named" is a keyword
_('the argument to named must be a string'))
kind, pattern, matcher = _stringmatcher(ns)
namespaces = set()
if kind == 'literal':
if pattern not in repo.names:
raise error.RepoLookupError(_("namespace '%s' does not exist")
% ns)
namespaces.add(repo.names[pattern])
else:
for name, ns in repo.names.iteritems():
if matcher(name):
namespaces.add(ns)
if not namespaces:
raise error.RepoLookupError(_("no namespace exists"
" that match '%s'") % pattern)
names = set()
for ns in namespaces:
for name in ns.listnames(repo):
if name not in ns.deprecated:
names.update(repo[n].rev() for n in ns.nodes(repo, name))
names -= set([node.nullrev])
return subset & names
def node_(repo, subset, x):
"""``id(string)``
Revision non-ambiguously specified by the given hex string prefix.
"""
# i18n: "id" is a keyword
l = getargs(x, 1, 1, _("id requires one argument"))
# i18n: "id" is a keyword
n = getstring(l[0], _("id requires a string"))
if len(n) == 40:
try:
rn = repo.changelog.rev(node.bin(n))
except (LookupError, TypeError):
rn = None
else:
rn = None
pm = repo.changelog._partialmatch(n)
if pm is not None:
rn = repo.changelog.rev(pm)
if rn is None:
return baseset()
result = baseset([rn])
return result & subset
def obsolete(repo, subset, x):
"""``obsolete()``
Mutable changeset with a newer version."""
# i18n: "obsolete" is a keyword
getargs(x, 0, 0, _("obsolete takes no arguments"))
obsoletes = obsmod.getrevs(repo, 'obsolete')
return subset & obsoletes
def only(repo, subset, x):
"""``only(set, [set])``
Changesets that are ancestors of the first set that are not ancestors
of any other head in the repo. If a second set is specified, the result
is ancestors of the first set that are not ancestors of the second set
(i.e. ::<set1> - ::<set2>).
"""
cl = repo.changelog
# i18n: "only" is a keyword
args = getargs(x, 1, 2, _('only takes one or two arguments'))
include = getset(repo, fullreposet(repo), args[0])
if len(args) == 1:
if not include:
return baseset()
descendants = set(_revdescendants(repo, include, False))
exclude = [rev for rev in cl.headrevs()
if not rev in descendants and not rev in include]
else:
exclude = getset(repo, fullreposet(repo), args[1])
results = set(cl.findmissingrevs(common=exclude, heads=include))
return subset & results
def origin(repo, subset, x):
"""``origin([set])``
Changesets that were specified as a source for the grafts, transplants or
rebases that created the given revisions. Omitting the optional set is the
same as passing all(). If a changeset created by these operations is itself
specified as a source for one of these operations, only the source changeset
for the first operation is selected.
"""
if x is not None:
dests = getset(repo, fullreposet(repo), x)
else:
dests = fullreposet(repo)
def _firstsrc(rev):
src = _getrevsource(repo, rev)
if src is None:
return None
while True:
prev = _getrevsource(repo, src)
if prev is None:
return src
src = prev
o = set([_firstsrc(r) for r in dests])
o -= set([None])
return subset & o
def outgoing(repo, subset, x):
"""``outgoing([path])``
Changesets not found in the specified destination repository, or the
default push location.
"""
# Avoid cycles.
import discovery
import hg
# i18n: "outgoing" is a keyword
l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
# i18n: "outgoing" is a keyword
dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest)
revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
if revs:
revs = [repo.lookup(rev) for rev in revs]
other = hg.peer(repo, {}, dest)
repo.ui.pushbuffer()
outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
repo.ui.popbuffer()
cl = repo.changelog
o = set([cl.rev(r) for r in outgoing.missing])
return subset & o
def p1(repo, subset, x):
"""``p1([set])``
First parent of changesets in set, or the working directory.
"""
if x is None:
p = repo[x].p1().rev()
if p >= 0:
return subset & baseset([p])
return baseset()
ps = set()
cl = repo.changelog
for r in getset(repo, fullreposet(repo), x):
ps.add(cl.parentrevs(r)[0])
ps -= set([node.nullrev])
return subset & ps
def p2(repo, subset, x):
"""``p2([set])``
Second parent of changesets in set, or the working directory.
"""
if x is None:
ps = repo[x].parents()
try:
p = ps[1].rev()
if p >= 0:
return subset & baseset([p])
return baseset()
except IndexError:
return baseset()
ps = set()
cl = repo.changelog
for r in getset(repo, fullreposet(repo), x):
ps.add(cl.parentrevs(r)[1])
ps -= set([node.nullrev])
return subset & ps
def parents(repo, subset, x):
"""``parents([set])``
The set of all parents for all changesets in set, or the working directory.
"""
if x is None:
ps = set(p.rev() for p in repo[x].parents())
else:
ps = set()
cl = repo.changelog
for r in getset(repo, fullreposet(repo), x):
ps.update(cl.parentrevs(r))
ps -= set([node.nullrev])
return subset & ps
def parentspec(repo, subset, x, n):
"""``set^0``
The set.
``set^1`` (or ``set^``), ``set^2``
First or second parent, respectively, of all changesets in set.
"""
try:
n = int(n[1])
if n not in (0, 1, 2):
raise ValueError
except (TypeError, ValueError):
raise error.ParseError(_("^ expects a number 0, 1, or 2"))
ps = set()
cl = repo.changelog
for r in getset(repo, fullreposet(repo), x):
if n == 0:
ps.add(r)
elif n == 1:
ps.add(cl.parentrevs(r)[0])
elif n == 2:
parents = cl.parentrevs(r)
if len(parents) > 1:
ps.add(parents[1])
return subset & ps
def present(repo, subset, x):
"""``present(set)``
An empty set, if any revision in set isn't found; otherwise,
all revisions in set.
If any of specified revisions is not present in the local repository,
the query is normally aborted. But this predicate allows the query
to continue even in such cases.
"""
try:
return getset(repo, subset, x)
except error.RepoLookupError:
return baseset()
def public(repo, subset, x):
"""``public()``
Changeset in public phase."""
# i18n: "public" is a keyword
getargs(x, 0, 0, _("public takes no arguments"))
phase = repo._phasecache.phase
target = phases.public
condition = lambda r: phase(repo, r) == target
return subset.filter(condition, cache=False)
def remote(repo, subset, x):
"""``remote([id [,path]])``
Local revision that corresponds to the given identifier in a
remote repository, if present. Here, the '.' identifier is a
synonym for the current local branch.
"""
import hg # avoid start-up nasties
# i18n: "remote" is a keyword
l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
q = '.'
if len(l) > 0:
# i18n: "remote" is a keyword
q = getstring(l[0], _("remote requires a string id"))
if q == '.':
q = repo['.'].branch()
dest = ''
if len(l) > 1:
# i18n: "remote" is a keyword
dest = getstring(l[1], _("remote requires a repository path"))
dest = repo.ui.expandpath(dest or 'default')
dest, branches = hg.parseurl(dest)
revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
if revs:
revs = [repo.lookup(rev) for rev in revs]
other = hg.peer(repo, {}, dest)
n = other.lookup(q)
if n in repo:
r = repo[n].rev()
if r in subset:
return baseset([r])
return baseset()
def removes(repo, subset, x):
"""``removes(pattern)``
Changesets which remove files matching pattern.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file or a
directory.
"""
# i18n: "removes" is a keyword
pat = getstring(x, _("removes requires a pattern"))
return checkstatus(repo, subset, pat, 2)
def rev(repo, subset, x):
"""``rev(number)``
Revision with the given numeric identifier.
"""
# i18n: "rev" is a keyword
l = getargs(x, 1, 1, _("rev requires one argument"))
try:
# i18n: "rev" is a keyword
l = int(getstring(l[0], _("rev requires a number")))
except (TypeError, ValueError):
# i18n: "rev" is a keyword
raise error.ParseError(_("rev expects a number"))
if l not in repo.changelog and l != node.nullrev:
return baseset()
return subset & baseset([l])
def matching(repo, subset, x):
"""``matching(revision [, field])``
Changesets in which a given set of fields match the set of fields in the
selected revision or set.
To match more than one field pass the list of fields to match separated
by spaces (e.g. ``author description``).
Valid fields are most regular revision fields and some special fields.
Regular revision fields are ``description``, ``author``, ``branch``,
``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
and ``diff``.
Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
contents of the revision. Two revisions matching their ``diff`` will
also match their ``files``.
Special fields are ``summary`` and ``metadata``:
``summary`` matches the first line of the description.
``metadata`` is equivalent to matching ``description user date``
(i.e. it matches the main metadata fields).
``metadata`` is the default field which is used when no fields are
specified. You can match more than one field at a time.
"""
# i18n: "matching" is a keyword
l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
revs = getset(repo, fullreposet(repo), l[0])
fieldlist = ['metadata']
if len(l) > 1:
fieldlist = getstring(l[1],
# i18n: "matching" is a keyword
_("matching requires a string "
"as its second argument")).split()
# Make sure that there are no repeated fields,
# expand the 'special' 'metadata' field type
# and check the 'files' whenever we check the 'diff'
fields = []
for field in fieldlist:
if field == 'metadata':
fields += ['user', 'description', 'date']
elif field == 'diff':
# a revision matching the diff must also match the files
# since matching the diff is very costly, make sure to
# also match the files first
fields += ['files', 'diff']
else:
if field == 'author':
field = 'user'
fields.append(field)
fields = set(fields)
if 'summary' in fields and 'description' in fields:
# If a revision matches its description it also matches its summary
fields.discard('summary')
# We may want to match more than one field
# Not all fields take the same amount of time to be matched
# Sort the selected fields in order of increasing matching cost
fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
'files', 'description', 'substate', 'diff']
def fieldkeyfunc(f):
try:
return fieldorder.index(f)
except ValueError:
# assume an unknown field is very costly
return len(fieldorder)
fields = list(fields)
fields.sort(key=fieldkeyfunc)
# Each field will be matched with its own "getfield" function
# which will be added to the getfieldfuncs array of functions
getfieldfuncs = []
_funcs = {
'user': lambda r: repo[r].user(),
'branch': lambda r: repo[r].branch(),
'date': lambda r: repo[r].date(),
'description': lambda r: repo[r].description(),
'files': lambda r: repo[r].files(),
'parents': lambda r: repo[r].parents(),
'phase': lambda r: repo[r].phase(),
'substate': lambda r: repo[r].substate,
'summary': lambda r: repo[r].description().splitlines()[0],
'diff': lambda r: list(repo[r].diff(git=True),)
}
for info in fields:
getfield = _funcs.get(info, None)
if getfield is None:
raise error.ParseError(
# i18n: "matching" is a keyword
_("unexpected field name passed to matching: %s") % info)
getfieldfuncs.append(getfield)
# convert the getfield array of functions into a "getinfo" function
# which returns an array of field values (or a single value if there
# is only one field to match)
getinfo = lambda r: [f(r) for f in getfieldfuncs]
def matches(x):
for rev in revs:
target = getinfo(rev)
match = True
for n, f in enumerate(getfieldfuncs):
if target[n] != f(x):
match = False
if match:
return True
return False
return subset.filter(matches)
def reverse(repo, subset, x):
"""``reverse(set)``
Reverse order of set.
"""
l = getset(repo, subset, x)
l.reverse()
return l
def roots(repo, subset, x):
"""``roots(set)``
Changesets in set with no parent changeset in set.
"""
s = getset(repo, fullreposet(repo), x)
subset = baseset([r for r in s if r in subset])
cs = _children(repo, subset, s)
return subset - cs
def secret(repo, subset, x):
"""``secret()``
Changeset in secret phase."""
# i18n: "secret" is a keyword
getargs(x, 0, 0, _("secret takes no arguments"))
phase = repo._phasecache.phase
target = phases.secret
condition = lambda r: phase(repo, r) == target
return subset.filter(condition, cache=False)
def sort(repo, subset, x):
"""``sort(set[, [-]key...])``
Sort set by keys. The default sort order is ascending, specify a key
as ``-key`` to sort in descending order.
The keys can be:
- ``rev`` for the revision number,
- ``branch`` for the branch name,
- ``desc`` for the commit message (description),
- ``user`` for user name (``author`` can be used as an alias),
- ``date`` for the commit date
"""
# i18n: "sort" is a keyword
l = getargs(x, 1, 2, _("sort requires one or two arguments"))
keys = "rev"
if len(l) == 2:
# i18n: "sort" is a keyword
keys = getstring(l[1], _("sort spec must be a string"))
s = l[0]
keys = keys.split()
l = []
def invert(s):
return "".join(chr(255 - ord(c)) for c in s)
revs = getset(repo, subset, s)
if keys == ["rev"]:
revs.sort()
return revs
elif keys == ["-rev"]:
revs.sort(reverse=True)
return revs
for r in revs:
c = repo[r]
e = []
for k in keys:
if k == 'rev':
e.append(r)
elif k == '-rev':
e.append(-r)
elif k == 'branch':
e.append(c.branch())
elif k == '-branch':
e.append(invert(c.branch()))
elif k == 'desc':
e.append(c.description())
elif k == '-desc':
e.append(invert(c.description()))
elif k in 'user author':
e.append(c.user())
elif k in '-user -author':
e.append(invert(c.user()))
elif k == 'date':
e.append(c.date()[0])
elif k == '-date':
e.append(-c.date()[0])
else:
raise error.ParseError(_("unknown sort key %r") % k)
e.append(r)
l.append(e)
l.sort()
return baseset([e[-1] for e in l])
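# Illustrative sketch (assumed usage, not from this file): sort keys are passed
# as a single whitespace-separated string, e.g.
#   repo.revs("sort(all(), '-date branch')")
# which sorts by descending commit date and then by branch name.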
def subrepo(repo, subset, x):
"""``subrepo([pattern])``
Changesets that add, modify or remove the given subrepo. If no subrepo
pattern is named, any subrepo changes are returned.
"""
# i18n: "subrepo" is a keyword
args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
if len(args) != 0:
pat = getstring(args[0], _("subrepo requires a pattern"))
m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
def submatches(names):
k, p, m = _stringmatcher(pat)
for name in names:
if m(name):
yield name
def matches(x):
c = repo[x]
s = repo.status(c.p1().node(), c.node(), match=m)
if len(args) == 0:
return s.added or s.modified or s.removed
if s.added:
return util.any(submatches(c.substate.keys()))
if s.modified:
subs = set(c.p1().substate.keys())
subs.update(c.substate.keys())
for path in submatches(subs):
if c.p1().substate.get(path) != c.substate.get(path):
return True
if s.removed:
return util.any(submatches(c.p1().substate.keys()))
return False
return subset.filter(matches)
def _stringmatcher(pattern):
"""
accepts a string, possibly starting with 're:' or 'literal:' prefix.
returns the matcher name, pattern, and matcher function.
missing or unknown prefixes are treated as literal matches.
helper for tests:
>>> def test(pattern, *tests):
... kind, pattern, matcher = _stringmatcher(pattern)
... return (kind, pattern, [bool(matcher(t)) for t in tests])
exact matching (no prefix):
>>> test('abcdefg', 'abc', 'def', 'abcdefg')
('literal', 'abcdefg', [False, False, True])
regex matching ('re:' prefix)
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
('re', 'a.+b', [False, False, True])
force exact matches ('literal:' prefix)
>>> test('literal:re:foobar', 'foobar', 're:foobar')
('literal', 're:foobar', [False, True])
unknown prefixes are ignored and treated as literals
>>> test('foo:bar', 'foo', 'bar', 'foo:bar')
('literal', 'foo:bar', [False, False, True])
"""
if pattern.startswith('re:'):
pattern = pattern[3:]
try:
regex = re.compile(pattern)
except re.error, e:
raise error.ParseError(_('invalid regular expression: %s')
% e)
return 're', pattern, regex.search
elif pattern.startswith('literal:'):
pattern = pattern[8:]
return 'literal', pattern, pattern.__eq__
def _substringmatcher(pattern):
kind, pattern, matcher = _stringmatcher(pattern)
if kind == 'literal':
matcher = lambda s: pattern in s
return kind, pattern, matcher
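# Behaviour sketch for the two helpers above (illustrative only):
#   kind, pattern, m = _stringmatcher('re:a.+b')    # ('re', 'a.+b', regex.search)
#   kind, pattern, m = _substringmatcher('foo')     # literal becomes a substring test
#   m('xxfooxx')                                    # True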
def tag(repo, subset, x):
"""``tag([name])``
The specified tag by name, or all tagged revisions if no name is given.
If `name` starts with `re:`, the remainder of the name is treated as
a regular expression. To match a tag that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "tag" is a keyword
args = getargs(x, 0, 1, _("tag takes one or no arguments"))
cl = repo.changelog
if args:
pattern = getstring(args[0],
# i18n: "tag" is a keyword
_('the argument to tag must be a string'))
kind, pattern, matcher = _stringmatcher(pattern)
if kind == 'literal':
# avoid resolving all tags
tn = repo._tagscache.tags.get(pattern, None)
if tn is None:
raise error.RepoLookupError(_("tag '%s' does not exist")
% pattern)
s = set([repo[tn].rev()])
else:
s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
else:
s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
return subset & s
def tagged(repo, subset, x):
return tag(repo, subset, x)
def unstable(repo, subset, x):
"""``unstable()``
Non-obsolete changesets with obsolete ancestors.
"""
# i18n: "unstable" is a keyword
getargs(x, 0, 0, _("unstable takes no arguments"))
unstables = obsmod.getrevs(repo, 'unstable')
return subset & unstables
def user(repo, subset, x):
"""``user(string)``
User name contains string. The match is case-insensitive.
If `string` starts with `re:`, the remainder of the string is treated as
a regular expression. To match a user that actually contains `re:`, use
the prefix `literal:`.
"""
return author(repo, subset, x)
# experimental
def wdir(repo, subset, x):
# i18n: "wdir" is a keyword
getargs(x, 0, 0, _("wdir takes no arguments"))
if None in subset or isinstance(subset, fullreposet):
return baseset([None])
return baseset()
# for internal use
def _list(repo, subset, x):
s = getstring(x, "internal error")
if not s:
return baseset()
ls = [repo[r].rev() for r in s.split('\0')]
s = subset
return baseset([r for r in ls if r in s])
# for internal use
def _intlist(repo, subset, x):
s = getstring(x, "internal error")
if not s:
return baseset()
ls = [int(r) for r in s.split('\0')]
s = subset
return baseset([r for r in ls if r in s])
# for internal use
def _hexlist(repo, subset, x):
s = getstring(x, "internal error")
if not s:
return baseset()
cl = repo.changelog
ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
s = subset
return baseset([r for r in ls if r in s])
symbols = {
"adds": adds,
"all": getall,
"ancestor": ancestor,
"ancestors": ancestors,
"_firstancestors": _firstancestors,
"author": author,
"bisect": bisect,
"bisected": bisected,
"bookmark": bookmark,
"branch": branch,
"branchpoint": branchpoint,
"bumped": bumped,
"bundle": bundle,
"children": children,
"closed": closed,
"contains": contains,
"converted": converted,
"date": date,
"desc": desc,
"descendants": descendants,
"_firstdescendants": _firstdescendants,
"destination": destination,
"divergent": divergent,
"draft": draft,
"extinct": extinct,
"extra": extra,
"file": hasfile,
"filelog": filelog,
"first": first,
"follow": follow,
"_followfirst": _followfirst,
"grep": grep,
"head": head,
"heads": heads,
"hidden": hidden,
"id": node_,
"keyword": keyword,
"last": last,
"limit": limit,
"_matchfiles": _matchfiles,
"max": maxrev,
"merge": merge,
"min": minrev,
"modifies": modifies,
"named": named,
"obsolete": obsolete,
"only": only,
"origin": origin,
"outgoing": outgoing,
"p1": p1,
"p2": p2,
"parents": parents,
"present": present,
"public": public,
"remote": remote,
"removes": removes,
"rev": rev,
"reverse": reverse,
"roots": roots,
"sort": sort,
"secret": secret,
"subrepo": subrepo,
"matching": matching,
"tag": tag,
"tagged": tagged,
"user": user,
"unstable": unstable,
"wdir": wdir,
"_list": _list,
"_intlist": _intlist,
"_hexlist": _hexlist,
}
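# Note (an assumption about common practice, not stated in this file):
# extensions typically register extra predicates by adding entries to this
# table, e.g. ``revset.symbols['mypredicate'] = mypredicate``, where the
# hypothetical ``mypredicate(repo, subset, x)`` follows the same signature
# as the functions above.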
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
"adds",
"all",
"ancestor",
"ancestors",
"_firstancestors",
"author",
"bisect",
"bisected",
"bookmark",
"branch",
"branchpoint",
"bumped",
"bundle",
"children",
"closed",
"converted",
"date",
"desc",
"descendants",
"_firstdescendants",
"destination",
"divergent",
"draft",
"extinct",
"extra",
"file",
"filelog",
"first",
"follow",
"_followfirst",
"head",
"heads",
"hidden",
"id",
"keyword",
"last",
"limit",
"_matchfiles",
"max",
"merge",
"min",
"modifies",
"obsolete",
"only",
"origin",
"outgoing",
"p1",
"p2",
"parents",
"present",
"public",
"remote",
"removes",
"rev",
"reverse",
"roots",
"sort",
"secret",
"matching",
"tag",
"tagged",
"user",
"unstable",
"wdir",
"_list",
"_intlist",
"_hexlist",
])
methods = {
"range": rangeset,
"dagrange": dagrange,
"string": stringset,
"symbol": symbolset,
"and": andset,
"or": orset,
"not": notset,
"list": listset,
"func": func,
"ancestor": ancestorspec,
"parent": parentspec,
"parentpost": p1,
"only": only,
}
def optimize(x, small):
if x is None:
return 0, x
smallbonus = 1
if small:
smallbonus = .5
op = x[0]
if op == 'minus':
return optimize(('and', x[1], ('not', x[2])), small)
elif op == 'only':
return optimize(('func', ('symbol', 'only'),
('list', x[1], x[2])), small)
elif op == 'onlypost':
return optimize(('func', ('symbol', 'only'), x[1]), small)
elif op == 'dagrangepre':
return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
elif op == 'dagrangepost':
return optimize(('func', ('symbol', 'descendants'), x[1]), small)
elif op == 'rangepre':
return optimize(('range', ('string', '0'), x[1]), small)
elif op == 'rangepost':
return optimize(('range', x[1], ('string', 'tip')), small)
elif op == 'negate':
return optimize(('string',
'-' + getstring(x[1], _("can't negate that"))), small)
elif op in 'string symbol negate':
return smallbonus, x # single revisions are small
elif op == 'and':
wa, ta = optimize(x[1], True)
wb, tb = optimize(x[2], True)
# (::x and not ::y)/(not ::y and ::x) have a fast path
def isonly(revs, bases):
return (
revs[0] == 'func'
and getstring(revs[1], _('not a symbol')) == 'ancestors'
and bases[0] == 'not'
and bases[1][0] == 'func'
and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
w = min(wa, wb)
if isonly(ta, tb):
return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
if isonly(tb, ta):
return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
if wa > wb:
return w, (op, tb, ta)
return w, (op, ta, tb)
elif op == 'or':
wa, ta = optimize(x[1], False)
wb, tb = optimize(x[2], False)
if wb < wa:
wb, wa = wa, wb
return max(wa, wb), (op, ta, tb)
elif op == 'not':
o = optimize(x[1], not small)
return o[0], (op, o[1])
elif op == 'parentpost':
o = optimize(x[1], small)
return o[0], (op, o[1])
elif op == 'group':
return optimize(x[1], small)
elif op in 'dagrange range list parent ancestorspec':
if op == 'parent':
# x^:y means (x^) : y, not x ^ (:y)
post = ('parentpost', x[1])
if x[2][0] == 'dagrangepre':
return optimize(('dagrange', post, x[2][1]), small)
elif x[2][0] == 'rangepre':
return optimize(('range', post, x[2][1]), small)
wa, ta = optimize(x[1], small)
wb, tb = optimize(x[2], small)
return wa + wb, (op, ta, tb)
elif op == 'func':
f = getstring(x[1], _("not a symbol"))
wa, ta = optimize(x[2], small)
if f in ("author branch closed date desc file grep keyword "
"outgoing user"):
w = 10 # slow
elif f in "modifies adds removes":
w = 30 # slower
elif f == "contains":
w = 100 # very slow
elif f == "ancestor":
w = 1 * smallbonus
elif f in "reverse limit first _intlist":
w = 0
elif f in "sort":
w = 10 # assume most sorts look at changelog
else:
w = 1
return w + wa, (op, x[1], ta)
return 1, x
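# Illustrative example of the weighting above (not from the original file):
# for "contains('x') and branch(default)", optimize() assigns the 'contains'
# operand a much larger weight (100) than 'branch' (10), so the 'and' operands
# are swapped and the cheaper 'branch' test is evaluated first.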
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
"""If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
return X, None otherwise.
"""
if (len(tree) == 3 and tree[:2] == _aliasarg
and tree[2][0] == 'string'):
return tree[2][1]
return None
def _checkaliasarg(tree, known=None):
"""Check tree contains no _aliasarg construct or only ones which
value is in known. Used to avoid alias placeholders injection.
"""
if isinstance(tree, tuple):
arg = _getaliasarg(tree)
if arg is not None and (not known or arg not in known):
raise error.UnknownIdentifier('_aliasarg', [])
for t in tree:
_checkaliasarg(t, known)
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
if c.isalnum() or c in '._@$' or ord(c) > 127)
def _tokenizealias(program, lookup=None):
"""Parse alias declaration/definition into a stream of tokens
    This also allows symbol names to start with ``$`` (for backward
    compatibility), so callers of this function should check whether ``$``
    is also used for unexpected symbols.
"""
return tokenize(program, lookup=lookup,
syminitletters=_aliassyminitletters)
def _parsealiasdecl(decl):
"""Parse alias declaration ``decl``
This returns ``(name, tree, args, errorstr)`` tuple:
- ``name``: of declared alias (may be ``decl`` itself at error)
- ``tree``: parse result (or ``None`` at error)
- ``args``: list of alias argument names (or None for symbol declaration)
- ``errorstr``: detail about detected error (or None)
>>> _parsealiasdecl('foo')
('foo', ('symbol', 'foo'), None, None)
>>> _parsealiasdecl('$foo')
('$foo', None, None, "'$' not for alias arguments")
>>> _parsealiasdecl('foo::bar')
('foo::bar', None, None, 'invalid format')
>>> _parsealiasdecl('foo bar')
('foo bar', None, None, 'at 4: invalid token')
>>> _parsealiasdecl('foo()')
('foo', ('func', ('symbol', 'foo')), [], None)
>>> _parsealiasdecl('$foo()')
('$foo()', None, None, "'$' not for alias arguments")
>>> _parsealiasdecl('foo($1, $2)')
('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
>>> _parsealiasdecl('foo(bar_bar, baz.baz)')
('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
>>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
>>> _parsealiasdecl('foo(bar($1, $2))')
('foo(bar($1, $2))', None, None, 'invalid argument list')
>>> _parsealiasdecl('foo("string")')
('foo("string")', None, None, 'invalid argument list')
>>> _parsealiasdecl('foo($1, $2')
('foo($1, $2', None, None, 'at 10: unexpected token: end')
>>> _parsealiasdecl('foo("string')
('foo("string', None, None, 'at 5: unterminated string')
>>> _parsealiasdecl('foo($1, $2, $1)')
('foo', None, None, 'argument names collide with each other')
"""
p = parser.parser(_tokenizealias, elements)
try:
tree, pos = p.parse(decl)
if (pos != len(decl)):
raise error.ParseError(_('invalid token'), pos)
if isvalidsymbol(tree):
# "name = ...." style
name = getsymbol(tree)
if name.startswith('$'):
return (decl, None, None, _("'$' not for alias arguments"))
return (name, ('symbol', name), None, None)
if isvalidfunc(tree):
# "name(arg, ....) = ...." style
name = getfuncname(tree)
if name.startswith('$'):
return (decl, None, None, _("'$' not for alias arguments"))
args = []
for arg in getfuncargs(tree):
if not isvalidsymbol(arg):
return (decl, None, None, _("invalid argument list"))
args.append(getsymbol(arg))
if len(args) != len(set(args)):
return (name, None, None,
_("argument names collide with each other"))
return (name, ('func', ('symbol', name)), args, None)
return (decl, None, None, _("invalid format"))
except error.ParseError, inst:
return (decl, None, None, parseerrordetail(inst))
def _parsealiasdefn(defn, args):
"""Parse alias definition ``defn``
This function also replaces alias argument references in the
specified definition by ``_aliasarg(ARGNAME)``.
``args`` is a list of alias argument names, or None if the alias
is declared as a symbol.
This returns "tree" as parsing result.
>>> args = ['$1', '$2', 'foo']
>>> print prettyformat(_parsealiasdefn('$1 or foo', args))
(or
(func
('symbol', '_aliasarg')
('string', '$1'))
(func
('symbol', '_aliasarg')
('string', 'foo')))
>>> try:
... _parsealiasdefn('$1 or $bar', args)
... except error.ParseError, inst:
... print parseerrordetail(inst)
at 6: '$' not for alias arguments
>>> args = ['$1', '$10', 'foo']
>>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
(or
(func
('symbol', '_aliasarg')
('string', '$10'))
('symbol', 'foobar'))
>>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
(or
('string', '$1')
('string', 'foo'))
"""
def tokenizedefn(program, lookup=None):
if args:
argset = set(args)
else:
argset = set()
for t, value, pos in _tokenizealias(program, lookup=lookup):
if t == 'symbol':
if value in argset:
# emulate tokenization of "_aliasarg('ARGNAME')":
# "_aliasarg()" is an unknown symbol only used separate
# alias argument placeholders from regular strings.
yield ('symbol', '_aliasarg', pos)
yield ('(', None, pos)
yield ('string', value, pos)
yield (')', None, pos)
continue
elif value.startswith('$'):
raise error.ParseError(_("'$' not for alias arguments"),
pos)
yield (t, value, pos)
p = parser.parser(tokenizedefn, elements)
tree, pos = p.parse(defn)
if pos != len(defn):
raise error.ParseError(_('invalid token'), pos)
return tree
class revsetalias(object):
    # whether this alias's `error` information has already been shown.
    # this avoids showing the same warning multiple times on each `findaliases`.
warned = False
def __init__(self, name, value):
'''Aliases like:
h = heads(default)
b($1) = ancestors($1) - ancestors(default)
'''
self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
if self.error:
self.error = _('failed to parse the declaration of revset alias'
' "%s": %s') % (self.name, self.error)
return
try:
self.replacement = _parsealiasdefn(value, self.args)
# Check for placeholder injection
_checkaliasarg(self.replacement, self.args)
except error.ParseError, inst:
self.error = _('failed to parse the definition of revset alias'
' "%s": %s') % (self.name, parseerrordetail(inst))
def _getalias(aliases, tree):
"""If tree looks like an unexpanded alias, return it. Return None
otherwise.
"""
if isinstance(tree, tuple) and tree:
if tree[0] == 'symbol' and len(tree) == 2:
name = tree[1]
alias = aliases.get(name)
if alias and alias.args is None and alias.tree == tree:
return alias
if tree[0] == 'func' and len(tree) > 1:
if tree[1][0] == 'symbol' and len(tree[1]) == 2:
name = tree[1][1]
alias = aliases.get(name)
if alias and alias.args is not None and alias.tree == tree[:2]:
return alias
return None
def _expandargs(tree, args):
"""Replace _aliasarg instances with the substitution value of the
same name in args, recursively.
"""
if not tree or not isinstance(tree, tuple):
return tree
arg = _getaliasarg(tree)
if arg is not None:
return args[arg]
return tuple(_expandargs(t, args) for t in tree)
def _expandaliases(aliases, tree, expanding, cache):
"""Expand aliases in tree, recursively.
'aliases' is a dictionary mapping user defined aliases to
revsetalias objects.
"""
if not isinstance(tree, tuple):
# Do not expand raw strings
return tree
alias = _getalias(aliases, tree)
if alias is not None:
if alias.error:
raise util.Abort(alias.error)
if alias in expanding:
raise error.ParseError(_('infinite expansion of revset alias "%s" '
'detected') % alias.name)
expanding.append(alias)
if alias.name not in cache:
cache[alias.name] = _expandaliases(aliases, alias.replacement,
expanding, cache)
result = cache[alias.name]
expanding.pop()
if alias.args is not None:
l = getlist(tree[2])
if len(l) != len(alias.args):
raise error.ParseError(
_('invalid number of arguments: %s') % len(l))
l = [_expandaliases(aliases, a, [], cache) for a in l]
result = _expandargs(result, dict(zip(alias.args, l)))
else:
result = tuple(_expandaliases(aliases, t, expanding, cache)
for t in tree)
return result
def findaliases(ui, tree, showwarning=None):
_checkaliasarg(tree)
aliases = {}
for k, v in ui.configitems('revsetalias'):
alias = revsetalias(k, v)
aliases[alias.name] = alias
tree = _expandaliases(aliases, tree, [], {})
if showwarning:
        # warn about problematic (but not referenced) aliases
for name, alias in sorted(aliases.iteritems()):
if alias.error and not alias.warned:
showwarning(_('warning: %s\n') % (alias.error))
alias.warned = True
return tree
def foldconcat(tree):
"""Fold elements to be concatenated by `##`
"""
if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
return tree
if tree[0] == '_concat':
pending = [tree]
l = []
while pending:
e = pending.pop()
if e[0] == '_concat':
pending.extend(reversed(e[1:]))
elif e[0] in ('string', 'symbol'):
l.append(e[1])
else:
msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
raise error.ParseError(msg)
return ('string', ''.join(l))
else:
return tuple(foldconcat(t) for t in tree)
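# Illustrative behaviour (not from the original file): a parsed tree such as
#   ('_concat', ('string', 'ab'), ('symbol', 'cd'))
# is folded into ('string', 'abcd'); any other element type under '##'
# raises a ParseError.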
def parse(spec, lookup=None):
p = parser.parser(tokenize, elements)
return p.parse(spec, lookup=lookup)
def posttreebuilthook(tree, repo):
# hook for extensions to execute code on the optimized tree
pass
def match(ui, spec, repo=None):
if not spec:
raise error.ParseError(_("empty query"))
lookup = None
if repo:
lookup = repo.__contains__
tree, pos = parse(spec, lookup)
if (pos != len(spec)):
raise error.ParseError(_("invalid token"), pos)
if ui:
tree = findaliases(ui, tree, showwarning=ui.warn)
tree = foldconcat(tree)
weight, tree = optimize(tree, True)
posttreebuilthook(tree, repo)
def mfunc(repo, subset=None):
if subset is None:
subset = fullreposet(repo)
if util.safehasattr(subset, 'isascending'):
result = getset(repo, subset, tree)
else:
result = getset(repo, baseset(subset), tree)
return result
return mfunc
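# Illustrative usage sketch (assumed, not part of the original module):
#   m = match(repo.ui, 'heads(branch(default)) and not closed()')
#   revs = m(repo)            # smartset of matching revision numbers
#   revs = m(repo, subset)    # restrict evaluation to an existing smartset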
def formatspec(expr, *args):
'''
This is a convenience function for using revsets internally, and
escapes arguments appropriately. Aliases are intentionally ignored
so that intended expression behavior isn't accidentally subverted.
Supported arguments:
%r = revset expression, parenthesized
%d = int(arg), no quoting
%s = string(arg), escaped and single-quoted
%b = arg.branch(), escaped and single-quoted
%n = hex(arg), single-quoted
%% = a literal '%'
Prefixing the type with 'l' specifies a parenthesized list of that type.
>>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
'(10 or 11):: and ((this()) or (that()))'
>>> formatspec('%d:: and not %d::', 10, 20)
'10:: and not 20::'
>>> formatspec('%ld or %ld', [], [1])
"_list('') or 1"
>>> formatspec('keyword(%s)', 'foo\\xe9')
"keyword('foo\\\\xe9')"
>>> b = lambda: 'default'
>>> b.branch = b
>>> formatspec('branch(%b)', b)
"branch('default')"
>>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
"root(_list('a\\x00b\\x00c\\x00d'))"
'''
def quote(s):
return repr(str(s))
def argtype(c, arg):
if c == 'd':
return str(int(arg))
elif c == 's':
return quote(arg)
elif c == 'r':
parse(arg) # make sure syntax errors are confined
return '(%s)' % arg
elif c == 'n':
return quote(node.hex(arg))
elif c == 'b':
return quote(arg.branch())
def listexp(s, t):
l = len(s)
if l == 0:
return "_list('')"
elif l == 1:
return argtype(t, s[0])
elif t == 'd':
return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
elif t == 's':
return "_list('%s')" % "\0".join(s)
elif t == 'n':
return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
elif t == 'b':
return "_list('%s')" % "\0".join(a.branch() for a in s)
m = l // 2
return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
ret = ''
pos = 0
arg = 0
while pos < len(expr):
c = expr[pos]
if c == '%':
pos += 1
d = expr[pos]
if d == '%':
ret += d
elif d in 'dsnbr':
ret += argtype(d, args[arg])
arg += 1
elif d == 'l':
# a list of some type
pos += 1
d = expr[pos]
ret += listexp(list(args[arg]), d)
arg += 1
else:
raise util.Abort('unexpected revspec format character %s' % d)
else:
ret += c
pos += 1
return ret
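# Illustrative usage sketch (an assumption, not from this file): callers can
# build expressions from untrusted values without hand-quoting, e.g.
#   expr = formatspec('%ld and user(%s)', [10, 11, 12], 'alice')
#   revs = repo.revs(expr)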
def prettyformat(tree):
def _prettyformat(tree, level, lines):
if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
lines.append((level, str(tree)))
else:
lines.append((level, '(%s' % tree[0]))
for s in tree[1:]:
_prettyformat(s, level + 1, lines)
lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
lines = []
_prettyformat(tree, 0, lines)
output = '\n'.join((' '*l + s) for l, s in lines)
return output
def depth(tree):
if isinstance(tree, tuple):
return max(map(depth, tree)) + 1
else:
return 0
def funcsused(tree):
if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
return set()
else:
funcs = set()
for s in tree[1:]:
funcs |= funcsused(s)
if tree[0] == 'func':
funcs.add(tree[1][1])
return funcs
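# Illustrative sketch of the helpers above (not from the original file):
#   tree, pos = parse('heads(branch(default))')
#   depth(tree)       # nesting depth of the parsed tuple tree
#   funcsused(tree)   # set(['heads', 'branch'])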
class abstractsmartset(object):
def __nonzero__(self):
"""True if the smartset is not empty"""
raise NotImplementedError()
def __contains__(self, rev):
"""provide fast membership testing"""
raise NotImplementedError()
def __iter__(self):
"""iterate the set in the order it is supposed to be iterated"""
raise NotImplementedError()
# Attributes containing a function to perform a fast iteration in a given
# direction. A smartset can have none, one, or both defined.
#
# Default value is None instead of a function returning None to avoid
# initializing an iterator just for testing if a fast method exists.
fastasc = None
fastdesc = None
def isascending(self):
"""True if the set will iterate in ascending order"""
raise NotImplementedError()
def isdescending(self):
"""True if the set will iterate in descending order"""
raise NotImplementedError()
def min(self):
"""return the minimum element in the set"""
if self.fastasc is not None:
for r in self.fastasc():
return r
raise ValueError('arg is an empty sequence')
return min(self)
def max(self):
"""return the maximum element in the set"""
if self.fastdesc is not None:
for r in self.fastdesc():
return r
raise ValueError('arg is an empty sequence')
return max(self)
def first(self):
"""return the first element in the set (user iteration perspective)
Return None if the set is empty"""
raise NotImplementedError()
def last(self):
"""return the last element in the set (user iteration perspective)
Return None if the set is empty"""
raise NotImplementedError()
def __len__(self):
"""return the length of the smartsets
This can be expensive on smartset that could be lazy otherwise."""
raise NotImplementedError()
def reverse(self):
"""reverse the expected iteration order"""
raise NotImplementedError()
def sort(self, reverse=True):
"""get the set to iterate in an ascending or descending order"""
raise NotImplementedError()
def __and__(self, other):
"""Returns a new object with the intersection of the two collections.
This is part of the mandatory API for smartset."""
if isinstance(other, fullreposet):
return self
return self.filter(other.__contains__, cache=False)
def __add__(self, other):
"""Returns a new object with the union of the two collections.
This is part of the mandatory API for smartset."""
return addset(self, other)
def __sub__(self, other):
"""Returns a new object with the substraction of the two collections.
This is part of the mandatory API for smartset."""
c = other.__contains__
return self.filter(lambda r: not c(r), cache=False)
def filter(self, condition, cache=True):
"""Returns this smartset filtered by condition as a new smartset.
`condition` is a callable which takes a revision number and returns a
boolean.
This is part of the mandatory API for smartset."""
        # builtins cannot be cached, but they do not need to be
if cache and util.safehasattr(condition, 'func_code'):
condition = util.cachefunc(condition)
return filteredset(self, condition)
class baseset(abstractsmartset):
"""Basic data structure that represents a revset and contains the basic
    operations that it should be able to perform.
Every method in this class should be implemented by any smartset class.
"""
def __init__(self, data=()):
if not isinstance(data, list):
data = list(data)
self._list = data
self._ascending = None
@util.propertycache
def _set(self):
return set(self._list)
@util.propertycache
def _asclist(self):
asclist = self._list[:]
asclist.sort()
return asclist
def __iter__(self):
if self._ascending is None:
return iter(self._list)
elif self._ascending:
return iter(self._asclist)
else:
return reversed(self._asclist)
def fastasc(self):
return iter(self._asclist)
def fastdesc(self):
return reversed(self._asclist)
@util.propertycache
def __contains__(self):
return self._set.__contains__
def __nonzero__(self):
return bool(self._list)
def sort(self, reverse=False):
self._ascending = not bool(reverse)
def reverse(self):
if self._ascending is None:
self._list.reverse()
else:
self._ascending = not self._ascending
def __len__(self):
return len(self._list)
def isascending(self):
"""Returns True if the collection is ascending order, False if not.
This is part of the mandatory API for smartset."""
if len(self) <= 1:
return True
return self._ascending is not None and self._ascending
def isdescending(self):
"""Returns True if the collection is descending order, False if not.
This is part of the mandatory API for smartset."""
if len(self) <= 1:
return True
return self._ascending is not None and not self._ascending
def first(self):
if self:
if self._ascending is None:
return self._list[0]
elif self._ascending:
return self._asclist[0]
else:
return self._asclist[-1]
return None
def last(self):
if self:
if self._ascending is None:
return self._list[-1]
elif self._ascending:
return self._asclist[-1]
else:
return self._asclist[0]
return None
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
return '<%s%s %r>' % (type(self).__name__, d, self._list)
class filteredset(abstractsmartset):
"""Duck type for baseset class which iterates lazily over the revisions in
the subset and contains a function which tests for membership in the
revset
"""
def __init__(self, subset, condition=lambda x: True):
"""
condition: a function that decides whether a revision in the subset
belongs to the revset or not.
"""
self._subset = subset
self._condition = condition
self._cache = {}
def __contains__(self, x):
c = self._cache
if x not in c:
v = c[x] = x in self._subset and self._condition(x)
return v
return c[x]
def __iter__(self):
return self._iterfilter(self._subset)
def _iterfilter(self, it):
cond = self._condition
for x in it:
if cond(x):
yield x
@property
def fastasc(self):
it = self._subset.fastasc
if it is None:
return None
return lambda: self._iterfilter(it())
@property
def fastdesc(self):
it = self._subset.fastdesc
if it is None:
return None
return lambda: self._iterfilter(it())
def __nonzero__(self):
for r in self:
return True
return False
def __len__(self):
# Basic implementation to be changed in future patches.
l = baseset([r for r in self])
return len(l)
def sort(self, reverse=False):
self._subset.sort(reverse=reverse)
def reverse(self):
self._subset.reverse()
def isascending(self):
return self._subset.isascending()
def isdescending(self):
return self._subset.isdescending()
def first(self):
for x in self:
return x
return None
def last(self):
it = None
if self._subset.isascending():
it = self.fastdesc
elif self._subset.isdescending():
it = self.fastasc
if it is None:
# slowly consume everything. This needs improvement
it = lambda: reversed(list(self))
for x in it():
return x
return None
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self._subset)
class addset(abstractsmartset):
"""Represent the addition of two sets
Wrapper structure for lazily adding two structures without losing much
performance on the __contains__ method
If the ascending attribute is set, that means the two structures are
ordered in either an ascending or descending way. Therefore, we can add
them maintaining the order by iterating over both at the same time
"""
def __init__(self, revs1, revs2, ascending=None):
self._r1 = revs1
self._r2 = revs2
self._iter = None
self._ascending = ascending
self._genlist = None
self._asclist = None
def __len__(self):
return len(self._list)
def __nonzero__(self):
return bool(self._r1) or bool(self._r2)
@util.propertycache
def _list(self):
if not self._genlist:
self._genlist = baseset(self._iterator())
return self._genlist
def _iterator(self):
"""Iterate over both collections without repeating elements
If the ascending attribute is not set, iterate over the first one and
then over the second one checking for membership on the first one so we
don't yield any duplicates.
If the ascending attribute is set, iterate over both collections at the
same time, yielding only one value at a time in the given order.
"""
if self._ascending is None:
def gen():
for r in self._r1:
yield r
inr1 = self._r1.__contains__
for r in self._r2:
if not inr1(r):
yield r
gen = gen()
else:
iter1 = iter(self._r1)
iter2 = iter(self._r2)
gen = self._iterordered(self._ascending, iter1, iter2)
return gen
def __iter__(self):
if self._ascending is None:
if self._genlist:
return iter(self._genlist)
return iter(self._iterator())
self._trysetasclist()
if self._ascending:
it = self.fastasc
else:
it = self.fastdesc
if it is None:
# consume the gen and try again
self._list
return iter(self)
return it()
def _trysetasclist(self):
"""populate the _asclist attribute if possible and necessary"""
if self._genlist is not None and self._asclist is None:
self._asclist = sorted(self._genlist)
@property
def fastasc(self):
self._trysetasclist()
if self._asclist is not None:
return self._asclist.__iter__
iter1 = self._r1.fastasc
iter2 = self._r2.fastasc
if None in (iter1, iter2):
return None
return lambda: self._iterordered(True, iter1(), iter2())
@property
def fastdesc(self):
self._trysetasclist()
if self._asclist is not None:
return self._asclist.__reversed__
iter1 = self._r1.fastdesc
iter2 = self._r2.fastdesc
if None in (iter1, iter2):
return None
return lambda: self._iterordered(False, iter1(), iter2())
def _iterordered(self, ascending, iter1, iter2):
"""produce an ordered iteration from two iterators with the same order
The ascending parameter is used to indicate the iteration direction.
"""
choice = max
if ascending:
choice = min
val1 = None
val2 = None
try:
# Consume both iterators in an ordered way until one is
# empty
while True:
if val1 is None:
val1 = iter1.next()
if val2 is None:
val2 = iter2.next()
next = choice(val1, val2)
yield next
if val1 == next:
val1 = None
if val2 == next:
val2 = None
except StopIteration:
# Flush any remaining values and consume the other one
it = iter2
if val1 is not None:
yield val1
it = iter1
elif val2 is not None:
# might have been equality and both are empty
yield val2
for val in it:
yield val
def __contains__(self, x):
return x in self._r1 or x in self._r2
def sort(self, reverse=False):
"""Sort the added set
For this we use the cached list with all the generated values and if we
know they are ascending or descending we can sort them in a smart way.
"""
self._ascending = not reverse
def isascending(self):
return self._ascending is not None and self._ascending
def isdescending(self):
return self._ascending is not None and not self._ascending
def reverse(self):
if self._ascending is None:
self._list.reverse()
else:
self._ascending = not self._ascending
def first(self):
for x in self:
return x
return None
def last(self):
self.reverse()
val = self.first()
self.reverse()
return val
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
class generatorset(abstractsmartset):
"""Wrap a generator for lazy iteration
Wrapper structure for generators that provides lazy membership and can
be iterated more than once.
When asked for membership it generates values until either it finds the
requested one or has gone through all the elements in the generator
"""
def __init__(self, gen, iterasc=None):
"""
gen: a generator producing the values for the generatorset.
"""
self._gen = gen
self._asclist = None
self._cache = {}
self._genlist = []
self._finished = False
self._ascending = True
if iterasc is not None:
if iterasc:
self.fastasc = self._iterator
self.__contains__ = self._asccontains
else:
self.fastdesc = self._iterator
self.__contains__ = self._desccontains
def __nonzero__(self):
for r in self:
return True
return False
def __contains__(self, x):
if x in self._cache:
return self._cache[x]
# Use new values only, as existing values would be cached.
for l in self._consumegen():
if l == x:
return True
self._cache[x] = False
return False
def _asccontains(self, x):
"""version of contains optimised for ascending generator"""
if x in self._cache:
return self._cache[x]
# Use new values only, as existing values would be cached.
for l in self._consumegen():
if l == x:
return True
if l > x:
break
self._cache[x] = False
return False
def _desccontains(self, x):
"""version of contains optimised for descending generator"""
if x in self._cache:
return self._cache[x]
# Use new values only, as existing values would be cached.
for l in self._consumegen():
if l == x:
return True
if l < x:
break
self._cache[x] = False
return False
def __iter__(self):
if self._ascending:
it = self.fastasc
else:
it = self.fastdesc
if it is not None:
return it()
# we need to consume the iterator
for x in self._consumegen():
pass
# recall the same code
return iter(self)
def _iterator(self):
if self._finished:
return iter(self._genlist)
# We have to use this complex iteration strategy to allow multiple
# iterations at the same time. We need to be able to catch revisions
# removed from _consumegen and added to genlist by another instance.
#
# Getting rid of it would provide about a 15% speedup on this
# iteration.
genlist = self._genlist
nextrev = self._consumegen().next
_len = len # cache global lookup
def gen():
i = 0
while True:
if i < _len(genlist):
yield genlist[i]
else:
yield nextrev()
i += 1
return gen()
def _consumegen(self):
cache = self._cache
genlist = self._genlist.append
for item in self._gen:
cache[item] = True
genlist(item)
yield item
if not self._finished:
self._finished = True
asc = self._genlist[:]
asc.sort()
self._asclist = asc
self.fastasc = asc.__iter__
self.fastdesc = asc.__reversed__
def __len__(self):
for x in self._consumegen():
pass
return len(self._genlist)
def sort(self, reverse=False):
self._ascending = not reverse
def reverse(self):
self._ascending = not self._ascending
def isascending(self):
return self._ascending
def isdescending(self):
return not self._ascending
def first(self):
if self._ascending:
it = self.fastasc
else:
it = self.fastdesc
if it is None:
# we need to consume all and try again
for x in self._consumegen():
pass
return self.first()
if self:
return it().next()
return None
def last(self):
if self._ascending:
it = self.fastdesc
else:
it = self.fastasc
if it is None:
# we need to consume all and try again
for x in self._consumegen():
pass
return self.first()
if self:
return it().next()
return None
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
return '<%s%s>' % (type(self).__name__, d)
class spanset(abstractsmartset):
"""Duck type for baseset class which represents a range of revisions and
can work lazily and without having all the range in memory
Note that spanset(x, y) behaves almost like xrange(x, y) except for two
notable points:
- when x > y the iteration will automatically be descending,
- revisions filtered by this repoview will be skipped.
"""
def __init__(self, repo, start=0, end=None):
"""
start: first revision included in the set
(defaults to 0)
end: first revision excluded (last + 1)
(defaults to len(repo))
Spanset will be descending if `end` < `start`.
"""
if end is None:
end = len(repo)
self._ascending = start <= end
if not self._ascending:
start, end = end + 1, start + 1
self._start = start
self._end = end
self._hiddenrevs = repo.changelog.filteredrevs
def sort(self, reverse=False):
self._ascending = not reverse
def reverse(self):
self._ascending = not self._ascending
def _iterfilter(self, iterrange):
s = self._hiddenrevs
for r in iterrange:
if r not in s:
yield r
def __iter__(self):
if self._ascending:
return self.fastasc()
else:
return self.fastdesc()
def fastasc(self):
iterrange = xrange(self._start, self._end)
if self._hiddenrevs:
return self._iterfilter(iterrange)
return iter(iterrange)
def fastdesc(self):
iterrange = xrange(self._end - 1, self._start - 1, -1)
if self._hiddenrevs:
return self._iterfilter(iterrange)
return iter(iterrange)
def __contains__(self, rev):
hidden = self._hiddenrevs
return ((self._start <= rev < self._end)
and not (hidden and rev in hidden))
def __nonzero__(self):
for r in self:
return True
return False
def __len__(self):
if not self._hiddenrevs:
return abs(self._end - self._start)
else:
count = 0
start = self._start
end = self._end
for rev in self._hiddenrevs:
if (end < rev <= start) or (start <= rev < end):
count += 1
return abs(self._end - self._start) - count
def isascending(self):
return self._ascending
def isdescending(self):
return not self._ascending
def first(self):
if self._ascending:
it = self.fastasc
else:
it = self.fastdesc
for x in it():
return x
return None
def last(self):
if self._ascending:
it = self.fastdesc
else:
it = self.fastasc
for x in it():
return x
return None
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
return '<%s%s %d:%d>' % (type(self).__name__, d,
self._start, self._end - 1)
class fullreposet(spanset):
"""a set containing all revisions in the repo
This class exists to host special optimization and magic to handle virtual
revisions such as "null".
"""
def __init__(self, repo):
super(fullreposet, self).__init__(repo)
def __and__(self, other):
"""As self contains the whole repo, all of the other set should also be
in self. Therefore `self & other = other`.
This boldly assumes the other contains valid revs only.
"""
# other is not a smartset, make it so
if not util.safehasattr(other, 'isascending'):
# filter out hidden revisions
# (this boldly assumes all smartsets are pure)
#
# `other` was used with "&", let's assume this is a set-like
# object.
other = baseset(other - self._hiddenrevs)
other.sort(reverse=self.isdescending())
return other
def prettyformatset(revs):
lines = []
rs = repr(revs)
p = 0
while p < len(rs):
q = rs.find('<', p + 1)
if q < 0:
q = len(rs)
l = rs.count('<', 0, p) - rs.count('>', 0, p)
assert l >= 0
lines.append((l, rs[p:q].rstrip()))
p = q
return '\n'.join(' ' * l + s for l, s in lines)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
```
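
The `addset._iterordered` generator above is the core of how two already-sorted smartsets are merged lazily without duplicates. The following is a minimal standalone sketch of the same idea in plain Python; the function name and example values are illustrative only and are not part of Mercurial's API.

```python
def iterordered(ascending, iter1, iter2):
    """Merge two iterators that are already sorted in the same direction,
    yielding each value once, without materializing either iterator."""
    choice = min if ascending else max
    val1 = val2 = None
    iter1, iter2 = iter(iter1), iter(iter2)
    try:
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            nextval = choice(val1, val2)
            yield nextval
            if val1 == nextval:
                val1 = None
            if val2 == nextval:
                val2 = None
    except StopIteration:
        # one side is exhausted; flush the pending value, then drain the other
        rest = iter2
        if val1 is not None:
            yield val1
            rest = iter1
        elif val2 is not None:
            yield val2
        for val in rest:
            yield val

# Example: merging two ascending revision ranges without duplicates
# list(iterordered(True, [0, 2, 4], [1, 2, 5])) == [0, 1, 2, 4, 5]
```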
#### File: site-packages/mercurial/templater.py
```python
from i18n import _
import os, re
import util, config, templatefilters, templatekw, parser, error
import revset as revsetmod
import types
import minirst
# template parsing
elements = {
"(": (20, ("group", 1, ")"), ("func", 1, ")")),
",": (2, None, ("list", 2)),
"|": (5, None, ("|", 5)),
"%": (6, None, ("%", 6)),
")": (0, None, None),
"symbol": (0, ("symbol",), None),
"string": (0, ("string",), None),
"rawstring": (0, ("rawstring",), None),
"end": (0, None, None),
}
def tokenizer(data):
program, start, end = data
pos = start
while pos < end:
c = program[pos]
if c.isspace(): # skip inter-token whitespace
pass
elif c in "(,)%|": # handle simple operators
yield (c, None, pos)
elif (c in '"\'' or c == 'r' and
program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
if c == 'r':
pos += 1
c = program[pos]
decode = False
else:
decode = True
pos += 1
s = pos
while pos < end: # find closing quote
d = program[pos]
if decode and d == '\\': # skip over escaped characters
pos += 2
continue
if d == c:
if not decode:
yield ('rawstring', program[s:pos], s)
break
yield ('string', program[s:pos], s)
break
pos += 1
else:
raise error.ParseError(_("unterminated string"), s)
elif c.isalnum() or c in '_':
s = pos
pos += 1
while pos < end: # find end of symbol
d = program[pos]
if not (d.isalnum() or d == "_"):
break
pos += 1
sym = program[s:pos]
yield ('symbol', sym, s)
pos -= 1
elif c == '}':
pos += 1
break
else:
raise error.ParseError(_("syntax error"), pos)
pos += 1
yield ('end', None, pos)
def compiletemplate(tmpl, context, strtoken="string"):
parsed = []
pos, stop = 0, len(tmpl)
p = parser.parser(tokenizer, elements)
while pos < stop:
n = tmpl.find('{', pos)
if n < 0:
parsed.append((strtoken, tmpl[pos:]))
break
bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
if strtoken == 'string' and bs % 2 == 1:
# escaped (e.g. '\{', '\\\{', but not '\\{' nor r'\{')
parsed.append((strtoken, (tmpl[pos:n - 1] + "{")))
pos = n + 1
continue
if n > pos:
parsed.append((strtoken, tmpl[pos:n]))
pd = [tmpl, n + 1, stop]
parseres, pos = p.parse(pd)
parsed.append(parseres)
return [compileexp(e, context) for e in parsed]
def compileexp(exp, context):
t = exp[0]
if t in methods:
return methods[t](exp, context)
raise error.ParseError(_("unknown method '%s'") % t)
# template evaluation
def getsymbol(exp):
if exp[0] == 'symbol':
return exp[1]
raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
def getlist(x):
if not x:
return []
if x[0] == 'list':
return getlist(x[1]) + [x[2]]
return [x]
def getfilter(exp, context):
f = getsymbol(exp)
if f not in context._filters:
raise error.ParseError(_("unknown function '%s'") % f)
return context._filters[f]
def gettemplate(exp, context):
if exp[0] == 'string' or exp[0] == 'rawstring':
return compiletemplate(exp[1], context, strtoken=exp[0])
if exp[0] == 'symbol':
return context._load(exp[1])
raise error.ParseError(_("expected template specifier"))
def runstring(context, mapping, data):
return data.decode("string-escape")
def runrawstring(context, mapping, data):
return data
def runsymbol(context, mapping, key):
v = mapping.get(key)
if v is None:
v = context._defaults.get(key)
if v is None:
try:
v = context.process(key, mapping)
except TemplateNotFound:
v = ''
if callable(v):
return v(**mapping)
if isinstance(v, types.GeneratorType):
v = list(v)
return v
def buildfilter(exp, context):
func, data = compileexp(exp[1], context)
filt = getfilter(exp[2], context)
return (runfilter, (func, data, filt))
def runfilter(context, mapping, data):
func, data, filt = data
# func() may return a string, a generator of strings or an arbitrary object
# such as a date tuple, but a filter does not want a generator.
thing = func(context, mapping, data)
if isinstance(thing, types.GeneratorType):
thing = stringify(thing)
try:
return filt(thing)
except (ValueError, AttributeError, TypeError):
if isinstance(data, tuple):
dt = data[1]
else:
dt = data
raise util.Abort(_("template filter '%s' is not compatible with "
"keyword '%s'") % (filt.func_name, dt))
def buildmap(exp, context):
func, data = compileexp(exp[1], context)
ctmpl = gettemplate(exp[2], context)
return (runmap, (func, data, ctmpl))
def runtemplate(context, mapping, template):
for func, data in template:
yield func(context, mapping, data)
def runmap(context, mapping, data):
func, data, ctmpl = data
d = func(context, mapping, data)
if callable(d):
d = d()
lm = mapping.copy()
for i in d:
if isinstance(i, dict):
lm.update(i)
lm['originalnode'] = mapping.get('node')
yield runtemplate(context, lm, ctmpl)
else:
# v is not an iterable of dicts, this happens when 'key'
# has been fully expanded already and format is useless.
# If so, return the expanded value.
yield i
def buildfunc(exp, context):
n = getsymbol(exp[1])
args = [compileexp(x, context) for x in getlist(exp[2])]
if n in funcs:
f = funcs[n]
return (f, args)
if n in context._filters:
if len(args) != 1:
raise error.ParseError(_("filter %s expects one argument") % n)
f = context._filters[n]
return (runfilter, (args[0][0], args[0][1], f))
raise error.ParseError(_("unknown function '%s'") % n)
def date(context, mapping, args):
""":date(date[, fmt]): Format a date. See :hg:`help dates` for formatting
strings."""
if not (1 <= len(args) <= 2):
# i18n: "date" is a keyword
raise error.ParseError(_("date expects one or two arguments"))
date = args[0][0](context, mapping, args[0][1])
fmt = None
if len(args) == 2:
fmt = stringify(args[1][0](context, mapping, args[1][1]))
try:
if fmt is None:
return util.datestr(date)
else:
return util.datestr(date, fmt)
except (TypeError, ValueError):
# i18n: "date" is a keyword
raise error.ParseError(_("date expects a date information"))
def diff(context, mapping, args):
""":diff([includepattern [, excludepattern]]): Show a diff, optionally
specifying files to include or exclude."""
if len(args) > 2:
# i18n: "diff" is a keyword
raise error.ParseError(_("diff expects one, two or no arguments"))
def getpatterns(i):
if i < len(args):
s = args[i][1].strip()
if s:
return [s]
return []
ctx = mapping['ctx']
chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
return ''.join(chunks)
def fill(context, mapping, args):
""":fill(text[, width[, initialident[, hangindent]]]): Fill many
paragraphs with optional indentation. See the "fill" filter."""
if not (1 <= len(args) <= 4):
# i18n: "fill" is a keyword
raise error.ParseError(_("fill expects one to four arguments"))
text = stringify(args[0][0](context, mapping, args[0][1]))
width = 76
initindent = ''
hangindent = ''
if 2 <= len(args) <= 4:
try:
width = int(stringify(args[1][0](context, mapping, args[1][1])))
except ValueError:
# i18n: "fill" is a keyword
raise error.ParseError(_("fill expects an integer width"))
try:
initindent = stringify(_evalifliteral(args[2], context, mapping))
hangindent = stringify(_evalifliteral(args[3], context, mapping))
except IndexError:
pass
return templatefilters.fill(text, width, initindent, hangindent)
def pad(context, mapping, args):
""":pad(text, width[, fillchar=' '[, right=False]]): Pad text with a
fill character."""
if not (2 <= len(args) <= 4):
# i18n: "pad" is a keyword
raise error.ParseError(_("pad() expects two to four arguments"))
width = int(args[1][1])
text = stringify(args[0][0](context, mapping, args[0][1]))
if args[0][0] == runstring:
text = stringify(runtemplate(context, mapping,
compiletemplate(text, context)))
right = False
fillchar = ' '
if len(args) > 2:
fillchar = stringify(args[2][0](context, mapping, args[2][1]))
if len(args) > 3:
right = util.parsebool(args[3][1])
if right:
return text.rjust(width, fillchar)
else:
return text.ljust(width, fillchar)
def get(context, mapping, args):
""":get(dict, key): Get an attribute/key from an object. Some keywords
are complex types. This function allows you to obtain the value of an
attribute on these types."""
if len(args) != 2:
# i18n: "get" is a keyword
raise error.ParseError(_("get() expects two arguments"))
dictarg = args[0][0](context, mapping, args[0][1])
if not util.safehasattr(dictarg, 'get'):
# i18n: "get" is a keyword
raise error.ParseError(_("get() expects a dict as first argument"))
key = args[1][0](context, mapping, args[1][1])
yield dictarg.get(key)
def _evalifliteral(arg, context, mapping):
t = stringify(arg[0](context, mapping, arg[1]))
if arg[0] == runstring or arg[0] == runrawstring:
yield runtemplate(context, mapping,
compiletemplate(t, context, strtoken='rawstring'))
else:
yield t
def if_(context, mapping, args):
""":if(expr, then[, else]): Conditionally execute based on the result of
an expression."""
if not (2 <= len(args) <= 3):
# i18n: "if" is a keyword
raise error.ParseError(_("if expects two or three arguments"))
test = stringify(args[0][0](context, mapping, args[0][1]))
if test:
yield _evalifliteral(args[1], context, mapping)
elif len(args) == 3:
yield _evalifliteral(args[2], context, mapping)
def ifcontains(context, mapping, args):
""":ifcontains(search, thing, then[, else]): Conditionally execute based
on whether the item "search" is in "thing"."""
if not (3 <= len(args) <= 4):
# i18n: "ifcontains" is a keyword
raise error.ParseError(_("ifcontains expects three or four arguments"))
item = stringify(args[0][0](context, mapping, args[0][1]))
items = args[1][0](context, mapping, args[1][1])
if item in items:
yield _evalifliteral(args[2], context, mapping)
elif len(args) == 4:
yield _evalifliteral(args[3], context, mapping)
def ifeq(context, mapping, args):
""":ifeq(expr1, expr2, then[, else]): Conditionally execute based on
whether 2 items are equivalent."""
if not (3 <= len(args) <= 4):
# i18n: "ifeq" is a keyword
raise error.ParseError(_("ifeq expects three or four arguments"))
test = stringify(args[0][0](context, mapping, args[0][1]))
match = stringify(args[1][0](context, mapping, args[1][1]))
if test == match:
yield _evalifliteral(args[2], context, mapping)
elif len(args) == 4:
yield _evalifliteral(args[3], context, mapping)
def join(context, mapping, args):
""":join(list, sep): Join items in a list with a delimiter."""
if not (1 <= len(args) <= 2):
# i18n: "join" is a keyword
raise error.ParseError(_("join expects one or two arguments"))
joinset = args[0][0](context, mapping, args[0][1])
if callable(joinset):
jf = joinset.joinfmt
joinset = [jf(x) for x in joinset()]
joiner = " "
if len(args) > 1:
joiner = stringify(args[1][0](context, mapping, args[1][1]))
first = True
for x in joinset:
if first:
first = False
else:
yield joiner
yield x
def label(context, mapping, args):
""":label(label, expr): Apply a label to generated content. Content with
a label applied can result in additional post-processing, such as
automatic colorization."""
if len(args) != 2:
# i18n: "label" is a keyword
raise error.ParseError(_("label expects two arguments"))
# ignore args[0] (the label string) since this is supposed to be a no-op
yield _evalifliteral(args[1], context, mapping)
def revset(context, mapping, args):
""":revset(query[, formatargs...]): Execute a revision set query. See
:hg:`help revset`."""
if not len(args) > 0:
# i18n: "revset" is a keyword
raise error.ParseError(_("revset expects one or more arguments"))
raw = args[0][1]
ctx = mapping['ctx']
repo = ctx.repo()
def query(expr):
m = revsetmod.match(repo.ui, expr)
return m(repo)
if len(args) > 1:
formatargs = list([a[0](context, mapping, a[1]) for a in args[1:]])
revs = query(revsetmod.formatspec(raw, *formatargs))
revs = list([str(r) for r in revs])
else:
revsetcache = mapping['cache'].setdefault("revsetcache", {})
if raw in revsetcache:
revs = revsetcache[raw]
else:
revs = query(raw)
revs = list([str(r) for r in revs])
revsetcache[raw] = revs
return templatekw.showlist("revision", revs, **mapping)
def rstdoc(context, mapping, args):
""":rstdoc(text, style): Format ReStructuredText."""
if len(args) != 2:
# i18n: "rstdoc" is a keyword
raise error.ParseError(_("rstdoc expects two arguments"))
text = stringify(args[0][0](context, mapping, args[0][1]))
style = stringify(args[1][0](context, mapping, args[1][1]))
return minirst.format(text, style=style, keep=['verbose'])
def shortest(context, mapping, args):
""":shortest(node, minlength=4): Obtain the shortest representation of
a node."""
if not (1 <= len(args) <= 2):
# i18n: "shortest" is a keyword
raise error.ParseError(_("shortest() expects one or two arguments"))
node = stringify(args[0][0](context, mapping, args[0][1]))
minlength = 4
if len(args) > 1:
minlength = int(args[1][1])
cl = mapping['ctx']._repo.changelog
def isvalid(test):
try:
try:
cl.index.partialmatch(test)
except AttributeError:
# Pure mercurial doesn't support partialmatch on the index.
# Fallback to the slow way.
if cl._partialmatch(test) is None:
return False
try:
i = int(test)
# if we are a pure int, then starting with zero will not be
# confused as a rev; or, obviously, if the int is larger than
# the value of the tip rev
if test[0] == '0' or i > len(cl):
return True
return False
except ValueError:
return True
except error.RevlogError:
return False
shortest = node
startlength = max(6, minlength)
length = startlength
while True:
test = node[:length]
if isvalid(test):
shortest = test
if length == minlength or length > startlength:
return shortest
length -= 1
else:
length += 1
if len(shortest) <= length:
return shortest
def strip(context, mapping, args):
""":strip(text[, chars]): Strip characters from a string."""
if not (1 <= len(args) <= 2):
# i18n: "strip" is a keyword
raise error.ParseError(_("strip expects one or two arguments"))
text = stringify(args[0][0](context, mapping, args[0][1]))
if len(args) == 2:
chars = stringify(args[1][0](context, mapping, args[1][1]))
return text.strip(chars)
return text.strip()
def sub(context, mapping, args):
""":sub(pattern, replacement, expression): Perform text substitution
using regular expressions."""
if len(args) != 3:
# i18n: "sub" is a keyword
raise error.ParseError(_("sub expects three arguments"))
pat = stringify(args[0][0](context, mapping, args[0][1]))
rpl = stringify(args[1][0](context, mapping, args[1][1]))
src = stringify(_evalifliteral(args[2], context, mapping))
yield re.sub(pat, rpl, src)
def startswith(context, mapping, args):
""":startswith(pattern, text): Returns the value from the "text" argument
if it begins with the content from the "pattern" argument."""
if len(args) != 2:
# i18n: "startswith" is a keyword
raise error.ParseError(_("startswith expects two arguments"))
patn = stringify(args[0][0](context, mapping, args[0][1]))
text = stringify(args[1][0](context, mapping, args[1][1]))
if text.startswith(patn):
return text
return ''
def word(context, mapping, args):
""":word(number, text[, separator]): Return the nth word from a string."""
if not (2 <= len(args) <= 3):
# i18n: "word" is a keyword
raise error.ParseError(_("word expects two or three arguments, got %d")
% len(args))
try:
num = int(stringify(args[0][0](context, mapping, args[0][1])))
except ValueError:
# i18n: "word" is a keyword
raise error.ParseError(
_("Use strings like '3' for numbers passed to word function"))
text = stringify(args[1][0](context, mapping, args[1][1]))
if len(args) == 3:
splitter = stringify(args[2][0](context, mapping, args[2][1]))
else:
splitter = None
tokens = text.split(splitter)
if num >= len(tokens):
return ''
else:
return tokens[num]
methods = {
"string": lambda e, c: (runstring, e[1]),
"rawstring": lambda e, c: (runrawstring, e[1]),
"symbol": lambda e, c: (runsymbol, e[1]),
"group": lambda e, c: compileexp(e[1], c),
# ".": buildmember,
"|": buildfilter,
"%": buildmap,
"func": buildfunc,
}
funcs = {
"date": date,
"diff": diff,
"fill": fill,
"get": get,
"if": if_,
"ifcontains": ifcontains,
"ifeq": ifeq,
"join": join,
"label": label,
"pad": pad,
"revset": revset,
"rstdoc": rstdoc,
"shortest": shortest,
"startswith": startswith,
"strip": strip,
"sub": sub,
"word": word,
}
# template engine
stringify = templatefilters.stringify
def _flatten(thing):
'''yield a single stream from a possibly nested set of iterators'''
if isinstance(thing, str):
yield thing
elif not util.safehasattr(thing, '__iter__'):
if thing is not None:
yield str(thing)
else:
for i in thing:
if isinstance(i, str):
yield i
elif not util.safehasattr(i, '__iter__'):
if i is not None:
yield str(i)
elif i is not None:
for j in _flatten(i):
yield j
def parsestring(s, quoted=True):
'''unwrap quotes if quoted is True'''
if quoted:
if len(s) < 2 or s[0] != s[-1]:
raise SyntaxError(_('unmatched quotes'))
return s[1:-1]
return s
class engine(object):
'''template expansion engine.
template expansion works like this. a map file contains key=value
pairs. if value is quoted, it is treated as string. otherwise, it
is treated as name of template file.
templater is asked to expand a key in map. it looks up key, and
looks for strings like this: {foo}. it expands {foo} by looking up
foo in map, and substituting it. expansion is recursive: it stops
when there is no more {foo} to replace.
expansion also allows formatting and filtering.
format uses key to expand each item in list. syntax is
{key%format}.
filter uses function to transform value. syntax is
{key|filter1|filter2|...}.'''
def __init__(self, loader, filters={}, defaults={}):
self._loader = loader
self._filters = filters
self._defaults = defaults
self._cache = {}
def _load(self, t):
'''load, parse, and cache a template'''
if t not in self._cache:
self._cache[t] = compiletemplate(self._loader(t), self)
return self._cache[t]
def process(self, t, mapping):
'''Perform expansion. t is name of map element to expand.
mapping contains added elements for use during expansion. Returns a
generator.'''
return _flatten(runtemplate(self, mapping, self._load(t)))
engines = {'default': engine}
def stylelist():
paths = templatepaths()
if not paths:
return _('no templates found, try `hg debuginstall` for more info')
dirlist = os.listdir(paths[0])
stylelist = []
for file in dirlist:
split = file.split(".")
if split[0] == "map-cmdline":
stylelist.append(split[1])
return ", ".join(sorted(stylelist))
class TemplateNotFound(util.Abort):
pass
class templater(object):
def __init__(self, mapfile, filters={}, defaults={}, cache={},
minchunk=1024, maxchunk=65536):
'''set up template engine.
mapfile is name of file to read map definitions from.
filters is dict of functions. each transforms a value into another.
defaults is dict of default map definitions.'''
self.mapfile = mapfile or 'template'
self.cache = cache.copy()
self.map = {}
if mapfile:
self.base = os.path.dirname(mapfile)
else:
self.base = ''
self.filters = templatefilters.filters.copy()
self.filters.update(filters)
self.defaults = defaults
self.minchunk, self.maxchunk = minchunk, maxchunk
self.ecache = {}
if not mapfile:
return
if not os.path.exists(mapfile):
raise util.Abort(_("style '%s' not found") % mapfile,
hint=_("available styles: %s") % stylelist())
conf = config.config()
conf.read(mapfile)
for key, val in conf[''].items():
if not val:
raise SyntaxError(_('%s: missing value') % conf.source('', key))
if val[0] in "'\"":
try:
self.cache[key] = parsestring(val)
except SyntaxError, inst:
raise SyntaxError('%s: %s' %
(conf.source('', key), inst.args[0]))
else:
val = 'default', val
if ':' in val[1]:
val = val[1].split(':', 1)
self.map[key] = val[0], os.path.join(self.base, val[1])
def __contains__(self, key):
return key in self.cache or key in self.map
def load(self, t):
'''Get the template for the given template name. Use a local cache.'''
if t not in self.cache:
try:
self.cache[t] = util.readfile(self.map[t][1])
except KeyError, inst:
raise TemplateNotFound(_('"%s" not in template map') %
inst.args[0])
except IOError, inst:
raise IOError(inst.args[0], _('template file %s: %s') %
(self.map[t][1], inst.args[1]))
return self.cache[t]
def __call__(self, t, **mapping):
ttype = t in self.map and self.map[t][0] or 'default'
if ttype not in self.ecache:
self.ecache[ttype] = engines[ttype](self.load,
self.filters, self.defaults)
proc = self.ecache[ttype]
stream = proc.process(t, mapping)
if self.minchunk:
stream = util.increasingchunks(stream, min=self.minchunk,
max=self.maxchunk)
return stream
def templatepaths():
'''return locations used for template files.'''
pathsrel = ['templates']
paths = [os.path.normpath(os.path.join(util.datapath, f))
for f in pathsrel]
return [p for p in paths if os.path.isdir(p)]
def templatepath(name):
'''return location of template file. returns None if not found.'''
for p in templatepaths():
f = os.path.join(p, name)
if os.path.exists(f):
return f
return None
def stylemap(styles, paths=None):
"""Return path to mapfile for a given style.
Searches mapfile in the following locations:
1. templatepath/style/map
2. templatepath/map-style
3. templatepath/map
"""
if paths is None:
paths = templatepaths()
elif isinstance(paths, str):
paths = [paths]
if isinstance(styles, str):
styles = [styles]
for style in styles:
# only plain name is allowed to honor template paths
if (not style
or style in (os.curdir, os.pardir)
or os.sep in style
or os.altsep and os.altsep in style):
continue
locations = [os.path.join(style, 'map'), 'map-' + style]
locations.append('map')
for path in paths:
for location in locations:
mapfile = os.path.join(path, location)
if os.path.isfile(mapfile):
return style, mapfile
raise RuntimeError("No hgweb templates found in %r" % paths)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
```
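
The engine docstring above describes template expansion as repeated substitution of `{foo}` references from a key/value map until nothing is left to expand. As a rough, self-contained illustration of just that recursive substitution (not the actual Mercurial engine, which also handles filters, functions and `%` formatting), consider:

```python
import re

def expand(key, mapping, _depth=0):
    """Naively expand {name} references in mapping[key], recursively.

    Mirrors only the plain-substitution part of the engine described above;
    filters ({x|filter}) and maps ({x%format}) are ignored here.
    """
    if _depth > 20:
        raise ValueError('template recursion limit exceeded')
    text = mapping[key]

    def repl(match):
        name = match.group(1)
        if name in mapping:
            return expand(name, mapping, _depth + 1)
        return match.group(0)  # leave unknown keys untouched

    return re.sub(r'\{(\w+)\}', repl, text)

# Example:
# mapping = {'changeset': 'rev {rev}: {desc}', 'rev': '42', 'desc': 'fix bug'}
# expand('changeset', mapping) == 'rev 42: fix bug'
```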
#### File: site-packages/mercurial/unionrepo.py
```python
from node import nullid
from i18n import _
import os
import util, mdiff, cmdutil, scmutil
import localrepo, changelog, manifest, filelog, revlog, pathutil
class unionrevlog(revlog.revlog):
def __init__(self, opener, indexfile, revlog2, linkmapper):
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
#
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = scmutil.readonlyvfs(opener)
revlog.revlog.__init__(self, opener, indexfile)
self.revlog2 = revlog2
n = len(self)
self.repotiprev = n - 1
self.bundlerevs = set() # used by 'bundle()' revset expression
for rev2 in self.revlog2:
rev = self.revlog2.index[rev2]
# rev numbers - in revlog2, very different from self.rev
_start, _csize, _rsize, _base, linkrev, p1rev, p2rev, node = rev
if linkmapper is None: # link is to same revlog
assert linkrev == rev2 # we never link back
link = n
else: # rev must be mapped from repo2 cl to unified cl by linkmapper
link = linkmapper(linkrev)
if node in self.nodemap:
# this happens for the common revlog revisions
self.bundlerevs.add(self.nodemap[node])
continue
p1node = self.revlog2.node(p1rev)
p2node = self.revlog2.node(p2rev)
e = (None, None, None, None,
link, self.rev(p1node), self.rev(p2node), node)
self.index.insert(-1, e)
self.nodemap[node] = n
self.bundlerevs.add(n)
n += 1
def _chunk(self, rev):
if rev <= self.repotiprev:
return revlog.revlog._chunk(self, rev)
return self.revlog2._chunk(self.node(rev))
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 > self.repotiprev and rev2 > self.repotiprev:
return self.revlog2.revdiff(
self.revlog2.rev(self.node(rev1)),
self.revlog2.rev(self.node(rev2)))
elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
return self.baserevdiff(rev1, rev2)
return mdiff.textdiff(self.revision(self.node(rev1)),
self.revision(self.node(rev2)))
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = self.rev(node)
if node == nullid:
return ""
if rev > self.repotiprev:
text = self.revlog2.revision(node)
self._cache = (node, rev, text)
else:
text = self.baserevision(rev)
# already cached
return text
def baserevision(self, nodeorrev):
# Revlog subclasses may override 'revision' method to modify format of
# content retrieved from revlog. To use unionrevlog with such a class one
# needs to override 'baserevision' and make a more specific call here.
return revlog.revlog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
# Exists for the same purpose as baserevision.
return revlog.revlog.revdiff(self, rev1, rev2)
def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
raise NotImplementedError
def addgroup(self, revs, linkmapper, transaction):
raise NotImplementedError
def strip(self, rev, minlink):
raise NotImplementedError
def checksize(self):
raise NotImplementedError
class unionchangelog(unionrevlog, changelog.changelog):
def __init__(self, opener, opener2):
changelog.changelog.__init__(self, opener)
linkmapper = None
changelog2 = changelog.changelog(opener2)
unionrevlog.__init__(self, opener, self.indexfile, changelog2,
linkmapper)
def baserevision(self, nodeorrev):
# Although changelog doesn't override 'revision' method, some extensions
# may replace this class with another that does. Same story with
# manifest and filelog classes.
return changelog.changelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return changelog.changelog.revdiff(self, rev1, rev2)
class unionmanifest(unionrevlog, manifest.manifest):
def __init__(self, opener, opener2, linkmapper):
manifest.manifest.__init__(self, opener)
manifest2 = manifest.manifest(opener2)
unionrevlog.__init__(self, opener, self.indexfile, manifest2,
linkmapper)
def baserevision(self, nodeorrev):
return manifest.manifest.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return manifest.manifest.revdiff(self, rev1, rev2)
class unionfilelog(unionrevlog, filelog.filelog):
def __init__(self, opener, path, opener2, linkmapper, repo):
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
unionrevlog.__init__(self, opener, self.indexfile, filelog2,
linkmapper)
self._repo = repo
def baserevision(self, nodeorrev):
return filelog.filelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return filelog.filelog.revdiff(self, rev1, rev2)
def iscensored(self, rev):
"""Check if a revision is censored."""
if rev <= self.repotiprev:
return filelog.filelog.iscensored(self, rev)
return self.revlog2.iscensored(rev)
class unionpeer(localrepo.localpeer):
def canpush(self):
return False
class unionrepository(localrepo.localrepository):
def __init__(self, ui, path, path2):
localrepo.localrepository.__init__(self, ui, path)
self.ui.setconfig('phases', 'publish', False, 'unionrepo')
self._url = 'union:%s+%s' % (util.expandpath(path),
util.expandpath(path2))
self.repo2 = localrepo.localrepository(ui, path2)
@localrepo.unfilteredpropertycache
def changelog(self):
return unionchangelog(self.svfs, self.repo2.svfs)
def _clrev(self, rev2):
"""map from repo2 changelog rev to temporary rev in self.changelog"""
node = self.repo2.changelog.node(rev2)
return self.changelog.rev(node)
@localrepo.unfilteredpropertycache
def manifest(self):
return unionmanifest(self.svfs, self.repo2.svfs,
self._clrev)
def url(self):
return self._url
def file(self, f):
return unionfilelog(self.svfs, f, self.repo2.svfs,
self._clrev, self)
def close(self):
self.repo2.close()
def cancopy(self):
return False
def peer(self):
return unionpeer(self)
def getcwd(self):
return os.getcwd() # always outside the repo
def instance(ui, path, create):
if create:
raise util.Abort(_('cannot create new union repository'))
parentpath = ui.config("bundle", "mainreporoot", "")
if not parentpath:
# try to find the correct path to the working directory repo
parentpath = cmdutil.findrepo(os.getcwd())
if parentpath is None:
parentpath = ''
if parentpath:
# Try to make the full path relative so we get a nice, short URL.
# In particular, we don't want temp dir names in test outputs.
cwd = os.getcwd()
if parentpath == cwd:
parentpath = ''
else:
cwd = pathutil.normasprefix(cwd)
if parentpath.startswith(cwd):
parentpath = parentpath[len(cwd):]
if path.startswith('union:'):
s = path.split(":", 1)[1].split("+", 1)
if len(s) == 1:
repopath, repopath2 = parentpath, s[0]
else:
repopath, repopath2 = s
else:
repopath, repopath2 = parentpath, path
return unionrepository(ui, repopath, repopath2)
```
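
`instance()` above accepts either a plain path or a `union:repo1+repo2` URL and falls back to the enclosing repository when only one path is given. A small standalone helper that isolates just that path-splitting rule (the name is illustrative and not part of Mercurial) could look like this:

```python
def splitunionpath(path, parentpath=''):
    """Split a union repo specification into its two repository paths.

    'union:a+b'  -> ('a', 'b')
    'union:b'    -> (parentpath, 'b')
    'b'          -> (parentpath, 'b')
    """
    if path.startswith('union:'):
        parts = path.split(':', 1)[1].split('+', 1)
        if len(parts) == 1:
            return parentpath, parts[0]
        return parts[0], parts[1]
    return parentpath, path

# Example:
# splitunionpath('union:/repos/main+/repos/feature')
#   -> ('/repos/main', '/repos/feature')
# splitunionpath('/repos/feature', parentpath='/repos/main')
#   -> ('/repos/main', '/repos/feature')
```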
#### File: site-packages/mercurial/verify.py
```python
from node import nullid, short
from i18n import _
import os
import revlog, util, error
def verify(repo):
lock = repo.lock()
try:
return _verify(repo)
finally:
lock.release()
def _normpath(f):
# under hg < 2.4, convert didn't sanitize paths properly, so a
# converted repo may contain repeated slashes
while '//' in f:
f = f.replace('//', '/')
return f
def _verify(repo):
repo = repo.unfiltered()
mflinkrevs = {}
filelinkrevs = {}
filenodes = {}
revisions = 0
badrevs = set()
errors = [0]
warnings = [0]
ui = repo.ui
cl = repo.changelog
mf = repo.manifest
lrugetctx = util.lrucachefunc(repo.changectx)
if not repo.url().startswith('file:'):
raise util.Abort(_("cannot verify bundle or remote repos"))
def err(linkrev, msg, filename=None):
if linkrev is not None:
badrevs.add(linkrev)
else:
linkrev = '?'
msg = "%s: %s" % (linkrev, msg)
if filename:
msg = "%s@%s" % (filename, msg)
ui.warn(" " + msg + "\n")
errors[0] += 1
def exc(linkrev, msg, inst, filename=None):
if isinstance(inst, KeyboardInterrupt):
ui.warn(_("interrupted"))
raise
if not str(inst):
inst = repr(inst)
err(linkrev, "%s: %s" % (msg, inst), filename)
def warn(msg):
ui.warn(msg + "\n")
warnings[0] += 1
def checklog(obj, name, linkrev):
if not len(obj) and (havecl or havemf):
err(linkrev, _("empty or missing %s") % name)
return
d = obj.checksize()
if d[0]:
err(None, _("data length off by %d bytes") % d[0], name)
if d[1]:
err(None, _("index contains %d extra bytes") % d[1], name)
if obj.version != revlog.REVLOGV0:
if not revlogv1:
warn(_("warning: `%s' uses revlog format 1") % name)
elif revlogv1:
warn(_("warning: `%s' uses revlog format 0") % name)
def checkentry(obj, i, node, seen, linkrevs, f):
lr = obj.linkrev(obj.rev(node))
if lr < 0 or (havecl and lr not in linkrevs):
if lr < 0 or lr >= len(cl):
msg = _("rev %d points to nonexistent changeset %d")
else:
msg = _("rev %d points to unexpected changeset %d")
err(None, msg % (i, lr), f)
if linkrevs:
if f and len(linkrevs) > 1:
try:
# attempt to filter down to real linkrevs
linkrevs = [l for l in linkrevs
if lrugetctx(l)[f].filenode() == node]
except Exception:
pass
warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
lr = None # can't be trusted
try:
p1, p2 = obj.parents(node)
if p1 not in seen and p1 != nullid:
err(lr, _("unknown parent 1 %s of %s") %
(short(p1), short(node)), f)
if p2 not in seen and p2 != nullid:
err(lr, _("unknown parent 2 %s of %s") %
(short(p2), short(node)), f)
except Exception, inst:
exc(lr, _("checking parents of %s") % short(node), inst, f)
if node in seen:
err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
seen[node] = i
return lr
if os.path.exists(repo.sjoin("journal")):
ui.warn(_("abandoned transaction found - run hg recover\n"))
revlogv1 = cl.version != revlog.REVLOGV0
if ui.verbose or not revlogv1:
ui.status(_("repository uses revlog format %d\n") %
(revlogv1 and 1 or 0))
havecl = len(cl) > 0
havemf = len(mf) > 0
ui.status(_("checking changesets\n"))
refersmf = False
seen = {}
checklog(cl, "changelog", 0)
total = len(repo)
for i in repo:
ui.progress(_('checking'), i, total=total, unit=_('changesets'))
n = cl.node(i)
checkentry(cl, i, n, seen, [i], "changelog")
try:
changes = cl.read(n)
if changes[0] != nullid:
mflinkrevs.setdefault(changes[0], []).append(i)
refersmf = True
for f in changes[3]:
filelinkrevs.setdefault(_normpath(f), []).append(i)
except Exception, inst:
refersmf = True
exc(i, _("unpacking changeset %s") % short(n), inst)
ui.progress(_('checking'), None)
ui.status(_("checking manifests\n"))
seen = {}
if refersmf:
# Do not check manifest if there are only changelog entries with
# null manifests.
checklog(mf, "manifest", 0)
total = len(mf)
for i in mf:
ui.progress(_('checking'), i, total=total, unit=_('manifests'))
n = mf.node(i)
lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
if n in mflinkrevs:
del mflinkrevs[n]
else:
err(lr, _("%s not in changesets") % short(n), "manifest")
try:
for f, fn in mf.readdelta(n).iteritems():
if not f:
err(lr, _("file without name in manifest"))
elif f != "/dev/null": # ignore this in very old repos
filenodes.setdefault(_normpath(f), {}).setdefault(fn, lr)
except Exception, inst:
exc(lr, _("reading manifest delta %s") % short(n), inst)
ui.progress(_('checking'), None)
ui.status(_("crosschecking files in changesets and manifests\n"))
total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes)
count = 0
if havemf:
for c, m in sorted([(c, m) for m in mflinkrevs
for c in mflinkrevs[m]]):
count += 1
if m == nullid:
continue
ui.progress(_('crosschecking'), count, total=total)
err(c, _("changeset refers to unknown manifest %s") % short(m))
mflinkrevs = None # del is bad here due to scope issues
for f in sorted(filelinkrevs):
count += 1
ui.progress(_('crosschecking'), count, total=total)
if f not in filenodes:
lr = filelinkrevs[f][0]
err(lr, _("in changeset but not in manifest"), f)
if havecl:
for f in sorted(filenodes):
count += 1
ui.progress(_('crosschecking'), count, total=total)
if f not in filelinkrevs:
try:
fl = repo.file(f)
lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
except Exception:
lr = None
err(lr, _("in manifest but not in changeset"), f)
ui.progress(_('crosschecking'), None)
ui.status(_("checking files\n"))
storefiles = set()
for f, f2, size in repo.store.datafiles():
if not f:
err(None, _("cannot decode filename '%s'") % f2)
elif size > 0 or not revlogv1:
storefiles.add(_normpath(f))
files = sorted(set(filenodes) | set(filelinkrevs))
total = len(files)
for i, f in enumerate(files):
ui.progress(_('checking'), i, item=f, total=total)
try:
linkrevs = filelinkrevs[f]
except KeyError:
# in manifest but not in changelog
linkrevs = []
if linkrevs:
lr = linkrevs[0]
else:
lr = None
try:
fl = repo.file(f)
except error.RevlogError, e:
err(lr, _("broken revlog! (%s)") % e, f)
continue
for ff in fl.files():
try:
storefiles.remove(ff)
except KeyError:
err(lr, _("missing revlog!"), ff)
checklog(fl, f, lr)
seen = {}
rp = None
for i in fl:
revisions += 1
n = fl.node(i)
lr = checkentry(fl, i, n, seen, linkrevs, f)
if f in filenodes:
if havemf and n not in filenodes[f]:
err(lr, _("%s not in manifests") % (short(n)), f)
else:
del filenodes[f][n]
# verify contents
try:
l = len(fl.read(n))
rp = fl.renamed(n)
if l != fl.size(i):
if len(fl.revision(n)) != fl.size(i):
err(lr, _("unpacked size is %s, %s expected") %
(l, fl.size(i)), f)
except error.CensoredNodeError:
if ui.config("censor", "policy", "abort") == "abort":
err(lr, _("censored file data"), f)
except Exception, inst:
exc(lr, _("unpacking %s") % short(n), inst, f)
# check renames
try:
if rp:
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
found = False
for pctx in ctx.parents():
if rp[0] in pctx:
found = True
break
if not found:
warn(_("warning: copy source of '%s' not"
" in parents of %s") % (f, ctx))
fl2 = repo.file(rp[0])
if not len(fl2):
err(lr, _("empty or missing copy source revlog %s:%s")
% (rp[0], short(rp[1])), f)
elif rp[1] == nullid:
ui.note(_("warning: %s@%s: copy source"
" revision is nullid %s:%s\n")
% (f, lr, rp[0], short(rp[1])))
else:
fl2.rev(rp[1])
except Exception, inst:
exc(lr, _("checking rename of %s") % short(n), inst, f)
# cross-check
if f in filenodes:
fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
for lr, node in sorted(fns):
err(lr, _("%s in manifests not found") % short(node), f)
ui.progress(_('checking'), None)
for f in storefiles:
warn(_("warning: orphan revlog '%s'") % f)
ui.status(_("%d files, %d changesets, %d total revisions\n") %
(len(files), len(cl), revisions))
if warnings[0]:
ui.warn(_("%d warnings encountered!\n") % warnings[0])
if errors[0]:
ui.warn(_("%d integrity errors encountered!\n") % errors[0])
if badrevs:
ui.warn(_("(first damaged changeset appears to be %d)\n")
% min(badrevs))
return 1
```
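
`_verify` above accumulates problems through small closures (`err`, `warn`, `exc`) that update shared counters instead of raising, so that a single run can report every issue found. A stripped-down sketch of that reporting pattern, with hypothetical names and detached from any repository context, is:

```python
def makereporter(write):
    """Return (err, warn, summary) closures sharing mutable counters,
    in the style of the err/warn helpers inside _verify above."""
    errors = [0]
    warnings = [0]
    badrevs = set()

    def err(linkrev, msg, filename=None):
        if linkrev is not None:
            badrevs.add(linkrev)
        else:
            linkrev = '?'
        msg = "%s: %s" % (linkrev, msg)
        if filename:
            msg = "%s@%s" % (filename, msg)
        write(" " + msg + "\n")
        errors[0] += 1

    def warn(msg):
        write(msg + "\n")
        warnings[0] += 1

    def summary():
        if warnings[0]:
            write("%d warnings encountered!\n" % warnings[0])
        if errors[0]:
            write("%d integrity errors encountered!\n" % errors[0])
            if badrevs:
                write("(first damaged changeset appears to be %d)\n"
                      % min(badrevs))
            return 1
        return 0

    return err, warn, summary

# Example:
# import sys
# err, warn, summary = makereporter(sys.stdout.write)
# err(3, "missing revlog!", "a.txt")   # prints " a.txt@3: missing revlog!"
# summary()                            # prints the counts and returns 1
```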
#### File: v2/service/__init__.py
```python
from twilio.base import deserialize
from twilio.base import values
from twilio.base.exceptions import TwilioException
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.ip_messaging.v2.service.channel import ChannelList
from twilio.rest.ip_messaging.v2.service.role import RoleList
from twilio.rest.ip_messaging.v2.service.user import UserList
class ServiceList(ListResource):
def __init__(self, version):
"""
Initialize the ServiceList
:param Version version: Version that contains the resource
:returns: twilio.rest.chat.v2.service.ServiceList
:rtype: twilio.rest.chat.v2.service.ServiceList
"""
super(ServiceList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Services'.format(**self._solution)
def create(self, friendly_name):
"""
Create a new ServiceInstance
:param unicode friendly_name: The friendly_name
:returns: Newly created ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
)
def stream(self, limit=None, page_size=None):
"""
Streams ServiceInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.service.ServiceInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists ServiceInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: list that will contain up to limit results
:rtype: list[twilio.rest.chat.v2.service.ServiceInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ServiceInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServicePage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ServicePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ServiceInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServicePage
"""
resource_url = self._version.absolute_url(self._uri)
if not target_url.startswith(resource_url):
raise TwilioException('Invalid target_url for ServiceInstance resource.')
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ServicePage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.chat.v2.service.ServiceContext
:rtype: twilio.rest.chat.v2.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.chat.v2.service.ServiceContext
:rtype: twilio.rest.chat.v2.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.ServiceList>'
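# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the SDK source): how a caller would
# typically drive ServiceList. `client` is assumed to be a configured
# twilio.rest.Client, whose client.chat.v2.services is assumed to resolve to
# this ServiceList; stream() pages lazily as documented above.
def _example_list_service_sids(client, limit=20):
    """Return the sids of up to `limit` services, paging lazily via stream()."""
    return [service.sid for service in client.chat.v2.services.stream(limit=limit)]
# ---------------------------------------------------------------------------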
class ServicePage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ServicePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.chat.v2.service.ServicePage
:rtype: twilio.rest.chat.v2.service.ServicePage
"""
super(ServicePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ServiceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
return ServiceInstance(
self._version,
payload,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.ServicePage>'
class ServiceContext(InstanceContext):
def __init__(self, version, sid):
"""
Initialize the ServiceContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.chat.v2.service.ServiceContext
:rtype: twilio.rest.chat.v2.service.ServiceContext
"""
super(ServiceContext, self).__init__(version)
# Path Solution
self._solution = {
'sid': sid,
}
self._uri = '/Services/{sid}'.format(**self._solution)
# Dependents
self._channels = None
self._roles = None
self._users = None
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_new_message_sound=values.unset,
notifications_new_message_badge_count_enabled=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_added_to_channel_sound=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_removed_from_channel_sound=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
notifications_invited_to_channel_sound=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param unicode notifications_new_message_sound: The notifications.new_message.sound
:param bool notifications_new_message_badge_count_enabled: The notifications.new_message.badge_count_enabled
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param unicode notifications_added_to_channel_sound: The notifications.added_to_channel.sound
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param unicode notifications_removed_from_channel_sound: The notifications.removed_from_channel.sound
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode notifications_invited_to_channel_sound: The notifications.invited_to_channel.sound
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'DefaultServiceRoleSid': default_service_role_sid,
'DefaultChannelRoleSid': default_channel_role_sid,
'DefaultChannelCreatorRoleSid': default_channel_creator_role_sid,
'ReadStatusEnabled': read_status_enabled,
'ReachabilityEnabled': reachability_enabled,
'TypingIndicatorTimeout': typing_indicator_timeout,
'ConsumptionReportInterval': consumption_report_interval,
'Notifications.NewMessage.Enabled': notifications_new_message_enabled,
'Notifications.NewMessage.Template': notifications_new_message_template,
'Notifications.NewMessage.Sound': notifications_new_message_sound,
'Notifications.NewMessage.BadgeCountEnabled': notifications_new_message_badge_count_enabled,
'Notifications.AddedToChannel.Enabled': notifications_added_to_channel_enabled,
'Notifications.AddedToChannel.Template': notifications_added_to_channel_template,
'Notifications.AddedToChannel.Sound': notifications_added_to_channel_sound,
'Notifications.RemovedFromChannel.Enabled': notifications_removed_from_channel_enabled,
'Notifications.RemovedFromChannel.Template': notifications_removed_from_channel_template,
'Notifications.RemovedFromChannel.Sound': notifications_removed_from_channel_sound,
'Notifications.InvitedToChannel.Enabled': notifications_invited_to_channel_enabled,
'Notifications.InvitedToChannel.Template': notifications_invited_to_channel_template,
'Notifications.InvitedToChannel.Sound': notifications_invited_to_channel_sound,
'PreWebhookUrl': pre_webhook_url,
'PostWebhookUrl': post_webhook_url,
'WebhookMethod': webhook_method,
'WebhookFilters': webhook_filters,
'Limits.ChannelMembers': limits_channel_members,
'Limits.UserChannels': limits_user_channels,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.chat.v2.service.channel.ChannelList
:rtype: twilio.rest.chat.v2.service.channel.ChannelList
"""
if self._channels is None:
self._channels = ChannelList(
self._version,
service_sid=self._solution['sid'],
)
return self._channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.chat.v2.service.role.RoleList
:rtype: twilio.rest.chat.v2.service.role.RoleList
"""
if self._roles is None:
self._roles = RoleList(
self._version,
service_sid=self._solution['sid'],
)
return self._roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.chat.v2.service.user.UserList
:rtype: twilio.rest.chat.v2.service.user.UserList
"""
if self._users is None:
self._users = UserList(
self._version,
service_sid=self._solution['sid'],
)
return self._users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.ServiceContext {}>'.format(context)
class ServiceInstance(InstanceResource):
def __init__(self, version, payload, sid=None):
"""
Initialize the ServiceInstance
:returns: twilio.rest.chat.v2.service.ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
super(ServiceInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'friendly_name': payload['friendly_name'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'default_service_role_sid': payload['default_service_role_sid'],
'default_channel_role_sid': payload['default_channel_role_sid'],
'default_channel_creator_role_sid': payload['default_channel_creator_role_sid'],
'read_status_enabled': payload['read_status_enabled'],
'reachability_enabled': payload['reachability_enabled'],
'typing_indicator_timeout': deserialize.integer(payload['typing_indicator_timeout']),
'consumption_report_interval': deserialize.integer(payload['consumption_report_interval']),
'limits': payload['limits'],
'pre_webhook_url': payload['pre_webhook_url'],
'post_webhook_url': payload['post_webhook_url'],
'webhook_method': payload['webhook_method'],
'webhook_filters': payload['webhook_filters'],
'notifications': payload['notifications'],
'url': payload['url'],
'links': payload['links'],
}
# Context
self._context = None
self._solution = {
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ServiceContext for this ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceContext
"""
if self._context is None:
self._context = ServiceContext(
self._version,
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def default_service_role_sid(self):
"""
:returns: The default_service_role_sid
:rtype: unicode
"""
return self._properties['default_service_role_sid']
@property
def default_channel_role_sid(self):
"""
:returns: The default_channel_role_sid
:rtype: unicode
"""
return self._properties['default_channel_role_sid']
@property
def default_channel_creator_role_sid(self):
"""
:returns: The default_channel_creator_role_sid
:rtype: unicode
"""
return self._properties['default_channel_creator_role_sid']
@property
def read_status_enabled(self):
"""
:returns: The read_status_enabled
:rtype: bool
"""
return self._properties['read_status_enabled']
@property
def reachability_enabled(self):
"""
:returns: The reachability_enabled
:rtype: bool
"""
return self._properties['reachability_enabled']
@property
def typing_indicator_timeout(self):
"""
:returns: The typing_indicator_timeout
:rtype: unicode
"""
return self._properties['typing_indicator_timeout']
@property
def consumption_report_interval(self):
"""
:returns: The consumption_report_interval
:rtype: unicode
"""
return self._properties['consumption_report_interval']
@property
def limits(self):
"""
:returns: The limits
:rtype: dict
"""
return self._properties['limits']
@property
def pre_webhook_url(self):
"""
:returns: The pre_webhook_url
:rtype: unicode
"""
return self._properties['pre_webhook_url']
@property
def post_webhook_url(self):
"""
:returns: The post_webhook_url
:rtype: unicode
"""
return self._properties['post_webhook_url']
@property
def webhook_method(self):
"""
:returns: The webhook_method
:rtype: unicode
"""
return self._properties['webhook_method']
@property
def webhook_filters(self):
"""
:returns: The webhook_filters
:rtype: unicode
"""
return self._properties['webhook_filters']
@property
def notifications(self):
"""
:returns: The notifications
:rtype: dict
"""
return self._properties['notifications']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_new_message_sound=values.unset,
notifications_new_message_badge_count_enabled=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_added_to_channel_sound=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_removed_from_channel_sound=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
notifications_invited_to_channel_sound=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param unicode notifications_new_message_sound: The notifications.new_message.sound
:param bool notifications_new_message_badge_count_enabled: The notifications.new_message.badge_count_enabled
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param unicode notifications_added_to_channel_sound: The notifications.added_to_channel.sound
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param unicode notifications_removed_from_channel_sound: The notifications.removed_from_channel.sound
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode notifications_invited_to_channel_sound: The notifications.invited_to_channel.sound
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_service_role_sid=default_service_role_sid,
default_channel_role_sid=default_channel_role_sid,
default_channel_creator_role_sid=default_channel_creator_role_sid,
read_status_enabled=read_status_enabled,
reachability_enabled=reachability_enabled,
typing_indicator_timeout=typing_indicator_timeout,
consumption_report_interval=consumption_report_interval,
notifications_new_message_enabled=notifications_new_message_enabled,
notifications_new_message_template=notifications_new_message_template,
notifications_new_message_sound=notifications_new_message_sound,
notifications_new_message_badge_count_enabled=notifications_new_message_badge_count_enabled,
notifications_added_to_channel_enabled=notifications_added_to_channel_enabled,
notifications_added_to_channel_template=notifications_added_to_channel_template,
notifications_added_to_channel_sound=notifications_added_to_channel_sound,
notifications_removed_from_channel_enabled=notifications_removed_from_channel_enabled,
notifications_removed_from_channel_template=notifications_removed_from_channel_template,
notifications_removed_from_channel_sound=notifications_removed_from_channel_sound,
notifications_invited_to_channel_enabled=notifications_invited_to_channel_enabled,
notifications_invited_to_channel_template=notifications_invited_to_channel_template,
notifications_invited_to_channel_sound=notifications_invited_to_channel_sound,
pre_webhook_url=pre_webhook_url,
post_webhook_url=post_webhook_url,
webhook_method=webhook_method,
webhook_filters=webhook_filters,
limits_channel_members=limits_channel_members,
limits_user_channels=limits_user_channels,
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.chat.v2.service.channel.ChannelList
:rtype: twilio.rest.chat.v2.service.channel.ChannelList
"""
return self._proxy.channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.chat.v2.service.role.RoleList
:rtype: twilio.rest.chat.v2.service.role.RoleList
"""
return self._proxy.roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.chat.v2.service.user.UserList
:rtype: twilio.rest.chat.v2.service.user.UserList
"""
return self._proxy.users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.ServiceInstance {}>'.format(context)
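# --- Usage sketch (not part of the generated library) ---
# The account SID, auth token and service SID below are placeholders, and the client
# construction assumes the public twilio-python entry point rather than anything defined
# in this file.
#
# from twilio.rest import Client
#
# client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#
# # Fetch one chat Service through the ServiceContext defined above.
# service = client.chat.v2.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()
# print(service.friendly_name)
#
# # Update a setting and walk a dependent list via the same context.
# service = service.update(reachability_enabled=True)
# for role in client.chat.v2.services(service.sid).roles.list():
#     print(role.sid)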
``` |
{
"source": "JLXIA/esp",
"score": 2
} |
#### File: src/tools/jwt_generator.py
```python
import argparse
import sys
import jwt # pip install PyJWT and pip install cryptography.
""" This script is used to generate ES256/RS256-signed jwt token."""
"""commands to generate private_key_file:
ES256: $ openssl ecparam -genkey -name prime256v1 -noout -out private_key.pem
RS256: $ openssl genpkey -algorithm RSA -out private_key.pem -pkeyopt rsa_keygen_bits:2048
"""
def main(args):
# JWT token generation.
with open(args.private_key_file, 'r') as f:
try:
secret = f.read()
except:
print("Failed to load private key.")
sys.exit()
# Token headers
hdrs = {'alg': args.alg,
'typ': 'JWT'}
if args.kid:
hdrs['kid'] = args.kid
# Token claims
claims = {'iss': args.iss,
'sub': args.iss,
'aud': args.aud}
if args.email:
claims['email'] = args.email
if args.azp:
claims['azp'] = args.azp
if args.exp:
claims['exp'] = args.exp
# Change claim and headers field to fit needs.
jwt_token = jwt.encode(claims,
secret,
algorithm=args.alg,
headers=hdrs)
print(args.alg + "-signed jwt:")
print(jwt_token)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"alg",
help="Signing algorithm, i.e., ES256/RS256.")
parser.add_argument(
"iss",
help="Token issuer, which is also used for sub claim.")
parser.add_argument(
"aud",
help="Audience. This must match 'audience' in the security configuration"
" in the swagger spec.")
parser.add_argument(
"private_key_file",
help="The path to the generated ES256/RS256 private key file, e.g., /path/to/private_key.pem.")
#optional arguments
parser.add_argument("-e", "--email", help="Preferred e-mail address.")
parser.add_argument("-a", "--azp", help="Authorized party - the party to which the ID Token was issued.")
parser.add_argument("-x", "--exp", help="Token expiration claim.")
parser.add_argument("-k", "--kid", help="Key id.")
main(parser.parse_args())
``` |
{
"source": "jlxip/BatPack",
"score": 3
} |
#### File: jlxip/BatPack/batpack.py
```python
import zipfile, random, os, base64
charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def rzname(n=8):
return ''.join(random.choice(charset) for _ in range(n))
def gfp(path, nd='', f='', PS=False): # Get full path
sep = '\\'
if PS:
sep += '\\' # An extra backslash to escape the directory separator
if path[0] == '%':
path = '$env:' + path.split('%')[1]
fp = path + sep
if not nd == '':
fp += nd + sep
if not f == '':
fp += f
if PS: fp = fp.replace(' ', '` ')
return fp
if __name__ == '__main__':
print 'B A T P A C K\n'
print 'Enter files. Leave in blank when finished.'
files = []
while True:
f = raw_input('[+] ')
if f == '':
break
else:
if os.path.isfile(f):
files.append(f)
else:
print 'The file \'%s\' does not exist.' % f
if len(files) == 0:
print 'You have to enter at least one file.'
exit()
print '\n'
possible_paths = ['%TEMP%', '.', '%APPDATA%']
print 'Path of decompression:'
for idx, v in enumerate(possible_paths):
print '%i: %s' % (idx, v)
path = raw_input('[0] ')
if path == '': path = 0;
if 0 <= int(path) < len(possible_paths):
path = possible_paths[int(path)]
else:
print 'Invalid option.'
exit()
print '\n'
print 'New directory? Leave blank if not used.'
nd = raw_input('[?] ')
print '\n'
print 'Select the file to be executed:'
for idx, v in enumerate(files):
print '%i: %s' % (idx, v)
e = raw_input('[0] ')
if e == '': e = 0
if 0 <= int(e) < len(files):
e = files[int(e)]
else:
print 'Invalid option.'
exit()
print '\n'
print 'Delete decompressed files when execution is finished? (y/n)'
df = raw_input('[y] ').lower()
if df == '': df = 'y'
if not df == 'y' and not df == 'n':
print 'Invalid option.'
exit()
print '\n'
print 'Melt the bat? (y/n)'
m = raw_input('[n] ').lower()
if m == '': m = 'n'
if not m == 'y' and not m == 'n':
print 'Invalid option.'
exit()
print '\n'
print 'Internal zip name. Leave blank for an 8-byte alphanumeric random string.'
zname = raw_input('[?] ')
if zname == '': zname = rzname()
print '\n'
# TODO: OBFUSCATE BAT
print '\n'
print 'Compressing files...'
zip = zipfile.ZipFile('tmp.zip', 'w', zipfile.ZIP_DEFLATED)
for i in files:
zip.write(i)
zip.close()
print 'Encoding zip file in memory...'
b64 = base64.b64encode(open('tmp.zip', 'rb').read())
os.remove('tmp.zip')
print 'Generating bat file in memory...'
batcontent = '@echo %s>%s\r\n' % (b64, zname+'.txt')
batcontent += '@certutil -decode \"{0}.txt\" \"{0}.zip\"\r\n'.format(zname)
batcontent += '@powershell expand-archive \"%s.zip\" \"%s\"\r\n' % (zname, gfp(path, nd, '', True))
batcontent += '@del \"{0}.txt\" \"{0}.zip\"\r\n'.format(zname)
batcontent += '@\"%s\"\r\n' % gfp(path, nd, e)
if df == 'y':
batcontent += '@del '
for f in files:
batcontent += '\"'
batcontent += gfp(path, nd, f)
batcontent += '\" '
batcontent += '\r\n'
# TODO: REMOVE DIRECTORY IF NECESSARY
if m == 'y': # This has to be the last command
batcontent += '@del \"%0\"\r\n'
print 'Writing bat file...'
bat = open('packed.bat', 'w')
bat.write(batcontent)
bat.close()
print 'Finished!'
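# Rough shape of the generated packed.bat, reconstructed from the batcontent lines above
# (illustrative only: 'Ab12Cd34' stands in for the random internal zip name, payload.exe
# for the selected executable, and <dest> for the chosen decompression path):
#   @echo <base64 of the zip>>Ab12Cd34.txt
#   @certutil -decode "Ab12Cd34.txt" "Ab12Cd34.zip"
#   @powershell expand-archive "Ab12Cd34.zip" "<dest>"
#   @del "Ab12Cd34.txt" "Ab12Cd34.zip"
#   @"<dest>\payload.exe"
#   @del "<dest>\payload.exe"     (only when delete-after-run is chosen)
#   @del "%0"                     (only when melt is chosen)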
``` |
{
"source": "jlyheden/openstacksdk",
"score": 2
} |
#### File: openstacksdk/openstack/exceptions.py
```python
import json
import re
from requests import exceptions as _rex
class SDKException(Exception):
"""The base exception class for all exceptions this library raises."""
def __init__(self, message=None, extra_data=None):
self.message = self.__class__.__name__ if message is None else message
self.extra_data = extra_data
super(SDKException, self).__init__(self.message)
OpenStackCloudException = SDKException
class EndpointNotFound(SDKException):
"""A mismatch occurred between what the client and server expect."""
def __init__(self, message=None):
super(EndpointNotFound, self).__init__(message)
class InvalidResponse(SDKException):
"""The response from the server is not valid for this request."""
def __init__(self, response):
super(InvalidResponse, self).__init__()
self.response = response
class InvalidRequest(SDKException):
"""The request to the server is not valid."""
def __init__(self, message=None):
super(InvalidRequest, self).__init__(message)
class HttpException(SDKException, _rex.HTTPError):
def __init__(self, message='Error', response=None,
http_status=None,
details=None, request_id=None):
# TODO(shade) Remove http_status parameter and the ability for response
# to be None once we're not mocking Session everywhere.
if not message:
if response is not None:
message = "{name}: {code}".format(
name=self.__class__.__name__,
code=response.status_code)
else:
message = "{name}: Unknown error".format(
name=self.__class__.__name__)
# Call directly rather than via super to control parameters
SDKException.__init__(self, message=message)
_rex.HTTPError.__init__(self, message, response=response)
if response is not None:
self.request_id = response.headers.get('x-openstack-request-id')
self.status_code = response.status_code
else:
self.request_id = request_id
self.status_code = http_status
self.details = details
self.url = self.request and self.request.url or None
self.method = self.request and self.request.method or None
self.source = "Server"
if self.status_code is not None and (400 <= self.status_code < 500):
self.source = "Client"
def __unicode__(self):
# 'Error' is the default value for self.message. If self.message isn't
# 'Error', then someone has set a more informative error message
# and we should use it. If it is 'Error', then we should construct a
# better message from the information we do have.
if not self.url or self.message == 'Error':
return self.message
if self.url:
remote_error = "{source} Error for url: {url}".format(
source=self.source, url=self.url)
if self.details:
remote_error += ', '
if self.details:
remote_error += str(self.details)
return "{message}: {remote_error}".format(
message=super(HttpException, self).__str__(),
remote_error=remote_error)
def __str__(self):
return self.__unicode__()
class BadRequestException(HttpException):
"""HTTP 400 Bad Request."""
pass
class ConflictException(HttpException):
"""HTTP 409 Conflict."""
pass
class PreconditionFailedException(HttpException):
"""HTTP 412 Precondition Failed."""
pass
class MethodNotSupported(SDKException):
"""The resource does not support this operation type."""
def __init__(self, resource, method):
# This needs to work with both classes and instances.
try:
name = resource.__name__
except AttributeError:
name = resource.__class__.__name__
message = ('The %s method is not supported for %s.%s' %
(method, resource.__module__, name))
super(MethodNotSupported, self).__init__(message=message)
class DuplicateResource(SDKException):
"""More than one resource exists with that name."""
pass
class ResourceNotFound(HttpException):
"""No resource exists with that name or id."""
pass
NotFoundException = ResourceNotFound
class ResourceTimeout(SDKException):
"""Timeout waiting for resource."""
pass
class ResourceFailure(SDKException):
"""General resource failure."""
pass
class InvalidResourceQuery(SDKException):
"""Invalid query params for resource."""
pass
def _extract_message(obj):
if isinstance(obj, dict):
# Most of services: compute, network
if obj.get('message'):
return obj['message']
# Ironic starting with Stein
elif obj.get('faultstring'):
return obj['faultstring']
elif isinstance(obj, str):
# Ironic before Stein has double JSON encoding, nobody remembers why.
try:
obj = json.loads(obj)
except Exception:
pass
else:
return _extract_message(obj)
def raise_from_response(response, error_message=None):
"""Raise an instance of an HTTPException based on keystoneauth response."""
if response.status_code < 400:
return
if response.status_code == 409:
cls = ConflictException
elif response.status_code == 404:
cls = NotFoundException
elif response.status_code == 400:
cls = BadRequestException
elif response.status_code == 412:
cls = PreconditionFailedException
else:
cls = HttpException
details = None
content_type = response.headers.get('content-type', '')
if response.content and 'application/json' in content_type:
# Iterate over the nested objects to retrieve "message" attribute.
# TODO(shade) Add exception handling for times when the content type
# is lying.
try:
content = response.json()
messages = [_extract_message(obj) for obj in content.values()]
# Join all of the messages together nicely and filter out any
# objects that don't have a "message" attr.
details = '\n'.join(msg for msg in messages if msg)
except Exception:
details = response.text
elif response.content and 'text/html' in content_type:
# Split the lines, strip whitespace and inline HTML from the response.
details = [re.sub(r'<.+?>', '', i.strip())
for i in response.text.splitlines()]
details = list(set([msg for msg in details if msg]))
# Return joined string separated by colons.
details = ': '.join(details)
if not details:
details = response.reason if response.reason else response.text
http_status = response.status_code
request_id = response.headers.get('x-openstack-request-id')
raise cls(
message=error_message, response=response, details=details,
http_status=http_status, request_id=request_id
)
class UnsupportedServiceVersion(Warning):
"""The user has configured a major version that SDK doesn't know."""
class ArgumentDeprecationWarning(Warning):
"""A deprecated argument has been provided."""
pass
class ConfigException(SDKException):
"""Something went wrong with parsing your OpenStack Config."""
class NotSupported(SDKException):
"""Request cannot be performed by any supported API version."""
class ValidationException(SDKException):
"""Validation failed for resource."""
class TaskManagerStopped(SDKException):
"""Operations were attempted on a stopped TaskManager."""
class ServiceDisabledException(ConfigException):
"""This service is disabled for reasons."""
class ServiceDiscoveryException(SDKException):
"""The service cannot be discovered."""
``` |
{
"source": "jlyheden/rsyncbacker",
"score": 3
} |
#### File: rsyncbacker/tests/test_utils.py
```python
__author__ = 'johan'
import unittest
from rsyncbacker.util import *
class TestUtils(unittest.TestCase):
def test_ip_to_hex(self):
self.assertEquals("0x73FF0861", ip_to_hex("115.255.8.97"))
def test_hex_to_dotted(self):
self.assertEquals("255.255.255.0", hex_to_dotted("0xffffff00"))
def test_get_ipv4_network(self):
self.assertEquals("192.168.0.0", get_ipv4_network("192.168.0.1", "255.255.255.0"))
def test_get_ip_from_unknown(self):
self.assertEquals("127.0.0.1", get_ip_from_unknown("127.0.0.1"))
self.assertEquals("127.0.0.1", get_ip_from_unknown("localhost"))
def test_is_host_on_lan_one_interface(self):
ifaces = [
{
"ip": "192.168.1.10",
"netmask": "255.255.255.0"
}
]
self.assertTrue(is_host_on_lan("192.168.1.150", ifaces))
self.assertFalse(is_host_on_lan("192.168.2.150", ifaces))
def test_is_host_on_lan_two_interfaces(self):
ifaces = [
{
"ip": "192.168.1.10",
"netmask": "255.255.255.0"
},
{
"ip": "10.0.0.123",
"netmask": "255.0.0.0"
}
]
self.assertTrue(is_host_on_lan("192.168.1.150", ifaces))
self.assertFalse(is_host_on_lan("192.168.2.150", ifaces))
self.assertTrue(is_host_on_lan("10.100.23.54", ifaces))
self.assertFalse(is_host_on_lan("172.16.0.4", ifaces))
``` |
{
"source": "JLyons1985/SmartMirrorServer",
"score": 3
} |
#### File: JLyons1985/SmartMirrorServer/capturepositives.py
```python
import glob
import os
import sys
import select
import cv2
import serverConfig
import face
# Prefix for positive training image filenames.
POSITIVE_FILE_PREFIX = 'positive_'
def is_letter_input(letter):
# Utility function to check if a specific character is available on stdin.
# Comparison is case insensitive.
if select.select([sys.stdin,],[],[],0.0)[0]:
input_char = sys.stdin.read(1)
return input_char.lower() == letter.lower()
return False
def captureImage():
print('Capturing image...')
image = camera.read()
# Convert image to grayscale.
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Get coordinates of single face in captured image.
result = face.detect_single(image)
if result is None:
print('Could not detect single face! Check the image in capture.pgm' \
' to see what was captured and try again with only one face visible.')
sys.exit(1)
x, y, w, h = result
# Crop image as close as possible to desired face aspect ratio.
# Might be smaller if face is near edge of image.
crop = face.crop(image, x, y, w, h)
# Save image to file.
filename = os.path.join(serverConfig.IMAGE_DIR, serverConfig.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
cv2.imwrite(filename, crop)
print('Found face and wrote training image', filename)
if __name__ == '__main__':
camera = serverConfig.get_camera()
# Create the directory for positive training images if it doesn't exist.
print('What folder are we saving to?')
folderToSave = sys.stdin.readline()[0:-1]
print('Saving to: ' + folderToSave)
if not os.path.exists(serverConfig.IMAGE_DIR + folderToSave):
os.makedirs(serverConfig.IMAGE_DIR + folderToSave)
# Find the largest ID of existing positive images.
# Start new images after this ID value.
files = sorted(glob.glob(os.path.join(serverConfig.IMAGE_DIR, folderToSave,
POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
count = 0
if len(files) > 0:
# Grab the count from the last filename.
count = int(files[-1][-7:-4])+1
print('Capturing positive training images.')
print('Press button or type c (and press enter) to capture an image.')
print('Press Ctrl-C to quit.')
while True:
# Check if button was pressed or 'c' was received, then capture image.
if is_letter_input('c'):
print('Capturing image...')
image = camera.read()
# Convert image to grayscale.
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Get coordinates of single face in captured image.
result = face.detect_single(image)
if result is None:
print('Could not detect single face! Check the image in capture.pgm' \
' to see what was captured and try again with only one face visible.')
continue
x, y, w, h = result
# Crop image as close as possible to desired face aspect ratio.
# Might be smaller if face is near edge of image.
crop = face.crop(image, x, y, w, h)
# Save image to file.
filename = os.path.join(serverConfig.IMAGE_DIR, folderToSave, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
cv2.imwrite(filename, crop)
print('Found face and wrote training image', filename)
count += 1
```
#### File: JLyons1985/SmartMirrorServer/RecognizeFace.py
```python
import cv2
import serverConfig
import face
def recognizeFace(model):
# Initialize the camera.
camera = serverConfig.get_camera()
# Check for the positive face and unlock if found.
print("Trying to read an image from the camera.")
image = camera.read()
# Convert image to grayscale.
print("Converting image to greyscale.")
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
cv2.equalizeHist(image, image)
# Get coordinates of single face in captured image.
print("Trying to detect a single face.")
result = face.detect_single(image)
if result is None:
print('Could not detect single face! Check the image in capture.pgm' \
' to see what was captured and try again with only one face visible.')
return 'NoFace'
x, y, w, h = result
# Crop and resize image to face.
crop = face.resize(face.crop(image, x, y, w, h))
# Test face against model.
label, confidence = model.predict(crop)
print(label)
print(confidence)
if label == serverConfig.NEGATIVE_LABEL:
return 'Neg'
else:
for i in range(len(serverConfig.USERS)):
if label == serverConfig.POSITIVE_LABELS[i] and confidence < serverConfig.POSITIVE_THRESHOLD :
print('Found a match')
return serverConfig.USERS[i]
# Must not be a match
print('No Match')
return 'Neg'
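# --- Usage sketch (hypothetical) ---
# 'model' is any trained OpenCV face recognizer exposing predict(); how it is trained and
# loaded (for example from a file path kept in serverConfig) is assumed here, not shown above.
#
# user = recognizeFace(model)
# if user not in ('NoFace', 'Neg'):
#     print('Recognized ' + user)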
``` |
{
"source": "jlyons871/Tax-Calculator",
"score": 3
} |
#### File: Tax-Calculator/taxcalc/behavior.py
```python
import numpy as np
import copy
def update_income(behavioral_effect, calcY):
delta_inc = np.where(calcY.c00100 > 0, behavioral_effect, 0)
# Attribute the behavioral effects across itemized deductions,
# wages, and other income.
_itemized = np.where(calcY.c04470 < calcY._standard,
0, calcY.c04470)
# TODO, verify that this is needed.
delta_wages = (delta_inc * calcY.e00200 /
(calcY.c00100 + _itemized + .001))
other_inc = calcY.c00100 - calcY.e00200
delta_other_inc = (delta_inc * other_inc /
(calcY.c00100 + _itemized + .001))
delta_itemized = (delta_inc * _itemized /
(calcY.c00100 + _itemized + .001))
calcY.e00200 = calcY.e00200 + delta_wages
calcY.e00300 = calcY.e00300 + delta_other_inc
calcY.e19570 = np.where(_itemized > 0,
calcY.e19570 + delta_itemized, 0)
# TODO, we should create a behavioral modification
# variable instead of using e19570
calcY.calc_all()
return calcY
def behavior(calcX, calcY, elast_wrt_atr=0.4, inc_effect=0.15,
update_income=update_income):
"""
Modify plan Y records to account for the micro-feedback effect that arises
from moving from plan X to plan Y.
"""
# Calculate marginal tax rates for plan x and plan y.
mtrX = calcX.mtr('e00200')
mtrY = calcY.mtr('e00200')
# Calculate the percent change in after-tax rate.
pct_diff_atr = ((1-mtrY) - (1-mtrX))/(1-mtrX)
calcY_behavior = copy.deepcopy(calcY)
# Calculate the magnitude of the substitution and income effects.
substitution_effect = (elast_wrt_atr * pct_diff_atr *
(calcX._ospctax))
calcY_behavior = update_income(substitution_effect, calcY_behavior)
income_effect = inc_effect * (calcY_behavior._ospctax - calcX._ospctax)
calcY_behavior = update_income(income_effect, calcY_behavior)
return calcY_behavior
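# --- Usage sketch (illustrative; constructing the calculators is assumed, not shown here) ---
# calc_x and calc_y are Tax-Calculator Calculator objects for the baseline and the reform,
# both advanced to the same year with calc_all() already run.
#
# calc_y_response = behavior(calc_x, calc_y, elast_wrt_atr=0.4, inc_effect=0.15)
# print((calc_y_response._ospctax - calc_y._ospctax).sum())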
```
#### File: Tax-Calculator/taxcalc/decorators.py
```python
import numpy as np
import pandas as pd
import inspect
from .parameters import default_data
from numba import jit, vectorize, guvectorize
from functools import wraps
from six import StringIO
import ast
import toolz
class GetReturnNode(ast.NodeVisitor):
"""
A Visitor to get the return tuple names from a calc-style function
"""
def visit_Return(self, node):
if isinstance(node.value, ast.Tuple):
return [e.id for e in node.value.elts]
else:
return [node.value.id]
def dataframe_guvectorize(dtype_args, dtype_sig):
"""
Extracts numpy arrays from caller arguments and passes them
to guvectorized numba functions
"""
def make_wrapper(func):
vecd_f = guvectorize(dtype_args, dtype_sig)(func)
@wraps(func)
def wrapper(*args, **kwargs):
# np_arrays = [getattr(args[0], i).values for i in theargs]
arrays = [arg.values for arg in args]
ans = vecd_f(*arrays)
return ans
return wrapper
return make_wrapper
def dataframe_vectorize(dtype_args):
"""
Extracts numpy arrays from caller arguments and passes them
to vectorized numba functions
"""
def make_wrapper(func):
vecd_f = vectorize(dtype_args)(func)
@wraps(func)
def wrapper(*args, **kwargs):
arrays = [arg.values for arg in args]
ans = vecd_f(*arrays)
return ans
return wrapper
return make_wrapper
def dataframe_wrap_guvectorize(dtype_args, dtype_sig):
"""
Extracts particular numpy arrays from caller argments and passes
them to guvectorize. Goes one step further than dataframe_guvectorize
by looking for the column names in the dataframe and just extracting those
"""
def make_wrapper(func):
theargs = inspect.getargspec(func).args
vecd_f = guvectorize(dtype_args, dtype_sig)(func)
def wrapper(*args, **kwargs):
np_arrays = [getattr(args[0], i).values for i in theargs]
ans = vecd_f(*np_arrays)
return ans
return wrapper
return make_wrapper
def create_apply_function_string(sigout, sigin, parameters):
"""
Create a string for a function of the form::
def ap_fuc(x_0, x_1, x_2, ...):
for i in range(len(x_0)):
x_0[i], ... = jitted_f(x_j[i], ...)
return x_0[i], ...
where the specific args to jitted_f and the number of
values to return is determined by sigout and sigin
Parameters
----------
sigout: iterable of the out arguments
sigin: iterable of the in arguments
parameters: iterable of which of the args (from in_args) are parameter
variables (as opposed to column records). This influences
how we construct the '_apply' function
Returns
-------
a String representing the function
"""
s = StringIO()
total_len = len(sigout) + len(sigin)
out_args = ["x_" + str(i) for i in range(0, len(sigout))]
in_args = ["x_" + str(i) for i in range(len(sigout), total_len)]
s.write("def ap_func({0}):\n".format(",".join(out_args + in_args)))
s.write(" for i in range(len(x_0)):\n")
out_index = [x + "[i]" for x in out_args]
in_index = []
for arg, _var in zip(in_args, sigin):
in_index.append(arg + "[i]" if _var not in parameters else arg)
s.write(" " + ",".join(out_index) + " = ")
s.write("jitted_f(" + ",".join(in_index) + ")\n")
s.write(" return " + ",".join(out_args) + "\n")
return s.getvalue()
def create_toplevel_function_string(args_out, args_in, pm_or_pf,
kwargs_for_func={}):
"""
Create a string for a function of the form::
def hl_func(x_0, x_1, x_2, ...):
outputs = (...) = calc_func(...)
header = [...]
return DataFrame(data, columns=header)
where the specific args to jitted_f and the number of
values to return is determined by sigout and sigin
Parameters
----------
args_out: iterable of the out arguments
args_in: iterable of the in arguments
pm_or_pf: iterable of strings for object that holds each arg
kwargs_for_func: dictionary of keyword args for the function
Returns
-------
a String representing the function
"""
s = StringIO()
s.write("def hl_func(pm, pf")
if kwargs_for_func:
kwargs = ",".join(str(k) + "=" + str(v) for k, v in
kwargs_for_func.items())
s.write(", " + kwargs + " ")
s.write("):\n")
s.write(" from pandas import DataFrame\n")
s.write(" import numpy as np\n")
s.write(" outputs = \\\n")
outs = []
for arg in kwargs_for_func:
args_in.remove(arg)
for p, attr in zip(pm_or_pf, args_out + args_in):
outs.append(p + "." + attr + ", ")
outs = [m_or_f + "." + arg for m_or_f, arg in zip(pm_or_pf, args_out)]
s.write(" (" + ", ".join(outs) + ") = \\\n")
s.write(" " + "applied_f(")
for p, attr in zip(pm_or_pf, args_out + args_in):
s.write(p + "." + attr + ", ")
for arg in kwargs_for_func:
s.write(arg + ", ")
s.write(")\n")
s.write(" header = [")
col_headers = ["'" + out + "'" for out in args_out]
s.write(", ".join(col_headers))
s.write("]\n")
if len(args_out) == 1:
s.write(" return DataFrame(data=outputs,"
"columns=header)")
else:
s.write(" return DataFrame(data=np.column_stack("
"outputs),columns=header)")
return s.getvalue()
def make_apply_function(func, out_args, in_args, parameters, do_jit=True,
**kwargs):
"""
Takes a '_calc' function and creates the necessary Python code for an
_apply style function. Will also jit the function if desired
Parameters
----------
func: the 'calc' style function
out_args: list of out arguments for the apply function
in_args: list of in arguments for the apply function
parameters: iterable of which of the args (from in_args) are parameter
variables (as opposed to column records). This influences
how we construct the '_apply' function
do_jit: Bool, if True, jit the resulting apply function
Returns
-------
'_apply' style function
"""
jitted_f = jit(**kwargs)(func)
apfunc = create_apply_function_string(out_args, in_args, parameters)
func_code = compile(apfunc, "<string>", "exec")
fakeglobals = {}
eval(func_code, {"jitted_f": jitted_f}, fakeglobals)
if do_jit:
return jit(**kwargs)(fakeglobals['ap_func'])
else:
return fakeglobals['ap_func']
def apply_jit(dtype_sig_out, dtype_sig_in, parameters=None, **kwargs):
"""
make a decorator that takes in a _calc-style function, handle
the apply step
"""
if not parameters:
parameters = []
def make_wrapper(func):
theargs = inspect.getargspec(func).args
jitted_f = jit(**kwargs)(func)
jitted_apply = make_apply_function(func, dtype_sig_out,
dtype_sig_in, parameters,
**kwargs)
def wrapper(*args, **kwargs):
in_arrays = []
out_arrays = []
for farg in theargs:
if hasattr(args[0], farg):
in_arrays.append(getattr(args[0], farg))
else:
in_arrays.append(getattr(args[1], farg))
for farg in dtype_sig_out:
if hasattr(args[0], farg):
out_arrays.append(getattr(args[0], farg))
else:
out_arrays.append(getattr(args[1], farg))
final_array = out_arrays + in_arrays
ans = jitted_apply(*final_array)
return ans
return wrapper
return make_wrapper
def iterate_jit(parameters=None, **kwargs):
"""
make a decorator that takes in a _calc-style function, create a
function that handles the "high-level" function and the "_apply"
style function
Note: perhaps a better "bigger picture" description of what this does?
"""
if not parameters:
parameters = []
def make_wrapper(func):
# Step 1. Wrap this function in apply_jit
# from apply_jit
# Get the input arguments from the function
in_args = inspect.getargspec(func).args
try:
jit_args = inspect.getargspec(jit).args + ['nopython']
except TypeError:
#print ("This should only be seen in RTD, if not install numba!")
return func
kwargs_for_func = toolz.keyfilter(in_args.__contains__, kwargs)
kwargs_for_jit = toolz.keyfilter(jit_args.__contains__, kwargs)
# Any name that is a taxcalc parameter (or the special case 'puf'
# Boolean flag) is given special treatment. Identify those names here.
allowed_parameters = list(default_data(metadata=True).keys())
allowed_parameters += list(arg[1:] for arg in default_data(metadata=True).keys())
allowed_parameters.append("puf")
additional_parameters = [arg for arg in in_args if arg in allowed_parameters]
additional_parameters += parameters
# Remove duplicates
all_parameters = list(set(additional_parameters))
src = inspect.getsourcelines(func)[0]
# Discover the return arguments by walking
# the AST of the function
all_returned_vals = []
gnr = GetReturnNode()
all_out_args = None
for node in ast.walk(ast.parse(''.join(src))):
all_out_args = gnr.visit(node)
if all_out_args:
break
if not all_out_args:
raise ValueError("Can't find return statement in function!")
# Now create the apply jitted function
applied_jitted_f = make_apply_function(func,
list(reversed(all_out_args)),
in_args,
parameters=all_parameters,
do_jit=True,
**kwargs_for_jit)
def wrapper(*args, **kwargs):
in_arrays = []
out_arrays = []
pm_or_pf = []
for farg in all_out_args + in_args:
if hasattr(args[0], farg):
in_arrays.append(getattr(args[0], farg))
pm_or_pf.append("pm")
elif hasattr(args[1], farg):
in_arrays.append(getattr(args[1], farg))
pm_or_pf.append("pf")
elif farg not in kwargs_for_func:
raise ValueError("Unknown arg: " + farg)
# Create the high level function
high_level_func = create_toplevel_function_string(all_out_args,
list(in_args),
pm_or_pf,
kwargs_for_func)
func_code = compile(high_level_func, "<string>", "exec")
fakeglobals = {}
eval(func_code, {"applied_f": applied_jitted_f}, fakeglobals)
high_level_fn = fakeglobals['hl_func']
ans = high_level_fn(*args, **kwargs)
return ans
return wrapper
return make_wrapper
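# --- Usage sketch (mirrors the pattern exercised in tests/test_decorators.py) ---
# A calc-style function returns a tuple of named columns; iterate_jit builds the per-row
# apply loop and a DataFrame-returning wrapper around it. 'pm' and 'pf' are any objects
# whose attributes hold the named arrays (parameters and records respectively).
#
# @iterate_jit(nopython=True)
# def Net_calc(x, y, z):
#     a = x + y
#     b = x + y + z
#     return (a, b)
#
# df = Net_calc(pm, pf)   # DataFrame with columns ['a', 'b']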
```
#### File: Tax-Calculator/taxcalc/parameters.py
```python
import numpy as np
from .utils import expand_array
import os
import json
from pkg_resources import resource_stream, Requirement
DEFAULT_START_YEAR = 2013
class Parameters(object):
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PARAM_FILENAME = "params.json"
params_path = os.path.join(CUR_PATH, PARAM_FILENAME)
#Mapping of year to inflation rate
__rates = {2013:0.015, 2014:0.020, 2015:0.022, 2016:0.020, 2017:0.021,
2018:0.022, 2019:0.023, 2020:0.024, 2021:0.024, 2022:0.024,
2023:0.024, 2024:0.024}
@classmethod
def from_file(cls, file_name, **kwargs):
if file_name:
with open(file_name) as f:
params = json.loads(f.read())
else:
params = None
return cls(data=params, **kwargs)
def __init__(self, start_year=DEFAULT_START_YEAR, budget_years=12,
inflation_rate=None, inflation_rates=None, data=None,
**kwargs):
if inflation_rate and inflation_rates:
raise ValueError("Can only specify either one constant inflation"
" rate or a list of inflation rates")
self._inflation_rates = None
if inflation_rate:
self._inflation_rates = [inflation_rate] * budget_years
if inflation_rates:
assert len(inflation_rates) == budget_years
self._inflation_rates = [inflation_rates[start_year + i]
for i in range(0, budget_years)]
if not self._inflation_rates:
self._inflation_rates = [self.__rates[start_year + i]
for i in range(0, budget_years)]
self._current_year = start_year
self._start_year = start_year
self._budget_years = budget_years
if data:
self._vals = data
else:
self._vals = default_data(metadata=True)
# INITIALIZE
for name, data in self._vals.items():
cpi_inflated = data.get('cpi_inflated', False)
values = data['value']
setattr(self, name, expand_array(values,
inflate=cpi_inflated, inflation_rates=self._inflation_rates,
num_years=budget_years))
self.set_year(start_year)
def update(self, year_mods):
"""
Take a dictionary of year: {name:val} mods and set them on this Params object.
'year_mods' is a dictionary of year: mods where mods is a dict of key:value pairs
and key_cpi:Bool pairs. The key_cpi:Bool pairs indicate if the value for 'key'
should be inflated
Parameters:
----------
mods: dict
"""
if not all(isinstance(k, int) for k in year_mods.keys()):
raise ValueError("Every key must be a year, e.g. 2011, 2012, etc.")
defaults = default_data(metadata=True)
for year, mods in year_mods.items():
num_years_to_expand = (self.start_year + self.budget_years) - year
for name, values in mods.items():
if name.endswith("_cpi"):
continue
if name in defaults:
default_cpi = defaults[name].get('cpi_inflated', False)
else:
default_cpi = False
cpi_inflated = mods.get(name + "_cpi", default_cpi)
if year == self.start_year and year == self.current_year:
nval = expand_array(values,
inflate=cpi_inflated,
inflation_rates=self._inflation_rates,
num_years=num_years_to_expand)
setattr(self, name, nval)
elif year <= self.current_year and year >= self.start_year:
# advance until the parameter is in line with the current
# year
num_years_to_skip=self.current_year - year
offset_year = year - self.start_year
inf_rates = [self._inflation_rates[offset_year + i]
for i in range(0, num_years_to_expand)]
nval = expand_array(values,
inflate=cpi_inflated,
inflation_rates=inf_rates,
num_years=num_years_to_expand)
if self.current_year > self.start_year:
cur_val = getattr(self, name)
offset = self.current_year - self.start_year
cur_val[offset:] = nval[num_years_to_skip:]
else:
setattr(self, name, nval[num_years_to_skip:])
else: # year > current_year
msg = ("Can't specify a parameter for a year that is in the"
" future because we don't know how to fill in the "
" values for the years between {0} and {1}.")
raise ValueError(msg.format(self.current_year, year))
# Set up the '_X = [a, b,...]' variables as 'X = a'
self.set_year(self._current_year)
@property
def current_year(self):
return self._current_year
@property
def start_year(self):
return self._start_year
@property
def budget_years(self):
return self._budget_years
def increment_year(self):
self._current_year += 1
self.set_year(self._current_year)
def set_year(self, yr):
for name, vals in self._vals.items():
arr = getattr(self, name)
setattr(self, name[1:], arr[yr-self._start_year])
def default_data(metadata=False, start_year=None):
""" Retreive of default parameters """
parampath = Parameters.params_path
if not os.path.exists(parampath):
path_in_egg = os.path.join("taxcalc", Parameters.PARAM_FILENAME)
buf = resource_stream(Requirement.parse("taxcalc"), path_in_egg)
_bytes = buf.read()
as_string = _bytes.decode("utf-8")
params = json.loads(as_string)
else:
with open(Parameters.params_path) as f:
params = json.load(f)
if start_year:
for k, v in params.items():
first_year = v.get('start_year', DEFAULT_START_YEAR)
assert isinstance(first_year, int)
if start_year < first_year:
msg = "Can't set a start year of {0}, because it is before {1}"
raise ValueError(msg.format(start_year, first_year))
#Set the new start year:
v['start_year'] = start_year
#Work with the values
vals = v['value']
last_year_for_data = first_year + len(vals) - 1
if last_year_for_data < start_year:
if v['row_label']:
v['row_label'] = ["2015"]
#Need to produce new values
new_val = vals[-1]
if v['cpi_inflated'] is True:
if isinstance(new_val, list):
for y in range(last_year_for_data, start_year):
new_val = [x * (1.0 + Parameters._Parameters__rates[y]) for x in new_val]
else:
for y in range(last_year_for_data, start_year):
new_val *= 1.0 + Parameters._Parameters__rates[y]
#Set the new values
v['value'] = [new_val]
else:
#Need to get rid of [first_year, ..., start_year-1] values
years_to_chop = start_year - first_year
if v['row_label']:
v['row_label'] = v['row_label'][years_to_chop:]
v['value'] = v['value'][years_to_chop:]
if (metadata):
return params
else:
return { k: v['value'] for k,v in params.items()}
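# --- Usage sketch (illustrative values; '_II_em' is an existing parameter name, the
# reform amount is made up) ---
# params = Parameters(start_year=2013, budget_years=12)
# params.update({2013: {'_II_em': [4000.0], '_II_em_cpi': True}})
# params.increment_year()   # move to 2014 and refresh the unprefixed attribute values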
```
#### File: taxcalc/tests/test_decorators.py
```python
import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "../../"))
sys.path.append(os.path.join(cur_path, "../"))
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from numba import jit, vectorize, guvectorize
from taxcalc import *
@extract_array
@vectorize(['int32(int32)'])
def fnvec_ifelse_df(inc_in):
ans = -42
if inc_in < 5:
ans = -42
if inc_in >= 5 and inc_in < 8:
ans = 42
if inc_in >= 8:
ans = 99
return ans
@dataframe_vectorize(['int32(int32)'])
def fnvec_ifelse_df2(inc_in):
"""Docstring"""
ans = -42
if inc_in < 5:
ans = -42
if inc_in >= 5 and inc_in < 8:
ans = 42
if inc_in >= 8:
ans = 99
return ans
@extract_array
@guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_df(inc_in, inc_out):
for i in range(inc_in.shape[0]):
inc_out[i] = inc_in[i]
@dataframe_guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_df2(inc_in, inc_out):
"""Docstring"""
for i in range(inc_in.shape[0]):
inc_out[i] = inc_in[i]
def test_with_df_wrapper():
x = np.array([4, 5, 9], dtype='i4')
y = np.array([0, 0, 0], dtype='i4')
df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])
fnvec_copy_df(df.x, df.y)
assert np.all(df.x.values == df.y.values)
z = fnvec_ifelse_df(df.x)
assert np.all(np.array([-42, 42, 99], dtype='i4') == z)
def test_with_dataframe_guvec():
x = np.array([4, 5, 9], dtype='i4')
y = np.array([0, 0, 0], dtype='i4')
df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])
fnvec_copy_df2(df.x, df.y)
assert fnvec_copy_df2.__name__ == 'fnvec_copy_df2'
assert fnvec_copy_df2.__doc__ == 'Docstring'
assert np.all(df.x.values == df.y.values)
def test_with_dataframe_vec():
x = np.array([4, 5, 9], dtype='i4')
y = np.array([0, 0, 0], dtype='i4')
df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])
z = fnvec_ifelse_df2(df.x)
assert fnvec_ifelse_df2.__name__ == 'fnvec_ifelse_df2'
assert fnvec_ifelse_df2.__doc__ == 'Docstring'
assert np.all(np.array([-42, 42, 99], dtype='i4') == z)
@dataframe_wrap_guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_dfw(x, y):
for i in range(x.shape[0]):
y[i] = x[i]
def test_with_dataframe_wrap_guvectorize():
x = np.array([4, 5, 9], dtype='i4')
y = np.array([0, 0, 0], dtype='i4')
df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])
fnvec_copy_dfw(df)
assert(np.all(df.x == df.y))
def test_create_apply_function_string():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], [])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3[i],x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_apply_function_string_with_params():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], ['d'])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3,x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_toplevel_function_string_mult_outputs():
ans = create_toplevel_function_string(['a', 'b'], ['d', 'e'],
['pm', 'pm', 'pf', 'pm'])
exp = ''
exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" outputs = \\\n"
" (pm.a, pm.b) = \\\n"
" applied_f(pm.a, pm.b, pf.d, pm.e, )\n"
" header = ['a', 'b']\n"
" return DataFrame(data=np.column_stack(outputs),"
"columns=header)")
assert ans == exp
def test_create_toplevel_function_string():
ans = create_toplevel_function_string(['a'], ['d', 'e'],
['pm', 'pf', 'pm'])
exp = ''
exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" outputs = \\\n"
" (pm.a) = \\\n"
" applied_f(pm.a, pf.d, pm.e, )\n"
" header = ['a']\n"
" return DataFrame(data=outputs,"
"columns=header)")
assert ans == exp
def some_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def test_make_apply_function():
ans = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'], [],
do_jit=True, no_python=True)
assert ans
@apply_jit(["a", "b"], ["x", "y", "z"], nopython=True)
def Magic_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def Magic(pm, pf):
# Adjustments
outputs = \
pf.a, pf.b = Magic_calc(pm, pf)
header = ['a', 'b']
return DataFrame(data=np.column_stack(outputs),
columns=header)
@iterate_jit(nopython=True)
def Magic_calc2(x, y, z):
a = x + y
b = x + y + z
return (a, b)
class Foo(object):
pass
@iterate_jit(nopython=True)
def bar(MARS):
if MARS == 1 or MARS == 6:
_sep = 2
else: _sep = 1
return _sep
@iterate_jit(nopython=True)
def ret_everything(a, b, c, d, e, f):
c = a + b
d = a + b
e = a + b
f = a + b
return (c, d, e,
f)
def test_magic_apply_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_magic_iterate_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic_calc2(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_bar_iterate_jit():
pm = Foo()
pf = Foo()
pf.MARS = np.ones((5,))
pf._sep = np.ones((5,))
ans = bar(pm, pf)
exp = DataFrame(data=[2.0] * 5, columns=["_sep"])
assert_frame_equal(ans, exp)
def test_ret_everything_iterate_jit():
pm = Foo()
pf = Foo()
pf.a = np.ones((5,))
pf.b = np.ones((5,))
pf.c = np.ones((5,))
pf.d = np.ones((5,))
pf.e = np.ones((5,))
pf.f = np.ones((5,))
ans = ret_everything(pm, pf)
exp = DataFrame(data=[[2.0, 2.0, 2.0, 2.0]] * 5,
columns=["c", "d", "e", "f"])
assert_frame_equal(ans, exp)
@iterate_jit(parameters=['puf'], nopython=True, puf=True)
def Magic_calc3(x, y, z, puf):
a = x + y
if (puf):
b = x + y + z
else:
b = 42
return (a, b)
def test_function_takes_kwarg():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
ans = Magic_calc3(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5,
columns=["a", "b"])
assert_frame_equal(ans, exp)
def test_function_takes_kwarg_nondefault_value():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
ans = Magic_calc3(pm, pf, puf=False)
exp = DataFrame(data=[[2.0, 42.0]] * 5,
columns=["a", "b"])
assert_frame_equal(ans, exp)
@iterate_jit(nopython=True, puf=True)
def Magic_calc4(x, y, z, puf):
a = x + y
if (puf):
b = x + y + z
else:
b = 42
return (a, b)
def test_function_no_parameters_listed():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
ans = Magic_calc4(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5,
columns=["a", "b"])
assert_frame_equal(ans, exp)
@iterate_jit(parameters=['w'], nopython=True, puf=True)
def Magic_calc5(w, x, y, z, puf):
a = x + y
if (puf):
b = w[0] + x + y + z
else:
b = 42
return (a, b)
def test_function_parameters_optional():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pm.w = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
ans = Magic_calc5(pm, pf)
exp = DataFrame(data=[[2.0, 4.0]] * 5,
columns=["a", "b"])
assert_frame_equal(ans, exp)
```
#### File: taxcalc/tests/test_records.py
```python
import os
import sys
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(CUR_PATH, "../../"))
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
import tempfile
from numba import jit, vectorize, guvectorize
from taxcalc import *
from taxcalc.utils import expand_array
tax_dta_path = os.path.join(CUR_PATH, "../../tax_all1991_puf.gz")
def test_create_records():
r = Records(tax_dta_path)
assert r
def test_create_records_from_file():
r = Records.from_file(tax_dta_path)
assert r
def test_imputation():
e17500 = np.array([20., 4.4, 5.])
e00100 = np.array([40., 8.1, 90.1])
e18400 = np.array([25., 34., 10.])
e18425 = np.array([42., 20.3, 49.])
e62100 = np.array([75., 12.4, 84.])
e00700 = np.array([43.3, 34.1, 3.4])
e04470 = np.array([21.2, 12., 13.1])
e21040 = np.array([45.9, 3., 45.])
e18500 = np.array([33.1, 18.2, 39.])
e20800 = np.array([0.9, 32., 52.1])
cmbtp_itemizer = np.array([68.4, -31.0025, -84.7])
"""
Test case values:
x = max(0., e17500 - max(0., e00100) * 0.075) = [17., 3.7925, 0]
medical_adjustment = min(x, 0.025 * max(0.,e00100)) = [-1.,-.2025,0]
state_adjustment = max(0, max(e18400, e18425)) = [42., 34., 49.]
_cmbtp_itemizer = (e62100 - medical_adjustment + e00700 + e04470 + e21040
- z - e00100 - e18500 - e20800)
= [68.4, -31.0025 ,-84.7]
"""
test_itemizer = records.imputation(e17500, e00100, e18400, e18425,
e62100, e00700, e04470,
e21040, e18500, e20800)
assert(np.allclose(cmbtp_itemizer, test_itemizer))
``` |
{
"source": "jlyoung/queuemonitor",
"score": 3
} |
#### File: jlyoung/queuemonitor/queuemonitor.py
```python
import logging
import pprint
import subprocess
import sys
import time
from urlparse import urljoin
import yaml
from pync import Notifier
from retrying import retry
from selenium import webdriver
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
logging.basicConfig(format='[%(asctime)s] %(message)s', level=logging.INFO)
queue_front_page = set()
def retry_if_stale_element_reference_exception(exception):
"""Return whether the exception is a StaleElementReferenceException exception."""
return isinstance(exception, StaleElementReferenceException)
def retry_if_element_not_visible_exception(exception):
"""Return whether the exception is a ElementNotVisibleException exception."""
return isinstance(exception, ElementNotVisibleException)
@retry(retry_on_exception=retry_if_stale_element_reference_exception)
def check_queue(driver):
"""Reload the Incoming Queue page. Raise Notification Center message if there is a new case."""
global queue_front_page
time.sleep(10)
driver.find_element_by_id("00BE0000001ELz7_refresh").click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "x-grid3-body")))
rows = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "x-grid3-row")))
pp = pprint.PrettyPrinter(indent=4)
for row in rows:
row_dict = {}
case_number = row.find_element_by_class_name("x-grid3-col-CASES_CASE_NUMBER")
row_dict["case_number"] = case_number.text
case_url = row.find_element_by_tag_name("a").get_attribute("href")
row_dict["case_url"] = urljoin(driver.current_url, case_url)
row_dict["problem_statement"] = row.find_element_by_class_name("x-grid3-col-00NE0000002C0mc").text
try:
row_dict["severity"] = row.find_element_by_class_name("x-grid3-col-00NE0000002BvKo").text.split()[0]
except:
row_dict["severity"] = ""
if row_dict["case_number"] not in queue_front_page:
message = u"New {severity} case #{case_number}: {problem_statement}".format(severity=row_dict["severity"], case_number=row_dict["case_number"], problem_statement=row_dict["problem_statement"])
Notifier.notify(message, sound="Sosumi", open=row_dict["case_url"])
subprocess.call(['/usr/bin/say', '"New Case Notification"', '-v', 'Tessa'])
pp.pprint(row_dict)
logging.info(message)
logging.info(u"Adding case {case_number} to the set of known cases...".format(case_number=row_dict["case_number"]))
queue_front_page.add(row_dict["case_number"])
logging.info("Awaiting new case notifications...")
@retry(retry_on_exception=retry_if_stale_element_reference_exception)
def populate_cases(driver):
"""Populate the initial queue_front_page set with existing case information."""
global queue_front_page
pp = pprint.PrettyPrinter(indent=4)
time.sleep(1)
x_grid3_body = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CLASS_NAME, "x-grid3-body")))
rows = x_grid3_body.find_elements_by_class_name("x-grid3-row")
for row in rows:
row_dict = {}
case_number = row.find_element_by_class_name("x-grid3-col-CASES_CASE_NUMBER")
row_dict["case_number"] = case_number.text
row_dict["case_url"] = urljoin(driver.current_url, case_number.find_element_by_tag_name("a").get_attribute("href"))
row_dict["problem_statement"] = row.find_element_by_class_name("x-grid3-col-00NE0000002C0mc").text
try:
row_dict["severity"] = row.find_element_by_class_name("x-grid3-col-00NE0000002BvKo").text.split()[0]
except:
row_dict["severity"] = ""
pp.pprint(row_dict)
queue_front_page.add(row_dict["case_number"])
logging.info("End listing of initial first page of incoming queue.")
@retry(retry_on_exception=retry_if_element_not_visible_exception, wait_fixed=1000)
def click_initial_response_column_header(driver):
"""Sort by Initial Response timestamp."""
title = "//div[@title='Initial Response Time']"
initial_response_time_element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, title)))
time.sleep(2)
driver.execute_script("return arguments[0].scrollIntoView();", initial_response_time_element)
time.sleep(3)
logging.info("Clicking column...")
driver.find_element_by_xpath(title).click()
def main():
"""Alert user via Notification Center messages when new cases arrive in Incoming Queue."""
global queue_front_page
username = None
    password = None
logging.info("Accessing credentials...")
with open("credentials.yml", "r") as stream:
try:
credentials = yaml.load(stream)
username = credentials["username"]
password = <PASSWORD>["password"]
except yaml.YAMLError as exc:
logging.error(exc)
sys.exit(1)
chrome_options = Options()
chrome_options.add_argument("restore-last-session")
chrome_options.add_argument("start-maximized")
driver = webdriver.Chrome(executable_path="/usr/local/bin/chromedriver", chrome_options=chrome_options)
driver.maximize_window()
logging.info("Accessing Hortonworks Okta Login Page...")
driver.get("https://hortonworks.okta.com")
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "username")))
element.clear()
element.send_keys(username)
element = driver.find_element_by_name("password")
element.clear()
element.send_keys(password)
time.sleep(5)
# "remember" checkbox changed and clicking on it isn't working anymore.
# removed the "remember me" checkbox check action
logging.info("Logging into Okta...")
# Sign in button changed. Have to use a css selector to click on it now.
driver.find_element_by_css_selector("input[class='button button-primary']").click()
time.sleep(5)
logging.info("Accessing SalesForce Incoming Queue..")
driver.get("https://hortonworks.my.salesforce.com/500?fcf=00BE0000001ELz7")
logging.info("Sorting by Initial Response Time Descending...")
click_initial_response_column_header(driver)
logging.info("Sorting by Initial Response Time Ascending...")
click_initial_response_column_header(driver)
# Initial page load
logging.info("Listing initial first page of incoming queue...")
populate_cases(driver)
# Incoming Queue reloads
logging.info("Awaiting new case notifications...")
while True:
check_queue(driver)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
``` |
{
"source": "jlyu26/News-Recommendation-Engine",
"score": 3
} |
#### File: News-Recommendation-Engine/backend_server/service.py
```python
import operations
import pyjsonrpc
SERVER_HOST = 'localhost'
SERVER_PORT = 4040
class RequestHandler(pyjsonrpc.HttpRequestHandler):
""" RPC request handler """
@pyjsonrpc.rpcmethod
def add(self, num1, num2): # pylint: disable=no-self-use
""" Test method """
print "add is called with %d and %d" % (num1, num2)
return num1 + num2
""" Get news summaries for a user """
@pyjsonrpc.rpcmethod
def getNewsSummariesForUser(self, user_id, page_num):
return operations.getNewsSummariesForUser(user_id, page_num)
""" Log user news clicks """
@pyjsonrpc.rpcmethod
def logNewsClickForUser(self, user_id, news_id):
return operations.logNewsClickForUser(user_id, news_id)
# Threading HTTP Server
HTTP_SERVER = pyjsonrpc.ThreadingHttpServer(
server_address=(SERVER_HOST, SERVER_PORT),
RequestHandlerClass=RequestHandler
)
print "Starting HTTP server on %s:%d" % (SERVER_HOST, SERVER_PORT)
HTTP_SERVER.serve_forever()
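# Editor's sketch (not part of the original service): a client could call the RPC
# methods above with pyjsonrpc's HttpClient, assuming the server is reachable on the
# host/port configured above:
#
#     client = pyjsonrpc.HttpClient(url="http://localhost:4040")
#     print client.call("add", 1, 2)  # expected to print 3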
```
#### File: News-Recommendation-Engine/news_pipeline/queue_helper.py
```python
import os
import sys
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import news_api_client
from cloudAMQP_client import CloudAMQPClient
SCRAPE_NEWS_TASK_QUEUE_URL = "amqp://mcyrgohw:CB44sIsZxuz-IInG5a5ESFGrnP0iIda<EMAIL>.<EMAIL>amqp.com/mcyrgohw"
SCRAPE_NEWS_TASK_QUEUE_NAME = "tap-news-scrape-news-task-queue"
DEDUPE_NEWS_TASK_QUEUE_URL = 'amqp://sspuqxlv:[email protected]/sspuqxlv'
DEDUPE_NEWS_TASK_QUEUE_NAME = 'tap-news-dedupe-news-task-queue'
def clearQueue(queue_url, queue_name):
scrape_news_queue_client = CloudAMQPClient(queue_url, queue_name)
num_of_messages = 0
while True:
if scrape_news_queue_client is not None:
msg = scrape_news_queue_client.get_message()
if msg is None:
print "Clear %d messages." % num_of_messages
return
num_of_messages += 1
if __name__ == "__main__":
clearQueue(SCRAPE_NEWS_TASK_QUEUE_URL, SCRAPE_NEWS_TASK_QUEUE_NAME)
clearQueue(DEDUPE_NEWS_TASK_QUEUE_URL, DEDUPE_NEWS_TASK_QUEUE_NAME)
``` |
{
"source": "jlyu/ps-algorithms-and-ds",
"score": 4
} |
#### File: sorting/merge-sort/MergeSort.py
```python
def mergesort(seq):
mid = len(seq) // 2
left, right = seq[:mid], seq[mid:]
if len(left) > 1:
left = mergesort(left)
if len(right) > 1:
right = mergesort(right)
result = []
while left and right:
if left[-1] >= right[-1]:
result.append(left.pop())
print result
else:
result.append(right.pop())
print result
result.reverse()
return (left or right) + result
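# Editor's note, tracing the merge step on a small input: mergesort([3, 1, 2])
# splits into [3] and [1, 2]; the right half sorts to [1, 2]. The while-loop then
# pops the larger tail element (3) into result, the popped items are reversed, and
# the untouched remainder [1, 2] is prepended, yielding [1, 2, 3].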
def unit_test():
seq = [3, 4, 9, 6, 1, 8, 2, 7, 5]
print mergesort(seq)
if __name__ == "__main__":
unit_test()
``` |
{
"source": "jlzarates/plantcv",
"score": 3
} |
#### File: plantcv/morphology/fill_segments.py
```python
import os
import cv2
import numpy as np
from skimage.segmentation import watershed
from plantcv.plantcv import fatal_error
from plantcv.plantcv import outputs
from plantcv.plantcv import color_palette
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
def fill_segments(mask, objects):
"""Fills masked segments from contours.
Inputs:
mask = Binary image, single channel, object = 1 and background = 0
objects = List of contours
Returns:
filled_img = Filled mask
:param mask: numpy.ndarray
    :param objects: list
:return filled_img: numpy.ndarray
"""
params.device += 1
h,w = mask.shape
markers = np.zeros((h,w))
labels = np.arange(len(objects)) + 1
for i,l in enumerate(labels):
cv2.drawContours(markers, objects, i ,int(l) , 5)
# Fill as a watershed segmentation from contours as markers
filled_mask = watershed(mask==0, markers=markers,
mask=mask!=0,compactness=0)
# Count area in pixels of each segment
ids, counts = np.unique(filled_mask, return_counts=True)
outputs.add_observation(variable='segment_area', trait='segment area',
method='plantcv.plantcv.morphology.fill_segments',
scale='pixels', datatype=list,
value=counts[1:].tolist(),
label=(ids[1:]-1).tolist())
rgb_vals = color_palette(num=len(labels), saved=True)
filled_img = np.zeros((h,w,3), dtype=np.uint8)
for l in labels:
for ch in range(3):
filled_img[:,:,ch][filled_mask==l] = rgb_vals[l-1][ch]
if params.debug == 'print':
print_image(filled_img, os.path.join(params.debug_outdir, str(params.device) + '_filled_img.png'))
elif params.debug == 'plot':
plot_image(filled_img)
return filled_img
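# Editor's sketch (not part of the original module): assuming `plant_mask` is a
# binary mask and `segment_objects` is a list of contours (for example, segments
# produced from a skeletonized image), a call would look like:
#
#     labeled = fill_segments(plant_mask, segment_objects)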
``` |
{
"source": "JLZ-coder/bestOffer-API",
"score": 2
} |
#### File: bestOffer-API/mainApp/__init__.py
```python
from flask import Flask
import os
from . import users
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
app.url_map.strict_slashes = False
# a simple page that says hello
@app.route('/')
def hello():
return 'Hello, World!'
app.register_blueprint(users.bp)
return app
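# Editor's note: with this application-factory pattern the app can be created
# directly in tests via create_app(), or discovered by the Flask CLI, e.g.
# (assuming the package is importable as `mainApp`):
#
#     FLASK_APP="mainApp:create_app()" flask run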
``` |
{
"source": "JLZong/Improving-lexicon-based-classification-using-n-grams",
"score": 3
} |
#### File: Improving-lexicon-based-classification-using-n-grams/vaderDemo/Test.py
```python
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pandas as pd
import docx
import numpy as np
import csv
def test():
sentences=[]
document = docx.Document("testdata/Dataset.docx")
for para in document.paragraphs:
sentences.append(para.text)
paragraphSentiments = 0.0
analyzer = SentimentIntensityAnalyzer()
for sentence in sentences:
vs = analyzer.polarity_scores(sentence)
print("{:-<65} {}".format(sentence, str(vs)))
print("AVERAGE SENTIMENT FOR PARAGRAPH: \t" + str(round(paragraphSentiments / len(sentences), 4)))
def test_news():
sentences = []
document = docx.Document("testdata/Dataset_news.docx")
for para in document.paragraphs:
sentences.append(para.text)
paragraphSentiments = 0.0
analyzer = SentimentIntensityAnalyzer()
positive=[]
negative=[]
neutral=[]
for sentence in sentences:
vs = analyzer.polarity_scores(sentence)
print("{:-<65} {}".format(sentence, str(vs)))
positive.append(vs["pos"])
negative.append(vs["neg"])
neutral.append(vs["neu"])
print("AVERAGE SENTIMENT FOR PARAGRAPH: \t" + str(round(paragraphSentiments / len(sentences), 4)))
df=pd.DataFrame({"neg":negative,
"neu":neutral,
"pos":positive})
df.to_csv("testdata/news_tri.csv")
def test_story():
sentences = []
document = docx.Document("testdata/Dataset_story.docx")
positive = []
negative = []
neutral = []
for para in document.paragraphs:
sentences.append(para.text)
paragraphSentiments = 0.0
analyzer = SentimentIntensityAnalyzer()
for sentence in sentences:
vs = analyzer.polarity_scores(sentence)
positive.append(vs["pos"])
negative.append(vs["neg"])
neutral.append(vs["neu"])
print("{:-<65} {}".format(sentence, str(vs)))
print("AVERAGE SENTIMENT FOR PARAGRAPH: \t" + str(round(paragraphSentiments / len(sentences), 4)))
df = pd.DataFrame({"neg": negative,
"neu": neutral,
"pos": positive})
df.to_csv("testdata/story_tri.csv")
def test_scifi():
sentences = []
document = docx.Document("testdata/Dataset_sci-fi.docx")
positive = []
negative = []
neutral = []
for para in document.paragraphs:
sentences.append(para.text)
paragraphSentiments = 0.0
analyzer = SentimentIntensityAnalyzer()
for sentence in sentences:
vs = analyzer.polarity_scores(sentence)
positive.append(vs["pos"])
negative.append(vs["neg"])
neutral.append(vs["neu"])
print("{:-<65} {}".format(sentence, str(vs)))
print("AVERAGE SENTIMENT FOR PARAGRAPH: \t" + str(round(paragraphSentiments / len(sentences), 4)))
df = pd.DataFrame({"neg": negative,
"neu": neutral,
"pos": positive})
df.to_csv("testdata/sci-fi_tri.csv")
if __name__ == "__main__":
print('-----------------------------------------------------------------Test Dataset-----------------------------------------------------------------')
test()
print(
'-----------------------------------------------------------------News Dataset-----------------------------------------------------------------')
test_news()
print(
'-----------------------------------------------------------------Story Dataset-----------------------------------------------------------------')
test_story()
print(
'-----------------------------------------------------------------Sci-fi Dataset-----------------------------------------------------------------')
test_scifi()
``` |
{
"source": "jm009/vdebug",
"score": 3
} |
#### File: vdebug/tests/test_dbgp_api.py
```python
from . import setup
import unittest
import vdebug.connection
import vdebug.dbgp
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
class ApiTest(unittest.TestCase):
"""Test the Api class in the vdebug.dbgp module."""
init_msg = """<?xml version="1.0"
encoding="iso-8859-1"?>\n<init
xmlns="urn:debugger_api_v1"
xmlns:xdebug="http://xdebug.org/dbgp/xdebug"
fileuri="file:///usr/local/bin/cake" language="PHP"
api_version="1.0" appid="30130"
idekey="netbeans-xdebug"><engine
version="2.2.0"><![CDATA[Xdebug]]></engine><author><![CDATA[<NAME>]]></author><url><![CDATA[http://xdebug.org]]></url><copyright><![CDATA[Copyright
(c) 2002-2012 by <NAME>]]></copyright></init>"""
def setUp(self):
with patch('vdebug.connection.ConnectionHandler') as c:
self.c = c.return_value
self.c.recv_msg.return_value = self.init_msg
self.c.isconnected.return_value = 1
self.p = vdebug.dbgp.Api(self.c)
def test_init_msg_parsed(self):
"""Test that the init message from the debugger is
parsed successfully"""
assert self.p.language == "php"
assert self.p.version == "1.0"
assert self.p.idekey == "netbeans-xdebug"
def test_status_send_adds_trans_id(self):
"""Test that the status command sends the right
format command and adds a transaction ID"""
self.p.conn.send_msg = MagicMock()
self.p.status()
self.p.conn.send_msg.assert_called_once_with('status -i 1')
def test_status_retval(self):
"""Test that the status command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="status"
xmlns="urn:debugger_api_v1"
status="starting"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.status()
assert str(status_res) == "starting"
def test_run_retval(self):
"""Test that the run command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="run"
xmlns="urn:debugger_api_v1"
status="running"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "running"
def test_step_into_retval(self):
"""Test that the step_into command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="step_into"
xmlns="urn:debugger_api_v1"
status="break"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "break"
def test_step_over_retval(self):
"""Test that the step_over command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="step_into"
xmlns="urn:debugger_api_v1"
status="break"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "break"
def test_step_out_retval(self):
"""Test that the step_out command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="step_into"
xmlns="urn:debugger_api_v1"
status="break"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "break"
def test_stop_retval(self):
"""Test that the stop command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="stop"
xmlns="urn:debugger_api_v1"
status="stopping"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "stopping"
def test_detatch_retval(self):
"""Test that the detatch command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n
<response command="detatch"
xmlns="urn:debugger_api_v1"
status="stopped"
reason="ok"
transaction_id="transaction_id">
message data
</response>"""
status_res = self.p.run()
assert str(status_res) == "stopped"
def test_feature_get_retval(self):
"""Test that the feature_get command receives a message from the api."""
self.p.conn.recv_msg.return_value = """<?xml
version="1.0" encoding="iso-8859-1"?>\n<response
xmlns="urn:debugger_api_v1"
xmlns:xdebug="http://xdebug.org/dbgp/xdebug"
command="feature_get" transaction_id="2"
feature_name="encoding"
supported="1"><![CDATA[iso-8859-1]]></response>"""
res = self.p.feature_get('encoding')
self.assertEqual(str(res),"iso-8859-1")
self.assertEqual(res.is_supported(),1)
class apiInvalidInitTest(unittest.TestCase):
init_msg = """<?xml version="1.0"
encoding="iso-8859-1"?>\n<init
xmlns="urn:debugger_api_v1"
xmlns:xdebug="http://xdebug.org/dbgp/xdebug"
fileuri="file:///usr/local/bin/cake" language="PHP"
api_version="1.0" appid="30130"
idekey="netbeans-xdebug"><engine
version="2.2.0"><![CDATA[Xdebug]]></engine><author><![CDATA[<NAME>]]></author><url><![CDATA[http://xdebug.org]]></url><copyright><![CDATA[Copyright
(c) 2002-2012 by <NAME>]]></copyright></init>"""
invalid_init_msg = """<?xml version="1.0"
encoding="iso-8859-1"?>\n<invalid
xmlns="urn:debugger_api_v1">\n</invalid>"""
def test_invalid_response_raises_error(self):
with patch('vdebug.connection.ConnectionHandler') as c:
c = c.return_value
c.recv_msg.return_value = self.invalid_init_msg
c.isconnected.return_value = 1
re = "Invalid XML response from debugger"
self.assertRaisesRegex(vdebug.dbgp.ResponseError,re,vdebug.dbgp.Api,c)
```
#### File: vdebug/tests/test_log.py
```python
from . import setup
import sys
import unittest
import vdebug.log
try:
from unittest import mock
except ImportError:
import mock
class LoggerTest(unittest.TestCase):
level = 1
text = 'dummy text'
time_tuple = (2042, 4, 2, 1, 42, 42, 0, 0, 0)
time_string = 'Mon 02 2042 01:42:42'
def setUp(self):
self.logger = vdebug.log.Logger(self.level)
self.worker = mock.Mock()
self.logger._actual_log = self.worker
def test_log_with_same_level(self):
self.logger.log(self.text, self.level)
self.worker.assert_called_once_with(self.text, self.level)
def test_log_with_higher_level(self):
self.logger.log(self.text, self.level+1)
self.worker.assert_not_called()
def test_log_with_lower_level(self):
self.logger.log(self.text, self.level-1)
self.worker.assert_called_once_with(self.text, self.level-1)
def test_time(self):
with mock.patch('time.localtime',
mock.Mock(return_value=self.time_tuple)):
string = self.logger.time()
self.assertEqual(string, self.time_string)
def test_format(self):
with mock.patch('time.localtime',
mock.Mock(return_value=self.time_tuple)):
string = self.logger.format(self.text, self.level)
expected = '- [Info] {%s} %s' % (self.time_string, self.text)
self.assertEqual(string, expected)
class WindowLoggerTest(unittest.TestCase):
level = 1
def setUp(self):
self.window = mock.Mock()
self.logger = vdebug.log.WindowLogger(self.level, self.window)
def test_log_with_open_window(self):
self.window.is_open = True
ret = self.logger.log('dummy text', self.level)
self.assertIsNone(ret)
if sys.version_info[0] >= 3 and sys.version_info[1] >= 6:
self.window.create.assert_not_called()
self.window.write.assert_called_once()
def test_log_with_no_window(self):
self.window.is_open = False
ret = self.logger.log('dummy text', self.level)
self.assertIsNone(ret)
if sys.version_info[0] >= 3 and sys.version_info[1] >= 6:
self.window.create.assert_called_once()
self.window.write.assert_called_once()
def test_shutdown(self):
self.logger.shutdown()
self.assertFalse(self.window.is_open)
class FileLoggerTest(unittest.TestCase):
filename = '/tmp/vdebug-test-log-file'
level = 2
if sys.version_info[0] == 3:
open_name = 'builtins.open'
elif sys.version_info[0] == 2:
open_name = '__builtin__.open'
def setUp(self):
self.logger = vdebug.log.FileLogger(self.level, self.filename)
def test_log_opens_file(self):
with mock.patch(self.open_name, mock.mock_open()) as mocked_open:
self.logger.log('text', self.level)
mocked_open.assert_called_once_with(self.filename, 'w')
handle = mocked_open()
if sys.version_info[0] >= 3 and sys.version_info[1] >= 6:
handle.write.assert_called_once()
handle.flush.assert_called_once()
def test_log_with_open_file(self):
handle = mock.Mock()
self.logger.f = handle
with mock.patch(self.open_name, mock.mock_open()) as mocked_open:
self.logger.log('text', self.level)
mocked_open.assert_not_called()
if sys.version_info[0] >= 3 and sys.version_info[1] >= 6:
handle.write.assert_called_once()
handle.flush.assert_called_once()
def test_shutdown_without_file(self):
with mock.patch(self.open_name, mock.mock_open()) as mocked_open:
self.logger.shutdown()
handle = mocked_open()
handle.close.assert_not_called()
def test_shutdown_with_file(self):
with mock.patch(self.open_name, mock.mock_open()) as mocked_open:
self.logger.log('text', self.level)
self.logger.shutdown()
mocked_open.assert_called_once_with(self.filename, 'w')
handle = mocked_open()
handle.close.assert_called_once_with()
``` |
{
"source": "J-M0/PGPy",
"score": 2
} |
#### File: PGPy/pgpy/pgp.py
```python
import binascii
import collections
try:
import collections.abc as collections_abc
except ImportError:
collections_abc = collections
import contextlib
import copy
import functools
import itertools
import operator
import os
import re
import warnings
import weakref
import six
from datetime import datetime
from cryptography.hazmat.primitives import hashes
from .constants import CompressionAlgorithm
from .constants import Features
from .constants import HashAlgorithm
from .constants import ImageEncoding
from .constants import KeyFlags
from .constants import NotationDataFlags
from .constants import PacketTag
from .constants import PubKeyAlgorithm
from .constants import RevocationKeyClass
from .constants import RevocationReason
from .constants import SignatureType
from .constants import SymmetricKeyAlgorithm
from .decorators import KeyAction
from .errors import PGPDecryptionError
from .errors import PGPError
from .packet import Key
from .packet import MDC
from .packet import Packet
from .packet import Primary
from .packet import Private
from .packet import PubKeyV4
from .packet import PubSubKeyV4
from .packet import PrivKeyV4
from .packet import PrivSubKeyV4
from .packet import Public
from .packet import Sub
from .packet import UserID
from .packet import UserAttribute
from .packet.packets import CompressedData
from .packet.packets import IntegrityProtectedSKEData
from .packet.packets import IntegrityProtectedSKEDataV1
from .packet.packets import LiteralData
from .packet.packets import OnePassSignature
from .packet.packets import OnePassSignatureV3
from .packet.packets import PKESessionKey
from .packet.packets import PKESessionKeyV3
from .packet.packets import Signature
from .packet.packets import SignatureV4
from .packet.packets import SKEData
from .packet.packets import Marker
from .packet.packets import SKESessionKey
from .packet.packets import SKESessionKeyV4
from .packet.types import Opaque
from .types import Armorable
from .types import Fingerprint
from .types import ParentRef
from .types import PGPObject
from .types import SignatureVerification
from .types import SorteDeque
__all__ = ['PGPSignature',
'PGPUID',
'PGPMessage',
'PGPKey',
'PGPKeyring']
class PGPSignature(Armorable, ParentRef, PGPObject):
_reason_for_revocation = collections.namedtuple('ReasonForRevocation', ['code', 'comment'])
@property
def __sig__(self):
return self._signature.signature.__sig__()
@property
def cipherprefs(self):
"""
A ``list`` of preferred symmetric algorithms specified in this signature, if any. Otherwise, an empty ``list``.
"""
if 'PreferredSymmetricAlgorithms' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_PreferredSymmetricAlgorithms'])).flags
return []
@property
def compprefs(self):
"""
A ``list`` of preferred compression algorithms specified in this signature, if any. Otherwise, an empty ``list``.
"""
if 'PreferredCompressionAlgorithms' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_PreferredCompressionAlgorithms'])).flags
return []
@property
def created(self):
"""
A :py:obj:`~datetime.datetime` of when this signature was created.
"""
return self._signature.subpackets['h_CreationTime'][-1].created
@property
def embedded(self):
return self.parent is not None
@property
def expires_at(self):
"""
A :py:obj:`~datetime.datetime` of when this signature expires, if a signature expiration date is specified.
Otherwise, ``None``
"""
if 'SignatureExpirationTime' in self._signature.subpackets:
expd = next(iter(self._signature.subpackets['SignatureExpirationTime'])).expires
return self.created + expd
return None
@property
def exportable(self):
"""
``False`` if this signature is marked as being not exportable. Otherwise, ``True``.
"""
if 'ExportableCertification' in self._signature.subpackets:
return bool(next(iter(self._signature.subpackets['ExportableCertification'])))
return True
@property
def features(self):
"""
A ``set`` of implementation features specified in this signature, if any. Otherwise, an empty ``set``.
"""
if 'Features' in self._signature.subpackets:
return next(iter(self._signature.subpackets['Features'])).flags
return set()
@property
def hash2(self):
return self._signature.hash2
@property
def hashprefs(self):
"""
A ``list`` of preferred hash algorithms specified in this signature, if any. Otherwise, an empty ``list``.
"""
if 'PreferredHashAlgorithms' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_PreferredHashAlgorithms'])).flags
return []
@property
def hash_algorithm(self):
"""
The :py:obj:`~constants.HashAlgorithm` used when computing this signature.
"""
return self._signature.halg
@property
def is_expired(self):
"""
``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
"""
expires_at = self.expires_at
if expires_at is not None and expires_at != self.created:
return expires_at < datetime.utcnow()
return False
@property
def key_algorithm(self):
"""
The :py:obj:`~constants.PubKeyAlgorithm` of the key that generated this signature.
"""
return self._signature.pubalg
@property
def key_expiration(self):
if 'KeyExpirationTime' in self._signature.subpackets:
return next(iter(self._signature.subpackets['KeyExpirationTime'])).expires
return None
@property
def key_flags(self):
"""
A ``set`` of :py:obj:`~constants.KeyFlags` specified in this signature, if any. Otherwise, an empty ``set``.
"""
if 'KeyFlags' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_KeyFlags'])).flags
return set()
@property
def keyserver(self):
"""
The preferred key server specified in this signature, if any. Otherwise, an empty ``str``.
"""
if 'PreferredKeyServer' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_PreferredKeyServer'])).uri
return ''
@property
def keyserverprefs(self):
"""
A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
"""
if 'KeyServerPreferences' in self._signature.subpackets:
return next(iter(self._signature.subpackets['h_KeyServerPreferences'])).flags
return []
@property
def magic(self):
return "SIGNATURE"
@property
def notation(self):
"""
A ``dict`` of notation data in this signature, if any. Otherwise, an empty ``dict``.
"""
return dict((nd.name, nd.value) for nd in self._signature.subpackets['NotationData'])
@property
def policy_uri(self):
"""
The policy URI specified in this signature, if any. Otherwise, an empty ``str``.
"""
if 'Policy' in self._signature.subpackets:
return next(iter(self._signature.subpackets['Policy'])).uri
return ''
@property
def revocable(self):
"""
``False`` if this signature is marked as being not revocable. Otherwise, ``True``.
"""
if 'Revocable' in self._signature.subpackets:
return bool(next(iter(self._signature.subpackets['Revocable'])))
return True
@property
def revocation_key(self):
if 'RevocationKey' in self._signature.subpackets:
raise NotImplementedError()
return None
@property
def revocation_reason(self):
if 'ReasonForRevocation' in self._signature.subpackets:
subpacket = next(iter(self._signature.subpackets['ReasonForRevocation']))
return self._reason_for_revocation(subpacket.code, subpacket.string)
return None
@property
def attested_certifications(self):
"""
Returns a set of all the hashes of attested certifications covered by this Attestation Key Signature.
Unhashed subpackets are ignored.
"""
if self._signature.sigtype != SignatureType.Attestation:
return set()
ret = set()
hlen = self.hash_algorithm.digest_size
for n in self._signature.subpackets['h_AttestedCertifications']:
attestations = bytes(n.attested_certifications)
for i in range(0, len(attestations), hlen):
ret.add(attestations[i:i+hlen])
return ret
@property
def signer(self):
"""
The 16-character Key ID of the key that generated this signature.
"""
return self._signature.signer
@property
def signer_fingerprint(self):
"""
        The fingerprint of the key that generated this signature, if it is present. Otherwise, an empty ``str``.
"""
if 'IssuerFingerprint' in self._signature.subpackets:
return next(iter(self._signature.subpackets['IssuerFingerprint'])).issuer_fingerprint
return ''
@property
def intended_recipients(self):
"""
Returns an iterator over all the primary key fingerprints marked as intended encrypted recipients for this signature.
"""
return map(lambda x: x.intended_recipient, self._signature.subpackets['IntendedRecipient'])
@property
def target_signature(self):
return NotImplemented
@property
def type(self):
"""
The :py:obj:`~constants.SignatureType` of this signature.
"""
return self._signature.sigtype
@classmethod
def new(cls, sigtype, pkalg, halg, signer, created=None):
sig = PGPSignature()
if created is None:
created=datetime.utcnow()
sigpkt = SignatureV4()
sigpkt.header.tag = 2
sigpkt.header.version = 4
sigpkt.subpackets.addnew('CreationTime', hashed=True, created=created)
sigpkt.subpackets.addnew('Issuer', _issuer=signer)
sigpkt.sigtype = sigtype
sigpkt.pubalg = pkalg
if halg is not None:
sigpkt.halg = halg
sig._signature = sigpkt
return sig
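    # Editor's sketch (the key id below is hypothetical, not from the original source):
    #
    #     sig = PGPSignature.new(SignatureType.BinaryDocument,
    #                            PubKeyAlgorithm.RSAEncryptOrSign,
    #                            HashAlgorithm.SHA256,
    #                            'DEADBEEFDEADBEEF')
    #
    # builds an unsigned scaffold whose CreationTime and Issuer subpackets are set.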
def __init__(self):
"""
PGPSignature objects represent OpenPGP compliant signatures.
PGPSignature implements the ``__str__`` method, the output of which will be the signature object in
OpenPGP-compliant ASCII-armored format.
PGPSignature implements the ``__bytes__`` method, the output of which will be the signature object in
OpenPGP-compliant binary format.
"""
super(PGPSignature, self).__init__()
self._signature = None
def __bytearray__(self):
return self._signature.__bytearray__()
def __repr__(self):
return "<PGPSignature [{:s}] object at 0x{:02x}>".format(self.type.name, id(self))
def __lt__(self, other):
return self.created < other.created
def __or__(self, other):
if isinstance(other, Signature):
if self._signature is None:
self._signature = other
return self
##TODO: this is not a great way to do this
if other.__class__.__name__ == 'EmbeddedSignature':
self._signature = other
return self
raise TypeError
def __copy__(self):
# because the default shallow copy isn't actually all that useful,
# and deepcopy does too much work
sig = super(PGPSignature, self).__copy__()
# sig = PGPSignature()
# sig.ascii_headers = self.ascii_headers.copy()
sig |= copy.copy(self._signature)
return sig
def attests_to(self, othersig):
        'returns True if this signature attests to othersig (acknowledges it for redistribution)'
if not isinstance(othersig, PGPSignature):
raise TypeError
h = self.hash_algorithm.hasher
h.update(othersig._signature.canonical_bytes())
return h.digest() in self.attested_certifications
def hashdata(self, subject):
_data = bytearray()
if isinstance(subject, six.string_types):
try:
subject = subject.encode('utf-8')
except UnicodeEncodeError:
subject = subject.encode('charmap')
"""
All signatures are formed by producing a hash over the signature
data, and then using the resulting hash in the signature algorithm.
"""
if self.type == SignatureType.BinaryDocument:
"""
For binary document signatures (type 0x00), the document data is
hashed directly.
"""
if isinstance(subject, (SKEData, IntegrityProtectedSKEData)):
_data += subject.__bytearray__()
else:
_data += bytearray(subject)
if self.type == SignatureType.CanonicalDocument:
"""
For text document signatures (type 0x01), the
document is canonicalized by converting line endings to <CR><LF>,
and the resulting data is hashed.
"""
_data += re.subn(br'\r?\n', b'\r\n', subject)[0]
if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
SignatureType.Positive_Cert, SignatureType.CertRevocation, SignatureType.Subkey_Binding,
SignatureType.PrimaryKey_Binding}:
"""
When a signature is made over a key, the hash data starts with the
octet 0x99, followed by a two-octet length of the key, and then body
of the key packet. (Note that this is an old-style packet header for
a key packet with two-octet length.) ...
Key revocation signatures (types 0x20 and 0x28)
hash only the key being revoked.
"""
_s = b''
if isinstance(subject, PGPUID):
_s = subject._parent.hashdata
elif isinstance(subject, PGPKey) and not subject.is_primary:
_s = subject._parent.hashdata
elif isinstance(subject, PGPKey) and subject.is_primary:
_s = subject.hashdata
if len(_s) > 0:
_data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
if self.type in {SignatureType.Subkey_Binding, SignatureType.PrimaryKey_Binding}:
"""
A subkey binding signature
(type 0x18) or primary key binding signature (type 0x19) then hashes
the subkey using the same format as the main key (also using 0x99 as
the first octet).
"""
if subject.is_primary:
_s = subject.subkeys[self.signer].hashdata
else:
_s = subject.hashdata
_data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
if self.type in {SignatureType.KeyRevocation, SignatureType.SubkeyRevocation, SignatureType.DirectlyOnKey}:
"""
The signature is calculated directly on the key being revoked. A
revoked key is not to be used. Only revocation signatures by the
key being revoked, or by an authorized revocation key, should be
considered valid revocation signatures.
Subkey revocation signature
The signature is calculated directly on the subkey being revoked.
A revoked subkey is not to be used. Only revocation signatures
by the top-level signature key that is bound to this subkey, or
by an authorized revocation key, should be considered valid
revocation signatures.
- clarification from draft-ietf-openpgp-rfc4880bis-02:
Primary key revocation signatures (type 0x20) hash
only the key being revoked. Subkey revocation signature (type 0x28)
hash first the primary key and then the subkey being revoked
Signature directly on a key
This signature is calculated directly on a key. It binds the
information in the Signature subpackets to the key, and is
appropriate to be used for subpackets that provide information
about the key, such as the Revocation Key subpacket. It is also
appropriate for statements that non-self certifiers want to make
about the key itself, rather than the binding between a key and a
name.
"""
if self.type == SignatureType.SubkeyRevocation:
# hash the primary key first if this is a Subkey Revocation signature
_s = subject.parent.hashdata
_data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
_s = subject.hashdata
_data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
SignatureType.Positive_Cert, SignatureType.CertRevocation}:
"""
A certification signature (type 0x10 through 0x13) hashes the User
ID being bound to the key into the hash context after the above
data. ... A V4 certification
hashes the constant 0xB4 for User ID certifications or the constant
0xD1 for User Attribute certifications, followed by a four-octet
number giving the length of the User ID or User Attribute data, and
then the User ID or User Attribute data.
...
The [certificate revocation] signature
is computed over the same data as the certificate that it
revokes, and should have a later creation date than that
certificate.
"""
_s = subject.hashdata
if subject.is_uid:
_data += b'\xb4'
else:
_data += b'\xd1'
_data += self.int_to_bytes(len(_s), 4) + _s
# if this is a new signature, do update_hlen
if 0 in list(self._signature.signature):
self._signature.update_hlen()
"""
Once the data body is hashed, then a trailer is hashed. (...)
A V4 signature hashes the packet body
starting from its first field, the version number, through the end
of the hashed subpacket data. Thus, the fields hashed are the
signature version, the signature type, the public-key algorithm, the
hash algorithm, the hashed subpacket length, and the hashed
subpacket body.
V4 signatures also hash in a final trailer of six octets: the
version of the Signature packet, i.e., 0x04; 0xFF; and a four-octet,
big-endian number that is the length of the hashed data from the
Signature packet (note that this number does not include these final
six octets).
"""
hcontext = bytearray()
hcontext.append(self._signature.header.version if not self.embedded else self._signature._sig.header.version)
hcontext.append(self.type)
hcontext.append(self.key_algorithm)
hcontext.append(self.hash_algorithm)
hcontext += self._signature.subpackets.__hashbytearray__()
hlen = len(hcontext)
_data += hcontext
_data += b'\x04\xff'
_data += self.int_to_bytes(hlen, 4)
return bytes(_data)
def make_onepass(self):
onepass = OnePassSignatureV3()
onepass.sigtype = self.type
onepass.halg = self.hash_algorithm
onepass.pubalg = self.key_algorithm
onepass.signer = self.signer
onepass.update_hlen()
return onepass
def parse(self, packet):
unarmored = self.ascii_unarmor(packet)
data = unarmored['body']
if unarmored['magic'] is not None and unarmored['magic'] != 'SIGNATURE':
raise ValueError('Expected: SIGNATURE. Got: {}'.format(str(unarmored['magic'])))
if unarmored['headers'] is not None:
self.ascii_headers = unarmored['headers']
# load *one* packet from data
pkt = Packet(data)
if pkt.header.tag == PacketTag.Signature and not isinstance(pkt, Opaque):
self._signature = pkt
else:
raise ValueError('Expected: Signature. Got: {:s}'.format(pkt.__class__.__name__))
class PGPUID(ParentRef):
@property
def __sig__(self):
return list(self._signatures)
def _splitstring(self):
'''returns name, comment email from User ID string'''
if not isinstance(self._uid, UserID):
return ("", "", "")
if self._uid.uid == "":
return ("", "", "")
rfc2822 = re.match(r"""^
# name should always match something
(?P<name>.+?)
# comment *optionally* matches text in parens following name
# this should never come after email and must be followed immediately by
# either the email field, or the end of the packet.
(\ \((?P<comment>.+?)\)(?=(\ <|$)))?
# email *optionally* matches text in angle brackets following name or comment
# this should never come before a comment, if comment exists,
# but can immediately follow name if comment does not exist
(\ <(?P<email>.+)>)?
$
""", self._uid.uid, flags=re.VERBOSE).groupdict()
return (rfc2822['name'], rfc2822['comment'] or "", rfc2822['email'] or "")
@property
def name(self):
"""If this is a User ID, the stored name. If this is not a User ID, this will be an empty string."""
return self._splitstring()[0]
@property
def comment(self):
"""
If this is a User ID, this will be the stored comment. If this is not a User ID, or there is no stored comment,
        this will be an empty string.
"""
return self._splitstring()[1]
@property
def email(self):
"""
If this is a User ID, this will be the stored email address. If this is not a User ID, or there is no stored
email address, this will be an empty string.
"""
return self._splitstring()[2]
@property
def userid(self):
"""
If this is a User ID, this will be the full UTF-8 string. If this is not a User ID, this will be ``None``.
"""
return self._uid.uid if isinstance(self._uid, UserID) else None
@property
def image(self):
"""
If this is a User Attribute, this will be the stored image. If this is not a User Attribute, this will be ``None``.
"""
return self._uid.image.image if isinstance(self._uid, UserAttribute) else None
@property
def is_primary(self):
"""
If the most recent, valid self-signature specifies this as being primary, this will be True. Otherwise, False.
"""
if self.selfsig is not None:
return bool(next(iter(self.selfsig._signature.subpackets['h_PrimaryUserID']), False))
return False
@property
def is_uid(self):
"""
``True`` if this is a User ID, otherwise False.
"""
return isinstance(self._uid, UserID)
@property
def is_ua(self):
"""
``True`` if this is a User Attribute, otherwise False.
"""
return isinstance(self._uid, UserAttribute)
@property
def selfsig(self):
"""
        This will be the most recent self-signature of this User ID or Attribute. If there isn't one, this will be ``None``.
"""
if self.parent is not None:
return next((sig for sig in reversed(self._signatures) if sig.signer == self.parent.fingerprint.keyid), None)
@property
def signers(self):
"""
This will be a set of all of the key ids which have signed this User ID or Attribute.
"""
return set(s.signer for s in self.__sig__)
@property
def hashdata(self):
if self.is_uid:
return self._uid.__bytearray__()[len(self._uid.header):]
if self.is_ua:
return self._uid.subpackets.__bytearray__()
@property
def third_party_certifications(self):
'''
A generator returning all third-party certifications
'''
if self.parent is None:
return
fpr = self.parent.fingerprint
keyid = self.parent.fingerprint.keyid
for sig in self._signatures:
if (sig.signer_fingerprint != '' and fpr != sig.signer_fingerprint) or (sig.signer != keyid):
yield sig
def attested_to(self, certifications):
'''filter certifications, only returning those that have been attested to by the first party'''
# first find the set of the most recent valid Attestation Key Signatures:
if self.parent is None:
return
mostrecent = None
attestations = []
now = datetime.utcnow()
fpr = self.parent.fingerprint
keyid = self.parent.fingerprint.keyid
for sig in self._signatures:
if sig._signature.sigtype == SignatureType.Attestation and \
((sig.signer_fingerprint == fpr) or (sig.signer == keyid)) and \
self.parent.verify(self, sig) and \
sig.created <= now:
if mostrecent is None or sig.created > mostrecent:
attestations = [sig]
mostrecent = sig.created
elif sig.created == mostrecent:
attestations.append(sig)
# now filter the certifications:
for certification in certifications:
for a in attestations:
if a.attests_to(certification):
yield certification
@property
def attested_third_party_certifications(self):
'''
A generator that provides a list of all third-party certifications attested to
by the primary key.
'''
return self.attested_to(self.third_party_certifications)
@classmethod
def new(cls, pn, comment="", email=""):
"""
Create a new User ID or photo.
:param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
Otherwise, it will be used as the name field for a User ID.
:type pn: ``bytearray``, ``str``, ``unicode``
:param comment: The comment field for a User ID. Ignored if this is a photo.
:type comment: ``str``, ``unicode``
:param email: The email address field for a User ID. Ignored if this is a photo.
:type email: ``str``, ``unicode``
:returns: :py:obj:`PGPUID`
"""
uid = PGPUID()
if isinstance(pn, bytearray):
uid._uid = UserAttribute()
uid._uid.image.image = pn
uid._uid.image.iencoding = ImageEncoding.encodingof(pn)
uid._uid.update_hlen()
else:
uid._uid = UserID()
uidstr = pn
if comment:
uidstr += ' (' + comment + ')'
if email:
uidstr += ' <' + email + '>'
uid._uid.uid = uidstr
uid._uid.update_hlen()
return uid
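    # Editor's sketch (the example values are hypothetical):
    #
    #     uid = PGPUID.new('Example User', comment='demo', email='user@example.com')
    #
    # builds a User ID whose string form is "Example User (demo) <user@example.com>".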
def __init__(self):
"""
PGPUID objects represent User IDs and User Attributes for keys.
PGPUID implements the ``__format__`` method for User IDs, returning a string in the format
'name (comment) <email>', leaving out any comment or email fields that are not present.
"""
super(PGPUID, self).__init__()
self._uid = None
self._signatures = SorteDeque()
def __repr__(self):
if self.selfsig is not None:
return "<PGPUID [{:s}][{}] at 0x{:02X}>".format(self._uid.__class__.__name__, self.selfsig.created, id(self))
return "<PGPUID [{:s}] at 0x{:02X}>".format(self._uid.__class__.__name__, id(self))
def __lt__(self, other): # pragma: no cover
if self.is_uid == other.is_uid:
if self.is_primary == other.is_primary:
mysig = self.selfsig
othersig = other.selfsig
if mysig is None:
return not (othersig is None)
if othersig is None:
return False
return mysig > othersig
if self.is_primary:
return True
return False
if self.is_uid and other.is_ua:
return True
if self.is_ua and other.is_uid:
return False
def __or__(self, other):
if isinstance(other, PGPSignature):
self._signatures.insort(other)
if self.parent is not None and self in self.parent._uids:
self.parent._uids.resort(self)
return self
if isinstance(other, UserID) and self._uid is None:
self._uid = other
return self
if isinstance(other, UserAttribute) and self._uid is None:
self._uid = other
return self
raise TypeError("unsupported operand type(s) for |: '{:s}' and '{:s}'"
"".format(self.__class__.__name__, other.__class__.__name__))
def __copy__(self):
# because the default shallow copy isn't actually all that useful,
# and deepcopy does too much work
uid = PGPUID()
uid |= copy.copy(self._uid)
for sig in self._signatures:
uid |= copy.copy(sig)
return uid
def __format__(self, format_spec):
if self.is_uid:
comment = six.u("") if self.comment == "" else six.u(" ({:s})").format(self.comment)
email = six.u("") if self.email == "" else six.u(" <{:s}>").format(self.email)
return six.u("{:s}{:s}{:s}").format(self.name, comment, email)
raise NotImplementedError
class PGPMessage(Armorable, PGPObject):
@staticmethod
def dash_unescape(text):
return re.subn(r'^- ', '', text, flags=re.MULTILINE)[0]
@staticmethod
def dash_escape(text):
return re.subn(r'^-', '- -', text, flags=re.MULTILINE)[0]
@property
def encrypters(self):
"""A ``set`` containing all key ids (if any) to which this message was encrypted."""
return set(m.encrypter for m in self._sessionkeys if isinstance(m, PKESessionKey))
@property
def filename(self):
"""If applicable, returns the original filename of the message. Otherwise, returns an empty string."""
if self.type == 'literal':
return self._message.filename
return ''
@property
def is_compressed(self):
"""``True`` if this message will be compressed when exported"""
return self._compression != CompressionAlgorithm.Uncompressed
@property
def is_encrypted(self):
"""``True`` if this message is encrypted; otherwise, ``False``"""
return isinstance(self._message, (SKEData, IntegrityProtectedSKEData))
@property
def is_sensitive(self):
"""``True`` if this message is marked sensitive; otherwise ``False``"""
return self.type == 'literal' and self._message.filename == '_CONSOLE'
@property
def is_signed(self):
"""
``True`` if this message is signed; otherwise, ``False``.
Should always be ``False`` if the message is encrypted.
"""
return len(self._signatures) > 0
@property
def issuers(self):
"""A ``set`` containing all key ids (if any) which have signed or encrypted this message."""
return self.encrypters | self.signers
@property
def magic(self):
if self.type == 'cleartext':
return "SIGNATURE"
return "MESSAGE"
@property
def message(self):
"""The message contents"""
if self.type == 'cleartext':
return self.bytes_to_text(self._message)
if self.type == 'literal':
return self._message.contents
if self.type == 'encrypted':
return self._message
@property
def signatures(self):
"""A ``set`` containing all key ids (if any) which have signed this message."""
return list(self._signatures)
@property
def signers(self):
"""A ``set`` containing all key ids (if any) which have signed this message."""
return set(m.signer for m in self._signatures)
@property
def type(self):
##TODO: it might be better to use an Enum for the output of this
if isinstance(self._message, (six.string_types, six.binary_type, bytearray)):
return 'cleartext'
if isinstance(self._message, LiteralData):
return 'literal'
if isinstance(self._message, (SKEData, IntegrityProtectedSKEData)):
return 'encrypted'
raise NotImplementedError
def __init__(self):
"""
PGPMessage objects represent OpenPGP message compositions.
PGPMessage implements the ``__str__`` method, the output of which will be the message composition in
OpenPGP-compliant ASCII-armored format.
PGPMessage implements the ``__bytes__`` method, the output of which will be the message composition in
OpenPGP-compliant binary format.
Any signatures within the PGPMessage that are marked as being non-exportable will not be included in the output
of either of those methods.
"""
super(PGPMessage, self).__init__()
self._compression = CompressionAlgorithm.Uncompressed
self._message = None
self._mdc = None
self._signatures = SorteDeque()
self._sessionkeys = []
def __bytearray__(self):
if self.is_compressed:
comp = CompressedData()
comp.calg = self._compression
comp.packets = [pkt for pkt in self]
comp.update_hlen()
return comp.__bytearray__()
_bytes = bytearray()
for pkt in self:
_bytes += pkt.__bytearray__()
return _bytes
def __str__(self):
if self.type == 'cleartext':
tmpl = u"-----BEGIN PGP SIGNED MESSAGE-----\n" \
u"{hhdr:s}\n" \
u"{cleartext:s}\n" \
u"{signature:s}"
# only add a Hash: header if we actually have at least one signature
hashes = set(s.hash_algorithm.name for s in self.signatures)
hhdr = 'Hash: {hashes:s}\n'.format(hashes=','.join(sorted(hashes))) if hashes else ''
return tmpl.format(hhdr=hhdr,
cleartext=self.dash_escape(self.bytes_to_text(self._message)),
signature=super(PGPMessage, self).__str__())
return super(PGPMessage, self).__str__()
def __iter__(self):
if self.type == 'cleartext':
for sig in self._signatures:
yield sig
elif self.is_encrypted:
for sig in self._signatures:
yield sig
for pkt in self._sessionkeys:
yield pkt
yield self.message
else:
##TODO: is it worth coming up with a way of disabling one-pass signing?
for sig in reversed(self._signatures):
ops = sig.make_onepass()
if sig is not self._signatures[-1]:
ops.nested = True
yield ops
yield self._message
if self._mdc is not None: # pragma: no cover
yield self._mdc
for sig in self._signatures:
yield sig
def __or__(self, other):
if isinstance(other, Marker):
return self
if isinstance(other, CompressedData):
self._compression = other.calg
for pkt in other.packets:
self |= pkt
return self
if isinstance(other, (six.string_types, six.binary_type, bytearray)):
if self._message is None:
self._message = self.text_to_bytes(other)
return self
if isinstance(other, (LiteralData, SKEData, IntegrityProtectedSKEData)):
if self._message is None:
self._message = other
return self
if isinstance(other, MDC):
if self._mdc is None:
self._mdc = other
return self
if isinstance(other, OnePassSignature):
# these are "generated" on the fly during composition
return self
if isinstance(other, Signature):
other = PGPSignature() | other
if isinstance(other, PGPSignature):
self._signatures.insort(other)
return self
if isinstance(other, (PKESessionKey, SKESessionKey)):
self._sessionkeys.append(other)
return self
if isinstance(other, PGPMessage):
self._message = other._message
self._mdc = other._mdc
self._compression = other._compression
self._sessionkeys += other._sessionkeys
self._signatures += other._signatures
return self
raise NotImplementedError(str(type(other)))
def __copy__(self):
msg = super(PGPMessage, self).__copy__()
msg._compression = self._compression
msg._message = copy.copy(self._message)
msg._mdc = copy.copy(self._mdc)
for sig in self._signatures:
msg |= copy.copy(sig)
for sk in self._sessionkeys:
msg |= copy.copy(sk)
return msg
@classmethod
def new(cls, message, **kwargs):
"""
Create a new PGPMessage object.
:param message: The message to be stored.
:type message: ``str``, ``unicode``, ``bytes``, ``bytearray``
:returns: :py:obj:`PGPMessage`
The following optional keyword arguments can be used with :py:meth:`PGPMessage.new`:
:keyword file: if True, ``message`` should be a path to a file. The contents of that file will be read and used
as the contents of the message.
:type file: ``bool``
:keyword cleartext: if True, the message will be cleartext with inline signatures.
:type cleartext: ``bool``
:keyword sensitive: if True, the filename will be set to '_CONSOLE' to signal other OpenPGP clients to treat
this message as being 'for your eyes only'. Ignored if cleartext is True.
:type sensitive: ``bool``
:keyword format: Set the message format identifier. Ignored if cleartext is True.
:type format: ``str``
:keyword compression: Set the compression algorithm for the new message.
Defaults to :py:obj:`CompressionAlgorithm.ZIP`. Ignored if cleartext is True.
:keyword encoding: Set the Charset header for the message.
:type encoding: ``str`` representing a valid codec in codecs
"""
# TODO: have 'codecs' above (in :type encoding:) link to python documentation page on codecs
cleartext = kwargs.pop('cleartext', False)
format = kwargs.pop('format', None)
sensitive = kwargs.pop('sensitive', False)
compression = kwargs.pop('compression', CompressionAlgorithm.ZIP)
file = kwargs.pop('file', False)
charset = kwargs.pop('encoding', None)
filename = ''
mtime = datetime.utcnow()
msg = PGPMessage()
if charset:
msg.charset = charset
# if format in 'tu' and isinstance(message, (six.binary_type, bytearray)):
# # if message format is text or unicode and we got binary data, we'll need to transcode it to UTF-8
# message =
if file and os.path.isfile(message):
filename = message
message = bytearray(os.path.getsize(filename))
mtime = datetime.utcfromtimestamp(os.path.getmtime(filename))
with open(filename, 'rb') as mf:
mf.readinto(message)
# if format is None, we can try to detect it
if format is None:
if isinstance(message, six.text_type):
# message is definitely UTF-8 already
format = 'u'
elif cls.is_ascii(message):
# message is probably text
format = 't'
else:
# message is probably binary
format = 'b'
# if message is a binary type and we're building a textual message, we need to transcode the bytes to UTF-8
if isinstance(message, (six.binary_type, bytearray)) and (cleartext or format in 'tu'):
message = message.decode(charset or 'utf-8')
if cleartext:
msg |= message
else:
# load literal data
lit = LiteralData()
lit._contents = bytearray(msg.text_to_bytes(message))
lit.filename = '_CONSOLE' if sensitive else os.path.basename(filename)
lit.mtime = mtime
lit.format = format
# if cls.is_ascii(message):
# lit.format = 't'
lit.update_hlen()
msg |= lit
msg._compression = compression
return msg
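# --- illustrative usage sketch (not part of the original source) ---
# Creating message compositions with PGPMessage.new(); the file name shown is a placeholder.
#
#   text_msg = PGPMessage.new("hello, world")                   # literal text message
#   file_msg = PGPMessage.new("report.txt", file=True)          # contents read from a file
#   ct_msg   = PGPMessage.new("hello, world", cleartext=True)   # cleartext, for inline signatures
#   armored  = str(text_msg)                                     # ASCII-armored form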
def encrypt(self, passphrase, sessionkey=None, **prefs):
"""
encrypt(passphrase, [sessionkey=None,] **prefs)
Encrypt the contents of this message using a passphrase.
:param passphrase: The passphrase to use for encrypting this message.
:type passphrase: ``str``, ``unicode``, ``bytes``
:param sessionkey: Provide a session key to use when encrypting something. Default is ``None``.
If ``None``, a session key of the appropriate length will be generated randomly.
.. warning::
Care should be taken when making use of this option! Session keys *absolutely need*
to be unpredictable! Use the ``gen_key()`` method on the desired
:py:obj:`~constants.SymmetricKeyAlgorithm` to generate the session key!
:type sessionkey: ``bytes``, ``str``
:raises: :py:exc:`~errors.PGPEncryptionError`
:returns: A new :py:obj:`PGPMessage` containing the encrypted contents of this message.
"""
cipher_algo = prefs.pop('cipher', SymmetricKeyAlgorithm.AES256)
hash_algo = prefs.pop('hash', HashAlgorithm.SHA256)
# set up a new SKESessionKeyV4
skesk = SKESessionKeyV4()
skesk.s2k.usage = 255
skesk.s2k.specifier = 3
skesk.s2k.halg = hash_algo
skesk.s2k.encalg = cipher_algo
skesk.s2k.count = skesk.s2k.halg.tuned_count
if sessionkey is None:
sessionkey = cipher_algo.gen_key()
skesk.encrypt_sk(passphrase, sessionkey)
del passphrase
msg = PGPMessage() | skesk
if not self.is_encrypted:
skedata = IntegrityProtectedSKEDataV1()
skedata.encrypt(sessionkey, cipher_algo, self.__bytes__())
msg |= skedata
else:
msg |= self
return msg
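# --- illustrative usage sketch (not part of the original source) ---
# Symmetric (passphrase-based) encryption; the passphrase is a placeholder and
# should never be hard-coded in real code.
#
#   msg = PGPMessage.new("the quick brown fox")
#   enc = msg.encrypt("correct horse battery staple")   # returns a new, encrypted PGPMessage
#   assert enc.is_encrypted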
def decrypt(self, passphrase):
"""
Attempt to decrypt this message using a passphrase.
:param passphrase: The passphrase to use to attempt to decrypt this message.
:type passphrase: ``str``, ``unicode``, ``bytes``
:raises: :py:exc:`~errors.PGPDecryptionError` if decryption failed for any reason.
:returns: A new :py:obj:`PGPMessage` containing the decrypted contents of this message
"""
if not self.is_encrypted:
raise PGPError("This message is not encrypted!")
for skesk in iter(sk for sk in self._sessionkeys if isinstance(sk, SKESessionKey)):
try:
symalg, key = skesk.decrypt_sk(passphrase)
decmsg = PGPMessage()
decmsg.parse(self.message.decrypt(key, symalg))
except (TypeError, ValueError, NotImplementedError, PGPDecryptionError):
continue
else:
del passphrase
break
else:
raise PGPDecryptionError("Decryption failed")
return decmsg
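# --- illustrative usage sketch (not part of the original source) ---
# Round-tripping an armored, passphrase-encrypted message; values are placeholders.
#
#   enc = PGPMessage.from_blob(armored_text)            # parse ASCII armor into a PGPMessage
#   dec = enc.decrypt("correct horse battery staple")   # returns a new, decrypted PGPMessage
#   print(dec.message)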
def parse(self, packet):
unarmored = self.ascii_unarmor(packet)
data = unarmored['body']
if unarmored['magic'] is not None and unarmored['magic'] not in ['MESSAGE', 'SIGNATURE']:
raise ValueError('Expected: MESSAGE. Got: {}'.format(str(unarmored['magic'])))
if unarmored['headers'] is not None:
self.ascii_headers = unarmored['headers']
# cleartext signature
if unarmored['magic'] == 'SIGNATURE':
# the composition for this will be the 'cleartext' as a str,
# followed by one or more signatures (each one loaded into a PGPSignature)
self |= self.dash_unescape(unarmored['cleartext'])
while len(data) > 0:
pkt = Packet(data)
if not isinstance(pkt, Signature): # pragma: no cover
warnings.warn("Discarded unexpected packet: {:s}".format(pkt.__class__.__name__), stacklevel=2)
continue
self |= PGPSignature() | pkt
else:
while len(data) > 0:
self |= Packet(data)
class PGPKey(Armorable, ParentRef, PGPObject):
"""
11.1. Transferable Public Keys
OpenPGP users may transfer public keys. The essential elements of a
transferable public key are as follows:
- One Public-Key packet
- Zero or more revocation signatures
- One or more User ID packets
- After each User ID packet, zero or more Signature packets
(certifications)
- Zero or more User Attribute packets
- After each User Attribute packet, zero or more Signature packets
(certifications)
- Zero or more Subkey packets
- After each Subkey packet, one Signature packet, plus optionally a
revocation
The Public-Key packet occurs first. Each of the following User ID
packets provides the identity of the owner of this public key. If
there are multiple User ID packets, this corresponds to multiple
means of identifying the same unique individual user; for example, a
user may have more than one email address, and construct a User ID
for each one.
Immediately following each User ID packet, there are zero or more
Signature packets. Each Signature packet is calculated on the
immediately preceding User ID packet and the initial Public-Key
packet. The signature serves to certify the corresponding public key
and User ID. In effect, the signer is testifying to his or her
belief that this public key belongs to the user identified by this
User ID.
Within the same section as the User ID packets, there are zero or
more User Attribute packets. Like the User ID packets, a User
Attribute packet is followed by zero or more Signature packets
calculated on the immediately preceding User Attribute packet and the
initial Public-Key packet.
User Attribute packets and User ID packets may be freely intermixed
in this section, so long as the signatures that follow them are
maintained on the proper User Attribute or User ID packet.
After the User ID packet or Attribute packet, there may be zero or
more Subkey packets. In general, subkeys are provided in cases where
the top-level public key is a signature-only key. However, any V4
key may have subkeys, and the subkeys may be encryption-only keys,
signature-only keys, or general-purpose keys. V3 keys MUST NOT have
subkeys.
Each Subkey packet MUST be followed by one Signature packet, which
should be a subkey binding signature issued by the top-level key.
For subkeys that can issue signatures, the subkey binding signature
MUST contain an Embedded Signature subpacket with a primary key
binding signature (0x19) issued by the subkey on the top-level key.
Subkey and Key packets may each be followed by a revocation Signature
packet to indicate that the key is revoked. Revocation signatures
are only accepted if they are issued by the key itself, or by a key
that is authorized to issue revocations via a Revocation Key
subpacket in a self-signature by the top-level key.
Transferable public-key packet sequences may be concatenated to allow
transferring multiple public keys in one operation.
11.2. Transferable Secret Keys
OpenPGP users may transfer secret keys. The format of a transferable
secret key is the same as a transferable public key except that
secret-key and secret-subkey packets are used instead of the public
key and public-subkey packets. Implementations SHOULD include self-
signatures on any user IDs and subkeys, as this allows for a complete
public key to be automatically extracted from the transferable secret
key. Implementations MAY choose to omit the self-signatures,
especially if a transferable public key accompanies the transferable
secret key.
"""
@property
def __key__(self):
return self._key.keymaterial
@property
def __sig__(self):
return list(self._signatures)
@property
def created(self):
"""A :py:obj:`~datetime.datetime` object of the creation date and time of the key, in UTC."""
return self._key.created
@property
def expires_at(self):
"""A :py:obj:`~datetime.datetime` object of when this key is to be considered expired, if any. Otherwise, ``None``"""
try:
expires = min(sig.key_expiration for sig in itertools.chain(iter(uid.selfsig for uid in self.userids), self.self_signatures)
if sig.key_expiration is not None)
except ValueError:
return None
else:
return (self.created + expires)
@property
def fingerprint(self):
"""The fingerprint of this key, as a :py:obj:`~pgpy.types.Fingerprint` object."""
if self._key:
return self._key.fingerprint
@property
def hashdata(self):
# when signing a key, only the public portion of the key is hashed
# if this is a private key, the private components of the key material need to be left out
pub = self._key if self.is_public else self._key.pubkey()
return pub.__bytearray__()[len(pub.header):]
@property
def is_expired(self):
"""``True`` if this key is expired, otherwise ``False``"""
expires = self.expires_at
if expires is not None:
return expires <= datetime.utcnow()
return False
@property
def is_primary(self):
"""``True`` if this is a primary key; ``False`` if this is a subkey"""
return isinstance(self._key, Primary) and not isinstance(self._key, Sub)
@property
def is_protected(self):
"""``True`` if this is a private key that is protected with a passphrase, otherwise ``False``"""
if self.is_public:
return False
return self._key.protected
@property
def is_public(self):
"""``True`` if this is a public key, otherwise ``False``"""
return isinstance(self._key, Public) and not isinstance(self._key, Private)
@property
def is_unlocked(self):
"""``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``"""
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked
@property
def key_algorithm(self):
"""The :py:obj:`constants.PubKeyAlgorithm` pertaining to this key"""
return self._key.pkalg
@property
def key_size(self):
"""
The size pertaining to this key. ``int`` for non-EC key algorithms; :py:obj:`constants.EllipticCurveOID` for EC keys.
.. versionadded:: 0.4.1
"""
if self.key_algorithm in {PubKeyAlgorithm.ECDSA, PubKeyAlgorithm.ECDH, PubKeyAlgorithm.EdDSA}:
return self._key.keymaterial.oid
# check if keymaterial is not an Opaque class containing a bytearray
param = next(iter(self._key.keymaterial))
if isinstance(param, bytearray):
return 0
return param.bit_length()
@property
def magic(self):
return '{:s} KEY BLOCK'.format('PUBLIC' if (isinstance(self._key, Public) and not isinstance(self._key, Private)) else
'PRIVATE' if isinstance(self._key, Private) else '')
@property
def pubkey(self):
"""If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with
all the trimmings. If it is already a public key, just return it.
"""
if self.is_public:
return self
if self._sibling is None or isinstance(self._sibling, weakref.ref):
# create a new key shell
pub = PGPKey()
pub.ascii_headers = self.ascii_headers.copy()
# get the public half of the primary key
pub._key = self._key.pubkey()
# get the public half of each subkey
for skid, subkey in self.subkeys.items():
pub |= subkey.pubkey
# copy user ids and user attributes
for uid in self._uids:
pub |= copy.copy(uid)
# copy signatures that weren't copied with uids
for sig in self._signatures:
if sig.parent is None:
pub |= copy.copy(sig)
# keep the two halves connected to each other using weak references
self._sibling = weakref.ref(pub)
pub._sibling = weakref.ref(self)
# copy parent
if self.parent:
pub._parent = weakref.ref(self.parent)
return self._sibling()
@pubkey.setter
def pubkey(self, pubkey):
if self.is_public:
raise TypeError("cannot add public sibling to pubkey")
if not pubkey.is_public:
raise TypeError("sibling must be public")
if self._sibling is not None and self._sibling() is not None:
raise ValueError("public key reference already set")
if pubkey.fingerprint != self.fingerprint:
raise ValueError("key fingerprint mismatch")
# TODO: sync packets with sibling
self._sibling = weakref.ref(pubkey)
pubkey._sibling = weakref.ref(self)
@property
def self_signatures(self):
keyid, keytype = (self.fingerprint.keyid, SignatureType.DirectlyOnKey) if self.is_primary \
else (self.parent.fingerprint.keyid, SignatureType.Subkey_Binding)
##TODO: filter out revoked signatures as well
for sig in iter(sig for sig in self._signatures
if all([sig.type == keytype, sig.signer == keyid, not sig.is_expired])):
yield sig
@property
def signers(self):
"""A ``set`` of key ids of keys that were used to sign this key"""
return {sig.signer for sig in self.__sig__}
@property
def revocation_signatures(self):
keyid, keytype = (self.fingerprint.keyid, SignatureType.KeyRevocation) if self.is_primary \
else (self.parent.fingerprint.keyid, SignatureType.SubkeyRevocation)
for sig in iter(sig for sig in self._signatures
if all([sig.type == keytype, sig.signer == keyid, not sig.is_expired])):
yield sig
@property
def subkeys(self):
"""An :py:obj:`~collections.OrderedDict` of subkeys bound to this primary key, if applicable,
selected by 16-character keyid."""
return self._children
@property
def userids(self):
"""A ``list`` of :py:obj:`PGPUID` objects containing User ID information about this key"""
return [ u for u in self._uids if u.is_uid ]
@property
def userattributes(self):
"""A ``list`` of :py:obj:`PGPUID` objects containing one or more images associated with this key"""
return [u for u in self._uids if u.is_ua]
@property
def revocation_keys(self):
"""A ``generator`` with the list of keys that can revoke this key.
See also :py:func:`PGPSignature.revocation_key`"""
for sig in self._signatures:
if sig.revocation_key:
yield sig.revocation_key
@classmethod
def new(cls, key_algorithm, key_size, created=None):
"""
Generate a new PGP key
:param key_algorithm: Key algorithm to use.
:type key_algorithm: :py:obj:`~constants.PubKeyAlgorithm`
:param key_size: Key size in bits, unless `key_algorithm` is :py:obj:`~constants.PubKeyAlgorithm.ECDSA` or
:py:obj:`~constants.PubKeyAlgorithm.ECDH`, in which case it should be the Curve OID to use.
:type key_size: ``int`` or :py:obj:`~constants.EllipticCurveOID`
:param created: When was the key created? (``None`` or unset means now)
:type created: :py:obj:`~datetime.datetime` or ``None``
:return: A newly generated :py:obj:`PGPKey`
"""
# new private key shell first
key = PGPKey()
if key_algorithm in {PubKeyAlgorithm.RSAEncrypt, PubKeyAlgorithm.RSASign}: # pragma: no cover
warnings.warn('{:s} is deprecated - generating key using RSAEncryptOrSign'.format(key_algorithm.name))
key_algorithm = PubKeyAlgorithm.RSAEncryptOrSign
# generate some key data to match key_algorithm and key_size
key._key = PrivKeyV4.new(key_algorithm, key_size, created=created)
return key
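# --- illustrative usage sketch (not part of the original source) ---
# Generating a fresh primary key; the algorithm and size below are only examples.
#
#   from pgpy.constants import PubKeyAlgorithm
#   key = PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
#   # an EC key takes a curve OID instead of a bit size, e.g.:
#   # key = PGPKey.new(PubKeyAlgorithm.ECDSA, EllipticCurveOID.NIST_P256)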
def __init__(self):
"""
PGPKey objects represent OpenPGP compliant keys along with all of their associated data.
PGPKey implements the `__str__` method, the output of which will be the key composition in
OpenPGP-compliant ASCII-armored format.
PGPKey implements the `__bytes__` method, the output of which will be the key composition in
OpenPGP-compliant binary format.
Any signatures within the PGPKey that are marked as being non-exportable will not be included in the output
of either of those methods.
"""
super(PGPKey, self).__init__()
self._key = None
self._children = collections.OrderedDict()
self._signatures = SorteDeque()
self._uids = SorteDeque()
self._sibling = None
self._require_usage_flags = True
def __bytearray__(self):
_bytes = bytearray()
# us
_bytes += self._key.__bytearray__()
# our signatures; ignore embedded signatures
for sig in iter(s for s in self._signatures if not s.embedded and s.exportable):
_bytes += sig.__bytearray__()
# one or more User IDs, followed by their signatures
for uid in self._uids:
_bytes += uid._uid.__bytearray__()
for s in [s for s in uid._signatures if s.exportable]:
_bytes += s.__bytearray__()
# subkeys
for sk in self._children.values():
_bytes += sk.__bytearray__()
return _bytes
def __repr__(self):
if self._key is not None:
return "<PGPKey [{:s}][0x{:s}] at 0x{:02X}>" \
"".format(self._key.__class__.__name__, self.fingerprint.keyid, id(self))
return "<PGPKey [unknown] at 0x{:02X}>" \
"".format(id(self))
def __contains__(self, item):
if isinstance(item, PGPKey): # pragma: no cover
return item.fingerprint.keyid in self.subkeys
if isinstance(item, Fingerprint): # pragma: no cover
return item.keyid in self.subkeys
if isinstance(item, PGPUID):
return item in self._uids
if isinstance(item, PGPSignature):
return item in self._signatures
raise TypeError
def __or__(self, other, from_sib=False):
if isinstance(other, Key) and self._key is None:
self._key = other
elif isinstance(other, PGPKey) and not other.is_primary and other.is_public == self.is_public:
other._parent = self
self._children[other.fingerprint.keyid] = other
elif isinstance(other, PGPSignature):
self._signatures.insort(other)
# if this is a subkey binding signature that has embedded primary key binding signatures, add them to parent
if other.type == SignatureType.Subkey_Binding:
for es in iter(pkb for pkb in other._signature.subpackets['EmbeddedSignature']):
esig = PGPSignature() | es
esig._parent = other
self._signatures.insort(esig)
elif isinstance(other, PGPUID):
other._parent = weakref.ref(self)
self._uids.insort(other)
else:
raise TypeError("unsupported operand type(s) for |: '{:s}' and '{:s}'"
"".format(self.__class__.__name__, other.__class__.__name__))
if isinstance(self._sibling, weakref.ref) and not from_sib:
sib = self._sibling()
if sib is None:
self._sibling = None
else: # pragma: no cover
sib.__or__(copy.copy(other), True)
return self
def __copy__(self):
key = super(PGPKey, self).__copy__()
key._key = copy.copy(self._key)
for uid in self._uids:
key |= copy.copy(uid)
for id, subkey in self._children.items():
key |= copy.copy(subkey)
for sig in self._signatures:
if sig.embedded:
# embedded signatures don't need to be explicitly copied
continue
key |= copy.copy(sig)
return key
def protect(self, passphrase, enc_alg, hash_alg):
"""
Add a passphrase to a private key. If the key is already passphrase protected, it should be unlocked before
a new passphrase can be specified.
Has no effect on public keys.
:param passphrase: A passphrase to protect the key with
:type passphrase: ``str``, ``unicode``
:param enc_alg: Symmetric encryption algorithm to use to protect the key
:type enc_alg: :py:obj:`~constants.SymmetricKeyAlgorithm`
:param hash_alg: Hash algorithm to use in the String-to-Key specifier
:type hash_alg: :py:obj:`~constants.HashAlgorithm`
"""
##TODO: specify strong defaults for enc_alg and hash_alg
if self.is_public:
# we can't protect public keys because only private key material is ever protected
warnings.warn("Public keys cannot be passphrase-protected", stacklevel=2)
return
if self.is_protected and not self.is_unlocked:
# we can't protect a key that is already protected unless it is unlocked first
warnings.warn("This key is already protected with a passphrase - "
"please unlock it before attempting to specify a new passphrase", stacklevel=2)
return
for sk in itertools.chain([self], self.subkeys.values()):
sk._key.protect(passphrase, enc_alg, hash_alg)
del passphrase
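# --- illustrative usage sketch (not part of the original source) ---
# Protecting a freshly generated private key with a passphrase; the algorithm
# choices below are reasonable but arbitrary examples.
#
#   key.protect("a passphrase", SymmetricKeyAlgorithm.AES256, HashAlgorithm.SHA256)
#   assert key.is_protected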
@contextlib.contextmanager
def unlock(self, passphrase):
"""
Context manager method for unlocking passphrase-protected private keys. Has no effect if the key is not both
private and passphrase-protected.
When the context managed block is exited, the unprotected private key material is removed.
Example::
privkey = PGPKey()
privkey.parse(keytext)
assert privkey.is_protected
assert privkey.is_unlocked is False
# privkey.sign("some text") <- this would raise an exception
with privkey.unlock("TheCorrectPassphrase"):
# privkey is now unlocked
assert privkey.is_unlocked
# so you can do things with it
sig = privkey.sign("some text")
# privkey is no longer unlocked
assert privkey.is_unlocked is False
Emits a :py:obj:`~warnings.UserWarning` if the key is public or not passphrase protected.
:param passphrase: The passphrase to be used to unlock this key.
:type passphrase: ``str``
:raises: :py:exc:`~pgpy.errors.PGPDecryptionError` if the passphrase is incorrect
"""
if self.is_public:
# we can't unprotect public keys because only private key material is ever protected
warnings.warn("Public keys cannot be passphrase-protected", stacklevel=3)
yield self
return
if not self.is_protected:
# we can't unprotect private keys that are not protected, because there is no ciphertext to decrypt
warnings.warn("This key is not protected with a passphrase", stacklevel=3)
yield self
return
try:
for sk in itertools.chain([self], self.subkeys.values()):
sk._key.unprotect(passphrase)
del passphrase
yield self
finally:
# clean up here by deleting the previously decrypted secret key material
for sk in itertools.chain([self], self.subkeys.values()):
sk._key.keymaterial.clear()
def add_uid(self, uid, selfsign=True, **prefs):
"""
Add a User ID to this key.
:param uid: The user id to add
:type uid: :py:obj:`~pgpy.PGPUID`
:param selfsign: Whether or not to self-sign the user id before adding it
:type selfsign: ``bool``
Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`.
Any such keyword arguments are ignored if selfsign is ``False``
"""
uid._parent = self
if selfsign:
uid |= self.certify(uid, SignatureType.Positive_Cert, **prefs)
self |= uid
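# --- illustrative usage sketch (not part of the original source) ---
# Adding a self-signed User ID with key preferences; names and addresses are placeholders.
#
#   uid = PGPUID.new("Example User", comment="test key", email="user@example.org")
#   key.add_uid(uid,
#               usage={KeyFlags.Sign, KeyFlags.EncryptCommunications},
#               hashes=[HashAlgorithm.SHA256, HashAlgorithm.SHA512],
#               ciphers=[SymmetricKeyAlgorithm.AES256],
#               compression=[CompressionAlgorithm.ZLIB])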
def get_uid(self, search):
"""
Find and return a User ID that matches the search string given.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
:return: The first matching :py:obj:`~pgpy.PGPUID`, or ``None`` if no matches were found.
"""
if self.is_primary:
return next((u for u in self._uids if search in filter(lambda a: a is not None, (u.name, u.comment, u.email))), None)
return self.parent.get_uid(search)
def del_uid(self, search):
"""
Find and remove a user id that matches the search string given. This method does not modify the corresponding
:py:obj:`~pgpy.PGPUID` object; it only removes it from the list of user ids on the key.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
"""
u = self.get_uid(search)
if u is None:
raise KeyError("uid '{:s}' not found".format(search))
u._parent = None
self._uids.remove(u)
def add_subkey(self, key, **prefs):
"""
Add a key as a subkey to this key.
:param key: A private :py:obj:`~pgpy.PGPKey` that does not have any subkeys of its own
:keyword usage: A ``set`` of key usage flags, as :py:obj:`~constants.KeyFlags` for the subkey to be added.
:type usage: ``set``
Other valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`
"""
if self.is_public:
raise PGPError("Cannot add a subkey to a public key. Add the subkey to the private component first!")
if key.is_public:
raise PGPError("Cannot add a public key as a subkey to this key")
if key.is_primary:
if len(key._children) > 0:
raise PGPError("Cannot add a key that already has subkeys as a subkey!")
# convert key into a subkey
npk = PrivSubKeyV4()
npk.pkalg = key._key.pkalg
npk.created = key._key.created
npk.keymaterial = key._key.keymaterial
key._key = npk
key._key.update_hlen()
self._children[key.fingerprint.keyid] = key
key._parent = self
##TODO: skip this step if the key already has a subkey binding signature
bsig = self.bind(key, **prefs)
key |= bsig
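# --- illustrative usage sketch (not part of the original source) ---
# Binding a newly generated key as an encryption subkey; the algorithm and
# usage flags are examples only.
#
#   subkey = PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
#   key.add_subkey(subkey, usage={KeyFlags.EncryptCommunications,
#                                 KeyFlags.EncryptStorage})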
def _get_key_flags(self, user=None):
if self.is_primary:
if user is not None:
user = self.get_uid(user)
elif len(self._uids) == 0:
return {KeyFlags.Certify}
else:
user = next(iter(self.userids))
# RFC 4880 says that primary keys *must* be capable of certification
return {KeyFlags.Certify} | user.selfsig.key_flags
return next(self.self_signatures).key_flags
def _sign(self, subject, sig, **prefs):
"""
The actual signing magic happens here.
:param subject: The subject to sign
:param sig: The :py:obj:`PGPSignature` object the new signature is to be encapsulated within
:returns: ``sig``, after the signature is added to it.
"""
user = prefs.pop('user', None)
uid = None
if user is not None:
uid = self.get_uid(user)
else:
uid = next(iter(self.userids), None)
if uid is None and self.parent is not None:
uid = next(iter(self.parent.userids), None)
if sig.hash_algorithm is None:
sig._signature.halg = next((h for h in uid.selfsig.hashprefs if h.is_supported), HashAlgorithm.SHA256)
if uid is not None and sig.hash_algorithm not in uid.selfsig.hashprefs:
warnings.warn("Selected hash algorithm not in key preferences", stacklevel=4)
# signature options that can be applied at any level
expires = prefs.pop('expires', None)
notation = prefs.pop('notation', None)
revocable = prefs.pop('revocable', True)
policy_uri = prefs.pop('policy_uri', None)
intended_recipients = prefs.pop('intended_recipients', [])
for intended_recipient in intended_recipients:
if isinstance(intended_recipient, PGPKey) and isinstance(intended_recipient._key, PubKeyV4):
sig._signature.subpackets.addnew('IntendedRecipient', hashed=True, version=4,
intended_recipient=intended_recipient.fingerprint)
elif isinstance(intended_recipient, Fingerprint):
# FIXME: what if it's not a v4 fingerprint?
sig._signature.subpackets.addnew('IntendedRecipient', hashed=True, version=4,
intended_recipient=intended_recipient)
else:
warnings.warn("Intended Recipient is not a PGPKey, ignoring")
if expires is not None:
# expires should be a timedelta, so if it's a datetime, turn it into a timedelta
if isinstance(expires, datetime):
expires = expires - self.created
sig._signature.subpackets.addnew('SignatureExpirationTime', hashed=True, expires=expires)
if revocable is False:
sig._signature.subpackets.addnew('Revocable', hashed=True, bflag=revocable)
if notation is not None:
for name, value in notation.items():
# mark all notations as human readable unless value is a bytearray
flags = NotationDataFlags.HumanReadable
if isinstance(value, bytearray):
flags = 0x00
sig._signature.subpackets.addnew('NotationData', hashed=True, flags=flags, name=name, value=value)
if policy_uri is not None:
sig._signature.subpackets.addnew('Policy', hashed=True, uri=policy_uri)
if user is not None and uid is not None:
signers_uid = "{:s}".format(uid)
sig._signature.subpackets.addnew('SignersUserID', hashed=True, userid=signers_uid)
# handle an edge case for timestamp signatures vs standalone signatures
if sig.type == SignatureType.Timestamp and len(sig._signature.subpackets._hashed_sp) > 1:
sig._signature.sigtype = SignatureType.Standalone
if prefs.pop('include_issuer_fingerprint', True):
if isinstance(self._key, PrivKeyV4):
sig._signature.subpackets.addnew('IssuerFingerprint', hashed=True, _version=4, _issuer_fpr=self.fingerprint)
sigdata = sig.hashdata(subject)
h2 = sig.hash_algorithm.hasher
h2.update(sigdata)
sig._signature.hash2 = bytearray(h2.digest()[:2])
_sig = self._key.sign(sigdata, getattr(hashes, sig.hash_algorithm.name)())
if _sig is NotImplemented:
raise NotImplementedError(self.key_algorithm)
sig._signature.signature.from_signer(_sig)
sig._signature.update_hlen()
return sig
@KeyAction(KeyFlags.Sign, is_unlocked=True, is_public=False)
def sign(self, subject, **prefs):
"""
Sign text, a message, or a timestamp using this key.
:param subject: The text to be signed
:type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
The following optional keyword arguments can be used with :py:meth:`PGPKey.sign`, as well as
:py:meth:`PGPKey.certify`, :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`:
:keyword expires: Set an expiration date for this signature
:type expires: :py:obj:`~datetime.datetime`, :py:obj:`~datetime.timedelta`
:keyword notation: Add arbitrary notation data to this signature.
:type notation: ``dict``
:keyword policy_uri: Add a URI to the signature that should describe the policy under which the signature
was issued.
:type policy_uri: ``str``
:keyword revocable: If ``False``, this signature will be marked non-revocable
:type revocable: ``bool``
:keyword user: Specify which User ID to use when creating this signature. Also adds a "Signer's User ID"
to the signature.
:type user: ``str``
:keyword created: Specify the time that the signature should be made. If unset or None,
it will use the present time.
:type created: :py:obj:`~datetime.datetime`
:keyword intended_recipients: Specify a list of :py:obj:`PGPKey` objects that will be encrypted to.
:type intended_recipients: ``list``
:keyword include_issuer_fingerprint: Whether to include a hashed subpacket indicating the issuer fingerprint.
(only for v4 keys, defaults to True)
:type include_issuer_fingerprint: ``bool``
"""
sig_type = SignatureType.BinaryDocument
hash_algo = prefs.pop('hash', None)
if subject is None:
sig_type = SignatureType.Timestamp
if isinstance(subject, PGPMessage):
if subject.type == 'cleartext':
sig_type = SignatureType.CanonicalDocument
subject = subject.message
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
return self._sign(subject, sig, **prefs)
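# --- illustrative usage sketch (not part of the original source) ---
# Creating a detached signature over text and verifying it with the public half.
#
#   sig = privkey.sign("some signed text")               # detached PGPSignature
#   assert privkey.pubkey.verify("some signed text", sig)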
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def certify(self, subject, level=SignatureType.Generic_Cert, **prefs):
"""
certify(subject, level=SignatureType.Generic_Cert, **prefs)
Sign a key or a user id within a key.
:param subject: The user id or key to be certified.
:type subject: :py:obj:`PGPKey`, :py:obj:`PGPUID`
:param level: :py:obj:`~constants.SignatureType.Generic_Cert`, :py:obj:`~constants.SignatureType.Persona_Cert`,
:py:obj:`~constants.SignatureType.Casual_Cert`, or :py:obj:`~constants.SignatureType.Positive_Cert`.
Only used if subject is a :py:obj:`PGPUID`; otherwise, it is ignored.
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.certify`.
These optional keywords only make sense, and thus only have an effect, when self-signing a key or User ID:
:keyword usage: A ``set`` of key usage flags, as :py:obj:`~constants.KeyFlags`.
This keyword is ignored for non-self-certifications.
:type usage: ``set``
:keyword ciphers: A list of preferred symmetric ciphers, as :py:obj:`~constants.SymmetricKeyAlgorithm`.
This keyword is ignored for non-self-certifications.
:type ciphers: ``list``
:keyword hashes: A list of preferred hash algorithms, as :py:obj:`~constants.HashAlgorithm`.
This keyword is ignored for non-self-certifications.
:type hashes: ``list``
:keyword compression: A list of preferred compression algorithms, as :py:obj:`~constants.CompressionAlgorithm`.
This keyword is ignored for non-self-certifications.
:type compression: ``list``
:keyword key_expiration: Specify a key expiration date for when this key should expire, or a
:py:obj:`~datetime.timedelta` of how long after the key was created it should expire.
This keyword is ignored for non-self-certifications.
:type key_expiration: :py:obj:`datetime.datetime`, :py:obj:`datetime.timedelta`
:keyword attested_certifications: A list of third-party certifications, as :py:obj:`PGPSignature`, that
the certificate holder wants to attest to for redistribution with the certificate.
Alternatively, any element in the list can be a ``bytes`` or ``bytearray`` object
of the appropriate length (the length of this certification's digest).
This keyword is only used for signatures of type Attestation.
:type attested_certifications: ``list``
:keyword keyserver: Specify the URI of the preferred key server of the user.
This keyword is ignored for non-self-certifications.
:type keyserver: ``str``, ``unicode``, ``bytes``
:keyword keyserver_flags: A set of Key Server Preferences, as :py:obj:`~constants.KeyServerPreferences`.
:type keyserver_flags: ``set``
:keyword primary: Whether or not to consider the certified User ID as the primary one.
This keyword is ignored for non-self-certifications, and any certifications directly on keys.
:type primary: ``bool``
These optional keywords only make sense, and thus only have an effect, when signing another key or User ID:
:keyword trust: Specify the level and amount of trust to assert when certifying a public key. Should be a tuple
of two ``int`` s, specifying the trust level and trust amount. See
`RFC 4880 Section 5.2.3.13. Trust Signature <https://tools.ietf.org/html/rfc4880#section-5.2.3.13>`_
for more on what these values mean.
:type trust: ``tuple`` of two ``int`` s
:keyword regex: Specify a regular expression to constrain the specified trust signature in the resulting signature.
Symbolically signifies that the specified trust signature only applies to User IDs which match
this regular expression.
This is meaningless without also specifying trust level and amount.
:type regex: ``str``
:keyword exportable: Whether this certification is exportable or not.
:type exportable: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig_type = level
if isinstance(subject, PGPKey):
sig_type = SignatureType.DirectlyOnKey
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense in certifications
usage = prefs.pop('usage', None)
exportable = prefs.pop('exportable', None)
if usage is not None:
sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)
if exportable is not None:
sig._signature.subpackets.addnew('ExportableCertification', hashed=True, bflag=exportable)
keyfp = self.fingerprint
if isinstance(subject, PGPKey):
keyfp = subject.fingerprint
if isinstance(subject, PGPUID) and subject._parent is not None:
keyfp = subject._parent.fingerprint
if keyfp == self.fingerprint:
# signature options that only make sense in self-certifications
cipher_prefs = prefs.pop('ciphers', None)
hash_prefs = prefs.pop('hashes', None)
compression_prefs = prefs.pop('compression', None)
key_expires = prefs.pop('key_expiration', None)
keyserver_flags = prefs.pop('keyserver_flags', None)
keyserver = prefs.pop('keyserver', None)
primary_uid = prefs.pop('primary', None)
attested_certifications = prefs.pop('attested_certifications', [])
if key_expires is not None:
# key expires should be a timedelta, so if it's a datetime, turn it into a timedelta
if isinstance(key_expires, datetime):
key_expires = key_expires - self.created
sig._signature.subpackets.addnew('KeyExpirationTime', hashed=True, expires=key_expires)
if cipher_prefs is not None:
sig._signature.subpackets.addnew('PreferredSymmetricAlgorithms', hashed=True, flags=cipher_prefs)
if hash_prefs:
sig._signature.subpackets.addnew('PreferredHashAlgorithms', hashed=True, flags=hash_prefs)
if sig.hash_algorithm is None:
sig._signature.halg = hash_prefs[0]
if sig.hash_algorithm is None:
sig._signature.halg = HashAlgorithm.SHA256
if compression_prefs is not None:
sig._signature.subpackets.addnew('PreferredCompressionAlgorithms', hashed=True, flags=compression_prefs)
if keyserver_flags is not None:
sig._signature.subpackets.addnew('KeyServerPreferences', hashed=True, flags=keyserver_flags)
if keyserver is not None:
sig._signature.subpackets.addnew('PreferredKeyServer', hashed=True, uri=keyserver)
if primary_uid is not None:
sig._signature.subpackets.addnew('PrimaryUserID', hashed=True, primary=primary_uid)
cert_sigtypes = {SignatureType.Generic_Cert, SignatureType.Persona_Cert,
SignatureType.Casual_Cert, SignatureType.Positive_Cert,
SignatureType.CertRevocation}
# Features is always set on certifications:
if sig._signature.sigtype in cert_sigtypes:
sig._signature.subpackets.addnew('Features', hashed=True, flags=Features.pgpy_features)
# If this is an attestation, then we must include an Attested Certifications subpacket:
if sig._signature.sigtype == SignatureType.Attestation:
attestations = set()
for attestation in attested_certifications:
if isinstance(attestation, PGPSignature) and attestation.type in cert_sigtypes:
h = sig.hash_algorithm.hasher
h.update(attestation._signature.canonical_bytes())
attestations.add(h.digest())
elif isinstance(attestation, (bytes,bytearray)) and len(attestation) == sig.hash_algorithm.digest_size:
attestations.add(attestation)
else:
warnings.warn("Attested Certification element is neither a PGPSignature certification nor " +
"a bytes object of size %d, ignoring"%(sig.hash_algorithm.digest_size))
sig._signature.subpackets.addnew('AttestedCertifications', hashed=True, attested_certifications=b''.join(sorted(attestations)))
else:
# signature options that only make sense in non-self-certifications
trust = prefs.pop('trust', None)
regex = prefs.pop('regex', None)
if trust is not None:
sig._signature.subpackets.addnew('TrustSignature', hashed=True, level=trust[0], amount=trust[1])
if regex is not None:
sig._signature.subpackets.addnew('RegularExpression', hashed=True, regex=regex)
return self._sign(subject, sig, **prefs)
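# --- illustrative usage sketch (not part of the original source) ---
# Certifying a User ID on someone else's public key; 'their_pubkey' is a
# placeholder for a previously loaded PGPKey.
#
#   cert = my_privkey.certify(their_pubkey.userids[0], SignatureType.Persona_Cert)
#   their_pubkey.userids[0] |= cert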
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def revoke(self, target, **prefs):
"""
Revoke a key, a subkey, or all current certification signatures of a User ID that were generated by this key so far.
:param target: The key to revoke
:type target: :py:obj:`PGPKey`, :py:obj:`PGPUID`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoke`.
:keyword reason: Defaults to :py:obj:`constants.RevocationReason.NotSpecified`
:type reason: One of :py:obj:`constants.RevocationReason`.
:keyword comment: Defaults to an empty string.
:type comment: ``str``
"""
hash_algo = prefs.pop('hash', None)
if isinstance(target, PGPUID):
sig_type = SignatureType.CertRevocation
elif isinstance(target, PGPKey):
##TODO: check to make sure that the key that is being revoked:
# - is this key
# - is one of this key's subkeys
# - specifies this key as its revocation key
if target.is_primary:
sig_type = SignatureType.KeyRevocation
else:
sig_type = SignatureType.SubkeyRevocation
else: # pragma: no cover
raise TypeError
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense when revoking
reason = prefs.pop('reason', RevocationReason.NotSpecified)
comment = prefs.pop('comment', "")
sig._signature.subpackets.addnew('ReasonForRevocation', hashed=True, code=reason, string=comment)
return self._sign(target, sig, **prefs)
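# --- illustrative usage sketch (not part of the original source) ---
# Revoking one's own primary key; the reason and comment are placeholders.
#
#   rsig = privkey.revoke(privkey, reason=RevocationReason.Retired,
#                         comment="no longer in use")
#   privkey |= rsig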
@KeyAction(is_unlocked=True, is_public=False)
def revoker(self, revoker, **prefs):
"""
Generate a signature that specifies another key as being valid for revoking this key.
:param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
:type revoker: :py:obj:`PGPKey`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoker`.
:keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
this has no other effect.
:type sensitive: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
# signature options that only make sense when adding a revocation key
sensitive = prefs.pop('sensitive', False)
keyclass = RevocationKeyClass.Normal | (RevocationKeyClass.Sensitive if sensitive else 0x00)
sig._signature.subpackets.addnew('RevocationKey',
hashed=True,
algorithm=revoker.key_algorithm,
fingerprint=revoker.fingerprint,
keyclass=keyclass)
# revocation keys should really not be revocable themselves
prefs['revocable'] = False
return self._sign(self, sig, **prefs)
@KeyAction(is_unlocked=True, is_public=False)
def bind(self, key, **prefs):
"""
Bind a subkey to this key.
In addition to the optional keyword arguments accepted for self-signatures by :py:meth:`PGPKey.certify`,
the following optional keyword arguments can be used with :py:meth:`PGPKey.bind`.
:keyword crosssign: If ``False``, do not attempt a cross-signature (defaults to ``True``). Subkeys
which are not capable of signing will not produce a cross-signature in any case.
Setting ``crosssign`` to ``False`` is likely to produce subkeys that will be rejected
by some other OpenPGP implementations.
:type crosssign: ``bool``
"""
hash_algo = prefs.pop('hash', None)
if self.is_primary and not key.is_primary:
sig_type = SignatureType.Subkey_Binding
elif key.is_primary and not self.is_primary:
sig_type = SignatureType.PrimaryKey_Binding
else: # pragma: no cover
raise PGPError
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
if sig_type == SignatureType.Subkey_Binding:
# signature options that only make sense in subkey binding signatures
usage = prefs.pop('usage', None)
if usage is not None:
sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)
crosssig = None
# if possible, have the subkey create a primary key binding signature
if key.key_algorithm.can_sign and prefs.pop('crosssign', True):
subkeyid = key.fingerprint.keyid
if not key.is_public:
crosssig = key.bind(self)
elif subkeyid in self.subkeys: # pragma: no cover
crosssig = self.subkeys[subkeyid].bind(self)
if crosssig is None:
if usage is None:
raise PGPError('subkey with no key usage flags (may be used for any purpose, including signing) requires a cross-signature')
if KeyFlags.Sign in usage:
raise PGPError('subkey marked for signing usage requires a cross-signature')
else:
sig._signature.subpackets.addnew('EmbeddedSignature', hashed=False, _sig=crosssig._signature)
return self._sign(key, sig, **prefs)
def verify(self, subject, signature=None):
"""
Verify a subject with a signature using this key.
:param subject: The subject to verify
:type subject: ``str``, ``unicode``, ``None``, :py:obj:`PGPMessage`, :py:obj:`PGPKey`, :py:obj:`PGPUID`
:param signature: If the signature is detached, it should be specified here.
:type signature: :py:obj:`PGPSignature`
:returns: :py:obj:`~pgpy.types.SignatureVerification`
"""
sspairs = []
# some type checking
if not isinstance(subject, (type(None), PGPMessage, PGPKey, PGPUID, PGPSignature, six.string_types, bytes, bytearray)):
raise TypeError("Unexpected subject value: {:s}".format(str(type(subject))))
if not isinstance(signature, (type(None), PGPSignature)):
raise TypeError("Unexpected signature value: {:s}".format(str(type(signature))))
def _filter_sigs(sigs):
_ids = {self.fingerprint.keyid} | set(self.subkeys)
return [ sig for sig in sigs if sig.signer in _ids ]
# collect signature(s)
if signature is None:
if isinstance(subject, PGPMessage):
sspairs += [ (sig, subject.message) for sig in _filter_sigs(subject.signatures) ]
if isinstance(subject, (PGPUID, PGPKey)):
sspairs += [ (sig, subject) for sig in _filter_sigs(subject.__sig__) ]
if isinstance(subject, PGPKey):
# user ids
sspairs += [ (sig, uid) for uid in subject.userids for sig in _filter_sigs(uid.__sig__) ]
# user attributes
sspairs += [ (sig, ua) for ua in subject.userattributes for sig in _filter_sigs(ua.__sig__) ]
# subkey binding signatures
sspairs += [ (sig, subkey) for subkey in subject.subkeys.values() for sig in _filter_sigs(subkey.__sig__) ]
elif signature.signer in {self.fingerprint.keyid} | set(self.subkeys):
sspairs += [(signature, subject)]
if len(sspairs) == 0:
raise PGPError("No signatures to verify")
# finally, start verifying signatures
sigv = SignatureVerification()
for sig, subj in sspairs:
if self.fingerprint.keyid != sig.signer and sig.signer in self.subkeys:
sigv &= self.subkeys[sig.signer].verify(subj, sig)
else:
verified = self._key.verify(sig.hashdata(subj), sig.__sig__, getattr(hashes, sig.hash_algorithm.name)())
if verified is NotImplemented:
raise NotImplementedError(sig.key_algorithm)
sigv.add_sigsubj(sig, self, subj, verified)
return sigv
@KeyAction(KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage, is_public=True)
def encrypt(self, message, sessionkey=None, **prefs):
"""encrypt(message[, sessionkey=None], **prefs)
Encrypt a PGPMessage using this key.
:param message: The message to encrypt.
:type message: :py:obj:`PGPMessage`
:param sessionkey: Provide a session key to use when encrypting something. Default is ``None``.
If ``None``, a session key of the appropriate length will be generated randomly.
.. warning::
Care should be taken when making use of this option! Session keys *absolutely need*
to be unpredictable! Use the ``gen_key()`` method on the desired
:py:obj:`~constants.SymmetricKeyAlgorithm` to generate the session key!
:type sessionkey: ``bytes``, ``str``
:raises: :py:exc:`~errors.PGPEncryptionError` if encryption failed for any reason.
:returns: A new :py:obj:`PGPMessage` with the encrypted contents of ``message``
The following optional keyword arguments can be used with :py:meth:`PGPKey.encrypt`:
:keyword cipher: Specifies the symmetric block cipher to use when encrypting the message.
:type cipher: :py:obj:`~constants.SymmetricKeyAlgorithm`
:keyword user: Specifies the User ID to use as the recipient for this encryption operation, for the purposes of
preference defaults and selection validation.
:type user: ``str``, ``unicode``
"""
user = prefs.pop('user', None)
uid = None
if user is not None:
uid = self.get_uid(user)
else:
uid = next(iter(self.userids), None)
if uid is None and self.parent is not None:
uid = next(iter(self.parent.userids), None)
pref_cipher = next(c for c in uid.selfsig.cipherprefs if c.is_supported)
cipher_algo = prefs.pop('cipher', pref_cipher)
if cipher_algo not in uid.selfsig.cipherprefs:
warnings.warn("Selected symmetric algorithm not in key preferences", stacklevel=3)
if message.is_compressed and message._compression not in uid.selfsig.compprefs:
warnings.warn("Selected compression algorithm not in key preferences", stacklevel=3)
if sessionkey is None:
sessionkey = cipher_algo.gen_key()
# set up a new PKESessionKeyV3
pkesk = PKESessionKeyV3()
pkesk.encrypter = bytearray(binascii.unhexlify(self.fingerprint.keyid.encode('latin-1')))
pkesk.pkalg = self.key_algorithm
# pkesk.encrypt_sk(self.__key__, cipher_algo, sessionkey)
pkesk.encrypt_sk(self._key, cipher_algo, sessionkey)
if message.is_encrypted: # pragma: no cover
_m = message
else:
_m = PGPMessage()
skedata = IntegrityProtectedSKEDataV1()
skedata.encrypt(sessionkey, cipher_algo, message.__bytes__())
_m |= skedata
_m |= pkesk
return _m
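# --- illustrative usage sketch (not part of the original source) ---
# Public-key encryption of a message to a recipient's key, then decryption
# with the matching private key; both key objects are placeholders.
#
#   msg = PGPMessage.new("sensitive contents")
#   enc = recipient_pubkey.encrypt(msg)          # new, encrypted PGPMessage
#   dec = recipient_privkey.decrypt(enc)         # requires the unlocked private key
#   assert dec.message == "sensitive contents"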
@KeyAction(is_unlocked=True, is_public=False)
def decrypt(self, message):
"""
Decrypt a PGPMessage using this key.
:param message: An encrypted :py:obj:`PGPMessage`
:raises: :py:exc:`~errors.PGPError` if the key is not private, or protected but not unlocked.
:raises: :py:exc:`~errors.PGPDecryptionError` if decryption fails for any other reason.
:returns: A new :py:obj:`PGPMessage` with the decrypted contents of ``message``.
"""
if not message.is_encrypted:
warnings.warn("This message is not encrypted", stacklevel=3)
return message
if self.fingerprint.keyid not in message.encrypters:
sks = set(self.subkeys)
mis = set(message.encrypters)
if sks & mis:
skid = list(sks & mis)[0]
return self.subkeys[skid].decrypt(message)
raise PGPError("Cannot decrypt the provided message with this key")
pkesk = next(pk for pk in message._sessionkeys if pk.pkalg == self.key_algorithm and pk.encrypter == self.fingerprint.keyid)
alg, key = pkesk.decrypt_sk(self._key)
# now that we have the symmetric cipher used and the key, we can decrypt the actual message
decmsg = PGPMessage()
decmsg.parse(message.message.decrypt(key, alg))
return decmsg
def parse(self, data):
unarmored = self.ascii_unarmor(data)
data = unarmored['body']
if unarmored['magic'] is not None and 'KEY' not in unarmored['magic']:
raise ValueError('Expected: KEY. Got: {}'.format(str(unarmored['magic'])))
if unarmored['headers'] is not None:
self.ascii_headers = unarmored['headers']
# parse packets
# keys will hold other keys parsed here
keys = collections.OrderedDict()
# orphaned will hold all non-opaque orphaned packets
orphaned = []
# last holds the last non-signature thing processed
##TODO: see issue #141 and fix this better
_getpkt = lambda d: (Packet(d) if d else None) # flake8: noqa
# some packets are filtered out
getpkt = filter(lambda p: p.header.tag != PacketTag.Trust, iter(functools.partial(_getpkt, data), None))
def pktgrouper():
class PktGrouper(object):
def __init__(self):
self.last = None
def __call__(self, pkt):
if pkt.header.tag != PacketTag.Signature:
self.last = '{:02X}_{:s}'.format(id(pkt), pkt.__class__.__name__)
return self.last
return PktGrouper()
while True:
for group in iter(group for _, group in itertools.groupby(getpkt, key=pktgrouper()) if not _.endswith('Opaque')):
pkt = next(group)
# deal with pkt first
if isinstance(pkt, Key):
pgpobj = (self if self._key is None else PGPKey()) | pkt
elif isinstance(pkt, (UserID, UserAttribute)):
pgpobj = PGPUID() | pkt
else: # pragma: no cover
break
# add signatures to whatever we got
[ operator.ior(pgpobj, PGPSignature() | sig) for sig in group if not isinstance(sig, Opaque) ]
# and file away pgpobj
if isinstance(pgpobj, PGPKey):
if pgpobj.is_primary:
keys[(pgpobj.fingerprint.keyid, pgpobj.is_public)] = pgpobj
else:
keys[next(reversed(keys))] |= pgpobj
elif isinstance(pgpobj, PGPUID):
# parent is likely the most recently parsed primary key
keys[next(reversed(keys))] |= pgpobj
else: # pragma: no cover
break
else:
# finished normally
break
# this will only be reached if the inner loop hit a break
warnings.warn("Warning: Orphaned packet detected! {:s}".format(repr(pkt)), stacklevel=2) # pragma: no cover
orphaned.append(pkt) # pragma: no cover
for pkt in group: # pragma: no cover
orphaned.append(pkt)
# remove the reference to self from keys
[ keys.pop((getattr(self, 'fingerprint.keyid', '~'), None), t) for t in (True, False) ]
# return {'keys': keys, 'orphaned': orphaned}
return keys
class PGPKeyring(collections_abc.Container, collections_abc.Iterable, collections_abc.Sized):
def __init__(self, *args):
"""
PGPKeyring objects represent in-memory keyrings that can contain any combination of supported private and public
keys. It cannot currently be conveniently exported to a format that can be understood by GnuPG.
"""
super(PGPKeyring, self).__init__()
self._keys = {}
self._pubkeys = collections.deque()
self._privkeys = collections.deque()
self._aliases = collections.deque([{}])
self.load(*args)
def __contains__(self, alias):
aliases = set().union(*self._aliases)
if isinstance(alias, six.string_types):
return alias in aliases or alias.replace(' ', '') in aliases
return alias in aliases # pragma: no cover
def __len__(self):
return len(self._keys)
def __iter__(self): # pragma: no cover
for pgpkey in itertools.chain(self._pubkeys, self._privkeys):
yield pgpkey
def _get_key(self, alias):
for m in self._aliases:
if alias in m:
return self._keys[m[alias]]
if alias.replace(' ', '') in m:
return self._keys[m[alias.replace(' ', '')]]
raise KeyError(alias)
def _get_keys(self, alias):
return [self._keys[m[alias]] for m in self._aliases if alias in m]
def _sort_alias(self, alias):
# remove alias from all levels of _aliases, and sort by created time and key half
# so the order of _aliases from left to right:
# - newer keys come before older ones
# - private keys come before public ones
#
# this list is sorted in the opposite direction from that, because they will be placed into self._aliases
# from right to left.
pkids = sorted(list(set().union(m.pop(alias) for m in self._aliases if alias in m)),
key=lambda pkid: (self._keys[pkid].created, self._keys[pkid].is_public))
# drop the now-sorted aliases into place
for depth, pkid in enumerate(pkids):
self._aliases[depth][alias] = pkid
# finally, remove any empty dicts left over
while {} in self._aliases: # pragma: no cover
self._aliases.remove({})
def _add_alias(self, alias, pkid):
# brand new alias never seen before!
if alias not in self:
self._aliases[-1][alias] = pkid
# this is a duplicate alias->key link; ignore it
elif alias in self and pkid in set(m[alias] for m in self._aliases if alias in m):
pass # pragma: no cover
# this is an alias that already exists, but points to a key that is not already referenced by it
else:
adepth = len(self._aliases) - len([None for m in self._aliases if alias in m]) - 1
# all alias maps have this alias, so increase total depth by 1
if adepth == -1:
self._aliases.appendleft({})
adepth = 0
self._aliases[adepth][alias] = pkid
self._sort_alias(alias)
def _add_key(self, pgpkey):
pkid = id(pgpkey)
if pkid not in self._keys:
self._keys[pkid] = pgpkey
# add to _{pub,priv}keys if this is either a primary key, or a subkey without one
if pgpkey.parent is None:
if pgpkey.is_public:
self._pubkeys.append(pkid)
else:
self._privkeys.append(pkid)
# aliases
self._add_alias(pgpkey.fingerprint, pkid)
self._add_alias(pgpkey.fingerprint.keyid, pkid)
self._add_alias(pgpkey.fingerprint.shortid, pkid)
for uid in pgpkey.userids:
self._add_alias(uid.name, pkid)
if uid.comment:
self._add_alias(uid.comment, pkid)
if uid.email:
self._add_alias(uid.email, pkid)
# subkeys
for subkey in pgpkey.subkeys.values():
self._add_key(subkey)
def load(self, *args):
r"""
Load all keys provided into this keyring object.
:param \*args: Each arg in ``args`` can be any of the formats supported by :py:meth:`PGPKey.from_file` and
:py:meth:`PGPKey.from_blob` or a :py:class:`PGPKey` instance, or a ``list`` or ``tuple`` of these.
:type \*args: ``list``, ``tuple``, ``str``, ``unicode``, ``bytes``, ``bytearray``
:returns: a ``set`` containing the unique fingerprints of all of the keys that were loaded during this operation.
"""
def _preiter(first, iterable):
yield first
for item in iterable:
yield item
loaded = set()
for key in iter(item for ilist in iter(ilist if isinstance(ilist, (tuple, list)) else [ilist] for ilist in args)
for item in ilist):
keys = {}
if isinstance(key, PGPKey):
_key = key
elif os.path.isfile(key):
_key, keys = PGPKey.from_file(key)
else:
_key, keys = PGPKey.from_blob(key)
for ik in _preiter(_key, keys.values()):
self._add_key(ik)
loaded |= {ik.fingerprint} | {isk.fingerprint for isk in ik.subkeys.values()}
return list(loaded)
@contextlib.contextmanager
def key(self, identifier):
"""
A context-manager method. Yields the first :py:obj:`PGPKey` object that matches the provided identifier.
:param identifier: The identifier to use to select a loaded key.
:type identifier: :py:exc:`PGPMessage`, :py:exc:`PGPSignature`, ``str``
:raises: :py:exc:`KeyError` if there is no loaded key that satisfies the identifier.
"""
if isinstance(identifier, PGPMessage):
for issuer in identifier.issuers:
if issuer in self:
identifier = issuer
break
if isinstance(identifier, PGPSignature):
identifier = identifier.signer
yield self._get_key(identifier)
def fingerprints(self, keyhalf='any', keytype='any'):
"""
List loaded fingerprints with some optional filtering.
:param keyhalf: Can be 'any', 'public', or 'private'. If 'public' or 'private', the fingerprints of keys of
the other type will not be included in the results.
:type keyhalf: ``str``
:param keytype: Can be 'any', 'primary', or 'sub'. If 'primary' or 'sub', the fingerprints of keys of
the other type will not be included in the results.
:type keytype: ``str``
:returns: a ``set`` of fingerprints of keys matching the filters specified.
"""
return {pk.fingerprint for pk in self._keys.values()
if pk.is_primary in [True if keytype in ['primary', 'any'] else None,
False if keytype in ['sub', 'any'] else None]
if pk.is_public in [True if keyhalf in ['public', 'any'] else None,
False if keyhalf in ['private', 'any'] else None]}
def unload(self, key):
"""
Unload a loaded key and its subkeys.
:param key: The key to unload.
:type key: :py:obj:`PGPKey`
The easiest way to do this is to select a key using :py:meth:`PGPKeyring.key` first::
with keyring.key("DSA von TestKey") as key:
keyring.unload(key)
"""
assert isinstance(key, PGPKey)
pkid = id(key)
if pkid in self._keys:
# remove references
[ kd.remove(pkid) for kd in [self._pubkeys, self._privkeys] if pkid in kd ]
# remove the key
self._keys.pop(pkid)
# remove aliases
for m, a in [ (m, a) for m in self._aliases for a, p in m.items() if p == pkid ]:
m.pop(a)
# do a re-sort of this alias if it was not unique
if a in self:
self._sort_alias(a)
# if key is a primary key, unload its subkeys as well
if key.is_primary:
[ self.unload(sk) for sk in key.subkeys.values() ]
``` |
{
"source": "J-M0/yaspin",
"score": 3
} |
#### File: yaspin/examples/colors.py
```python
import time
from yaspin import yaspin
def all_colors():
with yaspin(text="Colors!").bouncingBar as sp:
time.sleep(2)
colors = ("red", "green", "yellow", "blue", "magenta", "cyan", "white")
for color in colors:
sp.color, sp.text = color, color
time.sleep(2)
def all_highlights():
with yaspin(text="Highlights!").pong as sp:
time.sleep(2)
highlights = (
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
"on_white",
"on_grey",
)
for highlight in highlights:
text = "On {0} color".format(highlight.split("_")[1])
sp.on_color, sp.text = highlight, text
time.sleep(2)
def all_attributes():
with yaspin(text="Attributes!").point as sp:
time.sleep(2)
attrs = ("bold", "dark", "underline", "blink", "reverse", "concealed")
# New spinner instance should be created every iteration since
# multiple simultaneous color attributes are supported. Hence,
# updating attribute of the instance will add new attribute to
# the existing list of previous attributes.
for attr in attrs:
with yaspin().point as sp:
sp.attrs, sp.text = [attr], attr
time.sleep(2)
def main():
all_colors()
all_highlights()
all_attributes()
if __name__ == "__main__":
main()
```
#### File: yaspin/examples/finalizers.py
```python
import time
from yaspin import yaspin
def default_finalizers():
with yaspin(text="Downloading...") as sp:
time.sleep(2)
sp.ok()
with yaspin(text="Downloading...") as sp:
time.sleep(2)
sp.fail()
def custom_finalizers():
with yaspin(text="Processing...") as sp:
time.sleep(2)
# Make finalizer green
sp.green.ok("✔")
with yaspin(text="Processing...") as sp:
time.sleep(2)
# Make finalizer red
sp.red.fail("✘")
def main():
default_finalizers()
custom_finalizers()
if __name__ == "__main__":
main()
```
#### File: yaspin/examples/right_spinner.py
```python
import time
from yaspin import yaspin
def main():
with yaspin(text="Right spinner", side="right", color="cyan") as sp:
time.sleep(2)
# Switch to left spinner
sp.side = "left" # or just ``sp.left``
sp.text = "Left spinner"
time.sleep(2)
if __name__ == "__main__":
main()
```
#### File: yaspin/yaspin/core.py
```python
import contextlib
import datetime
import functools
import itertools
import signal
import sys
import threading
import time
from typing import List, Set, Union
from termcolor import colored
from .base_spinner import Spinner, default_spinner
from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS
from .helpers import to_unicode
class Yaspin: # pylint: disable=useless-object-inheritance,too-many-instance-attributes
"""Implements a context manager that spawns a thread
to write spinner frames into a tty (stdout) during
context execution.
"""
# When Python finds its output attached to a terminal,
# it sets the sys.stdout.encoding attribute to the terminal's encoding.
# The print statement's handler will automatically encode unicode
# arguments into bytes.
def __init__( # pylint: disable=too-many-arguments
self,
spinner=None,
text="",
color=None,
on_color=None,
attrs=None,
reversal=False,
side="left",
sigmap=None,
timer=False,
):
# Spinner
self._spinner = self._set_spinner(spinner)
self._frames = self._set_frames(self._spinner, reversal)
self._interval = self._set_interval(self._spinner)
self._cycle = self._set_cycle(self._frames)
# Color Specification
self._color = self._set_color(color) if color else color
self._on_color = self._set_on_color(on_color) if on_color else on_color
self._attrs = self._set_attrs(attrs) if attrs else set()
self._color_func = self._compose_color_func()
# Other
self._text = text
self._side = self._set_side(side)
self._reversal = reversal
self._timer = timer
self._start_time = None
self._stop_time = None
# Helper flags
self._stop_spin = None
self._hide_spin = None
self._spin_thread = None
self._last_frame = None
self._stdout_lock = threading.Lock()
self._hidden_level = 0
# Signals
# In Python 2 signal.SIG* are of type int.
# In Python 3 signal.SIG* are enums.
#
# Signal = Union[enum.Enum, int]
# SigHandler = Union[enum.Enum, Callable]
self._sigmap = sigmap if sigmap else {} # Dict[Signal, SigHandler]
# Maps signals to their default handlers in order to reset
# custom handlers set by ``sigmap`` at the cleanup phase.
self._dfl_sigmap = {} # Dict[Signal, SigHandler]
#
# Dunders
#
def __repr__(self):
return "<Yaspin frames={0!s}>".format(self._frames)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, traceback):
# Avoid stop() execution for the 2nd time
if self._spin_thread.is_alive():
self.stop()
return False # nothing is handled
def __call__(self, fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
def __getattr__(self, name):
# CLI spinners
if name in SPINNER_ATTRS:
from .spinners import Spinners # pylint: disable=import-outside-toplevel
sp = getattr(Spinners, name)
self.spinner = sp
# Color Attributes: "color", "on_color", "attrs"
elif name in COLOR_ATTRS:
attr_type = COLOR_MAP[name]
# Call appropriate property setters;
# _color_func is updated automatically by setters.
if attr_type == "attrs":
self.attrs = [name] # calls property setter
if attr_type in ("color", "on_color"):
setattr(self, attr_type, name) # calls property setter
# Side: "left" or "right"
elif name in ("left", "right"):
self.side = name # calls property setter
# Common error for unsupported attributes
else:
raise AttributeError(
"'{0}' object has no attribute: '{1}'".format(
self.__class__.__name__, name
)
)
return self
#
# Properties
#
@property
def spinner(self):
return self._spinner
@spinner.setter
def spinner(self, sp):
self._spinner = self._set_spinner(sp)
self._frames = self._set_frames(self._spinner, self._reversal)
self._interval = self._set_interval(self._spinner)
self._cycle = self._set_cycle(self._frames)
@property
def text(self):
return self._text
@text.setter
def text(self, txt):
self._text = txt
@property
def color(self):
return self._color
@color.setter
def color(self, value):
self._color = self._set_color(value) if value else value
self._color_func = self._compose_color_func() # update
@property
def on_color(self):
return self._on_color
@on_color.setter
def on_color(self, value):
self._on_color = self._set_on_color(value) if value else value
self._color_func = self._compose_color_func() # update
@property
def attrs(self):
return list(self._attrs)
@attrs.setter
def attrs(self, value):
new_attrs = self._set_attrs(value) if value else set()
self._attrs = self._attrs.union(new_attrs)
self._color_func = self._compose_color_func() # update
@property
def side(self):
return self._side
@side.setter
def side(self, value):
self._side = self._set_side(value)
@property
def reversal(self):
return self._reversal
@reversal.setter
def reversal(self, value):
self._reversal = value
self._frames = self._set_frames(self._spinner, self._reversal)
self._cycle = self._set_cycle(self._frames)
@property
def elapsed_time(self):
if self._start_time is None:
return 0
if self._stop_time is None:
return time.time() - self._start_time
return self._stop_time - self._start_time
#
# Public
#
def start(self):
if self._sigmap:
self._register_signal_handlers()
if sys.stdout.isatty():
self._hide_cursor()
self._start_time = time.time()
self._stop_time = None # Reset value to properly calculate subsequent spinner starts (if any) # pylint: disable=line-too-long
self._stop_spin = threading.Event()
self._hide_spin = threading.Event()
self._spin_thread = threading.Thread(target=self._spin)
self._spin_thread.start()
def stop(self):
self._stop_time = time.time()
if self._dfl_sigmap:
# Reset registered signal handlers to default ones
self._reset_signal_handlers()
if self._spin_thread:
self._stop_spin.set()
self._spin_thread.join()
sys.stdout.write("\r")
self._clear_line()
if sys.stdout.isatty():
self._show_cursor()
def hide(self):
"""Hide the spinner to allow for custom writing to the terminal."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and not self._hide_spin.is_set():
with self._stdout_lock:
# set the hidden spinner flag
self._hide_spin.set()
# clear the current line
sys.stdout.write("\r")
self._clear_line()
# flush the stdout buffer so the current line
# can be rewritten to
sys.stdout.flush()
@contextlib.contextmanager
def hidden(self):
"""Hide the spinner within a block, can be nested"""
if self._hidden_level == 0:
self.hide()
self._hidden_level += 1
try:
yield
finally:
self._hidden_level -= 1
if self._hidden_level == 0:
self.show()
def show(self):
"""Show the hidden spinner."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and self._hide_spin.is_set():
with self._stdout_lock:
# clear the hidden spinner flag
self._hide_spin.clear()
# clear the current line so the spinner is not appended to it
sys.stdout.write("\r")
self._clear_line()
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
with self._stdout_lock:
sys.stdout.write("\r")
self._clear_line()
if isinstance(text, (str, bytes)):
_text = to_unicode(text)
else:
_text = str(text)
# Ensure output is Unicode
assert isinstance(_text, str)
sys.stdout.write("{0}\n".format(_text))
def ok(self, text="OK"):
"""Set Ok (success) finalizer to a spinner."""
_text = text if text else "OK"
self._freeze(_text)
def fail(self, text="FAIL"):
"""Set fail finalizer to a spinner."""
_text = text if text else "FAIL"
self._freeze(_text)
#
# Protected
#
def _freeze(self, final_text):
"""Stop spinner, compose last frame and 'freeze' it."""
text = to_unicode(final_text)
self._last_frame = self._compose_out(text, mode="last")
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
with self._stdout_lock:
sys.stdout.write(self._last_frame)
def _spin(self):
while not self._stop_spin.is_set():
if self._hide_spin.is_set():
# Wait a bit to avoid wasting cycles
time.sleep(self._interval)
continue
# Compose output
spin_phase = next(self._cycle)
out = self._compose_out(spin_phase)
# Write
with self._stdout_lock:
sys.stdout.write(out)
self._clear_line()
sys.stdout.flush()
# Wait
self._stop_spin.wait(self._interval)
def _compose_color_func(self):
return functools.partial(
colored,
color=self._color,
on_color=self._on_color,
attrs=list(self._attrs),
)
def _compose_out(self, frame, mode=None):
# Ensure Unicode input
assert isinstance(frame, str)
assert isinstance(self._text, str)
text = self._text
# Colors
if self._color_func is not None:
frame = self._color_func(frame)
# Position
if self._side == "right":
frame, text = text, frame
if self._timer:
sec, fsec = divmod(round(100 * self.elapsed_time), 100)
text += " ({}.{:02.0f})".format(datetime.timedelta(seconds=sec), fsec)
# Mode
if not mode:
out = "\r{0} {1}".format(frame, text)
else:
out = "{0} {1}\n".format(frame, text)
# Ensure output is Unicode
assert isinstance(out, str)
return out
def _register_signal_handlers(self):
# SIGKILL cannot be caught or ignored, and the receiving
# process cannot perform any clean-up upon receiving this
# signal.
if signal.SIGKILL in self._sigmap.keys():
raise ValueError(
"Trying to set handler for SIGKILL signal. "
"SIGKILL cannot be cought or ignored in POSIX systems."
)
for sig, sig_handler in self._sigmap.items():
# A handler for a particular signal, once set, remains
# installed until it is explicitly reset. Store default
# signal handlers for subsequent reset at cleanup phase.
dfl_handler = signal.getsignal(sig)
self._dfl_sigmap[sig] = dfl_handler
# ``signal.SIG_DFL`` and ``signal.SIG_IGN`` are also valid
# signal handlers and are not callables.
if callable(sig_handler):
# ``signal.signal`` accepts handler function which is
# called with two arguments: signal number and the
# interrupted stack frame. ``functools.partial`` solves
# the problem of passing spinner instance into the handler
# function.
sig_handler = functools.partial(sig_handler, spinner=self)
signal.signal(sig, sig_handler)
def _reset_signal_handlers(self):
for sig, sig_handler in self._dfl_sigmap.items():
signal.signal(sig, sig_handler)
#
# Static
#
@staticmethod
def _set_color(value: str) -> str:
available_values = [k for k, v in COLOR_MAP.items() if v == "color"]
if value not in available_values:
raise ValueError(
"'{0}': unsupported color value. Use one of the: {1}".format(
value, ", ".join(available_values)
)
)
return value
@staticmethod
def _set_on_color(value: str) -> str:
available_values = [k for k, v in COLOR_MAP.items() if v == "on_color"]
if value not in available_values:
raise ValueError(
"'{0}': unsupported on_color value. "
"Use one of the: {1}".format(value, ", ".join(available_values))
)
return value
@staticmethod
def _set_attrs(attrs: List[str]) -> Set[str]:
available_values = [k for k, v in COLOR_MAP.items() if v == "attrs"]
for attr in attrs:
if attr not in available_values:
raise ValueError(
"'{0}': unsupported attribute value. "
"Use one of the: {1}".format(attr, ", ".join(available_values))
)
return set(attrs)
@staticmethod
def _set_spinner(spinner):
if hasattr(spinner, "frames") and hasattr(spinner, "interval"):
if not spinner.frames or not spinner.interval:
sp = default_spinner
else:
sp = spinner
else:
sp = default_spinner
return sp
@staticmethod
def _set_side(side: str) -> str:
if side not in ("left", "right"):
raise ValueError(
"'{0}': unsupported side value. " "Use either 'left' or 'right'."
)
return side
@staticmethod
def _set_frames(spinner: Spinner, reversal: bool) -> Union[str, List]:
uframes = None # unicode frames
uframes_seq = None # sequence of unicode frames
if isinstance(spinner.frames, str):
uframes = spinner.frames
# TODO (pavdmyt): support any type that implements iterable
if isinstance(spinner.frames, (list, tuple)):
# Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``
if spinner.frames and isinstance(spinner.frames[0], bytes):
uframes_seq = [to_unicode(frame) for frame in spinner.frames]
else:
uframes_seq = spinner.frames
_frames = uframes or uframes_seq
if not _frames:
# Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``.
# This code is very unlikely to be executed. However, it's still
# here to be on a safe side.
raise ValueError("{0!r}: no frames found in spinner".format(spinner))
# Builtin ``reversed`` returns reverse iterator,
# which adds unnecessary difficulty for returning
# unicode value;
# Hence using [::-1] syntax
frames = _frames[::-1] if reversal else _frames
return frames
@staticmethod
def _set_interval(spinner):
# Milliseconds to Seconds
return spinner.interval * 0.001
@staticmethod
def _set_cycle(frames):
return itertools.cycle(frames)
@staticmethod
def _hide_cursor():
sys.stdout.write("\033[?25l")
sys.stdout.flush()
@staticmethod
def _show_cursor():
sys.stdout.write("\033[?25h")
sys.stdout.flush()
@staticmethod
def _clear_line():
sys.stdout.write("\033[K")
``` |
{
"source": "jm12138/CannyDetector",
"score": 2
} |
#### File: CannyDetector/cannydet/units.py
```python
import cv2
import numpy as np
from scipy.signal import gaussian
def get_state_dict(filter_size=5, std=1.0, map_func=lambda x:x):
generated_filters = gaussian(filter_size, std=std).reshape([1, filter_size]).astype(np.float32)
gaussian_filter_horizontal = generated_filters[None, None, ...]
gaussian_filter_vertical = generated_filters.T[None, None, ...]
sobel_filter_horizontal = np.array([[[
[1., 0., -1.],
[2., 0., -2.],
[1., 0., -1.]]]],
dtype='float32'
)
sobel_filter_vertical = np.array([[[
[1., 2., 1.],
[0., 0., 0.],
[-1., -2., -1.]]]],
dtype='float32'
)
directional_filter = np.array(
[[[[ 0., 0., 0.],
[ 0., 1., -1.],
[ 0., 0., 0.]]],
[[[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.]]],
[[[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., -1., 0.]]],
[[[ 0., 0., 0.],
[ 0., 1., 0.],
[-1., 0., 0.]]],
[[[ 0., 0., 0.],
[-1., 1., 0.],
[ 0., 0., 0.]]],
[[[-1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]]],
[[[ 0., -1., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]]],
[[[ 0., 0., -1.],
[ 0., 1., 0.],
[ 0., 0., 0.]]]],
dtype=np.float32
)
connect_filter = np.array([[[
[1., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]]]],
dtype=np.float32
)
return {
'gaussian_filter_horizontal.weight': map_func(gaussian_filter_horizontal),
'gaussian_filter_vertical.weight': map_func(gaussian_filter_vertical),
'sobel_filter_horizontal.weight': map_func(sobel_filter_horizontal),
'sobel_filter_vertical.weight': map_func(sobel_filter_vertical),
'directional_filter.weight': map_func(directional_filter),
'connect_filter.weight': map_func(connect_filter)
}
def auto_canny(image, input=None, sigma=0.33, canny_func=cv2.Canny, scale=1):
# Compute the median of the single-channel pixel intensities
v = np.median(image)
# Choose suitable lower and upper thresholds from the median, then apply them
lower = int(max(0, (1.0 - sigma) * v)) * scale
upper = int(min(255, (1.0 + sigma) * v)) * scale
if input is not None:
edged = canny_func(input, lower, upper)
else:
edged = canny_func(image, lower, upper)
return edged
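# Hedged usage sketch (illustrative addition, not part of the original module):
# run auto_canny on a synthetic grayscale image; the thresholds are derived
# from the image median as shown above.
if __name__ == '__main__':
    demo = (np.random.rand(128, 128) * 255).astype(np.uint8)
    edges = auto_canny(demo)
    print(edges.shape)  # expected: (128, 128)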
``` |
{
"source": "jm12138/car-behavioral-cloning-paddle",
"score": 2
} |
#### File: car-behavioral-cloning-paddle/car/model.py
```python
import paddle.nn as nn
def build_model(keep_prob=0.5):
model = nn.Sequential(
nn.Conv2D(in_channels=3,
out_channels=24,
kernel_size=5,
stride=2,
padding='valid',
data_format='NHWC'), nn.ELU(),
nn.Conv2D(in_channels=24,
out_channels=36,
kernel_size=5,
stride=2,
padding='valid',
data_format='NHWC'), nn.ELU(),
nn.Conv2D(in_channels=36,
out_channels=48,
kernel_size=5,
stride=2,
padding='valid',
data_format='NHWC'), nn.ELU(),
nn.Conv2D(in_channels=48,
out_channels=64,
kernel_size=(3, 3),
padding='valid',
data_format='NHWC'), nn.ELU(),
nn.Conv2D(in_channels=64,
out_channels=64,
kernel_size=(3, 3),
padding='valid',
data_format='NHWC'), nn.ELU(), nn.Dropout(keep_prob),
nn.Flatten(), nn.Linear(1152, 100), nn.ELU(), nn.Linear(100, 50),
nn.ELU(), nn.Linear(50, 10), nn.ELU(), nn.Linear(10, 1))
return model
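# Hedged usage sketch (illustrative addition, not part of the original module):
# the network expects NHWC input of shape (66, 200, 3) -- the crop size used in
# the NVIDIA behavioral-cloning pipeline -- which produces the 1152-wide
# flattened feature vector expected by the first Linear layer.
if __name__ == '__main__':
    import paddle
    model = build_model()
    steering = model(paddle.randn((1, 66, 200, 3)))
    print(steering.shape)  # expected: [1, 1]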
``` |
{
"source": "jm12138/deep_sort_paddle",
"score": 3
} |
#### File: jm12138/deep_sort_paddle/deepsort.py
```python
import numpy as np
from model import Detector, Embedding
from deep_sort import NearestNeighborDistanceMetric, Detection, Tracker
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(
self,
det_model_dir,
emb_model_dir,
use_gpu=False,
run_mode='fluid',
use_dynamic_shape=False,
trt_min_shape=1,
trt_max_shape=1280,
trt_opt_shape=640,
trt_calib_mode=False,
cpu_threads=1,
enable_mkldnn=False,
threshold=0.5,
max_cosine_distance=0.2,
nn_budget=100,
max_iou_distance=0.9,
max_age=70,
n_init=3
):
self.threshold = threshold
self.detector = Detector(
model_dir=det_model_dir,
use_gpu=use_gpu,
run_mode=run_mode,
use_dynamic_shape=use_dynamic_shape,
trt_min_shape=trt_min_shape,
trt_max_shape=trt_max_shape,
trt_opt_shape=trt_opt_shape,
trt_calib_mode=trt_calib_mode,
cpu_threads=cpu_threads,
enable_mkldnn=enable_mkldnn
)
self.emb = Embedding(
emb_model_dir,
use_gpu,
enable_mkldnn,
cpu_threads
)
metric = NearestNeighborDistanceMetric(
"cosine",
max_cosine_distance,
nn_budget
)
self.tracker = Tracker(
metric,
max_iou_distance=max_iou_distance,
max_age=max_age,
n_init=n_init
)
def update(self, ori_img):
self.height, self.width = ori_img.shape[:2]
results = self.detector.predict(
ori_img[np.newaxis, ...],
self.threshold
)
if results is None:
return None
else:
tlwh, xyxy, confidences = results
if not confidences.tolist():
return None
# generate detections
features = self.get_features(xyxy, ori_img)
detections = [Detection(tlwh[i], conf, features[i])
for i, conf in enumerate(confidences)]
# update tracker
self.tracker.predict()
self.tracker.update(detections)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
box = track.to_tlbr()
x1, y1, x2, y2 = box
track_id = track.track_id
outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=np.int))
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
def get_features(self, xyxy, ori_img):
crops = []
for bbox in xyxy:
crop = ori_img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
crops.append(crop)
features = self.emb.predict(crops)
return features
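# Hedged usage sketch (hypothetical model directories and frame variable,
# not part of the original module):
#
#     tracker = DeepSort(det_model_dir='det_model', emb_model_dir='emb_model')
#     tracks = tracker.update(frame)
#     # ``tracks`` is an ndarray of [x1, y1, x2, y2, track_id] rows, or None
#     # when nothing is detected in the frame.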
``` |
{
"source": "jm12138/HED-Paddle",
"score": 3
} |
#### File: HED-Paddle/model/hed.py
```python
import paddle
import paddle.nn as nn
class HEDBlock(nn.Layer):
def __init__(self, in_channels, out_channels, num_convs, with_pool=True):
super().__init__()
# VGG Block
if with_pool:
pool = nn.MaxPool2D(kernel_size=2, stride=2)
self.add_sublayer('pool', pool)
conv1 = nn.Conv2D(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
relu = nn.ReLU()
self.add_sublayer('conv1', conv1)
self.add_sublayer('relu1', relu)
for _ in range(num_convs-1):
conv = nn.Conv2D(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
self.add_sublayer(f'conv{_+2}', conv)
self.add_sublayer(f'relu{_+2}', relu)
self.layer_names = [name for name in self._sub_layers.keys()]
# Score Layer
self.score = nn.Conv2D(
in_channels=out_channels, out_channels=1, kernel_size=1, stride=1, padding=0)
def forward(self, input):
for name in self.layer_names:
input = self._sub_layers[name](input)
return input, self.score(input)
class HED(nn.Layer):
def __init__(self,
channels=[3, 64, 128, 256, 512, 512],
nums_convs=[2, 2, 3, 3, 3],
with_pools=[False, True, True, True, True]):
super().__init__()
'''
HED model implementation in Paddle.
Fix the padding parameter and use simple Bilinear Upsampling.
'''
assert (len(channels) - 1) == len(nums_convs), '(len(channels) -1) != len(nums_convs).'
# HED Blocks
for index, num_convs in enumerate(nums_convs):
block = HEDBlock(in_channels=channels[index], out_channels=channels[index+1], num_convs=num_convs, with_pool=with_pools[index])
self.add_sublayer(f'block{index+1}', block)
self.layer_names = [name for name in self._sub_layers.keys()]
# Output Layers
self.out = nn.Conv2D(in_channels=len(nums_convs), out_channels=1, kernel_size=1, stride=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, input):
h, w = input.shape[2:]
scores = []
for index, name in enumerate(self.layer_names):
input, score = self._sub_layers[name](input)
if index > 0:
score = nn.functional.upsample(score, size=[h, w], mode='bilinear')
scores.append(score)
output = self.out(paddle.concat(scores, 1))
return self.sigmoid(output)
if __name__ == '__main__':
model = HED()
out = model(paddle.randn((1, 3, 256, 256)))
print(out.shape)
``` |
{
"source": "jm12138/MLP-Mixer-Paddle",
"score": 3
} |
#### File: jm12138/MLP-Mixer-Paddle/model.py
```python
import paddle
import paddle.nn as nn
class MlpBlock(nn.Layer):
def __init__(self, features_dim, mlp_dim):
super().__init__()
self.fc_0 = nn.Linear(features_dim, mlp_dim)
self.fc_1 = nn.Linear(mlp_dim, features_dim)
def forward(self, x):
y = self.fc_0(x)
y = nn.functional.gelu(y)
y = self.fc_1(y)
return y
class MixerBlock(nn.Layer):
def __init__(self, token_dim, channels_dim,
tokens_mlp_dim, channels_mlp_dim,
norm_layer=nn.LayerNorm, epsilon=1e-6):
super().__init__()
self.norm_0 = norm_layer(channels_dim, epsilon=epsilon)
self.token_mixing = MlpBlock(token_dim, tokens_mlp_dim)
self.norm_1 = norm_layer(channels_dim, epsilon=epsilon)
self.channel_mixing = MlpBlock(channels_dim, channels_mlp_dim)
def forward(self, x):
y = self.norm_0(x)
y = y.transpose((0, 2, 1))
y = self.token_mixing(y)
y = y.transpose((0, 2, 1))
x = x + y
y = self.norm_1(x)
y = self.channel_mixing(y)
x = x + y
return x
class MlpMixer(nn.Layer):
def __init__(self, img_size=(224, 224), patch_size=(16, 16),
num_blocks=12, hidden_dim=768,
tokens_mlp_dim=384, channels_mlp_dim=3072,
norm_layer=nn.LayerNorm, epsilon=1e-6,
class_dim=1000):
super().__init__()
self.class_dim = class_dim
self.stem = nn.Conv2D(
3, hidden_dim, kernel_size=patch_size, stride=patch_size)
blocks = [
MixerBlock(
(img_size[0] // patch_size[0]) ** 2,
hidden_dim,
tokens_mlp_dim,
channels_mlp_dim,
norm_layer,
epsilon
) for _ in range(num_blocks)
]
self.blocks = nn.Sequential(*blocks)
self.pre_head_layer_norm = norm_layer(hidden_dim, epsilon=epsilon)
if class_dim > 0:
self.head = nn.Linear(hidden_dim, class_dim)
def forward(self, inputs):
x = self.stem(inputs)
x = x.transpose((0, 2, 3, 1))
x = x.flatten(1, 2)
x = self.blocks(x)
x = self.pre_head_layer_norm(x)
if self.class_dim > 0:
x = x.mean(axis=1)
x = self.head(x)
return x
def mixer_b(pretrained=False, **kwargs):
'''
Model: MLP-mixer-base
Params:
pretrained: load the pretrained model
img_size: input image size
patch_size: patch size
num_classes: number of classes
num_blocks: number of MixerBlock
hidden_dim: dim of hidden
tokens_mlp_dim: dim of tokens_mlp
channels_mlp_dim: dim of channels_mlp
'''
model = MlpMixer(
hidden_dim=768,
num_blocks=12,
tokens_mlp_dim=384,
channels_mlp_dim=3072,
**kwargs
)
if pretrained:
path = paddle.utils.download.get_weights_path_from_url('https://bj.bcebos.com/v1/ai-studio-online/8fcd0b6ba98042d68763bbcbfe96375cbfd97ffed8334ac09787ef73ecf9989f?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-B_16.pdparams')
model.set_dict(paddle.load(path))
return model
def mixer_l(pretrained=False, **kwargs):
'''
Model: MLP-mixer-large
Params:
pretrained: load the pretrained model
img_size: input image size
patch_size: patch size
num_classes: number of classes
num_blocks: number of MixerBlock
hidden_dim: dim of hidden
tokens_mlp_dim: dim of tokens_mlp
channels_mlp_dim: dim of channels_mlp
'''
model = MlpMixer(
hidden_dim=1024,
num_blocks=24,
tokens_mlp_dim=512,
channels_mlp_dim=4096,
**kwargs
)
if pretrained:
path = paddle.utils.download.get_weights_path_from_url('https://bj.bcebos.com/v1/ai-studio-online/ca74ababd4834e34b089c1485989738de4fdf6a97be645ed81b6e39449c5815c?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-L_16.pdparams')
model.set_dict(paddle.load(path))
return model
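# Hedged usage sketch (illustrative addition, not part of the original module):
# build the base model without pretrained weights and run one forward pass.
if __name__ == '__main__':
    model = mixer_b(pretrained=False)
    logits = model(paddle.randn((1, 3, 224, 224)))
    print(logits.shape)  # expected: [1, 1000]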
``` |
{
"source": "jm12138/Paddle-CheXNet",
"score": 2
} |
#### File: Paddle-CheXNet/chexnet/densenet.py
```python
import os
import math
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Uniform
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.utils.download import get_weights_path_from_url
def load_dygraph_pretrain(model, path=None):
if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
raise ValueError("Model pretrain path {} does not "
"exists.".format(path))
param_state_dict = paddle.load(path + ".pdparams")
model.set_dict(param_state_dict)
return
def load_dygraph_pretrain_from_url(model, pretrained_url, use_ssld):
if use_ssld:
pretrained_url = pretrained_url.replace("_pretrained",
"_ssld_pretrained")
local_weight_path = get_weights_path_from_url(pretrained_url).replace(
".pdparams", "")
load_dygraph_pretrain(model, path=local_weight_path)
return
MODEL_URLS = {
"DenseNet121":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams",
"DenseNet161":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams",
"DenseNet169":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams",
"DenseNet201":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams",
"DenseNet264":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams",
}
class BNACConvLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
pad=0,
groups=1,
act="relu",
name=None):
super(BNACConvLayer, self).__init__()
self._batch_norm = BatchNorm(
num_channels,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
self._conv = Conv2D(in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
def forward(self, input):
y = self._batch_norm(input)
y = self._conv(y)
return y
class DenseLayer(nn.Layer):
def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None):
super(DenseLayer, self).__init__()
self.dropout = dropout
self.bn_ac_func1 = BNACConvLayer(num_channels=num_channels,
num_filters=bn_size * growth_rate,
filter_size=1,
pad=0,
stride=1,
name=name + "_x1")
self.bn_ac_func2 = BNACConvLayer(num_channels=bn_size * growth_rate,
num_filters=growth_rate,
filter_size=3,
pad=1,
stride=1,
name=name + "_x2")
if dropout:
self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer")
def forward(self, input):
conv = self.bn_ac_func1(input)
conv = self.bn_ac_func2(conv)
if self.dropout:
conv = self.dropout_func(conv)
conv = paddle.concat([input, conv], axis=1)
return conv
class DenseBlock(nn.Layer):
def __init__(self,
num_channels,
num_layers,
bn_size,
growth_rate,
dropout,
name=None):
super(DenseBlock, self).__init__()
self.dropout = dropout
self.dense_layer_func = []
pre_channel = num_channels
for layer in range(num_layers):
self.dense_layer_func.append(
self.add_sublayer(
"{}_{}".format(name, layer + 1),
DenseLayer(num_channels=pre_channel,
growth_rate=growth_rate,
bn_size=bn_size,
dropout=dropout,
name=name + '_' + str(layer + 1))))
pre_channel = pre_channel + growth_rate
def forward(self, input):
conv = input
for func in self.dense_layer_func:
conv = func(conv)
return conv
class TransitionLayer(nn.Layer):
def __init__(self, num_channels, num_output_features, name=None):
super(TransitionLayer, self).__init__()
self.conv_ac_func = BNACConvLayer(num_channels=num_channels,
num_filters=num_output_features,
filter_size=1,
pad=0,
stride=1,
name=name)
self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0)
def forward(self, input):
y = self.conv_ac_func(input)
y = self.pool2d_avg(y)
return y
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
pad=0,
groups=1,
act="relu",
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
def forward(self, input):
y = self._conv(input)
y = self._batch_norm(y)
return y
class DenseNet(nn.Layer):
def __init__(self, layers=60, bn_size=4, dropout=0, class_num=1000):
super(DenseNet, self).__init__()
supported_layers = [121, 161, 169, 201, 264]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(
supported_layers, layers)
densenet_spec = {
121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32]),
264: (64, 32, [6, 12, 64, 48])
}
num_init_features, growth_rate, block_config = densenet_spec[layers]
self.conv1_func = ConvBNLayer(num_channels=3,
num_filters=num_init_features,
filter_size=7,
stride=2,
pad=3,
act='relu',
name="conv1")
self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
self.block_config = block_config
self.dense_block_func_list = []
self.transition_func_list = []
pre_num_channels = num_init_features
num_features = num_init_features
for i, num_layers in enumerate(block_config):
self.dense_block_func_list.append(
self.add_sublayer(
"db_conv_{}".format(i + 2),
DenseBlock(num_channels=pre_num_channels,
num_layers=num_layers,
bn_size=bn_size,
growth_rate=growth_rate,
dropout=dropout,
name='conv' + str(i + 2))))
num_features = num_features + num_layers * growth_rate
pre_num_channels = num_features
if i != len(block_config) - 1:
self.transition_func_list.append(
self.add_sublayer(
"tr_conv{}_blk".format(i + 2),
TransitionLayer(num_channels=pre_num_channels,
num_output_features=num_features // 2,
name='conv' + str(i + 2) + "_blk")))
pre_num_channels = num_features // 2
num_features = num_features // 2
self.batch_norm = BatchNorm(
num_features,
act="relu",
param_attr=ParamAttr(name='conv5_blk_bn_scale'),
bias_attr=ParamAttr(name='conv5_blk_bn_offset'),
moving_mean_name='conv5_blk_bn_mean',
moving_variance_name='conv5_blk_bn_variance')
self.pool2d_avg = AdaptiveAvgPool2D(1)
stdv = 1.0 / math.sqrt(num_features * 1.0)
self.num_features = num_features
self.out = Linear(num_features,
class_num,
weight_attr=ParamAttr(initializer=Uniform(
-stdv, stdv),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset"))
def forward(self, input):
conv = self.conv1_func(input)
conv = self.pool2d_max(conv)
for i, num_layers in enumerate(self.block_config):
conv = self.dense_block_func_list[i](conv)
if i != len(self.block_config) - 1:
conv = self.transition_func_list[i](conv)
conv = self.batch_norm(conv)
y = self.pool2d_avg(conv)
y = paddle.flatten(y, start_axis=1, stop_axis=-1)
y = self.out(y)
return y
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def DenseNet121(pretrained=False, use_ssld=False, **kwargs):
model = DenseNet(layers=121, **kwargs)
_load_pretrained(pretrained,
model,
MODEL_URLS["DenseNet121"],
use_ssld=use_ssld)
return model
def DenseNet161(pretrained=False, use_ssld=False, **kwargs):
model = DenseNet(layers=161, **kwargs)
_load_pretrained(pretrained,
model,
MODEL_URLS["DenseNet161"],
use_ssld=use_ssld)
return model
def DenseNet169(pretrained=False, use_ssld=False, **kwargs):
model = DenseNet(layers=169, **kwargs)
_load_pretrained(pretrained,
model,
MODEL_URLS["DenseNet169"],
use_ssld=use_ssld)
return model
def DenseNet201(pretrained=False, use_ssld=False, **kwargs):
model = DenseNet(layers=201, **kwargs)
_load_pretrained(pretrained,
model,
MODEL_URLS["DenseNet201"],
use_ssld=use_ssld)
return model
def DenseNet264(pretrained=False, use_ssld=False, **kwargs):
model = DenseNet(layers=264, **kwargs)
_load_pretrained(pretrained,
model,
MODEL_URLS["DenseNet264"],
use_ssld=use_ssld)
return model
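# Hedged usage sketch (illustrative addition, not part of the original module):
# build a DenseNet-121 without pretrained weights and run one forward pass.
if __name__ == '__main__':
    model = DenseNet121(pretrained=False)
    out = model(paddle.randn((1, 3, 224, 224)))
    print(out.shape)  # expected: [1, 1000]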
```
#### File: jm12138/Paddle-CheXNet/train.py
```python
import math
import argparse
from paddle import Model
from paddle.nn import BCELoss
from paddle.optimizer import Adam
from paddle.vision import transforms
from paddle.callbacks import EarlyStopping
from paddle.optimizer.lr import PiecewiseDecay
from chexnet.model import CheXNet
from chexnet.data import ChestXrayDataSet
from chexnet.utility import N_CLASSES, CLASS_NAMES, AUROC
def train(args):
model = CheXNet(N_CLASSES)
normalize = transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
train_dataset = ChestXrayDataSet(data_dir=args.data_dir,
image_list_file=args.train_list,
transform=transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), normalize
]))
val_dataset = ChestXrayDataSet(data_dir=args.data_dir,
image_list_file=args.val_list,
transform=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(), normalize
]))
steps_per_epoch = math.ceil(len(train_dataset) / args.batch_size)
decay_epochs = [
int(epoch_num) for epoch_num in args.decay_epochs.split(',')
]
scheduler_lr = PiecewiseDecay(
boundaries=[epoch * steps_per_epoch for epoch in decay_epochs],
values=[
args.learning_rate * (args.decay_factor**i)
for i in range(len(decay_epochs) + 1)
],
last_epoch=-1,
verbose=False)
opt = Adam(scheduler_lr, parameters=model.parameters())
model = Model(model)
model.prepare(optimizer=opt,
loss=BCELoss(),
metrics=AUROC(num_classes=N_CLASSES,
class_names=CLASS_NAMES))
early_stopping = EarlyStopping(monitor='AUROC_avg',
mode='max',
patience=10,
verbose=1,
min_delta=0,
baseline=None,
save_best_model=True)
model.fit(train_data=train_dataset,
eval_data=val_dataset,
batch_size=args.batch_size,
epochs=args.epoch,
eval_freq=1,
log_freq=10,
save_dir=args.save_dir,
save_freq=1,
verbose=2,
drop_last=False,
shuffle=True,
num_workers=0,
callbacks=[early_stopping])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/images')
parser.add_argument('--train_list',
type=str,
default='dataset/labels/train_list.txt')
parser.add_argument('--val_list',
type=str,
default='dataset/labels/val_list.txt')
parser.add_argument('--save_dir', type=str, default='save')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--epoch', type=int, default=20)
parser.add_argument('--decay_epochs', type=str, default='10,15,18')
parser.add_argument('--decay_factor', type=float, default=0.1)
args = parser.parse_args()
train(args)
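# Hedged command-line sketch (not part of the original script), assuming the
# default dataset layout described by the argument defaults above:
#
#     python train.py --data_dir dataset/images --batch_size 64 --epoch 20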
``` |
{
"source": "jm1261/GMRAnalysis",
"score": 3
} |
#### File: GMRAnalysis/GMRScripts/experiment_settings.py
```python
import os
def ReadInSettings(dir_name):
'''
Reads in the experiment settings file outputted from GMRX setup detailing
the number of images, integration time, initial/final wavelength and step,
time step and image numbers.
Args:
dir_name, string directory containing experiment settings document
Returns:
a dict of experiment settings (integration time, slit width, initial/final wavelength and step, time step) and the list of image file names
'''
exp_settings = {
'int_time': 0.0,
'slit': 0.0,
'wav_i': 0.0,
'wav_f': 0.0,
'wav_s': 0.0,
'time_s': 0,
'filenames': []
}
with open(os.path.join(dir_name, 'experiment_settings.txt'), 'r') as exp:
lines = exp.readlines()
for line in lines:
if not line.strip():
continue
if 'integration time' in line.lower():
exp_settings['int_time'] = float(line.split(':')[1].strip())
if 'slit widths' in line.lower():
exp_settings['slit'] = int(line.split(':')[1].strip())
if 'initial wavelength' in line.lower():
exp_settings['wav_i'] = float(line.split(':')[1].strip())
if 'final wavelength' in line.lower():
exp_settings['wav_f'] = float(line.split(':')[1].strip())
if 'wavelength step' in line.lower():
exp_settings['wav_s'] = float(line.split(':')[1].strip())
if 'time step' in line.lower():
exp_settings['time_s'] = int(line.split(':')[1].strip())
if 'hs_img_' in line.lower():
exp_settings['filenames'].append(line.split('\t')[0].strip())
return exp_settings
def FindWavSettings(dir_name):
'''
Reads in the experiment settings file outputted from GMRX setups detailing
the number of images, integration time, initial/final wavelength and step,
time step and image numbers.
Args:
dir_name, string directory containing experiment settings document
Returns:
only returns the wavelength initial/final/step parameters
'''
my_settings = ReadInSettings(dir_name)
wavelength_settings = []
wavelength_settings.append(my_settings['wav_i'])
wavelength_settings.append(my_settings['wav_f'])
wavelength_settings.append(my_settings['wav_s'])
return wavelength_settings
if __name__ == '__main__':
main_dir = '/Users/chris/Documents/SoftwareDev/Python' \
'/GMR Analysis Project/Put_Data_Here'
exp_settings = ReadInSettings(main_dir)
print(exp_settings)
# wavelength_settings = FindWavSettings(img_dir)
``` |
{
"source": "JM1F/Slimer",
"score": 3
} |
#### File: JM1F/Slimer/Slimer.py
```python
import pygame as pg
import math
import random
from collections import *
import heapq
import sys
import os
# // Library imports //
WIDTH = 1024
HEIGHT = 768
vector = pg.math.Vector2
TILESIZE = 64
GRIDWIDTH = WIDTH / TILESIZE
GRIDHEIGHT = HEIGHT / TILESIZE
PLAYERVELOCITY = 5
# // Globals //
class PriorityQueue:
# Priority queue initialize
def __init__(self):
self.nodes = []
def put(self, node, cost):
# Put an item into the Queue
heapq.heappush(self.nodes, (cost, node))
def get(self):
# Return the item in slot one and pop it
return heapq.heappop(self.nodes)[1]
def empty(self):
# Returns True if the queue is empty, otherwise False
return len(self.nodes) == 0
class Wall(pg.sprite.Sprite):
# Initialize wall with x and y coordinate and the image
def __init__(self, x, y, image):
pg.sprite.Sprite.__init__(self)
self.image = image
self.rect = pg.Rect(x, y, 64, 64)
self.x = x
self.y = y
class Door(pg.sprite.Sprite):
def __init__(self, x, y, image, doordirection):
pg.sprite.Sprite.__init__(self)
self.image = image
# Makes sure that the door is placed on the
# outer edge with
# the right hitbox for the four different directions.
if doordirection == 1 or doordirection == 2:
self.rect = pg.Rect(x, y, 32, 128)
if doordirection == 3 or doordirection == 4:
self.rect = pg.Rect(x, y, 128, 32)
self.x = x
self.y = y
class ClosedDoor(pg.sprite.Sprite):
def __init__(self, x, y, image, doordirection):
pg.sprite.Sprite.__init__(self)
self.image = image
# Makes sure that the door is placed on the
# outer edge with
# the right hitbox for the four different directions.
if doordirection == 1 or doordirection == 2:
self.rect = pg.Rect(x, y, 32, 128)
if doordirection == 3 or doordirection == 4:
self.rect = pg.Rect(x, y, 128, 16)
self.x = x
self.y = y
class Projectile(pg.sprite.Sprite):
def __init__(self, startposx, startposy, endposx, endposy, velocity, screen, projtype):
pg.sprite.Sprite.__init__(self)
# Checks what type of projectile
if projtype == 1:
self.image = pg.image.load("Projectile.png")
if projtype == 2:
self.image = pg.image.load("orb_red.png")
self.rect = self.image.get_rect()
self.x = startposx
self.y = startposy
self.endx = endposx
self.endy = endposy
self.screen = screen
# Create x and y vectors for projectiles
self.difx = self.x - self.endx
self.dify = self.y - self.endy
self.vel = velocity
def update(self):
# Calls the off-screen check, which checks whether the hitbox of
# the projectile goes off screen; if so, the projectile is terminated
if self.OffScreencheck() == True:
self.kill()
print("Killed")
# Computing the hypotenuse, equal to the distance to the target
self.dist = math.hypot(self.difx, self.dify)
try:
# Calculate the velocity
self.vx = (self.difx / self.dist) * self.vel
self.vy = (self.dify / self.dist) * self.vel
# Move the projectile taking away the velocity each time
self.x -= self.vx
self.y -= self.vy
self.rect.center = (self.x, self.y)
except ZeroDivisionError:
# Except statement to catch a division-by-zero
# error in the case that
# self.dist is equal to zero.
self.kill()
def OffScreencheck(self):
# Check to see if the projectile is off screen
if (self.rect[0] + 16) < 0 or self.rect[0] > WIDTH or (self.rect[1] + 16) < 0 or self.rect[1] > HEIGHT:
return True
else:
return
class Enemy(pg.sprite.Sprite):
def __init__(self, map, startposx, startposy, health = 60):
pg.sprite.Sprite.__init__(self)
self.imgindex = []
# Initializing all images
self.imgindex.append(pg.image.load("emain1.png").convert_alpha())
self.imgindex.append(pg.image.load("emain2.png").convert_alpha())
self.imgindex.append(pg.image.load("emain3.png").convert_alpha())
self.imgindex.append(pg.image.load("emain4.png").convert_alpha())
self.imgindex.append(pg.image.load("emain5.png").convert_alpha())
self.imgindex.append(pg.image.load("emain6.png").convert_alpha())
self.imgindex.append(pg.image.load("emain7.png").convert_alpha())
self.imgindex.append(pg.image.load("emain8.png").convert_alpha())
self.vx, self.vy = 0, 0
self.index = 0
self.room = map
self.image = self.imgindex[self.index]
self.timer = 0
self.rect = self.image.get_rect()
self.rect.x = startposx
self.health = health
self.rect.y = startposy
self.center = self.rect.center
# Setting the speed of the enemy
self.enemy_speed = 4
self.hitbox_rect = pg.Rect(startposx, startposy, 60,60)
def getCenter(self):
# Returns enemy sprites center
self.center = self.rect.center
return self.center
def update(self, proj_group, playercenter, node, wall_group, itemnode, specialitem):
self.goal = node
# Checks to see if the enemies health is at zero, then
# the object is deleted with the .kill() command
if self.health == 0:
self.kill()
return True
# Returns the node that the enemy is in
self.currentnode = self.get_node()
if itemnode == 1:
# Breadth first search
self.path = breadthfirst_search(self.room,specialitem, self.currentnode)
else:
# A star search
self.path = aStar(self.room,self.goal, self.currentnode)
if self.currentnode != False:
# Checks to see if the current node is on the goal node
if self.currentnode != self.goal:
try:
# Add the current node to the next node in the list.
self.current = self.currentnode + self.path[(self.currentnode)]
except:
# Do nothing
pass
# Pass the current node into a back-up node for other
# iterations
self.backup = self.currentnode
else:
# Checks to see if the current node is the goal node
if self.currentnode != self.goal:
try:
self.current = self.backup + self.path[(self.backup)]
except:
pass
if self.currentnode == False:
pass
else:
# Works out the directional vector
self.dif = self.currentnode - self.current
# Checking what direction the next path is
if self.dif == [1, 0]:
self.rect.x -= self.enemy_speed
elif self.dif == [-1, 0]:
self.rect.x += self.enemy_speed
elif self.dif == [0, 1]:
self.rect.y -= self.enemy_speed
elif self.dif == [0, -1]:
self.rect.y += self.enemy_speed
# Checks to see if the enemy gets hit by a projectile
# and takes away 10 health
if pg.sprite.spritecollide(self, proj_group, True):
self.health = self.health - 10
self.rect = self.hitbox_rect
self.playercenter = playercenter
# Rotate towards player procedure
self.rotatetowardsPlayer()
self.random_timer = random.randint(2, 5)
self.timer += self.random_timer
# This is where you a able to adjust the
# animation time, for the cycles of the pictures.
if (self.timer % 20) == 0:
self.index += 1
if self.index >= len(self.imgindex):
self.index = 0
self.image = self.imgindex[self.index]
# Rotates the enemy to look towards the player
def rotatetowardsPlayer(self):
self.angle_vec = math.atan2((self.center[0] - self.playercenter[0]),(self.center[1] - self.playercenter[1]))
# The angle is converted from radians to degrees
self.angle = math.degrees(self.angle_vec)
# Rotates the image about an angle
self.newimage = pg.transform.rotate(self.image, self.angle - 180)
oldcenter = self.rect.center
self.newrect = self.newimage.get_rect()
self.newrect.center = oldcenter
def get_node(self):
# Need to be set to False again for
# when the procedure is called again in the update section
self.iny = False
self.inx = False
self.playercenter = self.getCenter()
for i in range(0, WIDTH, TILESIZE):
# Checking if the player's center "x"
# coordinate is between a tile's area coordinates
if i == self.rect.x and (i + 64) == (self.rect.x + 64) :
self.coordx = i
self.inx = True
for j in range(0, HEIGHT, TILESIZE):
if j == self.rect.y and (j + 64) == (self.rect.y + 64) :
self.coordy = j
self.iny = True
# Searching through the tile list and
# mapping out what tile the player's center is in
if self.iny == True and self.inx == True:
# Dividing the x and y coordinates by the
# tile size (64) to convert them into grid indices
x = int(self.coordx / 64)
y = int(self.coordy / 64)
return (x, y)
return False
def draw(self):
self.screen = maingame.returnGameScreen()
if self.health == 0:
return True
# Only draws to the screen if the health is above 0
else:
try:
self.screen.blit(self.newimage, self.newrect)
except:
self.screen.blit(self.image, self.rect)
class Item(pg.sprite.Sprite):
def __init__(self, startposx, startposy, item_number):
# Initializing the item sprite
pg.sprite.Sprite.__init__(self)
# Assigning items to the correct image
if item_number == 1:
self.image = pg.image.load("item1.png")
if item_number == 2:
self.image = pg.image.load("orb_green.png")
self.rect = pg.Rect((startposx + 16), (startposy + 16), 32, 32)
self.x = startposx
self.y = startposy
class PlayerSprite(pg.sprite.Sprite):
def __init__(self, startposx, startposy, health = 100):
pg.sprite.Sprite.__init__(self)
self.imgindex = []
# Loading all animation images
self.imgindex.append(pg.image.load("main1.png").convert_alpha())
self.imgindex.append(pg.image.load("main2.png").convert_alpha())
self.imgindex.append(pg.image.load("main3.png").convert_alpha())
self.imgindex.append(pg.image.load("main4.png").convert_alpha())
self.imgindex.append(pg.image.load("main5.png").convert_alpha())
self.imgindex.append(pg.image.load("main6.png").convert_alpha())
self.imgindex.append(pg.image.load("main7.png").convert_alpha())
self.imgindex.append(pg.image.load("main8.png").convert_alpha())
self.index = 0
self.health = health
self.image = self.imgindex[self.index]
self.timer = 0
self.pos = vector(startposx, startposy) * TILESIZE
self.rect = self.image.get_rect()
self.center = self.rect.center
self.hitbox_rect = pg.Rect(startposx, startposy, 60,60)
self.original_image = self.image.copy()
self.vx, self.vy = 0, 0
self.rect.x = startposx
self.rect.y = startposy
self.hittimer = 0
self.boss_hittimer = 0
def keys(self):
# Read the pressed keys to determine the direction the player is moving
self.vx, self.vy = 0, 0
keys = pg.key.get_pressed()
if keys[pg.K_w]:
self.vy = -PLAYERVELOCITY
if keys[pg.K_a]:
self.vx = -PLAYERVELOCITY
if keys[pg.K_s]:
self.vy = PLAYERVELOCITY
if keys[pg.K_d]:
self.vx = PLAYERVELOCITY
if self.vx != 0 and self.vy != 0:
self.vx *= 0.7071
self.vy *= 0.7071
def rotate(self):
# The player rotates to face the mouse
self.mousex, self.mousey = pg.mouse.get_pos()
self.PLAYERCENTER = self.center
self.angle_vec = math.atan2((self.mousex - self.PLAYERCENTER[0]),(self.mousey - self.PLAYERCENTER[1]))
self.angle = math.degrees(self.angle_vec)
# Rotate the image
self.newimage = pg.transform.rotate(self.image, self.angle)
oldcenter = self.rect.center
self.newrect = self.newimage.get_rect()
self.newrect.center = oldcenter
def draw(self):
self.screen = maingame.returnGameScreen()
self.percentage = self.health / 100
xcoord = self.percentage * 416
# If the boss is not in that room draw the health bar at
# the bottom
if self.boss == None:
if self.health >= 0:
pg.draw.rect(self.screen,(0, 0, 0),[16, (HEIGHT -48), 416, 42])
pg.draw.rect(self.screen,(95, 99, 88),[20,(HEIGHT -48), 416, 38])
pg.draw.rect(self.screen,(227, 2, 43),[20,(HEIGHT -48), xcoord, 38])
# If in the bossroom the health bar is raised up
else:
if self.health >= 0:
pg.draw.rect(self.screen,(0, 0, 0),[70, (HEIGHT -112), 416, 42])
pg.draw.rect(self.screen,(95, 99, 88),[74,(HEIGHT -112), 416, 38])
pg.draw.rect(self.screen,(227, 2, 43),[74,(HEIGHT -112), xcoord, 38])
self.screen.blit(self.newimage, self.newrect)
def update(self, wall_group, closeddoor_group, closedEdoor_group, doors_group, Edoors_group, enemies, items, boss_projectiles,boss):
self.rect = self.hitbox_rect
self.boss = boss
self.keys()
self.rotate()
# Move player in the x coordinate
self.rect.x += self.vx
# - Collisions -
# Checks to see if the player can pick up a health pack
if self.health != 100:
# When collided +10 health is gained
if pg.sprite.spritecollide(self, items, True):
self.health += 10
# Enemy collision between player
collision = pg.sprite.spritecollide(self, enemies, False)
if collision:
# If the hit timer MOD 100 is 0 then take off 10 health
if (self.hittimer % 100) == 0 and self.hittimer != 0:
self.health = self.health - 10
# Takes off 10 health as soon as the hitboxes connect.
if self.hittimer == 0:
self.health = self.health - 10
self.hittimer += 1
else:
self.hittimer = 0
# Checks to see if it's the boss room, by checking if the
# boss object is created
if boss != None:
boss_collision = pg.sprite.spritecollide(self, boss, False)
if boss_collision:
# If the hit timer MOD 100 is 0 then take
# off 25 health
if (self.boss_hittimer % 100) == 0 and self.boss_hittimer != 0:
self.health = self.health - 25
# Takes off 25 health as soon as the
# hitboxes connect.
if self.boss_hittimer == 0:
self.health = self.health - 25
self.boss_hittimer += 1
else:
self.boss_hittimer = 0
# Checks to see if the projectiles of the boss hits the
# player and takes off 15 health
if pg.sprite.spritecollide(self, boss_projectiles, True):
self.health = self.health - 15
# Delete the player object if the health falls to 0 or below
if self.health <= 0:
self.kill()
return True
# Collisions for the walls in the map
self.collisionlist = pg.sprite.spritecollide(self, wall_group, False)
for collided in self.collisionlist:
# Checks to see if the velocity is greater than 0
# in the X plane
if self.vx > 0:
self.rect.right = collided.rect.left
else:
self.rect.left = collided.rect.right
# Collisions for the closed doors on the map
self.collisionlist2 = pg.sprite.spritecollide(self, closeddoor_group, False)
for collided in self.collisionlist2:
if self.vx > 0:
self.rect.right = collided.rect.left
else:
self.rect.left = collided.rect.right
# Collision for the closed Exit doors on the map
self.collisionlist3 = pg.sprite.spritecollide(self, closedEdoor_group, False)
for collided in self.collisionlist3:
if self.vx > 0:
self.rect.right = collided.rect.left
else:
self.rect.left = collided.rect.right
# Move player in the y coordinate
self.rect.y += self.vy
# Collisions for the walls in the map
self.collisionlist = pg.sprite.spritecollide(self, wall_group, False)
for collided in self.collisionlist:
# Checks to see if the velocity is greater than 0
# in the Y plane
if self.vy > 0:
self.rect.bottom = collided.rect.top
else:
self.rect.top = collided.rect.bottom
# Collisions for the closed doors on the map
self.collisionlist2 = pg.sprite.spritecollide(self, closeddoor_group, False)
for collided in self.collisionlist2:
if self.vy > 0:
self.rect.bottom = collided.rect.top
else:
self.rect.top = collided.rect.bottom
# Collision for the closed Exit doors on the map for the "y" axis
self.collisionlist3 = pg.sprite.spritecollide(self, closedEdoor_group, False)
for collided in self.collisionlist3:
if self.vy > 0:
self.rect.bottom = collided.rect.top
else:
self.rect.top = collided.rect.bottom
# Collision for the open doors on the map
self.doorcollisionlist = pg.sprite.spritecollide(self, doors_group, False)
if len(self.doorcollisionlist) > 0:
return 1
# Collision for the Exit open doors on the map, returns 2 when collided
self.doorlist = pg.sprite.spritecollide(self, Edoors_group, False)
if len(self.doorlist) > 0:
return 2
self.timer += 4
# This is where you are able to adjust the
# animation time for the cycle of images.
if (self.timer % 20) == 0:
self.index += 1
if self.index >= len(self.imgindex):
self.index = 0
self.image = self.imgindex[self.index]
def LoadInto_OldMap(self, doordirection):
        # Check the door direction to load the player
        # in at the correct coordinates
if doordirection == 1:
self.rect.x = (WIDTH - 144)
self.rect.y = (352)
elif doordirection == 2:
self.rect.x = (64)
self.rect.y = (352)
elif doordirection == 3:
self.rect.x = (480)
self.rect.y = (64)
elif doordirection == 4:
self.rect.x = (512 - 32)
self.rect.y = (HEIGHT - 144)
def LoadInto_NewMap(self, doordirection):
        # Check the door direction to load the player
        # in at the correct coordinates
if doordirection == 1:
self.rect.x = (64)
self.rect.y = (352)
elif doordirection == 2:
self.rect.x = (WIDTH - 144)
self.rect.y = (352)
elif doordirection == 3:
self.rect.x = (512 - 32)
self.rect.y = (HEIGHT - 144)
elif doordirection == 4:
self.rect.x = (512 - 32)
self.rect.y = (64)
def getCenter(self):
# Return the player center
self.center = self.rect.center
return self.center
class MapGrid:
def __init__(self, width, height):
self.width = width
self.height = height
# Node connections in all four directions
self.node_connections = [vector(1, 0), vector(-1, 0), vector(0, 1), vector(0, -1)]
self.walls = []
self.enemies = []
def withinBoundary(self, node):
# Check if node is within the screen boundary
return 0 <= node.x < self.width and 0 <= node.y < self.height
def passable(self, node):
        # Return True if the node is not a wall, otherwise False
return node not in self.walls
def node_neighbours(self, node):
neighbours = [node + connection for connection in self.node_connections]
if (node[0] + node[1]) % 2:
# Reverses objects in a list
neighbours.reverse()
# Filters out nodes that are walls or outside the boundary
neighbours = filter(self.withinBoundary, neighbours)
neighbours = filter(self.passable, neighbours)
return neighbours
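# A* pathfinding over the map grid: nodes are expanded from a priority queue
# ordered by (path cost so far + heuristic), and for each node reached the
# path dictionary stores the vector step back towards the node it was reached
# from, so a route back to the start can be followed.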
def aStar(graph, start, end):
    # Initialize a priority queue holding (priority, node) tuples
    Pqueue = PriorityQueue()
    Pqueue.put((0, start))
    # Initializing the path and cost dictionaries
    path = {}
    cost = {}
    # Setting the starting node's parent to None and its cost to 0
    path[start] = None
    cost[start] = 0
# Iterate while the priority queue is not empty
while not Pqueue.empty():
        # Pop the node with the lowest (cost + heuristic) priority
        current = Pqueue.get()[1]
if current == end:
# Break used to stop the astar search when the
# current node and goal node are the same
break
# Check the next neighbouring nodes of the
# current node
for next in graph.node_neighbours(vector(current)):
next = vector_to_integer(next)
# Get the next node cost
next_cost = cost[current] + graph.cost(current, next)
if next not in cost or next_cost < cost[next]:
                    # Get the priority by adding the next cost and
                    # the heuristic value
                    priority = next_cost + heuristicValue(vector(end), vector(next))
                    cost[next] = next_cost
                    # Puts the node into the priority queue with its priority
                    Pqueue.put((priority, next))
path[next] = vector(current) - vector(next)
return path
def heuristicValue(a, b):
    # Return the heuristic value (Manhattan distance scaled by 10)
return (abs(a.x - b.x) + abs(a.y - b.y)) * 10
class WeightedGrid(MapGrid):
def __init__(self, width, height):
# Use of inheritance from the MapGrid
super().__init__(width, height)
self.weights = {}
def cost(self, from_node, to_node):
        # Checks that the node distance is exactly 1, i.e. the move is
        # to one of the four adjacent nodes
if (vector(to_node) - vector(from_node)).length_squared() == 1:
return self.weights.get(to_node, 0) + 10
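# Breadth-first search alternative to aStar: explores the grid level by level
# from the start node and records, for each reached node, the vector step back
# towards the node it was reached from.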
def breadthfirst_search(graph, start, end):
queue = deque()
queue.append(start)
path = {}
path[start] = None
while len(queue) > 0:
current = queue.popleft()
if current == end:
break
if graph.node_neighbours(current) != None:
for next in graph.node_neighbours(current):
if vector_to_integer(next) not in path:
queue.append(next)
path[vector_to_integer(next)] = current - next
return path
def vector_to_integer(v):
# Returns an integer from a vector input, v
return (int(v.x), int(v.y))
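# Each Room stores its layout as a 12x16 grid of strings: "B" marks the outer
# boundary, "P" reserves the player's spawn tile, and "W", "E" and "I" are
# filled in below for generated walls, enemies and items so objects do not
# overlap when they are placed.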
class Room:
def __init__(self, RoomNum, player, screen, direction, prevdirection):
self.screen = screen
self.RoomNum = RoomNum
self.room = WeightedGrid(GRIDWIDTH, GRIDHEIGHT)
self.walls = pg.sprite.Group()
self.doors = pg.sprite.Group()
self.ExitDoors = pg.sprite.Group()
self.closedExitDoors = pg.sprite.Group()
self.closeddoors = pg.sprite.Group()
self.enemies = pg.sprite.Group()
self.items = pg.sprite.Group()
self.specialitems = pg.sprite.Group()
self.player = player
self.doordirection = direction
# Gives restrictions on where objects
# can spawn in specific rooms
if RoomNum == 0:
self.roomLayout = [["B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","B","B","B","","","","","","B"],["B","","","","","","","B","P","B","","","","","","B"],["B","","","","","","","B","B","B","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B"]]
else:
self.roomLayout = [["B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","P","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","","","","","","","","","","","","","","","B"],["B","B","B","B","B","B","B","B","B","B","B","B","B","B","B","B"]]
self.prevdoordirection = prevdirection
self.iny = False
self.inx = False
self.door_replaced = False
# Wall vectors on the outside boundary
self.w = [(0,0), (1,0), (2,0), (3,0), (4,0), (5,0), (6,0), (7,0), (8,0), (9,0), (10,0), (11,0), (12,0), (13,0), (14,0), (15,0), (15,1), (15,2), (15,3), (15,4), (15,5), (15,6), (15,7), (15,8), (15,9), (15,10), (15,11), (14,11), (13,11), (12,11), (11,11), (10,11), (9,11), (8,11), (7,11), (6,11), (5,11), (4,11), (3,11), (2,11), (1,11), (0,11), (0,10), (0,9), (0,8), (0,7), (0,6), (0,5), (0,4), (0,3), (0,2), (0,1)]
for wall in self.w:
self.room.walls.append(vector(wall))
if RoomNum != 5:
self.AddItems()
self.CreateRoomWalls()
self.AddEnemies()
self.CreateBoundary()
        # Checks the last door direction so it doesn't
        # repeat and place two doors in one spot
self.CheckPrevDirection()
self.CheckCurrentDirection()
pg.sprite.groupcollide(self.closeddoors, self.walls, False, True)
pg.sprite.groupcollide(self.closedExitDoors, self.walls, False, True)
def CheckCurrentDirection(self):
if self.doordirection == 1:
self.door = ClosedDoor(WIDTH - 64, 320, pg.image.load("closeddoorright.png").convert_alpha(), 1)
self.closeddoors.add(self.door)
elif self.doordirection == 2:
self.door = ClosedDoor(32, 320, pg.image.load("closeddoorleft.png").convert_alpha(), 2)
self.closeddoors.add(self.door)
elif self.doordirection == 3:
self.door = ClosedDoor(448, 48, pg.image.load("closeddoortop.png").convert_alpha(), 3)
self.closeddoors.add(self.door)
elif self.doordirection == 4:
self.door = ClosedDoor(448, (HEIGHT - 64), pg.image.load("closeddoorbottom.png").convert_alpha(), 4)
self.closeddoors.add(self.door)
elif self.doordirection == 0:
if self.prevdoordirection == 1:
self.door = ClosedDoor(32, 320, pg.image.load("closeddoorleft.png").convert_alpha(), 2 )
self.closeddoors.add(self.door)
elif self.prevdoordirection == 2:
self.door = ClosedDoor(WIDTH - 64, 320, pg.image.load("closeddoorright.png").convert_alpha(), 1)
self.closeddoors.add(self.door)
elif self.prevdoordirection == 3:
self.door = ClosedDoor(448, (HEIGHT - 64), pg.image.load("closeddoorbottom.png").convert_alpha(), 4)
self.closeddoors.add(self.door)
elif self.prevdoordirection == 4:
self.door = ClosedDoor(448, 48, pg.image.load("closeddoortop.png").convert_alpha(), 3)
self.closeddoors.add(self.door)
def CheckPrevDirection(self):
if self.prevdoordirection == 1:
self.door = ClosedDoor(32, 320, pg.image.load("closeddoorleft.png").convert_alpha(), 2 )
self.closedExitDoors.add(self.door)
elif self.prevdoordirection == 2:
self.door = ClosedDoor(WIDTH - 64, 320, pg.image.load("closeddoorright.png").convert_alpha(), 1)
self.closedExitDoors.add(self.door)
elif self.prevdoordirection == 3:
self.door = ClosedDoor(448, (HEIGHT - 64), pg.image.load("closeddoorbottom.png").convert_alpha(), 4)
self.closedExitDoors.add(self.door)
elif self.prevdoordirection == 4:
self.door = ClosedDoor(448, 48, pg.image.load("closeddoortop.png").convert_alpha(), 3)
self.closedExitDoors.add(self.door)
def AddItems(self):
# Add 0-2 items in a random spot in the room
self.Item_Amount = random.randint(0, 2)
self.itemnumber = 0
if self.Item_Amount == 1:
            # Probability of getting the item
self.itemnumber = random.randint(1, 30)
for i in range(self.Item_Amount):
validSpot = False
while validSpot == False:
item_ynumber = random.randint(1, 11)
item_xnumber = random.randint(1, 15)
if self.roomLayout[item_ynumber][item_xnumber] == "":
self.roomLayout[item_ynumber][item_xnumber] = "I"
if self.itemnumber >= 5:
self.sitem = Item((item_xnumber * TILESIZE),(item_ynumber * TILESIZE), 2)
self.specitem = (item_xnumber, item_ynumber)
self.specialitems.add(self.sitem)
else:
self.item = Item((item_xnumber * TILESIZE),(item_ynumber * TILESIZE), 1)
self.items.add(self.item)
validSpot = True
def AddEnemies(self):
self.enemylist = []
# Add a random amount of enemies
self.Enemy_Amount = random.randint(3 ,5)
for i in range(self.Enemy_Amount):
validSpot = False
while validSpot == False:
# Generating a random x and y coordinate
ynumber = random.randint(3, 8)
xnumber = random.randint(3, 12)
if self.roomLayout[ynumber][xnumber] == "":
self.roomLayout[ynumber][xnumber] = "E"
# Initializing the enemy(s) into the game
self.enemy = Enemy(self.room, xnumber * TILESIZE,ynumber * TILESIZE)
# Adding enemies to enemy group
self.enemies.add(self.enemy)
self.enemylist.append(self.enemy)
validSpot = True
# Minimum of 3 enemies
self.enemy1 = self.enemylist[0]
self.enemy2 = self.enemylist[1]
self.enemy3 = self.enemylist[2]
# If there are more than 3, enemies are added
# to the group respectively
if self.Enemy_Amount > 3:
self.enemy4 = self.enemylist[3]
if self.Enemy_Amount > 4:
self.enemy5 = self.enemylist[4]
def CreateRoomWalls(self):
        # Initialising the list layout of the map
validSpot = False
        # The for loop gives the number of objects
        # you want in each room; max 6, otherwise an error occurs
        # due to too many objects trying to fit in
for i in range(6):
validSpot = False
while validSpot == False:
                # Gives a random chance to get the
                # different block types
blocktype = random.randint(1, 8)
ynumber = random.randint(3, 8)
xnumber = random.randint(3, 12)
if blocktype == 1:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber + 1][xnumber + 2] == "" and self.roomLayout[ynumber ][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber -1 ] == "" and self.roomLayout[ynumber - 1 ][xnumber] == "" and self.roomLayout[ynumber - 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 2 ][xnumber] == "" and self.roomLayout[ynumber + 2 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber] == "" and self.roomLayout[ynumber][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber+1))
self.room.walls.append(vector(xnumber, ynumber+1))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
self.roomLayout[ynumber + 1][xnumber] = "W"
self.roomLayout[ynumber + 1 ][xnumber + 1] = "W"
# 2x2 block
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,(ynumber+1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall(xnumber * TILESIZE,(ynumber+1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 2:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber + 1][xnumber + 2] == "" and self.roomLayout[ynumber ][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber -1 ] == "" and self.roomLayout[ynumber - 1 ][xnumber] == "" and self.roomLayout[ynumber - 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 2 ][xnumber] == "" and self.roomLayout[ynumber + 2 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber] == "" and self.roomLayout[ynumber][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber))
self.room.walls.append(vector(xnumber, ynumber+1))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
self.roomLayout[ynumber + 1][xnumber] = "W"
# Right "L"
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber) * TILESIZE,(ynumber+1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 3:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber + 1][xnumber + 2] == "" and self.roomLayout[ynumber ][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber -1 ] == "" and self.roomLayout[ynumber - 1 ][xnumber] == "" and self.roomLayout[ynumber - 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 2 ][xnumber] == "" and self.roomLayout[ynumber + 2 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber] == "" and self.roomLayout[ynumber][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber+1))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
self.roomLayout[ynumber + 1][xnumber + 1] = "W"
# Left "L"
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber + 1) * TILESIZE,(ynumber+1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 4:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 1] == "" and self.roomLayout[ynumber][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber] == "" and self.roomLayout[ynumber - 1][xnumber] == "":
# One Block
self.room.walls.append(vector(xnumber, ynumber))
self.roomLayout[ynumber][xnumber] = "W"
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 5:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber + 1][xnumber + 1] == "" and self.roomLayout[ynumber ][xnumber - 1] == "" and self.roomLayout[ynumber - 1][xnumber -1 ] == "" and self.roomLayout[ynumber - 1 ][xnumber] == "" and self.roomLayout[ynumber - 2 ][xnumber + 1 ] == "" and self.roomLayout[ynumber - 2 ][xnumber] == "" and self.roomLayout[ynumber - 1 ][xnumber + 2 ] == "" and self.roomLayout[ynumber - 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber] == "" and self.roomLayout[ynumber][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber -1))
self.room.walls.append(vector(xnumber+1, ynumber))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
self.roomLayout[ynumber - 1][xnumber + 1] = "W"
# right up "L"
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber + 1) * TILESIZE,(ynumber-1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 6:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber + 1][xnumber + 1] == "" and self.roomLayout[ynumber ][xnumber - 1] == "" and self.roomLayout[ynumber - 1][xnumber -1 ] == "" and self.roomLayout[ynumber - 1 ][xnumber] == "" and self.roomLayout[ynumber - 2 ][xnumber + 1 ] == "" and self.roomLayout[ynumber - 2 ][xnumber] == "" and self.roomLayout[ynumber -1 ][xnumber + 2 ] == "" and self.roomLayout[ynumber - 1 ][xnumber + 1 ] == "" and self.roomLayout[ynumber + 1 ][xnumber] == "" and self.roomLayout[ynumber][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber))
self.room.walls.append(vector(xnumber, ynumber -1))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
self.roomLayout[ynumber - 1][xnumber] = "W"
# left up "L"
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber) * TILESIZE,(ynumber-1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 7:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 2] == "" and self.roomLayout[ynumber][xnumber + 1] == "" and self.roomLayout[ynumber][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber] == "" and self.roomLayout[ynumber - 1][xnumber] == "" and self.roomLayout[ynumber - 1][xnumber + 1] == "" and self.roomLayout[ynumber + 1][xnumber + 1] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber+1, ynumber))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber][xnumber + 1] = "W"
# Horizontal 2x1 wall
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber+1) * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
if blocktype == 8:
if self.roomLayout[ynumber][xnumber] == "":
try:
if self.roomLayout[ynumber][xnumber + 1] == "" and self.roomLayout[ynumber][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber - 1] == "" and self.roomLayout[ynumber + 1][xnumber + 1] == "" and self.roomLayout[ynumber + 2][xnumber] == "" and self.roomLayout[ynumber - 1][xnumber] == "" and self.roomLayout[ynumber + 1][xnumber] == "":
self.room.walls.append(vector(xnumber, ynumber))
self.room.walls.append(vector(xnumber, ynumber +1))
self.roomLayout[ynumber][xnumber] = "W"
self.roomLayout[ynumber + 1][xnumber ] = "W"
# Vertical block wall
self.wall = Wall(xnumber * TILESIZE,ynumber * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
self.wall = Wall((xnumber) * TILESIZE,(ynumber+1) * TILESIZE, pg.image.load("normwall.png"))
self.walls.add(self.wall)
validSpot = True
except:
print("error")
validSpot = False
def CreateBoundary(self):
# Creates boundary of outer edge of the room
for i in range(16):
# Top boundary
self.wall = Wall(i * TILESIZE,0, pg.image.load("topwall.png"))
self.walls.add(self.wall)
for k in range(16):
# Bottom boundary
self.wall = Wall(k * TILESIZE,(HEIGHT - 64), pg.image.load("bottomwall.png"))
self.walls.add(self.wall)
for u in range(1, 11):
# Left boundary
self.wall = Wall(0, u * TILESIZE, pg.image.load("leftwall.png"))
self.walls.add(self.wall)
for o in range(1, 11):
# Right boundary
self.wall = Wall((WIDTH - 64), o * TILESIZE, pg.image.load("rightwall.png"))
self.walls.add(self.wall)
def update(self, proj):
# Checks to see if there is a special item in the room
if len(self.specialitems) > 0:
self.specialtrue = 1
else:
self.specialtrue = 0
self.specitem = None
doorcheck = 0
self.proj = proj
if self.RoomNum < 5:
# Update enemies in the room if the room is not number 5
self.enemy1.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls, self.specialtrue, self.specitem)
self.enemynode1 = self.enemy1.get_node()
self.enemy2.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls,self.specialtrue, self.specitem)
self.enemynode2 = self.enemy2.get_node()
self.enemy3.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls, self.specialtrue, self.specitem)
self.enemynode3 = self.enemy3.get_node()
if self.Enemy_Amount > 3:
self.enemy4.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls, self.specialtrue, self.specitem)
self.enemynode4 = self.enemy4.get_node()
if self.Enemy_Amount > 4:
self.enemy5.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls, self.specialtrue, self.specitem)
self.enemynode5 = self.enemy5.get_node()
if self.Enemy_Amount > 5:
self.enemy6.update(self.proj, self.player.rect.center, self.PlayerNodeCheck(), self.walls, self.specialtrue, self.specitem)
self.enemynode6 = self.enemy6.get_node()
else:
# Update the boss if it is room 5
self.Boss.update(self.proj, self.walls)
# Allows the projectiles to have collisions between the room objects
pg.sprite.groupcollide(self.proj, self.walls, True, False)
pg.sprite.groupcollide(self.proj, self.closeddoors, True, False)
pg.sprite.groupcollide(self.proj, self.closedExitDoors, True, False)
if self.door_replaced == False:
if len(self.enemies) == 0:
if self.prevdoordirection == 1:
self.door = Door(32, 320, pg.image.load("opendoorright.png").convert_alpha(), 2 )
self.ExitDoors.add(self.door)
elif self.prevdoordirection == 2:
self.door = Door(WIDTH - 64, 320, pg.image.load("opendoorright.png").convert_alpha(), 1)
self.ExitDoors.add(self.door)
elif self.prevdoordirection == 3:
self.door = Door(448, (HEIGHT - 64), pg.image.load("opendoortop.png").convert_alpha(), 4)
self.ExitDoors.add(self.door)
elif self.prevdoordirection == 4:
self.door = Door(448, 32, pg.image.load("opendoortop.png").convert_alpha(), 3)
self.ExitDoors.add(self.door)
##############
if self.doordirection == 1:
self.door = Door(WIDTH - 64, 320, pg.image.load("opendoorright.png").convert_alpha(), 1)
self.doors.add(self.door)
elif self.doordirection == 2:
self.door = Door(32, 320, pg.image.load("opendoorright.png").convert_alpha(), 2)
self.doors.add(self.door)
elif self.doordirection == 3:
self.door = Door(448, 32, pg.image.load("opendoortop.png").convert_alpha(), 3)
self.doors.add(self.door)
elif self.doordirection == 4:
self.door = Door(448, (HEIGHT - 64), pg.image.load("opendoortop.png").convert_alpha(), 4)
self.doors.add(self.door)
pg.sprite.groupcollide(self.closeddoors, self.doors, True, False)
pg.sprite.groupcollide(self.closedExitDoors, self.ExitDoors, True, False)
self.door_replaced = True
if self.RoomNum != 5:
self.collidedwithdoor = self.player.update(self.walls, self.closeddoors, self.closedExitDoors, self.doors, self.ExitDoors, self.enemies,self.items, None, None)
if self.collidedwithdoor == 1:
for projectile in self.proj:
projectile.kill()
self.player.LoadInto_NewMap(self.doordirection)
return 1
if self.collidedwithdoor == 2:
for projectile in self.proj:
projectile.kill()
self.player.LoadInto_OldMap(self.prevdoordirection)
return 2
def PlayerNodeCheck(self):
# Need to be set to False again for when
# the procedure is called again in the update section
self.iny = False
self.inx = False
self.playercenter = self.player.getCenter()
for i in range(0, WIDTH, TILESIZE):
# Checking if the player's center "x"
# coordinate is between a tile's area coordinates
if i > self.playercenter[0] and (i - 64) < self.playercenter[0] :
self.coordx = i
self.inx = True
for j in range(0, HEIGHT, TILESIZE):
if j < self.playercenter[1] and (j + 64) > self.playercenter[1] :
self.coordy = j
self.iny = True
# Searching through the tile list and
# mapping out what tile the player's center is in
if self.iny == True and self.inx == True:
            # Dividing the x and y coordinates
            # by 64 and subtracting 1 to get into list form
x = int(self.coordx / 64 - 1)
y = int(self.coordy / 64)
self.tempx = x
self.tempy = y
return (x, y)
else:
return (self.tempx, self.tempy)
def draw(self):
self.walls.draw(self.screen)
self.specialitems.draw(self.screen)
self.items.draw(self.screen)
# Draw enemies when the room num is not 5
if self.RoomNum < 5:
self.enemy1.draw()
self.enemy2.draw()
self.enemy3.draw()
if self.Enemy_Amount > 3:
self.enemy4.draw()
if self.Enemy_Amount > 4:
self.enemy5.draw()
self.closeddoors.draw(self.screen)
self.closedExitDoors.draw(self.screen)
if self.RoomNum != 5:
if len(self.enemies) == 0:
self.doors.draw(self.screen)
self.ExitDoors.draw(self.screen)
class Boss(pg.sprite.Sprite):
def __init__(self, startposx, startposy, player, health = 1200):
pg.sprite.Sprite.__init__(self)
self.boss_projectiles = pg.sprite.Group()
self.imgindex = []
        # Initializing all images
self.imgindex.append(pg.image.load("boss.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss2.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss3.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss4.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss5.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss6.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss7.v2.png").convert_alpha())
self.imgindex.append(pg.image.load("boss8.v2.png").convert_alpha())
self.index = 0
self.image = self.imgindex[self.index]
self.rect = self.image.get_rect()
self.rect.x = startposx
self.rect.y = startposy
self.health = health
self.player = player
self.vx, self.vy = 0, 0
self.first_movement = False
self.direction = random.randint(1, 4)
self.timer = 0
self.stimer = 0
self.wait = 0
self.screen = maingame.returnGameScreen()
def update(self, Group, wall_group):
# Boss dies at 0 health
if self.health == 0:
self.kill()
return True
if pg.sprite.spritecollide(self, Group, True):
self.health = self.health - 15
# Setting the hit timer for boss
if self.timer != 150:
self.Boss_AttackCycle()
self.rect.x += self.vx
self.rect.y += self.vy
# Directions to bounce
if self.rect.x == 64 or self.rect.x == (WIDTH - 256):
if self.direction == 1:
self.direction = 4
elif self.direction == 2:
self.direction = 3
elif self.direction == 3:
self.direction = 2
elif self.direction == 4:
self.direction = 1
if self.rect.y == 64 or self.rect.y == (HEIGHT - 256):
if self.direction == 1:
self.direction = 3
elif self.direction == 2:
self.direction = 4
elif self.direction == 3:
self.direction = 1
elif self.direction == 4:
self.direction = 2
self.timer += 1
else:
self.vx, self.vy = 0, 0
if self.wait != 50:
self.wait += 1
if self.wait == 25:
self.shoot_projectiles()
elif self.wait == 49:
self.shoot_projectiles()
else:
self.wait = 0
self.timer = 0
self.playercenter = self.player.getCenter()
self.boss_projectiles.update()
self.rotatetowardsPlayer()
self.random_timer = random.randint(20, 25)
self.stimer += self.random_timer
        # This is where you are able to adjust the
        # animation time, for the cycles of the pictures.
if (self.stimer % 5) == 0:
self.index += 1
if self.index >= len(self.imgindex):
self.index = 0
self.image = self.imgindex[self.index]
def rotatetowardsPlayer(self):
self.angle_vec = math.atan2((self.rect.center[0] - self.playercenter[0]),(self.rect.center[1] - self.playercenter[1]))
# The angle is converted from radians to degrees
self.angle = math.degrees(self.angle_vec)
self.newimage = pg.transform.rotate(self.image, self.angle - 180)
oldcenter = self.rect.center
self.newrect = self.newimage.get_rect()
self.newrect.center = oldcenter
def shoot_projectiles(self):
self.boss_center = self.rect.center
self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0] + 128, self.playercenter[1] + 128, 10, self.screen, 2)
self.boss_projectiles.add(self.newproj)
# Object added to a group
self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0], self.playercenter[1], 10, self.screen, 2)
self.boss_projectiles.add(self.newproj)
# Object added to a group
self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0] - 128, self.playercenter[1] - 128, 10, self.screen, 2)
self.boss_projectiles.add(self.newproj)
# Object added to a group
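    # Direction values 1-4 correspond to the four diagonal movement
    # directions; update() flips them when the boss reaches the arena edges
    # so it bounces around the room.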
def Boss_AttackCycle(self):
        # Set the velocity for the chosen diagonal direction
        if self.direction == 1:
            self.vx, self.vy = 4, 4
        if self.direction == 2:
            self.vx, self.vy = -4, -4
        if self.direction == 3:
            self.vx, self.vy = 4, -4
        if self.direction == 4:
            self.vx, self.vy = -4, 4
def draw(self):
self.percentage = self.health / 1200
xcoord = self.percentage * 512
if self.health != 0:
self.boss_projectiles.draw(self.screen)
pg.draw.rect(self.screen,(0, 0, 0),[253, 13, 518, 38])
pg.draw.rect(self.screen,(95, 99, 88),[256, 16, 512, 32])
pg.draw.rect(self.screen,(227, 2, 43),[256, 16, xcoord, 32])
try:
self.screen.blit(self.newimage, self.newrect)
except:
self.screen.blit(self.image, self.rect)
# Creating an object which inherits from the class Room
class BossRoom(Room):
def __init__(self, RoomNum, player, screen, direction, prevdirection):
        # super() allows the parameters to be
        # taken from the Room class while allowing this class
        # to have its own __init__ to add different,
        # more specific parameters.
super().__init__(RoomNum, player, screen, direction, prevdirection)
self.boss_group = pg.sprite.Group()
self.AddBoss()
self.player = player
self.boss_dead = False
self.winimage = pg.image.load("winscreen.png").convert_alpha()
def AddBoss(self):
# Add the boss to the room
self.Boss = Boss(448, 320, self.player)
self.boss_group.add(self.Boss)
def update(self, projectile):
# Updating the bossroom and the projectile in it
super(BossRoom, self).update(projectile)
self.collidedwithdoor = self.player.update(self.walls, self.closeddoors, self.closedExitDoors, self.doors, self.ExitDoors, self.enemies, self.items, self.Boss.boss_projectiles, self.boss_group)
if self.collidedwithdoor == 1:
for projectile in self.proj:
projectile.kill()
self.player.LoadInto_NewMap(self.doordirection)
return 1
if self.collidedwithdoor == 2:
for projectile in self.proj:
projectile.kill()
self.player.LoadInto_OldMap(self.prevdoordirection)
return 2
# Check to see if the boss is dead
if len(self.boss_group) == 0:
self.boss_dead = True
def draw(self):
# Draw procedure for the boss room
super(BossRoom, self).draw()
self.screen = maingame.returnGameScreen()
if self.boss_dead == True:
# Draw the win screen once boss is beaten
self.screen.blit(self.winimage, (0,0))
else:
self.walls.draw(self.screen)
self.Boss.draw()
self.screen = maingame.returnGameScreen()
class Game:
def __init__(self):
# initializing the main object
pg.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption("Dungeon Game")
self.clock = pg.time.Clock()
self.bg = pg.image.load("back.png").convert_alpha()
self.RoomNum = 0
def gameintro(self):
# load intro screen
self.image = pg.image.load("intropic.png")
timer = 0
intro_stage = True
while intro_stage == True:
for event in pg.event.get():
if event.type == pg.QUIT:
self.quitGame()
if event.type == pg.MOUSEBUTTONUP:
intro_stage = False
if timer < 250:
self.screen.fill((255,255,255))
if timer == 250:
self.screen.fill((209, 209, 209))
if timer == 500:
self.screen.fill((161, 161, 161))
if timer == 750:
self.screen.fill((112, 112, 112))
if timer >= 1000:
self.screen.fill((89, 89, 89))
self.screen.blit(self.image, (32,32))
timer += 1
pg.display.flip()
def deathscreen(self):
# load death screen
self.image = pg.image.load("deathscreen.png")
timer = 0
intro_stage = True
while intro_stage == True:
for event in pg.event.get():
if event.type == pg.QUIT:
self.quitGame()
if timer < 250:
self.screen.fill((255,255,255))
if timer == 250:
self.screen.fill((209, 209, 209))
if timer == 500:
self.screen.fill((161, 161, 161))
if timer == 750:
self.screen.fill((112, 112, 112))
if timer >= 1000:
self.screen.fill((89, 89, 89))
self.screen.blit(self.image, (32,32))
timer += 1
pg.display.flip()
def CheckforOppositeDoorDirection(self):
        # Picks a new random door direction, avoiding the direction
        # opposite to the current exit so two doors don't share one spot
if self.doordirection == 1:
self.doordirection = random.randint(1, 4)
if self.doordirection == 2:
self.doordirection += 1
elif self.doordirection == 2:
self.doordirection = random.randint(1, 4)
if self.doordirection == 1:
self.doordirection += 1
elif self.doordirection == 3:
self.doordirection = random.randint(1, 4)
if self.doordirection == 4:
self.doordirection -= 2
elif self.doordirection == 4:
self.doordirection = random.randint(1, 4)
if self.doordirection == 3:
self.doordirection -= 2
def CreateNewGame(self):
# Creating new sprite groups
self.projectiles = pg.sprite.Group()
self.player_group = pg.sprite.Group()
        # Initializing the player into the game
        # with coordinates as parameters
self.player = PlayerSprite(512, 384)
self.player_group.add(self.player)
self.doordirection = random.randint(1, 4)
self.exitdoor2 = self.doordirection
# Initializing each room in the game
self.Room_0 = Room(self.RoomNum, self.player, self.screen, self.doordirection, 0)
self.CheckforOppositeDoorDirection()
self.exitdoor3 = self.doordirection
self.Room_1 = Room(self.RoomNum + 1, self.player, self.screen,self.doordirection, self.exitdoor2)
self.CheckforOppositeDoorDirection()
self.exitdoor4 = self.doordirection
self.Room_2 = Room(self.RoomNum + 2, self.player, self.screen,self.doordirection, self.exitdoor3)
self.CheckforOppositeDoorDirection()
self.exitdoor5 = self.doordirection
self.Room_3 = Room(self.RoomNum + 3, self.player, self.screen,self.doordirection, self.exitdoor4)
self.CheckforOppositeDoorDirection()
self.exitdoor6 = self.doordirection
self.Room_4 = Room(self.RoomNum + 4, self.player, self.screen,self.doordirection, self.exitdoor5)
self.CheckforOppositeDoorDirection()
self.Room_5 = BossRoom(self.RoomNum +5, self.player, self.screen,0,self.exitdoor6 )
    def returnGameScreen(self):
        # Returns the game's screen
        return self.screen
    def drawBackground(self):
self.screen.blit(self.bg, (0,0))
def MainGameLoop(self):
self.gameRunning = True
# Main Game Loop
while self.gameRunning:
# Setting the clock tick rate to 60 ticks
self.clock.tick(60)
self.getEvents()
self.update()
self.CreateImage()
def CreateImage(self):
# Drawing sub-section of the main loop
self.drawBackground()
# Each room has a different object with
# different data to keep the
# data in that room's required 'data pack'
self.projectiles.draw(self.screen)
if self.RoomNum == 0:
self.Room_0.draw()
elif self.RoomNum == 1:
self.Room_1.draw()
elif self.RoomNum == 2:
self.Room_2.draw()
elif self.RoomNum == 3:
self.Room_3.draw()
elif self.RoomNum == 4:
self.Room_4.draw()
elif self.RoomNum == 5:
self.player.draw()
self.Room_5.draw()
if self.RoomNum != 5:
self.player.draw()
# Flips the display at the end to change the image
pg.display.flip()
def AimLine(self):
# Testing attribute to visually see the vector
# of the player's aim
pg.draw.line(self.screen, (0, 0, 0), (self.mousex, self.mousey), (self.PLAYERCENTER))
def DrawGrid(self):
        # Draws a grid which gives a reference for testing
for i in range(0, WIDTH, TILESIZE):
pg.draw.line(self.screen, (0, 0, 0), (i, 0), (i, HEIGHT))
for j in range(0, HEIGHT, TILESIZE):
pg.draw.line(self.screen, (0, 0, 0), (0, j), (WIDTH, j))
def getEvents(self):
self.PLAYERCENTER = self.player.getCenter()
self.mousex, self.mousey = pg.mouse.get_pos()
for event in pg.event.get():
self.mouse = pg.mouse.get_pressed()
if event.type == pg.MOUSEBUTTONUP:
if event.button == 1:
# Doesn't allow more than 5 projectiles on
# screen at once
if len(self.projectiles) < 5:
# Creates new projectile object
self.newproj = Projectile(self.PLAYERCENTER[0], self.PLAYERCENTER[1], self.mousex, self.mousey, 15, self.screen, 1)
self.projectiles.add(self.newproj)
# Object added to a group
# When the top right cross is clicked, the
# program closes
if event.type == pg.QUIT:
self.quitGame()
def update(self):
# Check to see if the player is dead
if self.player.health <= 0:
self.gameRunning = False
self.projectiles.update()
# Update section for all rooms
if self.RoomNum == 0:
if self.Room_0.update(self.projectiles) == 1:
self.RoomNum = 1
elif self.RoomNum == 1:
door1 = self.Room_1.update(self.projectiles)
if door1 == 1:
self.RoomNum = 2
if door1 == 2:
self.RoomNum -= 1
elif self.RoomNum == 2:
door2 = self.Room_2.update(self.projectiles)
if door2 == 1:
self.RoomNum = 3
if door2 == 2:
self.RoomNum -= 1
elif self.RoomNum == 3:
door3 = self.Room_3.update(self.projectiles)
if door3 == 1:
self.RoomNum = 4
if door3 == 2:
self.RoomNum -= 1
elif self.RoomNum == 4:
door4 = self.Room_4.update(self.projectiles)
if door4 == 1:
self.RoomNum = 5
if door4 == 2:
self.RoomNum -= 1
elif self.RoomNum == 5:
if self.Room_5.update(self.projectiles) == 2:
self.RoomNum = 4
# quit procedure
def quitGame(self):
pg.quit()
sys.exit()
# Creates the main game object
maingame = Game()
while True:
maingame.gameintro()
maingame.CreateNewGame()
maingame.MainGameLoop()
maingame.deathscreen()
``` |
{
"source": "JM1F/Weather-App",
"score": 3
} |
#### File: JM1F/Weather-App/GUI.py
```python
from tkinter import *
import tkinter.font
import tkinter.ttk as ttk
import webbrowser
import requests
import datetime
import calendar
from pytz import timezone
import config
APIKEY = config.APIKEY
# Quit button class, closes GUI.
class quitButton(Frame):
def __init__(self, master=None):
super().__init__(master)
powerImg = PhotoImage(file = "guiImages\power.png")
self.button = Button(master, image= powerImg, text="QUIT", command=master.quit, bd=0, highlightcolor="#91C46B",activebackground="#91C46B", bg="#91C46B")
self.button.image = powerImg
self.button.place(x=13,y=550)
# Settings button class, opens the settings window for changing location.
class settingButton(Frame):
def __init__(self, master=None):
super().__init__(master)
powerImg = PhotoImage(file = "guiImages\settings.png")
self.button = Button(master, image= powerImg, text="SETTINGS", command=settingWindow, bd=0, highlightcolor="#91C46B",activebackground="#91C46B", bg="#91C46B")
self.button.image = powerImg
self.button.place(x=13,y=500)
# Refresh button class, refreshes all the weather data and time intervals.
class refreshButton(Frame):
def __init__(self, master=None):
super().__init__(master)
powerImg = PhotoImage(file = "guiImages\erefresh.png")
self.button = Button(master, image= powerImg, text="REFRESH", command=displayCurrentConditions, bd=0,highlightcolor="#000000",activebackground="#1E2226", bg="#1E2226", relief = SUNKEN)
self.button.image = powerImg
self.button.place(x=977,y=14)
# Class to create the border design for the GUI.
class Border():
def __init__(self, master=None):
self.master = master
bg = Canvas(self.master, width=1024, height=600, bd=0, highlightthickness=0, relief='ridge', background="#FFFFFF")
bg.create_rectangle(60, 0, 1024, 59, fill="#34383C", outline="#393E42")
bg.create_rectangle(0, 0, 60, 60, fill="#1E2226", outline="#23272C")
bg.create_rectangle(0, 60, 60, 600, fill="#91C46B", outline="#97C774")
bg.create_rectangle(213,528 , 337,529 , fill="#1E2226", outline="#1E2226")
bg.create_rectangle(346,528 , 470,529 , fill="#1E2226", outline="#1E2226")
bg.create_rectangle(479,528 , 603,529 , fill="#1E2226", outline="#1E2226")
bg.create_rectangle(612,528 , 736,529 , fill="#1E2226", outline="#1E2226")
bg.create_rectangle(745,528 , 869,529 , fill="#1E2226", outline="#1E2226")
bg.create_rectangle(835, 0, 1024, 59, fill="#1E2226", outline="#23272C")
bg.create_rectangle(500, 200, 900,201, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(500, 250, 900,251, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(500, 300, 900,301, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(75, 280, 76,315, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(873, 350, 874,585, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(208, 350, 874,351, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(208, 585, 874,584, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(208, 350, 209,585, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(341, 350, 342,585, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(474, 350, 475,585, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(607, 350, 608,585, fill="#A8A8A8", outline="#C9C9C9")
bg.create_rectangle(740, 350, 741,585, fill="#A8A8A8", outline="#C9C9C9")
bg.pack()
# Class for Logo design in GUI.
class Logo():
def __init__(self, master=None):
self.master = master
logo = PhotoImage(file = "guiImages\cloud.png")
cloudImage = Label(master, image=logo, bg="#1E2226")
cloudImage.image = logo
cloudImage.place(x=13,y=13)
# Class for new setting window to change location for weather.
class settingWindow():
def __init__(self, master=None):
self.master = master
# Generates a top level window.
self.settings_window = Toplevel(window, bg = "#7E848B", height= 500, width = 500)
self.changeWindow()
self.settings_window.transient(window)
self.settings_window.grab_set()
window.wait_window(self.settings_window)
    # Sets up the settings window for location data transported through the REST API
def changeWindow(self):
self.settings_window.resizable(0,0)
self.bg = Canvas(self.settings_window, width=500, height=500, bd=0, highlightthickness=0, relief='ridge', background="#FFFFFF")
self.bg.pack()
map_image = PhotoImage(file = "guiImages\mapimage.png")
self.mapLink = Label(self.settings_window, text="map", cursor="hand2", image=map_image, relief= RAISED)
self.mapLink.image = map_image
self.variable = StringVar(self.settings_window)
self.mapLink.place(x=150, y=75)
self.mapLink.bind("<Button-1>", lambda e: webbrowser.open_new("http://192.168.0.3/Web%20App%20Code/index.html"))
self.save_button = Button(self.settings_window, text="Save", bd=0, command=self.get_Location, bg="#FFFFFF", fg="#4CA73E", font=("Impact", 25), relief =FLAT, activebackground="#4CA73E", activeforeground="#FFFFFF",height = 0, width = 7)
self.save_button.place(x= 192, y= 400)
self.settingsLabel = Label(self.settings_window, text="Settings", bg="#FFFFFF", fg="#000000", font=("Calibri", 30), bd = 0)
self.settingsLabelfont = tkinter.font.Font(self.settingsLabel, self.settingsLabel.cget("font"))
self.settingsLabelfont.configure(underline = True)
self.settingsLabel.configure(font=self.settingsLabelfont)
self.settingsLabel.place(x= 185, y= 10)
self.city_names, self.data, self.ids = getLocationData()
setcity_names = list(set(self.city_names))
self.variable = StringVar()
self.w = ttk.Combobox(self.settings_window,textvariable = self.variable, state= "readonly", values= [*setcity_names])
self.w.bind("<<ComboboxSelected>>",lambda e: self.settings_window.focus())
self.w.configure(width = 20)
self.w.configure(font = ("Calibri", 20 ))
self.w.place(x= 100, y=310)
self.chooseLable = Label(self.settings_window, text="Choose a city...", bg="#FFFFFF", fg="#878787", font=("Calibri", 12), bd = 0)
self.chooseLable.place(x= 100, y=285)
tooltip_image = PhotoImage(file = "guiImages\qmark.png")
toolTipIcon = Label(self.settings_window, text="button 1", image=tooltip_image, bg="#FFFFFF")
toolTipIcon.image = tooltip_image
toolTipIcon.place(x=355, y=80)
        toolTipIcon_ttp = createToolTip(toolTipIcon, "Click on the map to choose\nthe city location within the web\nbrowser, then the city results will\nbe available to choose from the\ndrop-down box.")
# Gets the city location name from the settings window.
def get_Location(self):
city = self.variable.get()
index = 0
for i in self.city_names:
if i == city:
saveDataTxt(index, self.data)
break
else:
index += 1
displayCurrentConditions()
# Tooltip class.
class createToolTip():
def __init__(self, widget, text='widget info'):
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.close)
# Mouse over event displays the text box with tooltip
def enter(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# Creates a toplevel window
self.tw = Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = Label(self.tw, text=self.text, justify='left',
background='white', relief='solid', borderwidth=1,
font=("times", "8", "normal"))
label.pack(ipadx=1)
# Mouse leave event destroys the text box on the tooltip
def close(self, event=None):
if self.tw:
self.tw.destroy()
# Writes data of the city id location. IDs provided by openweathermap.org
def overwriteData(data1, data2, id):
savedLatLngDataW = open("savedLatLngData.txt", "w")
savedLatLngDataW.write(id)
savedLatLngDataW.close()
getConditions()
# Gets the selected city's id and coordinates from the API data and saves the id.
def saveDataTxt(index, data):
city_id = data['list'][index]["id"]
lat = data['list'][index]["coord"]["lat"]
long = data['list'][index]["coord"]["lon"]
overwriteData(lat, long, str(city_id))
# Gets all location data from API
def getLocationData():
# Rest API Address
url2 = "http://127.0.0.1:5000/"
res2 = requests.get(url2)
data2 = res2.json()
try:
lat = data2["coords"]["lat"]
long = data2["coords"]["long"]
lat = float(lat)
long = float(long)
except:
print("Click on the map...")
citynames = []
ids = []
# Weather data API address for specific location
url3 = "http://api.openweathermap.org/data/2.5/find?lat={}&lon={}&cnt=10&appid={}".format(lat, long, APIKEY)
res3 = requests.get(url3)
data3 = res3.json()
name = data3['list']
for i in range(len(name)):
cityname = data3['list'][i]["name"]
citynames.append(cityname)
    for j in range(len(name)):
        id = data3['list'][j]["id"]
        ids.append(id)
return citynames, data3, ids
# Gets the current day and the following days to display on the GUI
def listOfWeekdays():
list_of_weekdays = []
    # Timezone set to this so the days line up with the 3-hour weather data blocks
tz1 = timezone("Etc/GMT-3")
for i in range(8):
NextDay_Date = datetime.datetime.now(tz=tz1) + datetime.timedelta(days=i)
NextDay_Date = NextDay_Date.weekday()
Day = calendar.day_abbr[NextDay_Date]
list_of_weekdays.append(Day)
return list_of_weekdays
# Changes data and images for all respective times for 5 days.
def ChangeIcon(day_index, day_list):
createDailyRadioButton.getButtonTimes
cityi = cityid.get()
firstIndexLength = (len(day_list[0]) - 1)
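    # The forecast API returns data in 3-hour blocks (up to 8 per day), so the
    # offsets used below (e.g. +2, +5, +8) appear to pick three spread-out
    # times within each of the following days.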
# Day 1 Section
if day_index == "1":
if button1.get() == "0":
acessDailyAPIData(0, cityi,day_index)
if button1.get() == "1":
acessDailyAPIData(1, cityi,day_index)
if button1.get() == "2":
acessDailyAPIData(2, cityi,day_index)
file_directorystring1 = ("WeatherPics\{}.png").format(day_10_icon.get())
icon_image1 = PhotoImage(file = file_directorystring1)
WIcon_1image = Label(window, text="icon", image=icon_image1, bd=0)
WIcon_1image.image = icon_image1
daymaxt = Label(textvariable=day_10_maxt, bg="#FFFFFF", fg="#000000", font=("Verdana", 20), bd = 0)
daymint = Label(textvariable=day_10_mint, bg="#FFFFFF", fg="#878787", font=("Verdana", 14), bd = 0)
daymaxt.place(x=215,y= 465)
daymint.place(x=215,y= 500)
WIcon_1image.place(x=224, y=370)
# Day 2 Section
if day_index == "2":
if button2.get() == "0":
acessDailyAPIData((firstIndexLength+2), cityi,day_index)
if button2.get() == "1":
acessDailyAPIData((firstIndexLength+5), cityi,day_index)
if button2.get() == "2":
acessDailyAPIData((firstIndexLength+8), cityi,day_index)
file_directorystring2 = ("WeatherPics\{}.png").format(day_20_icon.get())
icon_image2 = PhotoImage(file = file_directorystring2)
WIcon_2image = Label(window, text="icon", image=icon_image2, bd=0)
WIcon_2image.image = icon_image2
daymaxt2 = Label(textvariable=day_20_maxt, bg="#FFFFFF", fg="#000000", font=("Verdana", 20), bd = 0)
daymint2 = Label(textvariable=day_20_mint, bg="#FFFFFF", fg="#878787", font=("Verdana", 14), bd = 0)
daymaxt2.place(x=348,y= 465)
daymint2.place(x=348,y= 500)
WIcon_2image.place(x=357, y=370)
# Day 3 Section
if day_index == "3":
if button3.get() == "0":
acessDailyAPIData((firstIndexLength+10), cityi,day_index)
if button3.get() == "1":
acessDailyAPIData((firstIndexLength+13), cityi,day_index)
if button3.get() == "2":
acessDailyAPIData((firstIndexLength+16), cityi,day_index)
file_directorystring3 = ("WeatherPics\{}.png").format(day_30_icon.get())
icon_image3 = PhotoImage(file = file_directorystring3)
WIcon_3image = Label(window, text="icon", image=icon_image3, bd=0)
WIcon_3image.image = icon_image3
daymaxt3 = Label(textvariable=day_30_maxt, bg="#FFFFFF", fg="#000000", font=("Verdana", 20), bd = 0)
daymint3 = Label(textvariable=day_30_mint, bg="#FFFFFF", fg="#878787", font=("Verdana", 14), bd = 0)
daymaxt3.place(x=481,y= 465)
daymint3.place(x=481,y= 500)
WIcon_3image.place(x=490, y=370)
# Day 4 Section
if day_index == "4":
if button4.get() == "0":
acessDailyAPIData(firstIndexLength+18, cityi,day_index)
if button4.get() == "1":
acessDailyAPIData(firstIndexLength+21, cityi,day_index)
if button4.get() == "2":
acessDailyAPIData(firstIndexLength+24, cityi,day_index)
file_directorystring4 = ("WeatherPics\{}.png").format(day_40_icon.get())
icon_image4 = PhotoImage(file = file_directorystring4)
WIcon_4image = Label(window, text="icon", image=icon_image4, bd=0)
WIcon_4image.image = icon_image4
daymaxt4 = Label(textvariable=day_40_maxt, bg="#FFFFFF", fg="#000000", font=("Verdana", 20), bd = 0)
daymint4 = Label(textvariable=day_40_mint, bg="#FFFFFF", fg="#878787", font=("Verdana", 14), bd = 0)
daymaxt4.place(x=614,y= 465)
daymint4.place(x=614,y= 500)
WIcon_4image.place(x=623, y=370)
# Day 5 Section
if day_index == "5":
if button5.get() == "0":
acessDailyAPIData(firstIndexLength+26, cityi,day_index)
if button5.get() == "1":
acessDailyAPIData(firstIndexLength+29, cityi,day_index)
if button5.get() == "2":
acessDailyAPIData(firstIndexLength+31, cityi,day_index)
file_directorystring5 = ("WeatherPics\{}.png").format(day_50_icon.get())
icon_image5 = PhotoImage(file = file_directorystring5)
WIcon_5image = Label(window, text="icon", image=icon_image5, bd=0)
WIcon_5image.image = icon_image5
daymaxt5 = Label(textvariable=day_50_maxt, bg="#FFFFFF", fg="#000000", font=("Verdana", 20), bd = 0)
daymint5 = Label(textvariable=day_50_mint, bg="#FFFFFF", fg="#878787", font=("Verdana", 14), bd = 0)
daymaxt5.place(x=747,y= 465)
daymint5.place(x=747,y= 500)
WIcon_5image.place(x=756, y=370)
# Creates the radio buttons for each day
class createDailyRadioButton():
def __init__(self, xcoord, var, day_index, timeVariable1,timeVariable2,timeVariable3 ):
buttonValue = [0, 1, 2]
self.radiobuttonList = []
self.day_index = day_index
self.dayList = self.makeTimeSeperations()
self.r1 = Radiobutton(window, variable = var,textvariable=timeVariable1, value = buttonValue[0], indicatoron= 0, background="#91C46B",activebackground ="#CCCCCC", command=lambda: ChangeIcon(self.day_index,self.dayList), height=3, width=5, bd=0, relief=FLAT)
self.r1.place(x = xcoord+42, y =530)
self.r2 = Radiobutton(window, variable = var,textvariable=timeVariable2, value = buttonValue[1], indicatoron= 0, background="#91C46B",activebackground ="#CCCCCC", command=lambda: ChangeIcon(self.day_index,self.dayList), height=3, width=5, bd=0, relief=FLAT)
self.r2.place(x = xcoord+84, y =530)
self.r3 = Radiobutton(window, variable = var,textvariable=timeVariable3, value = buttonValue[2], indicatoron= 0, background="#91C46B",activebackground ="#CCCCCC", command=lambda: ChangeIcon(self.day_index,self.dayList), height=3, width=5, bd=0, relief=FLAT)
self.r3.place(x = xcoord+126, y =530)
self.radiobuttonList.append(self.r1)
self.getButtonTimes()
self.radiobuttonList.append(self.r2)
self.radiobuttonList.append(self.r3)
xcoord = int(xcoord)
xcoord += 42
def updateRadioButtons(self):
ChangeIcon(self.day_index, self.dayList)
# Gets the times for each individual button to display on GUI.
def getButtonTimes(self):
url4 = "http://api.openweathermap.org/data/2.5/forecast?id={}&appid={}&units=metric".format(cityid.get(), APIKEY)
res4 = requests.get(url4)
self.day_data = res4.json()
self.dayList = self.makeTimeSeperations()
if self.day_index == "1":
day_10_time.set(self.dayList[0][0])
try:
day_11_time.set(self.dayList[0][1])
except:
# Checks to see if there is no other time apart from one on that day. Other buttons are disabled because there are no other times.
if day_10_time.get() == "21:00":
day_11_time.set("")
self.r2.config(state ="disabled")
day_12_time.set("")
self.r3.config(state ="disabled")
try:
day_12_time.set(self.dayList[0][2])
except:
if day_11_time.get() == "21:00":
day_12_time.set("")
self.r3.config(state = "disabled")
# Sets times of all respective days.
if self.day_index == "2":
day_20_time.set(self.dayList[1][1])
day_21_time.set(self.dayList[1][4])
day_22_time.set(self.dayList[1][7])
if self.day_index == "3":
day_30_time.set(self.dayList[2][1])
day_31_time.set(self.dayList[2][4])
day_32_time.set(self.dayList[2][7])
if self.day_index == "4":
day_40_time.set(self.dayList[3][1])
day_41_time.set(self.dayList[3][4])
day_42_time.set(self.dayList[3][7])
if self.day_index == "5":
day_50_time.set(self.dayList[4][1])
day_51_time.set(self.dayList[4][4])
day_52_time.set(self.dayList[4][6])
    # Checks the times in the data and returns the index of midnight (00:00).
def checkTimes(self,indexstart,indexend):
for i in range(indexstart,indexend):
data = self.day_data["list"][i]["dt_txt"]
data = data[11:16]
if data == "00:00":
return i
    # Separates the 3-hour forecast times into lists for each day.
def makeTimeSeperations(self):
tempurl4 = "http://api.openweathermap.org/data/2.5/forecast?id={}&appid={}&units=metric".format(cityid.get(), APIKEY)
tempres4 = requests.get(tempurl4)
self.day_data = tempres4.json()
daysList = [[],[],[],[],[]]
dayindex = 0
for i in range(0,39):
dataJSON = self.day_data["list"][i]["dt_txt"]
if i != 0:
prevDataJSON = self.day_data["list"][(i-1)]["dt_txt"]
currentdata = dataJSON[0:11]
prevdata = prevDataJSON[0:11]
if currentdata == prevdata:
if dayindex != 5:
daysList[dayindex].append(dataJSON[11:16])
else:
dayindex += 1
if dayindex != 5:
daysList[dayindex].append(dataJSON[11:16])
else:
daysList[dayindex].append(dataJSON[11:16])
return daysList
# Gets the weather data from the API and sets the dynamic variables with that data
def acessDailyAPIData(index, city_id, day_index):
# Weather data API address
url4 = "http://api.openweathermap.org/data/2.5/forecast?id={}&appid={}&units=metric".format(city_id, APIKEY)
res4 = requests.get(url4)
day_data = res4.json()
day10_max = day_data["list"][index]["main"]["temp_max"]
day10_min = day_data["list"][index]["main"]["temp_min"]
day10_icon = day_data["list"][index]["weather"][0]["icon"]
if day_index == "1":
day_10_mint.set(str(int(day10_min))+"°")
day_10_maxt.set(str(int(day10_max))+"°")
day_10_icon.set(day10_icon)
elif day_index == "2":
day_20_mint.set(str(int(day10_min))+"°")
day_20_maxt.set(str(int(day10_max))+"°")
day_20_icon.set(day10_icon)
elif day_index == "3":
day_30_mint.set(str(int(day10_min))+"°")
day_30_maxt.set(str(int(day10_max))+"°")
day_30_icon.set(day10_icon)
elif day_index == "4":
day_40_mint.set(str(int(day10_min))+"°")
day_40_maxt.set(str(int(day10_max))+"°")
day_40_icon.set(day10_icon)
elif day_index == "5":
day_50_mint.set(str(int(day10_min))+"°")
day_50_maxt.set(str(int(day10_max))+"°")
day_50_icon.set(day10_icon)
else:
print("Error Getting Data")
# Displays the data on the GUI and sets other dynamic variables
def displayCurrentConditions():
# Gets the current date.
now = datetime.datetime.now()
daytime_string = now.strftime("%d/%m/%Y - %H:%M:%S")
weekday_list = listOfWeekdays()
savedLatLngData = open("savedLatLngData.txt", "r")
city_id = savedLatLngData.read()
url3 = "http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units=metric".format(city_id,APIKEY)
res3 = requests.get(url3)
data = res3.json()
cityid.set(city_id)
temp_data = data["main"]["temp"]
fltemp_data = data["main"]["feels_like"]
humidity_data = data["main"]["humidity"]
pressure_data = data["main"]["pressure"]
windspeed_data = data["wind"]["speed"]
main_data = data["weather"][0]["main"]
description_data = data["weather"][0]["description"]
icon_data = data["weather"][0]["icon"]
name_data = data["name"]
day_1.set(weekday_list[0])
day_2.set(weekday_list[1])
day_3.set(weekday_list[2])
day_4.set(weekday_list[3])
day_5.set(weekday_list[4])
city_name.set(name_data)
city_temp.set(str(int(temp_data))+"℃")
city_fltemp.set("Feels like "+str(int(fltemp_data))+"℃")
city_humidity.set(str(humidity_data)+"%")
msTomph = int(windspeed_data*2.23694)
city_windspeed.set(str(msTomph)+"mph")
city_main.set(main_data)
city_pressure.set(str(pressure_data)+"hPa")
city_description.set(description_data)
city_icon.set(str(icon_data))
last_refreshed.set("Last Updated: "+str(daytime_string))
file_directorystring = ("WeatherPics\{}.png").format(icon_data)
icon_image = PhotoImage(file = file_directorystring)
WIcon_image = Label(window, text="icon", image=icon_image, bd=0)
WIcon_image.image = icon_image
WIcon_image.place(x=100, y=170)
# Creates radio buttons for respective days
day1 = createDailyRadioButton(171, button1,"1",day_10_time,day_11_time,day_12_time)
day2 = createDailyRadioButton(304, button2,"2",day_20_time,day_21_time,day_22_time)
day3 = createDailyRadioButton(437, button3,"3",day_30_time,day_31_time,day_32_time)
day4 = createDailyRadioButton(570, button4,"4",day_40_time,day_41_time,day_42_time)
day5 = createDailyRadioButton(703, button5,"5",day_50_time,day_51_time,day_52_time)
# Calls update function for all radio buttons
day1.updateRadioButtons()
day2.updateRadioButtons()
day3.updateRadioButtons()
day4.updateRadioButtons()
day5.updateRadioButtons()
# Places aspects on GUI.
def getConditions():
displayCurrentConditions()
day1tag = Label(textvariable=day_1, bg="#FFFFFF", fg="#000000", font=("Verdana", 10), bd = 0)
day2tag = Label(textvariable=day_2, bg="#FFFFFF", fg="#000000", font=("Verdana", 10), bd = 0)
day3tag = Label(textvariable=day_3, bg="#FFFFFF", fg="#000000", font=("Verdana", 10), bd = 0)
day4tag = Label(textvariable=day_4, bg="#FFFFFF", fg="#000000", font=("Verdana", 10), bd = 0)
day5tag = Label(textvariable=day_5, bg="#FFFFFF", fg="#000000", font=("Verdana", 10), bd = 0)
showcasedTemp = Label(textvariable=city_temp, bg="#FFFFFF", fg="#000000", font=("Verdana", 30), bd = 0)
showcasedRefreshed = Label(textvariable=last_refreshed, bg="#FFFFFF", fg="#878787", font=("Verdana", 9), bd = 0)
showcasedMain = Label(textvariable=city_main, bg="#FFFFFF", fg="#000000", font=("Calibri", 20), bd = 0)
showcasedFLTemp = Label(textvariable=city_fltemp, bg="#FFFFFF", fg="#878787", font=("Verdana", 10), bd = 0)
showcasedName = Label(textvariable=city_name, bg="#FFFFFF", fg="#000000", font=("Calibri", 44, ), bd = 0)
showcasedHum = Label(textvariable=city_humidity, bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
showcasedWS = Label(textvariable=city_windspeed, bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
showcasedPress = Label(textvariable=city_pressure, bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
hum_tag = Label(text="Humidity", bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
ws_tag = Label(text="Wind speed", bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
press_tag = Label(text="Pressure", bg="#FFFFFF", fg="#878787", font=("Calibri", 16), bd = 0)
now_tag = Label(text="NOW", bg="#FFFFFF", fg="#000000", font=("Calibri", 12), bd = 0)
underlinedFont = tkinter.font.Font(now_tag, now_tag.cget("font"))
underlinedFont.configure(underline=True)
drop_image = PhotoImage(file = "guiImages\drop.png")
hum_image = Label(window, text="drop", image=drop_image, bd=0)
hum_image.image = drop_image
wind_image = PhotoImage(file = "guiImages\wind.png")
WS_image = Label(window, text="WS", image=wind_image, bd=0)
WS_image.image = wind_image
pressure_image = PhotoImage(file = "guiImages\pressure.png")
Press_image = Label(window, text="Pressure", image=pressure_image, bd=0)
Press_image.image = pressure_image
tooltip_image = PhotoImage(file = "guiImages\qmark.png")
toolTipIcon = Label(window, text="button 1", image=tooltip_image, bg="#FFFFFF")
toolTipIcon.image = tooltip_image
toolTipIcon.place(x=880, y=355)
toolTipIcon_ttp = createToolTip(toolTipIcon, "All times are based off\nthe UTC timezone.")
now_tag.configure(font=underlinedFont)
now_tag.place(x=80, y=70)
hum_tag.place(x=505, y= 165)
showcasedRefreshed.place(x=760,y=60)
ws_tag.place(x=505, y= 215)
press_tag.place(x=505,y=265)
showcasedName.place(x=80,y=80)
showcasedTemp.place(x=250,y=190)
showcasedPress.place(x=820, y=265 )
showcasedHum.place(x=860, y=165)
showcasedMain.place(x=80,y=280)
hum_image.place(x=473,y=170)
showcasedWS.place(x=840, y=215)
Press_image.place(x=473,y=265)
WS_image.place(x=473,y=215)
showcasedFLTemp.place(x=255,y=240)
day1tag.place(x=213, y=355)
day2tag.place(x=346, y=355)
day3tag.place(x=479, y=355)
day4tag.place(x=612, y=355)
day5tag.place(x=745, y=355)
# Creates master window (GUI)
window = Tk()
# Initialises GUI aspects.
border = Border(master=window)
quit_button = quitButton(master=window)
cloud_logo = Logo(master=window)
settings_button = settingButton(master=window)
refresh_button = refreshButton(master=window)
window.title("Weather App")
window.resizable(0,0)
savedLatLngData = open("savedLatLngData.txt", "r")
data = savedLatLngData.read()
# Current Dynamic Variables
cityid = StringVar()
city_name = StringVar()
city_temp = StringVar()
city_fltemp = StringVar()
city_humidity = StringVar()
city_windspeed = StringVar()
city_main = StringVar()
city_description = StringVar()
city_icon = StringVar()
city_pressure = StringVar()
last_refreshed = StringVar()
# Day 1 Variables
day_1 = StringVar()
day_10_mint = StringVar()
day_10_maxt = StringVar()
day_10_icon = StringVar()
day_10_time = StringVar()
day_11_time = StringVar()
day_12_time = StringVar()
# Day 2 Variables
day_2 = StringVar()
day_20_mint = StringVar()
day_20_maxt = StringVar()
day_20_icon = StringVar()
day_20_time = StringVar()
day_21_time = StringVar()
day_22_time = StringVar()
# Day 3 Variables
day_3 = StringVar()
day_30_mint = StringVar()
day_30_maxt = StringVar()
day_30_icon = StringVar()
day_30_time = StringVar()
day_31_time = StringVar()
day_32_time = StringVar()
# Day 4 Variables
day_4 = StringVar()
day_40_mint = StringVar()
day_40_maxt = StringVar()
day_40_icon = StringVar()
day_40_time = StringVar()
day_41_time = StringVar()
day_42_time = StringVar()
# Day 5 Variables
day_5 = StringVar()
day_50_mint = StringVar()
day_50_maxt = StringVar()
day_50_icon = StringVar()
day_50_time = StringVar()
day_51_time = StringVar()
day_52_time = StringVar()
button1 = StringVar(window, 0)
button2 = StringVar(window, 1)
button3 = StringVar(window, 1)
button4 = StringVar(window, 1)
button5 = StringVar(window, 1)
if len(data) == 0:
print("No Data INSIDE")
else:
getConditions()
print("Data INSIDE")
window.mainloop()
```
#### File: JM1F/Weather-App/REST API module.py
```python
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
import json
# REST API sends and receives data, currently set up for localhost.
app = Flask(__name__)
cors = CORS(app)
messages = {}
@app.route('/', methods=['GET', "PUT", "POST"])
def hello():
# POST request
if request.method == 'POST':
try:
print('Incoming...')
Long = request.get_json("long")
messages["coords"] = Long
print(messages)
return jsonify(messages)
except:
print("An Error Occured...")
# GET request
if request.method == "GET":
try:
return jsonify(messages)
except:
print("An Error Occured...")
# PUT request
if request.method == "PUT":
try:
Long = request.get_json("long")
messages["coords"] = Long
print(messages)
return jsonify(messages)
except:
print("An Error Occured...")
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000)
``` |
{
"source": "jm2826/Item-Catalog",
"score": 3
} |
#### File: jm2826/Item-Catalog/database_setup.py
```python
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from datetime import datetime
from sqlalchemy.types import TIMESTAMP
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(250))
username = Column(String(32), index=True)
password_hash = Column(String(64))
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
@property
def serialize(self):
# Returns object data in easily serializable format
return {
'name': self.name,
'password_hash': self.password_hash,
'id': self.id,
}
class Catagory(Base):
__tablename__ = 'catagory'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
date = Column(TIMESTAMP, default=datetime.utcnow, nullable=False)
# user_id = Column(Integer, ForeignKey(user.id))
# user = Relationship(User)
@property
def serialize(self):
# Returns object data in easily serializable format
return {
'name': self.name,
'id': self.id,
}
class Item(Base):
__tablename__ = 'item'
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False)
description = Column(String(250))
catagory_id = Column(Integer, ForeignKey('catagory.id'))
catagory = relationship(Catagory)
date = Column(TIMESTAMP, default=datetime.utcnow, nullable=False)
# user_id = Column(Integer, ForeignKey(user.id))
# user = Relationship(User)
@property
def serialize(self):
# Returns object data in easily serializable format
return {
'name': self.name,
'description': self.description,
'id': self.id,
}
engine = create_engine('sqlite:///catalogproject.db')
Base.metadata.create_all(engine)
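# Minimal usage sketch (not part of the original module): once the tables exist,
# rows can be added through a session bound to the same engine. The category and
# item names below are illustrative only.
#
#   from sqlalchemy.orm import sessionmaker
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   sports = Catagory(name='Sports')
#   session.add(sports)
#   session.add(Item(name='Ball', description='A round ball', catagory=sports))
#   session.commit()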
```
#### File: jm2826/Item-Catalog/projectflask.py
```python
from flask import (Flask,
render_template,
request, redirect,
jsonify,
url_for,
flash)
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
from functools import wraps
# import CRUD Operations from Lesson 1 ##
from database_setup import Base, Catagory, Item
from sqlalchemy import create_engine, desc
from sqlalchemy.orm import sessionmaker
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
# Connect to Database and create database session
engine = create_engine('sqlite:///catalogproject.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token. Login button shows for Google+ login
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user\
is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['picture'] = data['picture']
login_session['username'] = data['email']
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;\
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# Disconnect - Revoke current user token and reset login_session.
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
    print 'In gdisconnect access token is %s' % access_token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is'
print result
if result['status'] == '200':
# Reset the user's session.
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
#del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        # For whatever reason, the given token was invalid.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        return response
# Making an API Endpoint (GET Request)
@app.route('/catagories/<int:catagory_id>/JSON')
def catagoryItemsJSON(catagory_id):
catagory = session.query(Catagory).filter_by(id=catagory_id).all()
items = session.query(Item).filter_by(catagory_id=catagory_id).all()
return jsonify(Items=[i.serialize for i in items])
def login_required(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Allow access only when the user is logged in
        if 'username' in login_session:
            return f(*args, **kwargs)
        else:
            flash("You are not allowed to access that page")
            return redirect('/login')
    return decorated_function
# Decorator to call function if URL used
@app.route('/')
@app.route('/catagories/')
def catagoryList():
catagories = session.query(Catagory).all()
return render_template('homepage.html', catagories=catagories)
# Decorator for routing to All Items in Catagory
@app.route('/catagories/<int:catagory_id>/')
def catagoryItems(catagory_id):
catagory = session.query(Catagory).filter_by(id=catagory_id).all()
items = session.query(Item).filter_by(catagory_id=catagory_id).all()
return render_template('catalogitems.html', items=items,
catagory=catagory, catagory_id=catagory_id)
# Decorator for routing individual Item Descriptions
@app.route('/catagories/<int:catagory_id>/<int:item_id>/<string:description>')
def itemDescription(catagory_id, item_id, description):
items = session.query(Item).filter_by(catagory_id=catagory_id,
id=item_id)
return render_template('catalog.html', items=items)
# Add new catagoryItem function
@app.route('/catagories/<int:catagory_id>/new/', methods=['GET', 'POST'])
def newCatagoryItem(catagory_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newItem = Item(name=request.form['name'],
description=request.form['description'],
catagory_id=catagory_id)
session.add(newItem)
session.commit()
flash("New Item Successfuly Created !")
return redirect(url_for('catagoryItems', catagory_id=catagory_id))
else:
return render_template('newitem.html', catagory_id=catagory_id)
# Update catagoryItem function
@app.route('/catagories/<int:catagory_id>/<int:item_id>/update/',
methods=['GET', 'POST'])
def updateCatagoryItem(catagory_id, item_id):
if 'username' not in login_session:
return redirect('/login')
updatedItem = session.query(Item).filter_by(catagory_id=catagory_id,
id=item_id).one()
    if request.method == 'POST':
        # Update the fetched item in place rather than inserting a new row
        updatedItem.name = request.form['name']
        updatedItem.description = request.form['description']
        session.add(updatedItem)
        session.commit()
        flash("Item Successfully Updated!")
return redirect(url_for('catagoryItems',
catagory_id=catagory_id))
else:
return render_template('updateitem.html',
catagory_id=catagory_id,
i=updatedItem)
# Delete catagoryItem function
@app.route('/catagories/<int:catagory_id>/<int:item_id>/delete/',
methods=['GET', 'POST'])
def deleteCatagoryItem(catagory_id, item_id):
if 'username' not in login_session:
return redirect('/login')
deleteItem = session.query(Item).filter_by(catagory_id=catagory_id,
id=item_id).one()
if request.method == 'POST':
session.delete(deleteItem)
session.commit()
flash("New Item Successfuly Deleted !")
return redirect(url_for('catagoryItems', catagory_id=catagory_id))
else:
return (render_template('deleteitem.html',
removename=deleteItem.name, id=deleteItem.id,
catagory_id=deleteItem.catagory_id))
# Run Server in Debug Mode
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=8000, threaded=False)
``` |
{
"source": "jm33-m0/img2location",
"score": 2
} |
#### File: img2location/cbir/edge.py
```python
from __future__ import print_function
from math import sqrt
import numpy as np
import scipy.misc
from cbir.DB import Database
from cbir.evaluate import evaluate_class
stride = (1, 1)
n_slice = 10
h_type = 'region'
d_type = 'cosine'
depth = 5
''' MMAP
depth
depthNone, region-stride(1, 1)-n_slice10,co, MMAP 0.101670982288
depth100, region-stride(1, 1)-n_slice10,co, MMAP 0.207817305128
depth30, region-stride(1, 1)-n_slice10,co, MMAP 0.291715090839
depth10, region-stride(1, 1)-n_slice10,co, MMAP 0.353722379063
depth5, region-stride(1, 1)-n_slice10,co, MMAP 0.367119444444
depth3, region-stride(1, 1)-n_slice10,co, MMAP 0.3585
depth1, region-stride(1, 1)-n_slice10,co, MMAP 0.302
(exps below use depth=None)
d_type
global-stride(2, 2),d1, MMAP 0.0530993236031
global-stride(2, 2),co, MMAP 0.0528310744618
stride
region-stride(2, 2)-n_slice4,d1, MMAP 0.0736245142237
region-stride(1, 1)-n_slice4,d1, MMAP 0.0704206226545
n_slice
region-stride(1, 1)-n_slice10,co, MMAP 0.101670982288
region-stride(1, 1)-n_slice6,co, MMAP 0.0977736743859
h_type
global-stride(2, 2),d1, MMAP 0.0530993236031
region-stride(2, 2)-n_slice4,d1, MMAP 0.0736245142237
'''
edge_kernels = np.array([
[
# vertical
[1, -1],
[1, -1]
],
[
# horizontal
[1, 1],
[-1, -1]
],
[
# 45 diagonal
[sqrt(2), 0],
[0, -sqrt(2)]
],
[
        # 135 diagonal
[0, sqrt(2)],
[-sqrt(2), 0]
],
[
# non-directional
[2, -2],
[-2, 2]
]
])
class Edge:
def histogram(self, input, stride=(2, 2), type=h_type, n_slice=n_slice, normalize=True):
''' count img histogram
arguments
input : a path to a image or a numpy.ndarray
stride : stride of edge kernel
type : 'global' means count the histogram for whole image
                 'region' means count the histogram for regions in the image, then concatenate all of them
        n_slice : only used when type is 'region'; height & width will be equally sliced into n_slice slices
normalize: normalize output histogram
return
type == 'global'
a numpy array with size len(edge_kernels)
type == 'region'
a numpy array with size len(edge_kernels) * n_slice * n_slice
'''
        if isinstance(input, np.ndarray):  # examine input type
img = input.copy()
else:
img = scipy.misc.imread(input, mode='RGB')
height, width, _ = img.shape
if type == 'global':
hist = self._conv(img, stride=stride, kernels=edge_kernels)
elif type == 'region':
hist = np.zeros((n_slice, n_slice, edge_kernels.shape[0]))
            h_slice = np.around(np.linspace(
                0, height, n_slice+1, endpoint=True)).astype(int)
            w_slice = np.around(np.linspace(
                0, width, n_slice+1, endpoint=True)).astype(int)
            for hs in range(len(h_slice)-1):
                for ws in range(len(w_slice)-1):
                    img_r = img[h_slice[hs]:h_slice[hs+1], w_slice[ws]:w_slice[ws+1]]  # slice img to regions
hist[hs][ws] = self._conv(
img_r, stride=stride, kernels=edge_kernels)
if normalize:
hist /= np.sum(hist)
return hist.flatten()
def _conv(self, img, stride, kernels, normalize=True):
H, W, C = img.shape
conv_kernels = np.expand_dims(kernels, axis=3)
conv_kernels = np.tile(conv_kernels, (1, 1, 1, C))
assert list(conv_kernels.shape) == list(
kernels.shape) + [C] # check kernels size
sh, sw = stride
kn, kh, kw, _ = conv_kernels.shape
hh = int((H - kh) / sh + 1)
ww = int((W - kw) / sw + 1)
hist = np.zeros(kn)
for idx, k in enumerate(conv_kernels):
for h in range(hh):
hs = int(h*sh)
he = int(h*sh + kh)
for w in range(ww):
ws = w*sw
we = w*sw + kw
# element-wise product
hist[idx] += np.sum(img[hs:he, ws:we] * k)
if normalize:
hist /= np.sum(hist)
return hist
def make_samples(self, db, verbose=True):
if h_type == 'global':
sample_cache = "edge-{}-stride{}".format(h_type, stride)
elif h_type == 'region':
sample_cache = "edge-{}-stride{}-n_slice{}".format(
h_type, stride, n_slice)
if verbose:
print("Counting histogram... distance=%s, depth=%s" %
(d_type, depth))
samples = []
data = db.get_data()
for d in data.itertuples():
d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
samples.append({
'img': d_img,
'cls': d_cls,
'hist': d_hist
})
return samples
def main():
db = Database("database", "database.csv")
# check shape
assert edge_kernels.shape == (5, 2, 2)
# evaluate database
APs = evaluate_class(db, f_class=Edge, d_type=d_type, depth=depth)
cls_MAPs = []
for cls, cls_APs in APs.items():
MAP = np.mean(cls_APs)
print("Class {}, MAP {}".format(cls, MAP))
cls_MAPs.append(MAP)
print("MMAP", np.mean(cls_MAPs))
```
#### File: img2location/cbir/vggnet.py
```python
from __future__ import print_function
import numpy as np
import scipy.misc
import torch
import torch.nn as nn
from torchvision.models.vgg import VGG
from torchvision import models  # required by the pretrained state_dict lookup in VGGNet.__init__
from cbir.DB import Database
from cbir.evaluate import evaluate_class
'''
downloading problem in mac OSX should refer to this answer:
https://stackoverflow.com/a/42334357
'''
# configs for histogram
VGG_model = 'vgg19' # model type
pick_layer = 'avg' # extract feature of this layer
d_type = 'd1' # distance type
depth = 1  # retrieval depth; set to None to count the AP over the whole database
''' MMAP
depth
depthNone, vgg19,avg,d1, MMAP 0.688624709114
depth100, vgg19,avg,d1, MMAP 0.754443491363
depth30, vgg19,avg,d1, MMAP 0.838298388513
depth10, vgg19,avg,d1, MMAP 0.913892057193
depth5, vgg19,avg,d1, MMAP 0.936158333333
depth3, vgg19,avg,d1, MMAP 0.941666666667
depth1, vgg19,avg,d1, MMAP 0.934
(exps below use depth=None)
vgg19,fc1,d1, MMAP 0.245548035893 (w/o subtract mean)
vgg19,fc1,d1, MMAP 0.332583126964
vgg19,fc1,co, MMAP 0.333836506148
vgg19,fc2,d1, MMAP 0.294452201395
vgg19,fc2,co, MMAP 0.297209571796
vgg19,avg,d1, MMAP 0.688624709114
vgg19,avg,co, MMAP 0.674217021273
'''
use_gpu = torch.cuda.is_available()
means = np.array([103.939, 116.779, 123.68]) / 255.
# mean of three channels in the order of BGR
class VGGNet(VGG):
def __init__(self,
pretrained=True,
model='vgg16',
requires_grad=False,
remove_fc=False,
show_params=False):
super().__init__(make_layers(cfg[model]))
self.ranges = ranges[model]
self.fc_ranges = ((0, 2), (2, 5), (5, 7))
if pretrained:
exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
if not requires_grad:
for param in super().parameters():
param.requires_grad = False
if remove_fc: # delete redundant fully-connected layer params, can save memory
del self.classifier
if show_params:
for name, param in self.named_parameters():
print(name, param.size())
def forward(self, x):
output = {}
x = self.features(x)
avg_pool = torch.nn.AvgPool2d((x.size(-2), x.size(-1)), stride=(
x.size(-2), x.size(-1)), padding=0, ceil_mode=False, count_include_pad=True)
avg = avg_pool(x) # avg.size = N * 512 * 1 * 1
avg = avg.view(avg.size(0), -1) # avg.size = N * 512
output['avg'] = avg
x = x.view(x.size(0), -1) # flatten()
dims = x.size(1)
if dims >= 25088:
x = x[:, :25088]
for idx in range(len(self.fc_ranges)):
for layer in range(self.fc_ranges[idx][0], self.fc_ranges[idx][1]):
x = self.classifier[layer](x)
output["fc%d" % (idx+1)] = x
else:
w = self.classifier[0].weight[:, :dims]
b = self.classifier[0].bias
x = torch.matmul(x, w.t()) + b
x = self.classifier[1](x)
output["fc1"] = x
for idx in range(1, len(self.fc_ranges)):
for layer in range(self.fc_ranges[idx][0], self.fc_ranges[idx][1]):
x = self.classifier[layer](x)
output["fc%d" % (idx+1)] = x
return output
ranges = {
'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
}
# cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
cfg = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256,
'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M',
512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(config, batch_norm=False):
layers = []
in_channels = 3
for v in config:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
class VGGNetFeat:
def make_samples(self, db, verbose=True):
if verbose:
print("Counting histogram... distance=%s, depth=%s" % (
d_type, depth))
vgg_model = VGGNet(requires_grad=False, model=VGG_model)
vgg_model.eval()
if use_gpu:
vgg_model = vgg_model.cuda()
samples = []
data = db.get_data()
for d in data.itertuples():
d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
img = scipy.misc.imread(d_img, mode="RGB")
img = img[:, :, ::-1] # switch to BGR
img = np.transpose(img, (2, 0, 1)) / 255.
img[0] -= means[0] # reduce B's mean
img[1] -= means[1] # reduce G's mean
img[2] -= means[2] # reduce R's mean
img = np.expand_dims(img, axis=0)
try:
if use_gpu:
inputs = torch.autograd.Variable(
torch.from_numpy(img).cuda().float())
else:
inputs = torch.autograd.Variable(
torch.from_numpy(img).float())
d_hist = vgg_model(inputs)[pick_layer]
d_hist = np.sum(d_hist.data.cpu().numpy(), axis=0)
d_hist /= np.sum(d_hist) # normalize
samples.append({
'img': d_img,
'cls': d_cls,
'hist': d_hist
})
except BaseException:
pass
return samples
def main():
# evaluate database
DB = Database("database", "database.csv")
APs = evaluate_class(DB, f_class=VGGNetFeat, d_type=d_type, depth=depth)
cls_MAPs = []
for cls, cls_APs in APs.items():
MAP = np.mean(cls_APs)
print("Class {}, MAP {}".format(cls, MAP))
cls_MAPs.append(MAP)
print("MMAP", np.mean(cls_MAPs))
``` |
{
"source": "JM3ad/socketry-backend",
"score": 2
} |
#### File: socketry-backend/app/app.py
```python
from flask import Flask
from flask_socketio import SocketIO, emit
def create_app():
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/healthcheck')
def healthcheck():
return 'Healthy'
return app
def create_socketio(app):
socketio = SocketIO(app, cors_allowed_origins="*")
@socketio.on('message')
def handle_message(data):
print('received message: ' + data)
emit('response', data)
@socketio.on('connect')
def test_connect():
emit('seen', {'message': 'connected'})
return socketio
app = create_app()
socketio = create_socketio(app)
if __name__ == '__main__':
socketio.run(app, host="0.0.0.0")
``` |
{
"source": "jm404/python-elasticsearch",
"score": 3
} |
#### File: python-elasticsearch/importers/csvtoes.py
```python
from datetime import datetime
from elasticsearch import Elasticsearch,helpers
import csv
import json
def actions_generator(elastic_instance,index_name,bulk_function, data):
def generator():
size = len(data)
for i in range(size):
yield{
'_index': index_name,
'_id' : i,
'_type' : 'document',
'_source' : (data[i]),
}
for ok, info in bulk_function(elastic_instance,generator()):
if not ok:
print ("Error parsing", info)
def rows_formatter(path):
with open (path) as f:
dict_reader = csv.DictReader(f)
rows = list(dict_reader)
return rows
def importer(elastic_instance,index_name,path,processing_method):
if (processing_method == "parallel"):
actions_generator(elastic_instance,index_name,helpers.parallel_bulk,rows_formatter(path))
elif (processing_method == "single"):
actions_generator(elastic_instance,index_name,helpers.bulk,rows_formatter(path))
else:
print ("Unknown bulk method")
``` |
{
"source": "jm4444/bounce_game",
"score": 3
} |
#### File: jm4444/bounce_game/bounce_game.py
```python
import sys, pygame as game, get_image_size as image, random
from functions import *
from menu_screen import game_mode
game.init()
graphics_path = "graphics/"
fps_clock = game.time.Clock()
winner = None
# ~ ~ ~ ~ CLASSES ~ ~ ~ ~ ~#
class Board:
def __init__(self, image_file):
self.file_name = image_file
self.background = load_image(self.file_name)
self.size = width, height = image.get_image_size(graphics_path + self.file_name)
self.screen = game.display.set_mode(self.size)
def display(self):
self.screen.blit(self.background, (0, 0))
class Image:
def __init__(self, image_file):
self.file_name = image_file
self.image = load_image(self.file_name)
self.rectangle = self.image.get_rect()
self.rectangle.centerx = board.size[0] / 2
self.rectangle.centery = board.size[1] / 2
def display(self):
board.screen.blit(self.image, self.rectangle)
class Score:
def __init__(self, side_of_screen):
self.side_of_screen = side_of_screen
self.score_count = 0
self.change_score()
def change_score(self):
self.current_score = load_image(str(self.score_count) + ".png")
self.rectangle = self.current_score.get_rect()
self.display()
def add_point(self):
self.score_count += 1
self.change_score()
if self.score_count == 7:
global winner
if self.side_of_screen == "left":
winner = "player one"
elif self.side_of_screen == "right":
winner = "player two"
def display(self):
screen_quarter = board.size[0] / 4
self.rectangle.centery = 75
if self.side_of_screen == "left":
self.rectangle.centerx = screen_quarter
elif self.side_of_screen == "right":
self.rectangle.centerx = screen_quarter * 3
board.screen.blit(self.current_score, self.rectangle)
class Paddle:
def __init__(self, image_file, side_of_screen):
self.file_name = image_file
self.paddle = load_image(self.file_name)
self.rectangle = self.paddle.get_rect()
if side_of_screen == "left":
self.rectangle.centerx = 30
self.up = game.K_w
self.down = game.K_s
elif side_of_screen == "right":
self.rectangle.centerx = board.size[0] - 30
self.up = game.K_UP
self.down = game.K_DOWN
self.rectangle.centery = board.size[1] / 2
self.speed = 12
def display(self):
board.screen.blit(self.paddle, self.rectangle)
def move(self, key_input):
if key_input[self.up] and self.rectangle.top >= 0:
self.rectangle.centery -= self.speed
elif key_input[self.down] and self.rectangle.bottom <= board.size[1]:
self.rectangle.centery += self.speed
def reset_position(self):
self.rectangle.centery = board.size[1] / 2
class ArtificialPaddle(Paddle):
def __init__(self, image_file, side_of_screen):
super().__init__(image_file, side_of_screen)
self.speed = 10
def move(self):
if ball.start_moving == True:
if ball.speed[0] > 0 and ball.rectangle.centerx > board.size[0] / 3 * 2:
if self.rectangle.centery < ball.rectangle.centery and self.rectangle.bottom <= board.size[1]: # moves the paddle down, towards the ball
self.rectangle.centery += self.speed
elif self.rectangle.centery > ball.rectangle.centery and self.rectangle.top >= 0: # moves the paddle up, towards the ball
self.rectangle.centery -= self.speed
class Ball:
def __init__(self, image_file):
self.file_name = image_file
self.ball = load_image(self.file_name)
self.rectangle = self.ball.get_rect()
self.default_position()
self.start_moving = False
self.speed = [7.5, 11]
self.randomize_speed()
def display(self):
board.screen.blit(self.ball, self.rectangle)
def move(self):
if self.start_moving == True:
self.rectangle = self.rectangle.move(self.speed)
if self.rectangle.right < 0: # going off left of screen
right_score.add_point()
reset_positions(moving_objects, objects)
elif self.rectangle.left > board.size[0]: # going off right of screen
left_score.add_point()
reset_positions(moving_objects, objects)
if self.rectangle.top < 0 or self.rectangle.bottom > board.size[1]: # bouncing off top or bottom of screen
self.speed[1] = -self.speed[1]
if self.rectangle.colliderect(left_paddle.rectangle) and self.rectangle.left >= left_paddle.rectangle.centerx: # bouncing off of the left paddle
self.speed[0] = -self.speed[0]
if self.rectangle.colliderect(right_paddle.rectangle) and self.rectangle.right <=right_paddle.rectangle.centerx: # bouncing off of the right paddle
self.speed[0] = -self.speed[0]
def reset_position(self):
self.rectangle.move([0, 0])
ball.start_moving = False
self.randomize_speed()
self.default_position()
def randomize_speed(self):
randomizer = (-1)**random.randrange(2) # generates a 1 or -1
self.speed[0] *= randomizer
self.speed[1] *= randomizer
def default_position(self):
self.rectangle.centerx = board.size[0] / 2
self.rectangle.centery = board.size[1] / 2
# ~ ~ ~ ~ SETTING THE BOARD ~ ~ ~ ~ ~#
board = Board("background.png")
left_score = Score("left")
right_score = Score("right")
ball = Ball("ball.png")
left_paddle = Paddle("paddle.png", "left")
if game_mode == "single player":
right_paddle = ArtificialPaddle("paddle.png", "right")
elif game_mode == "two player":
right_paddle = Paddle("paddle.png", "right")
objects = [board, left_score, right_score, ball, left_paddle, right_paddle]
moving_objects = [ball, left_paddle, right_paddle]
update_display(objects)
# ~ ~ ~ ~ RUNNING THE GAME ~ ~ ~ ~ ~#
while True:
#// Check for Specific Events
for event in game.event.get():
if event.type == game.QUIT: # allows the player to exit the game by clicking the exit 'X' on the window
game.quit()
raise SystemExit
#// Variables for Running the Game
fps_clock.tick(40) # sets the frame rate
game.event.pump()
key_input = game.key.get_pressed()
update_display(objects)
game.display.update()
#// Moving the left paddle
if key_input[left_paddle.up] or key_input[left_paddle.down]:
left_paddle.move(key_input)
ball.start_moving = True
if game_mode == "single player":
right_paddle.move()
elif game_mode == "two player":
if key_input[right_paddle.up] or key_input[right_paddle.down]:
right_paddle.move(key_input)
ball.start_moving = True
ball.move()
if winner != None:
break
# ~ ~ ~ ~ ENDING THE GAME ~ ~ ~ ~ ~#
winner_image = Image(winner + " wins.png")
objects.append(winner_image)
update_display(objects)
game.display.update()
input()
```
#### File: jm4444/bounce_game/menu_screen.py
```python
import pygame as game, get_image_size as image
from pygame.locals import *
from functions import *
game.init()
graphics_path = "graphics/"
fps_clock = game.time.Clock()
game_mode = None
# ~ ~ ~ ~ CLASSES ~ ~ ~ ~ ~#
class Board:
def __init__(self, image_file):
self.file_name = image_file
self.background = load_image(self.file_name)
self.size = width, height = image.get_image_size(graphics_path + self.file_name)
self.screen = game.display.set_mode(self.size)
def display(self):
self.screen.blit(self.background, (0, 0))
class Button:
def __init__(self, image_file, position):
self.is_highlighted = False
#// Naming attributes
self.file_name = image_file + ".png"
self.highlighted_name = image_file + " highlight.png"
self.explanation_name = image_file + " explanation.png"
#// Loading attributes
self.button = load_image(self.file_name)
self.highlighted = load_image(self.highlighted_name)
self.explanation = load_image(self.explanation_name)
#// Positioning attributes
self.rectangle = self.button.get_rect()
self.highlighted_rectangle = self.highlighted.get_rect()
self.explanation_rectangle = self.explanation.get_rect()
self.rectangle.left = 106
self.highlighted_rectangle.left = 106
self.explanation_rectangle.top = 349
self.explanation_rectangle.left = 122
if position == "top":
self.rectangle.top = 199
self.highlighted_rectangle.top = 199
elif position == "bottom":
self.rectangle.top = 274
self.highlighted_rectangle.top = 274
def display(self):
if self.is_highlighted == False:
board.screen.blit(self.button, self.rectangle)
elif self.is_highlighted == True:
board.screen.blit(self.highlighted, self.highlighted_rectangle)
board.screen.blit(self.explanation, self.explanation_rectangle)
# ~ ~ ~ ~ SETTING THE MENU ~ ~ ~ ~ ~#
board = Board("menu.png")
single_player_button = Button("single player", "top")
two_player_button = Button("two player", "bottom")
objects = [board, single_player_button, two_player_button]
update_display(objects)
# ~ ~ ~ ~ RUNNING THE MENU ~ ~ ~ ~ ~#
while True:
#// Check for Specific Events
for event in game.event.get():
if event.type == game.QUIT: # allows the player to exit the game by clicking the exit 'X' on the window
game.quit()
raise SystemExit
#// Variables for Running the Game
fps_clock.tick(60) # sets the frame rate at 60fps
game.event.pump()
update_display(objects)
game.display.update()
if single_player_button.rectangle.collidepoint(game.mouse.get_pos()) or single_player_button.highlighted_rectangle.collidepoint(game.mouse.get_pos()):
single_player_button.is_highlighted = True
for event in game.event.get():
if event.type == MOUSEBUTTONDOWN:
game_mode = "single player"
break
else:
single_player_button.is_highlighted = False
if two_player_button.rectangle.collidepoint(game.mouse.get_pos()) or two_player_button.highlighted_rectangle.collidepoint(game.mouse.get_pos()):
two_player_button.is_highlighted = True
for event in game.event.get():
if event.type == MOUSEBUTTONDOWN:
game_mode = "two player"
break
else:
two_player_button.is_highlighted = False
if game_mode != None:
break
# ~ ~ ~ ~ EXITING THE MENU ~ ~ ~ ~ ~#
game.quit()
``` |
{
"source": "jm4474/FOMCTextAnalysis",
"score": 3
} |
#### File: lea_code/scripts/fix_dates.py
```python
import os
import pandas as pd
def main():
data_file_name = "../data/derived_data.csv"
dirs = ["../data/newsarticles_raw_text/preprocessed/"] #"../data/transcript_raw_text/", "../data/minutes_raw_text/", "../data/statement_raw_text/", "../data/bluebook_raw_text/", "../data/agenda_raw_text/",
flists = [[w.split('.')[0] for w in os.listdir(d)] for d in dirs]
datelist = [w.split('_')[0] for d in dirs for w in os.listdir(d)] # for newstext
print(flists)
start_year = 1965
end_year = 2013
df = pd.read_csv(data_file_name)
files = []
done = {}
for index, row in df.iterrows():
startyear = row['start_date']
endyear = row['end_date']
if startyear != endyear and startyear not in done:
for idx, flist in enumerate(flists):
for fname in flist:
if fname.startswith(endyear):
if not startyear in datelist:
#if endyear in flist:
#if not startyear in flist:
print("renaming {} in {} -- {}".format(endyear, dirs[idx], fname))
os.rename("{}{}_{}.txt".format(dirs[idx],endyear,fname.split("_")[1]), "{}{}_{}.txt".format(dirs[idx],startyear,fname.split("_")[1])) # for newstext
#os.rename("{}preprocessed/{}.txt".format(dirs[idx],endyear), "{}preprocessed/{}.txt".format(dirs[idx],startyear))
done[startyear]=1
if __name__ == "__main__":
main()
```
#### File: lea_code/scripts/scan_input.py
```python
import os
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
def make_scan_input(ftype):
file_path = "../data/{}_raw_text/preprocessed/".format(ftype)
outfile = "../data/scan_input/{}.txt".format(ftype)
with open(outfile, 'w+') as of:
for ff in sorted(os.listdir(file_path)):
if ff.endswith(".txt"):
print("processing {}".format(ff))
sentences = open(os.path.join(file_path, ff)).read().split("\n\n")
year = ff.split('-')[0]
for sentence in sentences:
words = [w.split('\t')[1].lower() for w in sentence.split('\n') if w.split('\t')[6] == "False" and w.split('\t')[2] != "PUNCT"]
if len(words) > 5:
of.write("{}\t{}\n".format(year, ' '.join(words)))
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
#use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
# word index and corresponding tf-idf score
for idx, score in sorted_items:
#keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
#create a tuples of feature,score
#results = zip(feature_vals,score_vals)
results= {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]]=score_vals[idx]
return results
def word_counts(ftype):
docs = {}
docfile = open("../data/scan_input/{}.txt".format(ftype), 'r')
for line in docfile.readlines():
year=line.strip().split("\t")[0]
txt=line.strip().split("\t")[1]
        if year not in docs:
            docs[year] = []
        docs[year].append(txt)
counts = CountVectorizer()
global_counts = counts.fit_transform([' '.join(doc) for doc in docs.values()])
for target in list(docs.keys()):
train_docs = [' '.join(tdocs) for time, tdocs in docs.items() if time != target]
test_doc = ' '.join(docs[target])
tfidf = TfidfVectorizer()
tfidf.fit_transform(train_docs)
feature_names=tfidf.get_feature_names()
test_tfidf=tfidf.transform([test_doc])
sorted_items=sort_coo(test_tfidf.tocoo())
keywords=extract_topn_from_vector(feature_names,sorted_items,50)
print(target, keywords)
if __name__ == "__main__":
counts = make_scan_input("statement")
word_counts("statement")
```
#### File: python/scripts/get_alternativedata.py
```python
import pandas as pd
import numpy as np
import os
import re
def map_treatment(x):
if x == 0:
treat = "U"
elif x> 0:
treat = "T"
else:
treat = "E"
return treat
def build_altmenu(blue_df,bb_missing_df):
blue_df['start_date'] = pd.to_datetime(blue_df['start_date'])
# Select columns
selectcols = ['start_date'] + [f'C_TREATMENT_alt_{alt}' for alt in ['a','b','c','d','e']] + [f'justify_alt_{alt}' for alt in ['a','b','c','d','e']] + [f'C_TREATMENT_SIZE_alt_{alt}' for alt in ['a','b','c','d','e']]
blue_df = blue_df[selectcols]
# Reshape long
longalt_df = pd.wide_to_long(blue_df, ['C_TREATMENT_alt','C_TREATMENT_SIZE_alt', 'justify_alt' ], i='start_date', j="alternative", sep="_", suffix='\w+')
longalt_df =longalt_df.reset_index()
# Restrict time period
longalt_df = longalt_df[(longalt_df['start_date']>="1988-01-01") & (longalt_df['start_date']<="2008-12-31")]
longalt_df = longalt_df[(longalt_df['start_date']<"2001-03-20") | (longalt_df['start_date']>"2004-01-29")]
longalt_df = longalt_df.sort_values(by=["start_date","alternative"])
# Cleaning
longalt_df.drop(columns='justify_alt',inplace=True)
longalt_df.rename(columns={"C_TREATMENT_alt":"treatment",'C_TREATMENT_SIZE_alt':"size"},inplace=True)
# Fill-in the missing corpora for time 3/20/01 - 1/27/04
bb_missing_df['start_date'] = pd.to_datetime(bb_missing_df['date'])
bb_missing_df.drop(columns=["date",'alt_num'],inplace=True)
bb_missing_df.rename(columns={'alt':'alternative', 'change':"size"},inplace=True)
all_df = pd.concat([bb_missing_df,longalt_df],join="outer",axis=0,ignore_index=True)
all_df = all_df[['start_date','alternative','size']]
all_df = all_df.sort_values(by=["start_date","alternative"])
all_df = all_df[~all_df['size'].isna()]
all_df = all_df[all_df['size']!="N"]
all_df.reset_index(drop=True,inplace=True)
# Assign treatment
all_df['treatment'] = all_df['size'].map(map_treatment)
return all_df
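# Sketch of the reshape used above (illustrative only, not part of the original script):
# pd.wide_to_long turns the per-alternative columns into rows keyed by (start_date, alternative).
#
#   wide:  start_date | C_TREATMENT_alt_a | C_TREATMENT_alt_b | ...
#   long:  (start_date, alternative='a') -> C_TREATMENT_alt
#          (start_date, alternative='b') -> C_TREATMENT_alt
#
#   pd.wide_to_long(df, ['C_TREATMENT_alt'], i='start_date', j='alternative',
#                   sep='_', suffix='\w+')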
def build_alttext(blue_df,bb_missing_df):
blue_df = pd.read_excel("../data/bluebook_manual_data_online_WORKING.xlsx")
bb_missing_df=pd.read_excel("../data/bluebook_missingalternatives.xlsx")
blue_df['start_date'] = pd.to_datetime(blue_df['start_date'])
# Select columns
selectcols = ['start_date'] + [f'Sentences_alt_{alt}' for alt in ['a','b','c','d','e']] + [f'C_TREATMENT_SIZE_alt_{alt}' for alt in ['a','b','c','d','e']]
blue_df = blue_df[selectcols]
# Reshape long
longalt_df = pd.wide_to_long(blue_df, ['Sentences_alt',"C_TREATMENT_SIZE_alt"], i='start_date', j="alternative", sep="_", suffix='\w+')
longalt_df =longalt_df.reset_index()
# Restrict time period
longalt_df = longalt_df[(longalt_df['start_date']>="1988-01-01") & (longalt_df['start_date']<="2008-12-31")]
longalt_df = longalt_df[(longalt_df['start_date']<"2001-03-20") | (longalt_df['start_date']>"2004-01-29")]
longalt_df = longalt_df.sort_values(by=["start_date","alternative"])
longalt_df = longalt_df[~longalt_df['C_TREATMENT_SIZE_alt'].isna()]
longalt_df = longalt_df[longalt_df['C_TREATMENT_SIZE_alt']!="N"]
longalt_df.drop(columns=["C_TREATMENT_SIZE_alt"],inplace=True)
# Cleaning
longalt_df.rename(columns={"Sentences_alt":"text"},inplace=True)
# Fill-in the missing corpora for time 3/20/01 - 1/27/04
bb_missing_df['start_date'] = pd.to_datetime(bb_missing_df['date'])
bb_missing_df.drop(columns=["date",'alt_num','change'],inplace=True)
bb_missing_df.rename(columns={'alt':'alternative', },inplace=True)
all_df = pd.concat([bb_missing_df,longalt_df],join="outer",axis=0,ignore_index=True)
all_df = all_df[['start_date','alternative','text']]
all_df = all_df.sort_values(by=["start_date","alternative"])
all_df.reset_index(drop=True,inplace=True)
return all_df
def import_leaalttext(directory,missingcorpus):
# Import alternatives that Lea extracted
emptylist=[]
filenames = sorted(os.listdir(directory))
for filename in filenames:
if ".txt" not in filename:
#print("not a txt file")
continue
# get start date
start_date = filename.replace(".txt","")
alternatives = {'start_date_string':start_date,'a':[],'b':[],'c':[],'d':[],'e':[]}
with open(f"../data/alternatives_corpora/{filename}") as f:
for line in f.readlines():
if line.strip():
split = re.split("[a-z]\s[A-Z]{3,4}\s\d*",line.strip(),1)
if len(split)>1:
alt = line.strip()[0]
alternatives[alt].append(split[1])
emptylist.append(alternatives)
corpus_df = pd.DataFrame(emptylist)
# Restrict time period
corpus_df['start_date']=pd.to_datetime(corpus_df['start_date_string'])
corpus_df= corpus_df[(corpus_df['start_date']>="1988-01-01") & (corpus_df['start_date']<="2008-12-31")]
# Fill-in the missing corpora for time 3/20/01 - 1/27/04
corpus_df = corpus_df[(corpus_df['start_date']<"2001-03-20") | (corpus_df['start_date']>"2004-01-29")]
# Do a long reshape
newnames = dict(zip(['a', 'b', 'c', 'd', 'e'] ,[ f"alt_{col}" for col in ['a', 'b', 'c', 'd', 'e'] ]))
corpus_df.rename(columns=newnames,inplace=True)
corpus_df.drop(columns="start_date_string",inplace=True)
len(corpus_df["start_date"].unique())
corpus_long = pd.wide_to_long(corpus_df,"alt",i="start_date",j="alternative",sep="_",suffix="\w").reset_index()
corpus_long.rename(columns={"alt":"newtext"},inplace=True)
corpus_long = corpus_long.sort_values(["start_date","alternative"],ascending=(True, True))
corpus_long = corpus_long.reset_index()
corpus_long["tt"] = np.nan
for idx,row in corpus_long.iterrows():
if not row["newtext"]:
corpus_long.loc[idx, "tt"] = np.nan
else:
corpus_long.loc[idx, "tt"] = " ".join(row["newtext"])
corpus_long.drop(columns="newtext",inplace=True)
corpus_long.rename(columns={"tt":"newtext"},inplace=True)
corpus_long = corpus_long[corpus_long.newtext.notna()]
corpus_long.drop(columns="index",inplace=True)
missingcorpus_df = pd.read_csv(missingcorpus)
missingcorpus_df["start_date"] = pd.to_datetime( missingcorpus_df["start_date"])
corpus = pd.concat([missingcorpus_df,corpus_long],join="outer",axis=0,ignore_index=True)
return corpus
def get_ffr(ffr,startyear,endyear):
ffr.rename(columns={"observation_date":"date","DFEDTAR":"ffrtarget"},inplace=True)
ffr['year']=ffr['date'].apply(lambda x: x.year)
ffr=ffr[(ffr['year']>=startyear) & (ffr['year']<=endyear)].copy()
ffr['target_before'] = ffr['ffrtarget'].shift(1).copy()
ffr['target_after'] = ffr['ffrtarget'].shift(-1).copy()
ffr['target_change'] = ffr['target_after'] - ffr['target_before']
return ffr
def main():
print("Processing of the manual bluebook menu\n")
blue_df = pd.read_excel("../data/bluebook_manual_data_online_WORKING.xlsx")
bb_missing_df=pd.read_excel("../data/bluebook_missingalternatives.xlsx")
menu_df = build_altmenu(blue_df,bb_missing_df)
print("Processing Anand and Oliver alternative text\n")
blue_df = pd.read_excel("../data/bluebook_manual_data_online_WORKING.xlsx")
bb_missing_df=pd.read_excel("../data/bluebook_missingalternatives.xlsx")
text_df = build_alttext(blue_df,bb_missing_df)
print("Import FFR Target\n")
startyear, endyear = 1988, 2008
ffrdata=pd.read_excel("../../../collection/python/data/FRED_DFEDTAR.xls",skiprows=10)
ffr = get_ffr(ffrdata,startyear,endyear)
print("Processing the Ambiguous Decisions\n")
decisions_df = pd.read_excel("../data/undetermined_alternatives_og.xlsx")
decisions_df = decisions_df.loc[~decisions_df["Final decision"].isna(),['start_date',"Final decision"]].rename(columns={"Final decision":'decision'})
dates_df = pd.read_csv("../../../collection/python/output/derived_data.csv")
dates_df['start_date'] = pd.to_datetime(dates_df['start_date'])
dates_df['end_date'] = pd.to_datetime(dates_df['end_date'])
dates_df = dates_df.drop_duplicates(subset="start_date")
merged_df = menu_df.merge(dates_df[["start_date","end_date"]],on="start_date",how="left")
merged_df = merged_df.merge(ffr,left_on="end_date",right_on="date",how="left")
merged_df = merged_df.merge(decisions_df,on="start_date",how="left")
merged_df = merged_df[['start_date','end_date','alternative','size','treatment','target_after', 'target_before', 'target_change', 'decision']]
merged_df['newds'] = merged_df.loc[ merged_df['size']== merged_df["target_change"],'alternative' ]
helpdf = merged_df.loc[(~merged_df['newds'].isna()) & (merged_df['decision'].isna()),['start_date','newds']]
helpdf.rename( columns = {'newds':"matchdec"},inplace=True)
merged_df = merged_df.merge(helpdf,on="start_date",how="left")
merged_df.loc[ merged_df['decision'].isna(),'decision'] = merged_df.loc[ merged_df['decision'].isna() , 'matchdec']
merged_df.drop(columns=['newds',"matchdec"],inplace=True)
mergedtext_df = merged_df.merge(text_df,on=["start_date","alternative"],how="inner")
directory = "../data/alternatives_corpora"
missingcorpus = "../data/bluebook_missingcorpuslea.csv"
newdata = import_leaalttext(directory,missingcorpus)
mergedtext_df = mergedtext_df.merge(newdata , on=["start_date","alternative"],how="left")
# Fill Lea's alternatives with the manual collected alternatives in the period from 2001-03-20 to 2004-01-28
mergedtext_df.loc[(mergedtext_df['start_date']>="2001-03-20") & (mergedtext_df['start_date']<="2004-01-28"),"newtext" ]= mergedtext_df.loc[(mergedtext_df['start_date']>="2001-03-20") & (mergedtext_df['start_date']<="2004-01-28"),"text" ]
# Fill Lea's alternatives with the aprime outlier on start_date 2001-01-30
mergedtext_df.loc[(mergedtext_df['start_date']>="2001-01-30") & (mergedtext_df['alternative']=="aprime"),"newtext" ]= mergedtext_df.loc[(mergedtext_df['start_date']>="2001-01-30") & (mergedtext_df['alternative']=="aprime"),"text" ]
mergedtext_df.rename(columns={"text":"oldtext","newtext":"leatextwithadjustments"},inplace=True)
decision = mergedtext_df[['start_date', 'end_date','decision']]
decision = decision.drop_duplicates('start_date')
print("Export the final dataset")
mergedtext_df.to_csv("../output/alternativedata.csv",index=False)
on = 1
if on ==1:
votes = pd.read_csv("../output/votingrecordwo.csv")
votes["date"] = pd.to_datetime(votes["date"] )
votes = votes.merge(decision,left_on="date",right_on="end_date",how="left")
votes.loc[votes['votingmember']!=1,"decision"] = np.nan
votes['diss']= votes[['ambdiss',
'tighterdiss', 'easierdiss']].sum(axis=1)
votes.loc[votes['diss']==1,"decision"] = np.nan
votes.drop(columns=['start_date', 'end_date', 'diss'],inplace=True)
votes = votes[votes["date"]!="1988-01-05"]
votes.to_csv("../output/votingrecord.csv")
if __name__ == "__main__":
main()
```
#### File: scripts/indirect/auxfunction_tablecreation.py
```python
def create_table_df(data,name,max_columns=7):
columnheaders=list(data.columns)
numbercolumns=len(columnheaders)
    ## Check column length
if not numbercolumns>max_columns+1:
#Change made by <NAME>: output changed to overleaf_files
with open("../output/overleaf_files/"+name+".tex", "w") as f:
f.write(r"\begin{tabular}{"+"l" + "".join("c" * (numbercolumns-1)) + "}\n")
f.write("\\hline\\hline \n")
f.write("\\addlinespace"+" \n")
f.write(" & ".join([str(x) for x in columnheaders]) + " \\\ \n")
f.write("\\hline \n")
# write data
for idx,row in data.iterrows():
# Do formatting for specific tables
if row.iloc[0]=="Total":
f.write("\\addlinespace"+" \n")
f.write(" & ".join([str(x) for x in row.values]) + " \\\\\n")
f.write("\\hline \n")
f.write(r"\end{tabular}")
else:
aux=(numbercolumns-1)//max_columns
#print("Total number of columns:",numbercolumns)
if aux==(numbercolumns-1)/max_columns:
n_tables=aux
else:
n_tables=aux+1
#print("Split into",n_tables,"tables")
n_colums=(numbercolumns-1)//n_tables
#print("with",n_colums,"columns each")
aux_columnheaders=[]
for n_table in range(n_tables):
aux_c=[columnheaders[0]]
for column in range(n_colums):
aux_c.append(columnheaders[(n_table)*n_colums+(column+1)])
aux_columnheaders.append(aux_c)
with open("../output/overleaf_files/"+name+".tex", "w") as f:
f.write(r"\begin{tabular}{"+"l" + "".join("c" * (n_colums)) + "}\n")
f.write("\\hline\\hline \n")
f.write("\\addlinespace"+" \n")
for i in range(n_tables):
aux_table_columns=aux_columnheaders[i]
data_table=data[aux_table_columns]
f.write(" & ".join([str(x) for x in aux_table_columns]) + " \\\ \n")
f.write("\\hline \n")
# write data
for idx,row in data_table.iterrows():
# Do formatting for specific tables
if row.iloc[0]=="Total":
f.write("\\addlinespace"+" \n")
f.write(" & ".join([str(x) for x in row.values]) + " \\\\\n")
f.write("\\hline \n")
f.write("\\addlinespace"+" \n")
f.write(r"\end{tabular}")
```
#### File: scripts/indirect/create_lda_data.py
```python
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
from gensim.utils import simple_preprocess
import itertools
import os
import json
###############################################################################
def clean_data(alternatives,speakers,votingrecord,alternative_results,speakerid,begin_date,end_date):
# Alternatives
alternatives = alternatives[["start_date","date","alt a corpus","alt b corpus","alt c corpus","alt d corpus"]]
names = {"alt a corpus":"corpus_alta","alt b corpus":"corpus_altb","alt c corpus":"corpus_altc","alt d corpus":"corpus_altd","date":"end_date"}
alternatives.rename(columns=names,inplace=True)
alts = pd.wide_to_long(alternatives,stubnames="corpus", sep="_",i="start_date", j="alternatives", suffix='\w+')
alts=alts.reset_index()
alts.rename(columns={'start_date':'Date','alternatives':'Speaker','corpus':'content'},inplace=True)
#print(alts)
data = pd.concat([speakers,alts],axis=0,keys=[0, 1])
data = data.reset_index()
data.drop(columns=["Unnamed: 0","level_1"],inplace=True)
data.rename(columns={"level_0":"d_alt"},inplace=True)
    # Create full speaker dictionary that contains non-voting members
speakerid = {k.lower():v for k,v in speakerid.items()}
speakers = data['Speaker'].unique().tolist()
speakers =[s.lower() for s in speakers ]
newspeakerids = {}
for idx,speaker in enumerate(speakers):
if speaker in speakerid.keys():
newspeakerids.update({speaker:speakerid[speaker]})
else:
newid="id_"+str(100+idx)
#print(newid)
newspeakerids.update({speaker:newid})
#sorted(list(newspeakerids.keys()))
#sorted([ int(i.strip('id_')) for i in sorted(list(newspeakerids.values()))])
# Clean dataset
data["speaker"] = data["Speaker"].apply(lambda x: x.lower())
data["speaker_id"] = data["speaker"]
data.replace({"speaker_id":newspeakerids},inplace=True)
data.drop(columns="Speaker",inplace=True)
data.rename(columns={"Date":"date"},inplace=True)
# Merge alternative outcomes
data = data.merge(alternative_results,left_on = "end_date",right_on='date',how="left",indicator=True)
data.drop(columns=[ 'Unnamed: 0'],inplace=True)
# Merge voting record
data = data.merge(votingrecord,left_on=["speaker_id","end_date"],right_on=["speaker_id","date"],how="outer",indicator=False,sort=False)
data.dropna(subset=["content"],inplace=True,axis=0)
data.fillna(value={'votingmember':0, 'ambdiss':0,'tighterdiss':0, 'easierdiss':0},inplace=True)
data.drop(columns=["start_date","date_y","date"],inplace=True)
data.rename(columns={"date_x":"start_date"},inplace=True)
data.loc[(data['votingmember']==1) & (data["_merge"]=='both') ,'act_vote'] = data.loc[(data['votingmember']==1) & (data["_merge"]=='both') ,'act_chosen']
data.loc[(data['votingmember']==1) & (data['tighterdiss']==1) & (data["_merge"]=='both'),'act_vote'] = data.loc[(data['votingmember']==1) & (data['tighterdiss']==1) & (data["_merge"]=='both'),'vote_tighter']
data.loc[(data['votingmember']==1) & (data['tighterdiss']==1) & (data['vote_tighter'].isna()) & (data["_merge"]=='both'),'act_vote'] = "tighterdiss"
data.loc[(data['votingmember']==1) & (data['easierdiss']==1) & (data["_merge"]=='both'),'act_vote'] = data.loc[(data['votingmember']==1) & (data['easierdiss']==1)& (data["_merge"]=='both'),'vote_easier']
data.loc[(data['votingmember']==1) & (data['easierdiss']==1) & (data['vote_easier'].isna()) & (data["_merge"]=='both'),'act_vote'] = "easierdiss"
data.loc[(data['votingmember']==1) & (data['ambdiss']==1) & (data["_merge"]=='both'),'act_vote'] = "ambdissent"
data.drop(columns = ["_merge"],inplace=True)
# Constrain dataset
newdata = data[(data["start_date"]>=begin_date) & (data["start_date"]<=end_date) ]
return newdata
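# main loads the speaker-ID dictionary, the voting record, the FOMC Section 2
# speaker corpus, the bluebook alternatives and their outcomes, and returns the
# cleaned LDA input dataset for the 1988-03-29 to 2006-01-31 sample.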
def main():
# Load dictionary
with open('../output/data.json', 'r') as speakerids:
speakerid = json.load(speakerids)
# Load votingrecord
votingrecord = pd.read_csv("../output/votingrecord.csv").sort_values(by="date")
# Load speaker text
speakers = pd.read_csv("../output/speaker_data/speaker_corpus.csv").sort_values(by="Date")
speakers = speakers[speakers.FOMC_Section==2]
#len(speakers[speakers.start_date>='1988-03-29'].start_date.unique())
# Alternatives that Anand collected
alternatives = pd.read_csv("../output/alternative_outcomes_and_corpus.csv")
# Alternative results
alternative_results = pd.read_csv("../output/alternative_results.csv")
begin_date = "1988-03-29"
end_date = "2006-01-31"
dataout = clean_data(alternatives,speakers,votingrecord,alternative_results,speakerid,begin_date,end_date)
return dataout
# =============================================================================
# # ### Do a variety of checks on the data
#
# data = main()
# print("The total data length is: %s" % len(data))
#
# num = len(data.loc[data["d_alt"]==1,'start_date'].unique())
# print(f"Alternative dates: {num} of 168")
#
# num = len(data.loc[(data["d_alt"]==0) & (data["votingmember"]==1) ,'start_date'].unique())
# print(f"Dates with votes: {num} of 174")
#
# num =len(data.loc[(data["d_alt"]==0) & (data["votingmember"]==1)])
# print(f"Votes: {num} out of 1905")
#
# # Check the number of dissents tighter
# num =len(data.loc[(data["d_alt"]==0) & (data["tighterdiss"]==1)])
# print(f"Dissent tighter: {num} out of 57")
#
# # Check the number of dissents easier
# num =len(data.loc[(data["d_alt"]==0) & (data["easierdiss"]==1)])
# print(f"Dissent tighter: {num} out of 23")
#
# # Check the number of dissents easier
# num =len(data.loc[(data["d_alt"]==0) & (data["ambdiss"]==1)])
# print(f"Dissent tighter: {num} out of 14")
#
#
# # Check for the missing votes
# new = data.loc[(data["d_alt"]==0) & (data["votingmember"]==1)].pivot_table(index="end_date",values="votingmember",aggfunc=sum).reset_index()
# excel_df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
# excel_df['FOMC Votes'] = excel_df['FOMC Votes'].apply(lambda x:0 if np.isnan(x) else x)
# excel_df['date'] = excel_df["FOMC Meeting"].dt.date
# excel_df = excel_df[~excel_df["Chair"].isna()]
# new["end_date"] = pd.to_datetime(new["end_date"]).dt.date
# newn = new.merge(excel_df,left_on="end_date",right_on="date",how="left")
# newn['dev'] = newn['FOMC Votes'] == newn['votingmember']
# bb = newn[newn['dev']==False]
# check = data[data['end_date']=="2007-06-28"]
# ## All of the missing votes are on 01/05/1988: Voters don't have interjections.
# ## This is a date without bluebooks. Hence voting record is complete.
#
# data['new']=1
# df_balt=data[data['d_alt']==1].pivot_table(index="date",values='new',aggfunc=np.sum).reset_index()
# df_summary = data.pivot_table(index="date",values='new',columns=['d_alt','votingmember'],aggfunc=np.sum)
#
# =============================================================================
```
#### File: scripts/indirect/extract_alternative_text.py
```python
import os
import pandas as pd
import re
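# main reads every bluebook raw-text file, pulls a 1000-character window on each
# side of any mention of "alternative a"-"alternative e", keeps the sentences in
# those windows that contain one of the topic terms in `basket` (e.g. a sentence
# mentioning "inflation"), and writes the result to
# ../output/alternative_window_text_topics.csv.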
def main():
dir = "../../../collection/python/output/bluebook_raw_text"
raw_text_df = pd.DataFrame()
for file in os.listdir(dir):
with open(dir+"/"+file) as f:
text = f.read().replace("\n"," ")
raw_text_df = raw_text_df.append({'date':file.replace(".txt",""),'raw_text':text},ignore_index=True)
pattern = ".{1000}alternatives?\s+[a-e]\s.{1000}"
regex = re.compile(pattern, re.IGNORECASE)
basket = ['inflation','unemployment','monetary policy','productivity','market expectation']
for i in raw_text_df.index:
match = re.findall(regex,raw_text_df.at[i,'raw_text'])
if match:
match_text = "\n\n\n".join(match)
raw_text_df.at[i, 'matches'] = match_text
alternatives = re.findall("alternatives?\s+[a-e]",match_text)
raw_text_df.at[i,'alternatives'] = alternatives
for term in basket:
term_matches = []
for match in raw_text_df.at[i,'matches'].split("\n\n\n"):
#print(match)
sentences = match.split(".")
for sentence in sentences:
print(sentence)
if term in sentence:
term_matches.append(sentence+".")
#term_sentence = "\. .*"+term+".*\."
#search = re.findall(term_sentence,raw_text_df.at[i, 'matches'])
column_name = term.replace(" ","_")
if term_matches:
print("found")
raw_text_df.at[i, column_name] = "\n".join(term_matches)
raw_text_df.to_csv("../output/alternative_window_text_topics.csv")
if __name__ == "__main__":
main()
```
#### File: scripts/indirect/produce_bb_size_sumstats.py
```python
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
import datetime
from produce_sumstats_bb_options import create_totstat, create_sumstat_byperiod
###############################################################################
### Create the summary statistics including the size.
def main():
sample_startdate=datetime.datetime(1989, 7, 1)
sample_enddate=datetime.datetime(2006, 2, 1)
data_sumstats=prepare_data(sample_startdate,sample_enddate)
create_totstat(data_sumstats,'tab_sumstat_sizeoptions')
#turning_points=['1989-06-01','1993-06-01','1995-04-01','2000-11-01','2004-01-01','2007-02-01']
#create_sumstat_byperiod(data_sumstats,turning_points,'tab_sumstats_sizeoptions_byperiod')
#data_graphs=produce_data_graph(1988,2008)
# Plot individual meeting
#datestring="2002-11-06"
#produce_graph(data_graphs,datestring)
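# prepare_data reads the manually coded bluebook spreadsheet, restricts it to the
# sample window, and stores for each meeting a comma-separated string of the
# usable policy-rate treatment sizes offered by alternatives a-e (entries that
# are NaN or contain "?" are dropped).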
def prepare_data(sample_startdate,sample_enddate):
data=pd.read_excel("../data/bluebook_manual_data_online_WORKING.xlsx")
data['year']=data['start_date'].apply(lambda x : x.year)
data=data[(data['start_date']>=sample_startdate) & (data['start_date']<=sample_enddate)]
data=data.reset_index()
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments+=data['C_TREATMENT_SIZE_alt_'+alt].unique().tolist()
except:
pass
#print('No option found')
treatments=list(set(treatments))
data.loc[:,'treatment_options']=np.nan
data.loc[:,'treatment_options']=data.loc[:,'treatment_options'].astype('object')
for idx,row in data.iterrows():
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments.append(row['C_TREATMENT_SIZE_alt_'+alt])
except:
pass
#print('No option found')
filtered_treatments=[]
for treatment in treatments:
try:
if not re.search('\?',str(treatment)):
if not np.isnan(treatment):
if isinstance(treatment, int) or isinstance(treatment, float):
filtered_treatments.append(treatment)
except:
pass
filtered_treatments=", ".join([str(x) for x in sorted(filtered_treatments)])
#print(filtered_treatments)
if not len(filtered_treatments)==0:
data['treatment_options'].iloc[idx]=filtered_treatments
return data
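# produce_data_graph builds the same per-meeting treatment-size information as
# prepare_data, but keeps it as a numeric list (exact zeros are nudged to 0.01 so
# they remain visible in the bar chart) for use by produce_graph below.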
def produce_data_graph(startdate,enddate):
data=pd.read_excel("../../data/bluebook_manual_data_online_WORKING.xlsx")
data['year']=data['start_date'].apply(lambda x : x.year)
data=data[(data['year']>=startdate) & (data['year']<=enddate)]
data['start_date']=pd.to_datetime(data['start_date'])
data['end_date']=pd.to_datetime(data['end_date'])
data=data.reset_index()
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments+=data['C_TREATMENT_SIZE_alt_'+alt].unique().tolist()
except:
pass
#print('No option found')
treatments=list(set(treatments))
data.loc[:,'treatment_options']=np.nan
data.loc[:,'treatment_options']=data.loc[:,'treatment_options'].astype('object')
for idx,row in data.iterrows():
treatments=[]
for alt in ['a','b','c','d','e']:
try:
treatments.append(row['C_TREATMENT_SIZE_alt_'+alt])
except:
pass
#print('No option found')
filtered_treatments=[]
for treatment in treatments:
try:
if not re.search('\?',str(treatment)):
if not np.isnan(treatment):
if isinstance(treatment, int) or isinstance(treatment, float):
if treatment==0:
filtered_treatments.append(treatment+0.01)
else:
filtered_treatments.append(treatment)
except:
pass
#print(filtered_treatments)
if not len(filtered_treatments)==0:
data['treatment_options'].iloc[idx]=filtered_treatments
else:
pass
return data
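# produce_graph plots the sorted treatment sizes of a single meeting, selected by
# its end date, as a bar chart and saves it as a PNG in ../output/.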
def produce_graph(data,datestring):
ind_meeting_data=data[data['end_date']==pd.to_datetime(datestring)]
title= "Meeting "+ pd.Series(ind_meeting_data['start_date']).dt.strftime('%Y-%m-%d').item()+" - "+\
pd.Series(ind_meeting_data['end_date']).dt.strftime('%Y-%m-%d').item()
treatment_sizes=ind_meeting_data['treatment_options'].item()
treatment_sizes=sorted(treatment_sizes)
index=[]
name=[]
for i in range(1,len(treatment_sizes)+1):
index.append(i)
name.append("Alternative "+str(i))
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(4, 6))
ax = fig.add_subplot(1, 1, 1)
ax.bar(index,treatment_sizes,align='center',width =.4)
ax.set_xticks(index, name)
ax.set_ylim(-1,1)
ax.axhline(color='gray',ls="--")
ax.set_title(title)
plt.savefig('../output/fig_policy_option_'+datestring+'.png', dpi=300,bbox_inches='tight')
if __name__ == "__main__":
main()
```
#### File: scripts/indirect/produce_bluebook_stats.py
```python
import pandas as pd
import re
import os
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models.phrases import Phrases, Phraser
# spacy for lemmatization
from distutils.core import setup
from Cython.Build import cythonize
import spacy
from wordcloud import WordCloud
import numpy as np
###############################################################################
# Open points:
# 1) Handling of footnotes. Right now, they are mixed into the sentence at the
# end of the page. This interrupts sentences that do not end at the end of the
# page.
# 2) Think about the use of nltk for the extraction of the relevant information
# 3) Adjust the cloud of words to be more relevant to our objective.
# Do the merge with the news data
# Do the merge with the statements
###############################################################################
### Define table output
def create_table(data,name):
# Output the table
columnheaders=list(data[0].keys())[1:]
keysofdict=list(data[0].keys())
numbercolumns=len(data[0])
with open("../../output/"+name+".tex", "w") as f:
f.write(r"\begin{tabular}{"+"l" + "".join("c" * (numbercolumns-1)) + "}\n")
f.write("\\hline\\hline \n")
f.write(" & "+" & ".join([x for x in columnheaders]) + " \\\ \n")
f.write("\\hline \n")
# write data
for idx,entry in enumerate(data):
# Do formatting for specific tables
if name=="tab_aggsum":
if idx==1:
f.write("\\addlinespace"+" \n")
if idx==2:
f.write("\\textit{of which:} \\\ \n")
if name=="tab_summary_count":
if idx==1:
f.write("\\addlinespace"+" \n")
entries=[]
for key in keysofdict:
entries.append(str(entry[key]))
f.write(" & ".join(entries) + " \\\ \n")
f.write("\\hline \n")
f.write(r"\end{tabular}")
### Create cloud of words
def remove_stopwords(words,stopwords):
nostopwords=[]
for word in words:
if word not in stopwords:
nostopwords.append(word)
return nostopwords
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en_core_web_sm')
doc = nlp(" ".join(texts))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
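# create_cloudwords tokenizes the supplied sentences with gensim, compares the
# manual and model word counts, removes stopwords, lemmatizes to
# nouns/adjectives/verbs/adverbs, and saves a word cloud to ../output/<name>.png.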
def create_cloudwords(sentences,name):
stop_words = stopwords.words('english')
# Manual extraction of words
data_man="".join(sentences)
words_man=data_man.replace(".", " ")
# Gensium extraction of words
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuation
wordsperphrase = list(sent_to_words(sentences))
words_model=[]
for phrase in wordsperphrase:
for word in phrase:
words_model.append(word)
# Do some simple comparison
print("Manual word count: ",len(words_man))
print("Model word count: ",len(words_model))
# Assign the words
data_words=words_model
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words,stop_words)
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_nostops, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# =============================================================================
# # Define functions for stopwords, bigrams, trigrams and lemmatization
# # Build the bigram and trigram models
# bigram = Phrases(data_lemmatized[0], min_count=1, threshold=0.1) # higher threshold fewer phrases.
# trigram = Phrases(bigram[data_lemmatized[0]], threshold=.1)
#
# bigram_mod = Phraser(bigram) # construct faster model (this is only a wrapper)
# bigram_mod[data_lemmatized[0]] # apply model to sentence
#
# print("original",len(data_lemmatized[0]),"processed",len(bigram_mod[data_lemmatized[0]]))
#
# =============================================================================
wordcloud = WordCloud(width = 1600, height = 1600,
background_color ='white',
min_font_size = 10).generate(" ".join(data_lemmatized[0]))
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
plt.savefig("../output/"+name+".png")
### Do some summary statistics ###
df_output=pd.read_csv("../../../derivation/python/output/bluebook_alternatives.csv")
df_output['year']=pd.to_numeric(df_output['meeting_date'].str[:4])
df_output['date']=pd.to_datetime(df_output['meeting_date'])
df_result=df_output[(df_output['date']<="2009-03-18") & (df_output['date']>="1968-08-13")]
if not os.path.isdir("../output"):
os.mkdir("../output/")
## Aggregate Statistics ##
# Number of bluebooks
dat_list=[]
columnname="Number of Meetings with Bluebooks "
entry="Total"
print(entry,len(df_output))
dat_list.append({"entry":entry, columnname:len(df_output)})
# Number of meetings between 19680813-20090317
entry=r"In Period 08/13/1968-03/17/2009"
print(entry,len(df_result))
dat_list.append({"entry":entry, columnname:len(df_result)})
# Number with alt between 19680813-20090317
entry=r"...with mentioning of alternative(s)"
number=len(df_result[df_result['n_sentences']>0])
print(entry,number)
dat_list.append({"entry":entry, columnname:number})
# Number of meetings with alt A
for alt in ["a","b","c","d","e"]:
entry=r"...mentioning of alternative {"+alt.upper()+"}"
number=len(df_result[df_result["alternative_"+alt+"_count"]>=1])
print(entry,number)
dat_list.append({"entry":entry, columnname:number})
# Create the table for latex import
create_table(dat_list,name="tab_aggsumstat")
## Get counts per alternative [No plural here] ##
# Total counts
totalcount={"counter":"Total"}
for alt in ["a","b","c","d","e"]:
val=df_result[df_result["alternative_"+alt+"_count"]>=1]["alternative_"+alt+"_count"].sum()
totalcount.update({"Alternative "+alt.upper():val})
countbynumber=[totalcount]
for count in range(1,11):
counter={"counter":count}
for alt in ["a","b","c","d","e"]:
val=len(df_result[df_result["alternative_"+alt+"_count"]==count])
counter.update({"Alternative "+alt.upper():val})
countbynumber.append(counter)
counter={"counter":"$>10$"}
for alt in ["a","b","c","d","e"]:
val=len(df_result[df_result["alternative_"+alt+"_count"]>10])
counter.update({"Alternative "+alt.upper():val})
countbynumber.append(counter)
create_table(countbynumber,name="tab_summary_count")
## Show the mentioning of alternative {A-E} graphically
fig, ax = plt.subplots()
for alt in ["a","b","c","d","e"]:
df_result.loc[:,"d_alt_"+alt]=df_result["alternative_"+alt+"_count"]>=1
df_result.loc[:,"d_alt_label_"+alt]=""
df_result.loc[df_result["d_alt_"+alt]==True,"d_alt_label_"+alt]="Alt. "+alt.upper()
ax.plot_date(df_result["date"],df_result["d_alt_label_"+alt],marker='o',markersize=3)
ax.set_ylim(["Alt. A","Alt. E"])
fig.savefig('../output/fig_alt_time.png')
# =============================================================================
#
# ## Show the bigrams associated with each alternative
#
# for alt in ["a","b","c","d","e"]:
# phrases=df_result[df_result["alternative_"+alt+"_count"]>=1]["alt_"+alt+"_sentences"].tolist()
# sentences=[]
# for phrase in phrases:
# for sentence in phrase:
# sentences.append(sentence)
# create_cloudwords(sentences,name="fig_cloudwords_"+alt)
#
#
# =============================================================================
```
#### File: scripts/indirect/produce_news_stats.py
```python
import pandas as pd
import pprint
import numpy as np
import sys
output_dir = "../../../derivation/python/output/"
from auxfunction_tablecreation import create_table_df
'''
@Author <NAME>
Produces summary statistics on the coverage of FOMC meetings by
the Financial Times, the New York Times, and the Wall Street Journal,
reading in our master news data and exporting to a LaTeX file
'''
def main():
comp_df = get_merge()
#print(comp_df)
comp_df.rename(columns={'meeting_date':'Meetings'},inplace=True)
pivot = pd.pivot_table(comp_df,
values=['Meetings','NYT', 'WSJ', 'FT'],
columns="year",
aggfunc={'Meetings':np.count_nonzero,
'NYT':np.sum,
'WSJ':np.sum,
'FT':np.sum})
pivot = pivot.reset_index()
pivot.rename(columns={"index": "Newspaper"}, inplace=True)
pivot = pivot.reindex([1,0,2,3])
pivot = pivot.astype(int,errors="ignore")
#print(pivot)
#print(pivot.shape)
create_table_df(pivot,"tab_news_coverage",12)
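# get_merge aligns FOMC meeting dates (1988-2009) with the master news data and
# returns one row per meeting with its year and indicator columns for coverage by
# the NYT, WSJ and FT.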
def get_merge():
derived_df = pd.read_csv("../../../collection/python/output/derived_data.csv")
derived_df['end_date'] = pd.to_datetime(derived_df['end_date'])
derived_df = derived_df[derived_df.event_type == "Meeting"]
date_period = derived_df[(derived_df.end_date.dt.year >= 1988) & (derived_df.end_date.dt.year <= 2009)]
meeting_dates = date_period[['end_date']]
meeting_dates = meeting_dates.drop_duplicates(subset="end_date")
meeting_dates['meeting_date'] = pd.to_datetime(meeting_dates['end_date'])
meeting_dates = meeting_dates[['meeting_date']]
news_df = pd.read_csv(output_dir+"all_news_articles.csv")
total_articles = len(news_df)
#print("Total number of articles:{}".format(total_articles))
content_df = news_df[news_df['content']!=""]
#print("Total number of articles with content:{}".format(len(content_df)))
news_df['meeting_date'] = pd.to_datetime(news_df['meeting_date'])
#print(news_df)
merged_df = meeting_dates.merge(news_df,how="left",indicator=True,on="meeting_date")
#print(merged_df)
interm_df = pd.pivot_table(data=merged_df,index="meeting_date",columns="source",
values="headline",aggfunc=np.count_nonzero)
interm_df = interm_df.reset_index()
interm_df['year'] = interm_df.meeting_date.apply(lambda x: x.year)
interm_df['NYT'] = interm_df["The New York Times"].notnull()
interm_df['WSJ'] = interm_df["The Wall Street Journal"].notnull()
interm_df['FT'] = interm_df["The Financial Times"].notnull()
return interm_df
main()
```
#### File: scripts/indirect/statement_text_analysis.py
```python
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from nltk import word_tokenize
from nltk.corpus import stopwords
import re
import pprint
import math
import os
import shutil
def main():
statement_document_analysis()
frequency_counts()
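# frequency_counts collects, for each term of interest (currently "risk" and
# "risks"), the statement sentences containing it, and tabulates the non-stopword
# vocabulary of those sentences with collections.Counter.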
def frequency_counts():
cwd = "../output/statement_text_analysis/"
terms = ['risk','risks',
]
statements = pd.read_csv("../../../collection/python/output/statement_data.csv")
statements['date'] = pd.to_datetime(statements['end_date'])
stop_words = stopwords.words('english')
corpus_words = []
for i in statements.index:
raw_text = statements.loc[i,'file_text'].lower().replace("\n"," ").strip(",")
sentences = raw_text.split(". ")
for term in terms:
term_sents = []
term_words = []
for sentence in sentences:
if term in sentence:
term_sents.append(sentence)
statements.at[i, term+"_sents"] = "|".join(term_sents)
for sent in term_sents:
for word in word_tokenize(sent):
#print(word)
if word.isalpha() and word not in stop_words:
corpus_words.append(word)
term_words.append(word)
#print(term_words)
statements.at[i,term+"_words"] = "|".join(term_words)
corpus_counter = Counter(corpus_words)
for term in terms:
term_words = []
for meeting_words in statements[term+"_words"]:
term_words.extend(meeting_words.split("|"))
term_counts = Counter(term_words)
print(term.upper())
pprint.pprint(term_counts)
statements.loc[1,term+"_word_freqs"] = "{}:{}".format(term.upper(),str(term_counts))
statements.to_csv(cwd+"word_grouping_counts.csv")
def statement_document_analysis():
cwd = "../output/statement_text_analysis/"
if os.path.exists(cwd):
shutil.rmtree(cwd)
if not os.path.exists(cwd):
os.mkdir(cwd)
os.mkdir(cwd+"graphs")
terms = [
['risks','balanced'],
['risks','weighted'],
['risks','maintained']
]
print(terms)
statements = pd.read_csv("../../../collection/python/output/statement_data.csv")
statements['date'] = pd.to_datetime(statements['end_date'])
for i in statements.index:
raw_text = statements.loc[i, 'file_text'].lower().replace("\n", " ").strip(",")
sentences = raw_text.split(". ")
for term in terms:
term_sents = []
for sentence in sentences:
if term[0] in sentence and term[1] in sentence:
term_sents.append(sentence)
statements.at[i, term[0]+":"+term[1] + "_sents"] = "|".join(term_sents)
for term in terms:
term_1 = term[0]
term_2 = term[1]
term_phrase = term_1+":"+term_2
statements[term_phrase] = ((statements.file_text.str.contains(term_1))&
(statements.file_text.str.contains(term_2)))
statements.sort_values(by="date",inplace=True)
plt.plot(statements['date'],statements[term_phrase],'bo',markersize=1)
plt.title(term_phrase)
graph_path = cwd+"graphs/"+term_phrase.replace(":","_")+".png"
if os.path.exists(graph_path):
os.remove(graph_path)
plt.savefig(graph_path)
statements.to_csv(cwd+"term_connections.csv")
#print(statements)
if __name__ == "__main__":
main()
```
#### File: scripts/_old/get_voting_members.py
```python
import pandas as pd
import os
import re
import numpy as np
import pprint
import logging
'''
@Author: <NAME>
This script extracts voting members using the minutes of FOMC meetings, and then appends a manual verification for certain values.
'''
def main():
voter_df = get_voters()
get_errors(voter_df)
merge_error_correction(voter_df)
#merge_voting_members_with_alternatives()
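# get_voters parses the attendance block of each meeting transcript with three
# fallback heuristics: (1) reassemble titles and names broken across lines,
# (2) match "Mr./Ms./Mrs. Lastname" patterns, and (3) take capitalized names
# following the "PRESENT:" marker. The voter list is only kept when its length
# matches the expected vote count from the dissent spreadsheet.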
def get_voters():
df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
df["Date"] = df["FOMC Meeting"].apply(lambda x:str(x).split(" ")[0])
df['FOMC Votes'] = df['FOMC Votes'].apply(lambda x:0 if np.isnan(x) else x)
df['date'] = pd.to_datetime(df["Date"])
df['start_date'] = df['date'] - pd.Timedelta('1 days')
df['start_date']=df['start_date'].dt.date
df['date']=df['date'].dt.date
df[['date','start_date']].head()
voter_df = pd.DataFrame()
for index,row in df.iterrows():
voters = []
num_voters = int(row['FOMC Votes'])
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['Date'])
if not os.path.exists(date_path):
print("Date not found")
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['start_date'])
if not os.path.exists(date_path):
print("Alternative date not found")
continue
else:
print('Process alternative date')
with open(date_path) as f:
broken = False
broken_starts = 0
broken_ends = 0
lines = f.readlines()
'''First Check For Broken Title'''
#print("CHECKING USING FRAGMENT HEURISTIC")
for line in lines[:200]:
if line.strip():
if broken_ends==0:
title_frag = re.match(r'^(?:PRESENT: |PRESENT. )?(?:Mr.|Ms.|Mt.|Mrs. )$',line.strip())
if title_frag:
if not broken:
broken = True
#print("Broken Begining")
#print(title_frag.group(0))
title_frag_string = str(title_frag.group(0)).replace("PRESENT: ","")
voters.append(title_frag_string)
broken_starts+=1
continue
if broken and broken_ends<len(voters):
name_fragment = re.match('^[A-Z][a-z][A-Za-z]*',line.strip())
if name_fragment:
voters[broken_ends] = voters[broken_ends]+" "+str(name_fragment.group(0))
broken_ends+=1
'''Check using Mr. Regex'''
if len(voters)==0:
#print("CHECKING TITLE REGEX")
for line in lines[:200]:
'''Then check for valid input'''
voter_line = re.findall(r'(?:Mr.|Ms.|Mrs.) [A-Z][a-zA-Z]*',line.strip())
if voter_line:
#print(voter_line)
voters.append(voter_line[0])
if len(voters)>=num_voters:
break
'''Check Last Name Regex'''
if len(voters) == 0:
#print("Checking POST-PRESENT-NAME HEURISTIC")
found_present = False
for line in lines[:200]:
if "PRESENT:" in line.strip() or "PRESENT." in line.strip():
found_present = True
present_line = line.split(",")[0].strip().replace("PRESENT","")
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',present_line)
if name_text:
voters.append(name_text.group(0))
continue
if found_present:
#print(line)
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',line.split(",")[0].strip())
if name_text:
voters.append(name_text.group(0))
if len(voters)>=num_voters:
break
#print('Date:{}'.format(row['Date']))
#print("Broken Status:{}".format(broken))
#print("Voter Number:{}".format(num_voters))
#print("Voters Found:{}".format(len(voters)))
#pprint.pprint(voters)
voter_df = voter_df.append({
"Date":row['FOMC Meeting'],
"voters_expected":num_voters,
"voters_observed":len(voters),
"Voters":voters if num_voters==len(voters) else None,
},ignore_index=True)
#print("="*50)
print(voter_df)
return voter_df
def get_errors(voter_df):
print(len(voter_df[voter_df["Voters"].isna()]))
voter_errors = voter_df[voter_df["Voters"].isna()].reset_index(drop=True)
voter_errors.to_csv("../output/voter_errors.csv",index=False)
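# merge_error_correction overlays the hand-checked voter_corrections.csv on the
# automatically extracted voters (the correction wins when both exist) and keeps
# meetings between 1988 and 2009 before writing voting_members.csv.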
def merge_error_correction(voter_df):
correction_df = pd.read_csv("../data/voter_corrections.csv")
correction_df['Date'] = pd.to_datetime(correction_df['Date'])
voter_df['Date'] = pd.to_datetime(voter_df['Date'])
voter_df = pd.concat([voter_df,correction_df])
voter_df = voter_df.drop_duplicates(['Date'], keep="last").sort_values(by="Date")
voter_df = voter_df[(voter_df['Date'].dt.year>1987)&(voter_df['Date'].dt.year<2010)]
voter_df.to_csv("../output/voting_members.csv",index=False)
def merge_voting_members_with_alternatives():
voting_df = pd.read_csv("../output/voting_members.csv")
alt_df = pd.read_csv("../output/alternative_outcomes_and_corpus.csv")
alt_df['date'] = pd.to_datetime(alt_df['date']).dt.date
merge_df = pd.merge(alt_df,voting_df,left_on="date",right_on="Date",how="outer")
excel_df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
excel_df['FOMC Votes'] = excel_df['FOMC Votes'].apply(lambda x:0 if np.isnan(x) else x)
excel_df['date'] = excel_df["FOMC Meeting"].dt.date
excel_df = excel_df[~excel_df["Chair"].isna()]
merge_df = merge_df.merge(excel_df,left_on="date",right_on="date",how="outer")
merge_df = merge_df[[
'date', 'alt a corpus', 'bluebook_treatment_size_alt_a', 'alt b corpus',
'bluebook_treatment_size_alt_b', 'alt c corpus',
'bluebook_treatment_size_alt_c', 'alt d corpus',
'bluebook_treatment_size_alt_d', 'decision', 'Voters', 'Year',
'Chair', 'Dissent (Y or N)', 'FOMC Votes',
'Votes for Action', 'Votes Against Action',
'Number Governors Dissenting', 'Number Presidents Dissenting',
'No. Governors for Tighter', 'No. Governors for Easier',
'No. Presidents for Tighter', 'No. Presidents for Easier',
'Dissenters Tighter', 'Dissenters Easier',
'Dissenters Other/Indeterminate'
]]
merge_df = merge_df[(pd.to_datetime(merge_df['date']).dt.year>=1988)&(pd.to_datetime(merge_df['date']).dt.year<2010)]
merge_df.to_csv("../output/alternatives_corpus_and_voting_information.csv",index=False)
if __name__ == "__main__":
main()
```
#### File: python/scripts/extract_romer_appendix.py
```python
import re
import pprint
import csv
from tika import parser
'''
@Author <NAME>
This file reads in the pre-downloaded romer narrative appendix file
and outputs a CSV containing all policy actions and comments by meeting, called
romer_appendix_data.csv
'''
def extract_romer_appendix():
data_points = []
romer_raw_text = parser.from_file("../data/RomerandRomerNarrativeAppendix.pdf")
lines = romer_raw_text['content'].splitlines()
lines = lines[100:1419]
no_page_nums = []
for line in lines:
if not line.strip().isdigit():
no_page_nums.append(line)
lines = ''.join(no_page_nums)
date_exp = "(?:\d{1,2}/\d{1,2})(?:-\d{1,2})?(?:-\d{1,2}/\d{1,2})?(?:/\d{2})"
dates = re.findall(date_exp, lines)
#FIRST LINE IS NEWLINE
points = re.split(date_exp,lines)[1:]
assert(len(dates)==len(points))
for index in range(len(points)):
data = {}
date = dates[index]
if "-" in date:
date=date.split("-")[0]+"/"+date.split("-")[1].rsplit("/")[-1]
data['date'] = date
effect = points[index].strip().split(". ")[0]
if "no change" in effect:
data['prev'] = effect.split(",")[0]
data['new'] = data['prev']
else:
data['prev'] = effect.split("to")[0]
data['new'] = effect.split("to")[1]
description = points[index].split(". ",1)[1]
description = description.strip().replace("\n","")
data['description'] = description
#print(description)
data_points.append(data)
#pprint.pprint(data_points)
write_to_csv(data_points)
def write_to_csv(documents):
with open('../output/romer_appendix_data.csv', 'w') as csvfile:
fieldnames = ['date','prev','new','description']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for document in documents:
writer.writerow(document)
if __name__=="__main__":
extract_romer_appendix()
```
#### File: python/scripts/apply_keyword_classifier.py
```python
import pandas as pd
import re
import numpy as np
from obtain_bluebook_alternatives import getdata_bluebook
###############################################################################
def main():
df_result=import_bluebookdata()
df_result=do_keyword_classsification(df_result)
do_sumstats(df_result)
df_result.to_csv("../output/bluebook_alt_and_class_output.csv")
def import_bluebookdata():
df_output=getdata_bluebook()
df_output['year']=pd.to_numeric(df_output['meeting_date'].str[:4])
df_output['date']=pd.to_datetime(df_output['meeting_date'])
df_result=df_output[(df_output['date']<="2009-03-18") & (df_output['date']>="1968-08-13")]
return df_result
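# do_keyword_classsification labels each bluebook alternative as "ease",
# "unchanged" or "tighten" by counting keyword hits in the sentences that mention
# it (sentences referring to a different alternative are dropped); ties or zero
# hits yield "No assignment", and alternatives without usable sentences yield
# "No sentence".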
def do_keyword_classsification(df_result):
keywords_ease=["lower","cut","cuts","decline","reduction","ease","reduce","easing"]
keywords_unchanged=["keep","unchanged","no change","maintained","maintain","remain","forego","continue"]
keywords_tighten=["raise","hike","raised","firm","firming","increase","tightening","rise","tighten","tighter"]
for alt in ["a","b","c","d","e"]:
df_result.loc[:,"alt_"+alt+"_class"]=""
### Keyword classification
for idx,row in df_result.iterrows():
sentences=row["alt_"+alt+"_sentences"]
#sentences=df_result[df_result['date']=="2006-12-12"]["alt_c_sentences"].item()
keep_sentences=[]
for sentence in sentences:
pattern = "(alternatives?\s+[^"+alt+"])([^a-z])"
if not re.search(pattern,sentence,re.IGNORECASE):
keep_sentences.append(sentence)
# Do counts for each keyword
if len(keep_sentences)==0:
df_result.loc[df_result['date']==row['date'],"alt_"+alt+"_class"]="No sentence"
else:
count_ease=0
count_unchanged=0
count_tighten=0
for sentence in keep_sentences:
for keyword in keywords_ease:
pattern = "[^a-z]"+keyword+"[^a-z]"
regex=re.compile(pattern,re.IGNORECASE)
for match in regex.finditer(sentence):
count_ease+=1
#print(match.group())
for keyword in keywords_unchanged:
pattern = "[^a-z]"+keyword+"[^a-z]"
regex=re.compile(pattern,re.IGNORECASE)
for match in regex.finditer(sentence):
count_unchanged+=1
#print(match.group())
for keyword in keywords_tighten:
pattern = "[^a-z]"+keyword+"[^a-z]"
regex=re.compile(pattern,re.IGNORECASE)
for match in regex.finditer(sentence):
count_tighten+=1
#print(match.group())
#print("ease: ",count_ease,"unchanged: ",count_unchanged,"tighten:",count_tighten)
counts=[count_ease,count_unchanged,count_tighten]
new_count=[]
if 0 in counts:
new_count=counts.copy()
new_count.remove(0)
else:
new_count=counts.copy()
d_conflict=0
if len(new_count)>=2:
if sorted(new_count)[-1]==sorted(new_count)[-2]:
d_conflict=1
sum_counts=sum(counts)
labels=["ease","unchanged","tighten"]
if not sum_counts==0 and not d_conflict==1:
index_max = np.argmax([count_ease,count_unchanged,count_tighten])
#print(labels[index_max])
df_result.loc[df_result['date']==row['date'],"alt_"+alt+"_class"]=labels[index_max]
else:
df_result.loc[df_result['date']==row['date'],"alt_"+alt+"_class"]="No assignment"
df_result.sort_values('date', inplace=True)
return df_result
def do_sumstats(df_result):
### Get summary stats
alternative="b"
start_year=1994
end_year=1999
pd.pivot_table(df_result[(df_result['year']>=start_year) & (df_result['year']<=end_year)],'date',index=['alt_'+alternative+'_class'], \
aggfunc=np.count_nonzero)
pd.pivot_table(df_result[df_result['year']>=start_year],'date',index=['alt_'+alternative+'_class'], \
columns=['year'],aggfunc=np.count_nonzero)
# Check the no assignment cases
df_result[(df_result["alt_"+alternative+"_class"]=="No assignment") & \
(df_result['year']>=start_year) & (df_result['year']<=end_year)] \
["alt_"+alternative+"_sentences"].iloc[3]
if __name__ == "__main__":
main()
```
#### File: etm/script/utils.py
```python
import torch
import numpy as np
def get_topic_diversity(beta, topk):
num_topics = beta.shape[0]
list_w = np.zeros((num_topics, topk))
for k in range(num_topics):
idx = beta[k,:].argsort()[-topk:][::-1]
list_w[k,:] = idx
n_unique = len(np.unique(list_w))
TD = n_unique / (topk * num_topics)
print('Topic diversity is: {}'.format(TD))
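# get_document_frequency returns the number of documents containing word wi, or,
# when wj is also given, the document frequency of wj together with the wi/wj
# co-occurrence count; these counts feed the coherence score computed below.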
def get_document_frequency(data, wi, wj=None):
if wj is None:
D_wi = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
continue
else:
doc = doc.squeeze()
if wi in doc:
D_wi += 1
return D_wi
D_wj = 0
D_wi_wj = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
doc = [doc.squeeze()]
else:
doc = doc.squeeze()
if wj in doc:
D_wj += 1
if wi in doc:
D_wi_wj += 1
return D_wj, D_wi_wj
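# get_topic_coherence computes, for each topic's highest-probability words, the
# pairwise normalized-PMI-style score f(w_i, w_j) from document co-occurrence
# counts and prints the average across word pairs and topics.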
def get_topic_coherence(beta, data, vocab):
D = len(data) ## number of docs...data is list of documents
#print('D: ', D)
TC = []
num_topics = len(beta)
for k in range(num_topics):
#print('k: {}/{}'.format(k, num_topics))
top_10 = list(beta[k].argsort()[-11:][::-1])
top_words = [vocab[a] for a in top_10]
TC_k = 0
counter = 0
for i, word in enumerate(top_10):
# get D(w_i)
D_wi = get_document_frequency(data, word)
j = i + 1
tmp = 0
while j < len(top_10) and j > i:
# get D(w_j) and D(w_i, w_j)
D_wj, D_wi_wj = get_document_frequency(data, word, top_10[j])
# get f(w_i, w_j)
if D_wi_wj == 0:
f_wi_wj = -1
else:
f_wi_wj = -1 + ( np.log(D_wi) + np.log(D_wj) - 2.0 * np.log(D) ) / ( np.log(D_wi_wj) - np.log(D) )
# update tmp:
tmp += f_wi_wj
j += 1
counter += 1
# update TC_k
TC_k += tmp
TC.append(TC_k)
#print('counter: ', counter)
#print('num topics: ', len(TC))
TC = np.mean(TC) / counter
print('Topic coherence is: {}'.format(TC))
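# nearest_neighbors returns the 20 vocabulary words whose embedding vectors have
# the highest cosine similarity with the query word's vector.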
def nearest_neighbors(word, embeddings, vocab):
vectors = embeddings.data.cpu().numpy()
index = vocab.index(word)
print('vectors: ', vectors.shape)
query = vectors[index]
print('query: ', query.shape)
ranks = vectors.dot(query).squeeze()
denom = query.T.dot(query).squeeze()
denom = denom * np.sum(vectors**2, 1)
denom = np.sqrt(denom)
ranks = ranks / denom
mostSimilar = []
[mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]
nearest_neighbors = mostSimilar[:20]
nearest_neighbors = [vocab[comp] for comp in nearest_neighbors]
return nearest_neighbors
import nltk
from nltk.collocations import *
import matplotlib.pyplot as plt
import os
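# bigrams and trigram extract frequent collocations (at least 150 and 100
# occurrences respectively) from the tokenized documents after filtering
# stopwords and short tokens; replace_collocation substitutes them back into a
# raw string.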
def bigrams(big_document):
ignored_words = nltk.corpus.stopwords.words('english')
ignored_words.append('percent')
ignored_words.append('governor')
ignored_words.append('dont')
# bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_documents(big_document)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
finder.apply_freq_filter(150)
return [' '.join(x) for x in list(finder.ngram_fd.keys())]
def trigram(big_document):
ignored_words = nltk.corpus.stopwords.words('english')
ignored_words.append('percent')
ignored_words.append('governor')
ignored_words.append('dont')
# trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_documents(big_document)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
finder.apply_freq_filter(100)
return [' '.join(x) for x in list(finder.ngram_fd.keys())]
def replace_collocation(string, dict_collocation):
for key in dict_collocation.keys():
string = string.replace(key, dict_collocation[key])
return string
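# plot_word_cloud renders a word cloud of the supplied tokenized text. Note that
# it saves under PLOT_PATH, which is not defined in this module and is assumed to
# be provided elsewhere in the project before this helper is called.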
def plot_word_cloud(text, filename='wordcloud.eps', format='eps',
width=1000, height=500, background_color='white', figsize=(10,6), dpi=100, bbox_inches='tight'):
from wordcloud import WordCloud
meeting_string = (" ").join([word for line in text for word in line])
wordcloud = WordCloud(width=width, height=height, background_color=background_color).generate(meeting_string)
fig = plt.figure(figsize=figsize)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.imshow(wordcloud)
plt.axis("off")
fig.tight_layout()
plt.savefig(os.path.join(PLOT_PATH, filename), format=format, dpi=dpi, bbox_inches=bbox_inches)
``` |
{
"source": "jm5948/test",
"score": 2
} |
#### File: opensfm/commands/test.py
```python
import cvtimages
import webcam
import shutil
import multiprocessing as mp
import os
path = '/Users/chaejaemin/Downloads/opensfm/OpenSfM/data/lund/'
img_path = '/Users/chaejaemin/Downloads/opensfm/OpenSfM/data/lund/images'
blur_path = '/Users/chaejaemin/Downloads/opensfm/OpenSfM/data/lund/blurimages'
if os.path.isdir(img_path) and os.path.isdir(blur_path):
shutil.rmtree(img_path)
shutil.rmtree(blur_path)
command = []
command.append(webcam.Command())
command.append(cvtimages)
# for subcommand in command:
p = mp.Process(target = command[0].run, args = (path, ))
q = mp.Process(target = command[1].cvt, args = (img_path, ))
p.start()
q.start()
p.join()
q.join()
# class apple():
# def add(self, a):
# print(a*a)
#
# djdj = apple()
# list = [1,2,3]
#
# def add(a):
# print(a*a)
#
# for index, i in enumerate(list):
# p = mp.Process(target=djdj.add, args=(i, ))
# p.start()
# p.join()
```
#### File: jm5948/test/opensfm_ui.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import QCoreApplication
import opensfm_run_all
class main_window(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('OpenSfM')
self.setGeometry(800, 200, 500, 500)
# menu list
menu = self.menuBar()
menu.setNativeMenuBar(False)
menu_file = menu.addMenu('File')
menu_test = menu.addMenu('Test')
menu_test2 = menu.addMenu('Test2')
# menu actions
file_exit = QAction('Exit', self)
file_exit.triggered.connect(QCoreApplication.instance().quit)
menu_file.addAction(file_exit)
wg = camera_button()
self.setCentralWidget(wg)
self.show()
class camera_button(QWidget):
def __init__(self):
super().__init__()
self.init_CTUI()
def init_CTUI(self):
self.set_directory = False
self.error_message = 'Directory is not set'
# btn list
btn1 = QPushButton('Start Webcam', self)
btn1.clicked.connect(self.webcam_btn)
btn2 = QPushButton('Set Directory', self)
btn2.clicked.connect(self.directory_btn)
btn3 = QPushButton('Run All Process')
btn3.clicked.connect(self.run_all_process)
# get directory
self.get_directory = QFileDialog(self)
# error message
self.error_mg = QErrorMessage(self)
# layout
layout = QVBoxLayout()
layout.addWidget(btn2)
layout.addWidget(btn1)
layout.addWidget(btn3)
self.setLayout(layout)
self.show()
def webcam_btn(self):
if self.set_directory == False:
self.error_mg.showMessage(self.error_message)
else:
self.run = opensfm_run_all.sfm_subprocesses(self.fname)
self.run.start_webcam()
def directory_btn(self):
self.fname = self.get_directory.getExistingDirectory(self)
self.set_directory = True
def run_all_process(self):
if self.set_directory == 0:
self.error_mg.showMessage(self.error_message)
else:
run2 = opensfm_run_all.sfm_subprocesses(self.fname)
run2.run_all()
app = QApplication(sys.argv)
main = main_window()
sys.exit(app.exec_())
``` |
{
"source": "jm66/env_canada",
"score": 3
} |
#### File: env_canada/env_canada/ec_aqhi.py
```python
from datetime import datetime, timezone
import logging
import xml.etree.ElementTree as et
from aiohttp import ClientSession
from geopy import distance
AQHI_SITE_LIST_URL = "https://dd.weather.gc.ca/air_quality/doc/AQHI_XML_File_List.xml"
AQHI_OBSERVATION_URL = "https://dd.weather.gc.ca/air_quality/aqhi/{}/observation/realtime/xml/AQ_OBS_{}_CURRENT.xml"
AQHI_FORECAST_URL = "https://dd.weather.gc.ca/air_quality/aqhi/{}/forecast/realtime/xml/AQ_FCST_{}_CURRENT.xml"
LOG = logging.getLogger(__name__)
def timestamp_to_datetime(timestamp):
dt = datetime.strptime(timestamp, "%Y%m%d%H%M%S")
dt = dt.replace(tzinfo=timezone.utc)
return dt
async def get_aqhi_regions(language):
"""Get list of all AQHI regions from Environment Canada, for auto-config."""
zone_name_tag = "name_%s_CA" % language.lower()
region_name_tag = "name%s" % language.title()
regions = []
async with ClientSession() as session:
response = await session.get(AQHI_SITE_LIST_URL, timeout=10)
result = await response.read()
site_xml = result.decode("utf-8")
xml_object = et.fromstring(site_xml)
for zone in xml_object.findall("./EC_administrativeZone"):
_zone_attribs = zone.attrib
_zone_attrib = {
"abbreviation": _zone_attribs["abreviation"],
"zone_name": _zone_attribs[zone_name_tag],
}
for region in zone.findall("./regionList/region"):
_region_attribs = region.attrib
_region_attrib = {
"region_name": _region_attribs[region_name_tag],
"cgndb": _region_attribs["cgndb"],
"latitude": float(_region_attribs["latitude"]),
"longitude": float(_region_attribs["longitude"]),
}
_children = list(region)
for child in _children:
_region_attrib[child.tag] = child.text
_region_attrib.update(_zone_attrib)
regions.append(_region_attrib)
return regions
async def find_closest_region(language, lat, lon):
"""Return the AQHI region and site ID of the closest site."""
region_list = await get_aqhi_regions(language)
def site_distance(site):
"""Calculate distance to a region."""
return distance.distance((lat, lon), (site["latitude"], site["longitude"]))
return min(region_list, key=site_distance)
class ECAirQuality(object):
"""Get air quality data from Environment Canada."""
def __init__(self, zone_id=None, region_id=None, coordinates=None, language="EN"):
"""Initialize the data object."""
self.language = language.upper()
if zone_id and region_id:
self.zone_id = zone_id
self.region_id = region_id.upper()
else:
self.zone_id = None
self.region_id = None
self.coordinates = coordinates
self.region_name = None
self.current = None
self.current_timestamp = None
self.forecasts = dict(daily={}, hourly={})
async def get_aqhi_data(self, url):
async with ClientSession() as session:
response = await session.get(
url.format(self.zone_id, self.region_id), timeout=10
)
if response.ok:
result = await response.read()
aqhi_xml = result.decode("ISO-8859-1")
return et.fromstring(aqhi_xml)
else:
LOG.warning("Error fetching AQHI data")
return None
async def update(self):
# Find closest site if not identified
if not (self.zone_id and self.region_id):
closest = await find_closest_region(self.language, *self.coordinates)
self.zone_id = closest["abbreviation"]
self.region_id = closest["cgndb"]
# Fetch current measurement
aqhi_current = await self.get_aqhi_data(url=AQHI_OBSERVATION_URL)
if aqhi_current:
# Update region name
element = aqhi_current.find("region")
self.region_name = element.attrib["name{lang}".format(lang=self.language.title())]
# Update AQHI current condition
element = aqhi_current.find("airQualityHealthIndex")
if element is not None:
self.current = float(element.text)
else:
self.current = None
element = aqhi_current.find("./dateStamp/UTCStamp")
if element is not None:
self.current_timestamp = timestamp_to_datetime(element.text)
else:
self.current_timestamp = None
# Update AQHI forecasts
aqhi_forecast = await self.get_aqhi_data(url=AQHI_FORECAST_URL)
if aqhi_forecast:
# Update AQHI daily forecasts
for f in aqhi_forecast.findall("./forecastGroup/forecast"):
for p in f.findall("./period"):
if self.language == p.attrib["lang"]:
period = p.attrib["forecastName"]
self.forecasts["daily"][period] = int(f.findtext("./airQualityHealthIndex"))
# Update AQHI hourly forecasts
for f in aqhi_forecast.findall("./hourlyForecastGroup/hourlyForecast"):
self.forecasts["hourly"][timestamp_to_datetime(f.attrib["UTCTime"])] = int(
f.text
)
```
#### File: env_canada/tests/test_ec_hydro.py
```python
import asyncio
from datetime import datetime
import pytest
from env_canada import ec_hydro, ECHydro
def test_get_hydro_sites():
sites = asyncio.run(ec_hydro.get_hydro_sites())
assert len(sites) > 0
@pytest.mark.parametrize(
"init_parameters",
[{"coordinates": (50, -100)}, {"province": "ON", "station": "02KF005"}],
)
def test_echydro(init_parameters):
hydro = ECHydro(**init_parameters)
assert isinstance(hydro, ECHydro)
asyncio.run(hydro.update())
assert isinstance(hydro.timestamp, datetime)
assert isinstance(hydro.measurements["water_level"]["value"], float)
if hydro.measurements.get("discharge"):
assert isinstance(hydro.measurements["discharge"]["value"], float)
@pytest.fixture()
def test_hydro():
return ECHydro(province="ON", station="02KF005")
def test_update(test_hydro):
asyncio.run(test_hydro.update())
assert isinstance(test_hydro.timestamp, datetime)
assert isinstance(test_hydro.measurements["water_level"]["value"], float)
assert isinstance(test_hydro.measurements["discharge"]["value"], float)
``` |
{
"source": "jm66/home-assistant-cli",
"score": 2
} |
#### File: home-assistant-cli/homeassistant_cli/config.py
```python
import logging
import os
import sys
from typing import Any, Dict, List, Optional, Tuple, cast # noqa: F401
import click
from requests import Session # noqa: ignore
from ruamel.yaml import YAML
import zeroconf
import homeassistant_cli.const as const
import homeassistant_cli.yaml as yaml
_LOGGING = logging.getLogger(__name__)
class _ZeroconfListener:
def __init__(self) -> None:
self.services = {} # type: Dict[str, zeroconf.ServiceInfo]
def remove_service(
self, _zeroconf: zeroconf.Zeroconf, _type: str, name: str
) -> None:
"""Remove service."""
self.services[name] = None
def add_service(
self, _zeroconf: zeroconf.Zeroconf, _type: str, name: str
) -> None:
"""Add service."""
self.services[name] = _zeroconf.get_service_info(_type, name)
def _locate_ha() -> Optional[str]:
_zeroconf = zeroconf.Zeroconf()
listener = _ZeroconfListener()
zeroconf.ServiceBrowser(_zeroconf, "_home-assistant._tcp.local.", listener)
try:
import time
retries = 0
while not listener.services and retries < 5:
_LOGGING.info(
"Trying to locate Home Assistant on local network..."
)
time.sleep(0.5)
retries = retries + 1
finally:
_zeroconf.close()
if listener.services:
if len(listener.services) > 1:
_LOGGING.warning(
"Found multiple Home Assistant instances at %s",
", ".join(listener.services),
)
_LOGGING.warning("Use --server to explicitly specify one.")
return None
_, service = listener.services.popitem()
base_url = service.properties[b'base_url'].decode('utf-8')
_LOGGING.info("Found and using %s as server", base_url)
return cast(str, base_url)
_LOGGING.warning(
"Found no Home Assistant on local network. Using defaults"
)
return None
def resolve_server(ctx: Any) -> str: # noqa: F821
"""Resolve server if not already done.
if server is `auto` try and resolve it
"""
# to work around bug in click that hands out
# non-Configuration context objects.
if not hasattr(ctx, "resolved_server"):
ctx.resolved_server = None
if not ctx.resolved_server:
if ctx.server == "auto":
if "HASSIO_TOKEN" in os.environ and "HASS_TOKEN" not in os.environ:
ctx.resolved_server = const.DEFAULT_SERVER_MDNS
else:
if not ctx.resolved_server and "pytest" in sys.modules:
ctx.resolved_server = const.DEFAULT_SERVER
else:
ctx.resolved_server = _locate_ha()
if not ctx.resolved_server:
sys.exit(3)
else:
ctx.resolved_server = ctx.server
if not ctx.resolved_server:
ctx.resolved_server = const.DEFAULT_SERVER
return cast(str, ctx.resolved_server)
class Configuration:
"""The configuration context for the Home Assistant CLI."""
def __init__(self) -> None:
"""Initialize the configuration."""
self.verbose = False # type: bool
self.server = const.AUTO_SERVER # type: str
self.resolved_server = None # type: Optional[str]
self.output = const.DEFAULT_OUTPUT # type: str
self.token = None # type: Optional[str]
self.password = None # type: Optional[str]
self.insecure = False # type: bool
self.timeout = const.DEFAULT_TIMEOUT # type: int
self.debug = False # type: bool
self.showexceptions = False # type: bool
self.session = None # type: Optional[Session]
self.cert = None # type: Optional[str]
self.columns = None # type: Optional[List[Tuple[str, str]]]
self.no_headers = False
self.table_format = 'plain'
self.sort_by = None
def echo(self, msg: str, *args: Optional[Any]) -> None:
"""Put content message to stdout."""
self.log(msg, *args)
def log( # pylint: disable=no-self-use
self, msg: str, *args: Optional[str]
) -> None: # pylint: disable=no-self-use
"""Log a message to stdout."""
if args:
msg %= args
click.echo(msg, file=sys.stdout)
def vlog(self, msg: str, *args: Optional[str]) -> None:
"""Log a message only if verbose is enabled."""
if self.verbose:
self.log(msg, *args)
def __repr__(self) -> str:
"""Return the representation of the Configuration."""
view = {
"server": self.server,
"access-token": '<PASSWORD>' if self.token is not None else 'no',
"api-password": '<PASSWORD>' if self.password is not None else 'no',
"insecure": self.insecure,
"output": self.output,
"verbose": self.verbose,
}
return f"<Configuration({view})"
def resolve_server(self) -> str:
"""Return resolved server (after resolving if needed)."""
return resolve_server(self)
def auto_output(self, auto_output: str) -> str:
"""Configure output format."""
if self.output == "auto":
if auto_output == 'data':
auto_output = const.DEFAULT_DATAOUTPUT
_LOGGING.debug("Setting auto-output to: %s", auto_output)
self.output = auto_output
return self.output
def yaml(self) -> YAML:
"""Create default yaml parser."""
if self:
yaml.yaml()
return yaml.yaml()
def yamlload(self, source: str) -> Any:
"""Load YAML from source."""
return self.yaml().load(source)
def yamldump(self, source: Any) -> str:
"""Dump dictionary to YAML string."""
return cast(str, yaml.dumpyaml(self.yaml(), source))
```
#### File: homeassistant_cli/plugins/map.py
```python
import sys
import webbrowser
import click
import homeassistant_cli.autocompletion as autocompletion
from homeassistant_cli.cli import pass_context
from homeassistant_cli.config import Configuration
import homeassistant_cli.remote as api
OSM_URL = "https://www.openstreetmap.org/"
GOOGLE_URL = "https://www.google.com/maps/search/"
BING_URL = "https://www.bing.com/maps"
SERVICE = {
'openstreetmap': OSM_URL + '?mlat={0}&mlon={1}#map=17/{0}/{1}',
'google': GOOGLE_URL + '?api=1&query={0},{1}',
'bing': BING_URL + '?v=2&cp={0}~{1}&lvl=17&sp=point.{0}_{1}_{2}',
}
@click.command('map')
@click.argument(
'entity',
required=False,
autocompletion=autocompletion.entities, # type: ignore
)
@click.option(
'--service', default='openstreetmap', type=click.Choice(SERVICE.keys())
)
@pass_context
def cli(ctx: Configuration, service: str, entity: str) -> None:
"""Show the location of the config or an entity on a map."""
latitude = None
longitude = None
if entity:
thing = entity
data = api.get_state(ctx, entity)
if data:
attr = data.get('attributes', {})
latitude = attr.get('latitude')
longitude = attr.get('longitude')
thing = attr.get('friendly_name', entity)
else:
thing = "configuration"
response = api.get_config(ctx)
if response:
latitude = response.get('latitude')
longitude = response.get('longitude')
thing = response.get('location_name', thing)
if latitude and longitude:
urlpattern = SERVICE.get(service)
import urllib.parse
if urlpattern:
url = urlpattern.format(
latitude, longitude, urllib.parse.quote_plus(thing)
)
ctx.echo(
"{} location is at {}, {}".format(thing, latitude, longitude)
)
webbrowser.open_new_tab(url)
else:
ctx.echo(
"Could not find url pattern for service {}".format(service)
)
else:
ctx.echo("No exact location info found in {}".format(thing))
sys.exit(2)
```
#### File: homeassistant_cli/plugins/raw.py
```python
import json as json_
import logging
from typing import Any, Dict, List, cast # noqa: F401
import click
import homeassistant_cli.autocompletion as autocompletion
from homeassistant_cli.cli import pass_context
from homeassistant_cli.config import Configuration
from homeassistant_cli.helper import format_output
import homeassistant_cli.remote as api
_LOGGING = logging.getLogger(__name__)
@click.group('raw')
@pass_context
def cli(ctx: Configuration):
"""Call the raw API (advanced)."""
ctx.auto_output("data")
def _report(ctx, cmd, method, response) -> None:
"""Create a report."""
response.raise_for_status()
if response.ok:
try:
ctx.echo(format_output(ctx, response.json()))
except json_.decoder.JSONDecodeError:
_LOGGING.debug("Response could not be parsed as JSON")
ctx.echo(response.text)
else:
_LOGGING.warning(
"%s: <No output returned from %s %s>",
response.status_code,
cmd,
method,
)
@cli.command()
@click.argument(
'method', autocompletion=autocompletion.api_methods # type: ignore
)
@pass_context
def get(ctx: Configuration, method):
"""Do a GET request against api/<method>."""
response = api.restapi(ctx, 'get', method)
_report(ctx, "GET", method, response)
@cli.command()
@click.argument(
'method', autocompletion=autocompletion.api_methods # type: ignore
)
@click.option('--json')
@pass_context
def post(ctx: Configuration, method, json):
"""Do a POST request against api/<method>."""
if json:
data = json_.loads(json)
else:
data = {}
response = api.restapi(ctx, 'post', method, data)
_report(ctx, "GET", method, response)
@cli.command("ws")
@click.argument(
'wstype', autocompletion=autocompletion.wsapi_methods # type: ignore
)
@click.option('--json')
@pass_context
def websocket(ctx: Configuration, wstype, json): # noqa: D301
"""Send a websocket request against /api/websocket.
WSTYPE is name of websocket methods.
\b
--json is dictionary to pass in addition to the type.
Example: --json='{ "area_id":"2c8bf93c8082492f99c989896962f207" }'
"""
if json:
data = json_.loads(json)
else:
data = {}
frame = {'type': wstype}
frame = {**frame, **data} # merging data into frame
response = cast(List[Dict[str, Any]], api.wsapi(ctx, frame))
ctx.echo(format_output(ctx, response))
```
#### File: home-assistant-cli/tests/test_completion.py
```python
from typing import cast
import requests_mock
import homeassistant_cli.autocompletion as autocompletion
import homeassistant_cli.cli as cli
from homeassistant_cli.config import Configuration
def test_entity_completion(basic_entities_text) -> None:
"""Test completion for entities."""
with requests_mock.Mocker() as mock:
mock.get(
'http://localhost:8123/api/states',
text=basic_entities_text,
status_code=200,
)
cfg = cli.cli.make_context('hass-cli', ['entity', 'get'])
result = autocompletion.entities(
cast(Configuration, cfg), ["entity", "get"], "" # type: ignore
)
assert len(result) == 3
resultdict = dict(result)
assert "sensor.one" in resultdict
assert resultdict['sensor.one'] == 'friendly long name'
def test_service_completion(default_services_text) -> None:
"""Test completion for services."""
with requests_mock.Mocker() as mock:
mock.get(
'http://localhost:8123/api/services',
text=default_services_text,
status_code=200,
)
cfg = cli.cli.make_context('hass-cli', ['service', 'list'])
result = autocompletion.services(
cfg, ["service", "list"], "" # type: ignore
)
assert len(result) == 12
resultdict = dict(result)
assert "group.remove" in resultdict
val = resultdict["group.remove"]
assert val == "Remove a user group."
def test_event_completion(default_events_text) -> None:
"""Test completion for events."""
with requests_mock.Mocker() as mock:
mock.get(
'http://localhost:8123/api/events',
text=default_events_text,
status_code=200,
)
cfg = cli.cli.make_context('hass-cli', ['events', 'list'])
result = autocompletion.events(
cfg, ["events", "list"], "" # type: ignore
)
assert len(result) == 11
resultdict = dict(result)
assert "component_loaded" in resultdict
assert resultdict["component_loaded"] == ""
def test_area_completion(default_events_text) -> None:
"""Test completion for Area."""
with requests_mock.Mocker() as mock:
mock.get(
'http://localhost:8123/api/events',
text=default_events_text,
status_code=200,
)
cfg = cli.cli.make_context('hass-cli', ['events', 'list'])
result = autocompletion.events(
cfg, ["events", "list"], "" # type: ignore
)
assert len(result) == 11
resultdict = dict(result)
assert "component_loaded" in resultdict
assert resultdict["component_loaded"] == ""
``` |
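These tests depend on text fixtures (`basic_entities_text`, `default_services_text`, `default_events_text`) defined elsewhere in the repository's conftest. A hypothetical sketch of what `basic_entities_text` might contain, assuming the standard `/api/states` JSON shape that the completion code maps from `entity_id` to `friendly_name`:
```python
import json
import pytest

@pytest.fixture
def basic_entities_text():
    """Hypothetical fixture: three entities in the /api/states response format."""
    return json.dumps([
        {"entity_id": "sensor.one", "state": "on",
         "attributes": {"friendly_name": "friendly long name"}},
        {"entity_id": "sensor.two", "state": "off", "attributes": {}},
        {"entity_id": "light.kitchen", "state": "on", "attributes": {}},
    ])
```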
{
"source": "jm66/pyvmomi-community-samples",
"score": 3
} |
#### File: pyvmomi-community-samples/samples/add_raw_disk_to_vm.py
```python
from pyVmomi import vim
from pyVmomi import vmodl
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
from pyVim.task import WaitForTasks
from tools import cli
import atexit
import argparse
import getpass
def get_args():
parser = cli.build_arg_parser()
parser.add_argument('-v', '--vm-name',
required=False,
action='store',
help='name of the vm')
parser.add_argument('--uuid',
required=False,
action='store',
help='vmuuid of vm')
parser.add_argument('--device-name',
required=True,
action='store',
help=('The device name. Might look like '
'"/vmfs/devices/disks/naa.*". '
'See vim.vm.device.VirtualDisk.'
'RawDiskMappingVer1BackingInfo documentation.'))
parser.add_argument('--disk-mode',
required=False,
action='store',
default='independent_persistent',
choices=['append',
'independent_nonpersistent',
'independent_persistent',
'nonpersistent',
'persistent',
'undoable'],
help=('See vim.vm.device.VirtualDiskOption.DiskMode '
'documentation.'))
parser.add_argument('--compatibility-mode',
required=False,
default='virtualMode',
choices=['physicalMode', 'virtualMode'],
action='store',
help=('See vim.vm.device.VirtualDiskOption.'
'CompatibilityMode documentation.'))
return cli.prompt_for_password(parser.parse_args())
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def add_raw_disk(vm, si, device_name, disk_mode, compatibility_mode):
spec = vim.vm.ConfigSpec()
# get all disks on a VM, set unit_number to the next available
unit_number = 0
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
# unit_number 7 reserved for scsi controller
if unit_number == 7:
unit_number += 1
if unit_number >= 16:
print "we don't support this many disks"
return
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
rdm_info = vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo()
disk_spec.device.backing = rdm_info
disk_spec.device.backing.compatibilityMode = compatibility_mode
disk_spec.device.backing.diskMode = disk_mode
# The device_name will look something like
# /vmfs/devices/disks/naa.41412340757396001d7710df0fdd22a9
disk_spec.device.backing.deviceName = device_name
disk_spec.device.unitNumber = unit_number
disk_spec.device.controllerKey = controller.key
spec.deviceChange = [disk_spec]
WaitForTasks([vm.ReconfigVM_Task(spec=spec)], si=si)
print "Raw disk added to %s" % (vm.config.name)
def main():
args = get_args()
# create the service instance
si = None
if args.disable_ssl_verification:
si = SmartConnectNoSSL(host=args.host,
user=args.user,
                               pwd=args.password,
port=args.port)
else:
si = SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
# disconnect the service instance at program exit
atexit.register(Disconnect, si)
vm = None
if args.uuid:
search_index = si.content.searchIndex
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_name:
content = si.RetrieveContent()
vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
if vm:
add_raw_disk(vm, si, args.device_name,
args.disk_mode, args.compatibility_mode)
else:
print "VM not found"
# start this thing
if __name__ == "__main__":
main()
```
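The unit-number logic above walks the existing file-backed disks and skips slot 7, which the SCSI controller reserves. A standalone sketch of that selection rule, using plain integers so it can be checked without a vCenter connection:
```python
def next_free_unit_number(used_unit_numbers, max_units=16, reserved=7):
    """Pick the next SCSI unit number after the highest one in use, skipping the controller slot."""
    unit_number = max(used_unit_numbers, default=-1) + 1
    if unit_number == reserved:
        unit_number += 1
    if unit_number >= max_units:
        raise ValueError("no free unit numbers left on this controller")
    return unit_number

print(next_free_unit_number([0, 1, 2]))              # 3
print(next_free_unit_number([0, 1, 2, 3, 4, 5, 6]))  # 8, slot 7 is skipped
```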
#### File: pyvmomi-community-samples/samples/cdrom_vm.py
```python
import sys
from pyVmomi import vim
from pyVim.connect import SmartConnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
# Prerequisite for VM (for simplicity sake)
# is there is an existing IDE controller.
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--name',
help='Name of the VM to test CD-rom on')
parser.add_argument('-i', '--iso',
help='ISO to use in test. Use datastore path format. '
'E.g. [datastore1] path/to/file.iso')
parser.add_argument('-d', '--datacenter',
help='Name of datacenter to search on. '
'Defaults to first.')
return cli.prompt_for_password(parser.parse_args())
def get_dc(si, name):
for dc in si.content.rootFolder.childEntity:
if dc.name == name:
return dc
raise Exception('Failed to find datacenter named %s' % name)
# Returns the first cdrom if any, else None.
def get_physical_cdrom(host):
for lun in host.configManager.storageSystem.storageDeviceInfo.scsiLun:
if lun.lunType == 'cdrom':
return lun
return None
def find_free_ide_controller(vm):
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualIDEController):
# If there are less than 2 devices attached, we can use it.
if len(dev.device) < 2:
return dev
return None
def find_device(vm, device_type):
result = []
for dev in vm.config.hardware.device:
if isinstance(dev, device_type):
result.append(dev)
return result
def new_cdrom_spec(controller_key, backing):
connectable = vim.vm.device.VirtualDevice.ConnectInfo()
connectable.allowGuestControl = True
connectable.startConnected = True
cdrom = vim.vm.device.VirtualCdrom()
cdrom.controllerKey = controller_key
cdrom.key = -1
cdrom.connectable = connectable
cdrom.backing = backing
return cdrom
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
if args.datacenter:
dc = get_dc(si, args.datacenter)
else:
dc = si.content.rootFolder.childEntity[0]
vm = si.content.searchIndex.FindChild(dc.vmFolder, args.name)
if vm is None:
raise Exception('Failed to find VM %s in datacenter %s' %
                        (args.name, dc.name))
controller = find_free_ide_controller(vm)
if controller is None:
raise Exception('Failed to find a free slot on the IDE controller')
cdrom = None
cdrom_lun = get_physical_cdrom(vm.runtime.host)
if cdrom_lun is not None:
backing = vim.vm.device.VirtualCdrom.AtapiBackingInfo()
backing.deviceName = cdrom_lun.deviceName
deviceSpec = vim.vm.device.VirtualDeviceSpec()
deviceSpec.device = new_cdrom_spec(controller.key, backing)
deviceSpec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
configSpec = vim.vm.ConfigSpec(deviceChange=[deviceSpec])
WaitForTask(vm.Reconfigure(configSpec))
cdroms = find_device(vm, vim.vm.device.VirtualCdrom)
        cdrom = list(filter(lambda x: type(x.backing) == type(backing) and
                            x.backing.deviceName == cdrom_lun.deviceName,
                            cdroms))[0]
else:
print('Skipping physical CD-Rom test as no device present.')
op = vim.vm.device.VirtualDeviceSpec.Operation
iso = args.iso
if iso is not None:
deviceSpec = vim.vm.device.VirtualDeviceSpec()
if cdrom is None: # add a cdrom
backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso)
cdrom = new_cdrom_spec(controller.key, backing)
deviceSpec.operation = op.add
else: # edit an existing cdrom
backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso)
cdrom.backing = backing
deviceSpec.operation = op.edit
deviceSpec.device = cdrom
configSpec = vim.vm.ConfigSpec(deviceChange=[deviceSpec])
WaitForTask(vm.Reconfigure(configSpec))
cdroms = find_device(vm, vim.vm.device.VirtualCdrom)
        cdrom = list(filter(lambda x: type(x.backing) == type(backing) and
                            x.backing.fileName == iso, cdroms))[0]
else:
print('Skipping ISO test as no iso provided.')
if cdrom is not None: # Remove it
deviceSpec = vim.vm.device.VirtualDeviceSpec()
deviceSpec.device = cdrom
deviceSpec.operation = op.remove
configSpec = vim.vm.ConfigSpec(deviceChange=[deviceSpec])
WaitForTask(vm.Reconfigure(configSpec))
if __name__ == '__main__':
main()
```
#### File: pyvmomi-community-samples/samples/change_disk_mode.py
```python
import atexit
import requests
from tools import cli
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
from tools import tasks
# disable urllib3 warnings
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
def change_disk_mode(si, vm_obj, disk_number, mode,
disk_prefix_label='Hard disk '):
"""Change the disk mode on a virtual hard disk.
:param si: Service Instance
:param vm_obj: Virtual Machine Object
:param disk_number: Disk number.
:param mode: New disk mode.
:param disk_prefix_label: Prefix name of disk.
:return: True if success
"""
disk_label = disk_prefix_label + str(disk_number)
virtual_disk_device = None
# Find the disk device
for dev in vm_obj.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) \
and dev.deviceInfo.label == disk_label:
virtual_disk_device = dev
if not virtual_disk_device:
raise RuntimeError('Virtual {} could not be found.'.format(disk_label))
virtual_disk_spec = vim.vm.device.VirtualDeviceSpec()
virtual_disk_spec.operation = \
vim.vm.device.VirtualDeviceSpec.Operation.edit
virtual_disk_spec.device = virtual_disk_device
virtual_disk_spec.device.backing.diskMode = mode
dev_changes = []
dev_changes.append(virtual_disk_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = dev_changes
task = vm_obj.ReconfigVM_Task(spec=spec)
tasks.wait_for_tasks(si, [task])
return True
def get_args():
parser = cli.build_arg_parser()
parser.add_argument('-v', '--vmname', required=True,
help='Name of the VirtualMachine you want to change.')
parser.add_argument('-d', '--disk-number', required=True,
help='Disk number to change mode.')
parser.add_argument('-m', '--mode', required=True,
choices=['independent_persistent',
'persistent',
'independent_nonpersistent',
'nonpersistent',
'undoable',
'append'])
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vim_type, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vim_type, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def main():
args = get_args()
si = SmartConnect(host=args.host,
user=args.user,
                      pwd=args.password,
port=int(args.port))
atexit.register(Disconnect, si)
content = si.RetrieveContent()
    print('Searching for VM {}'.format(args.vmname))
vm_obj = get_obj(content, [vim.VirtualMachine], args.vmname)
if vm_obj:
change_disk_mode(si, vm_obj, args.disk_number, args.mode)
        print('VM Disk {} successfully '
              'changed to mode {}.'.format(args.disk_number,
                                           args.mode))
else:
print "VM not found."
# start
if __name__ == "__main__":
main()
```
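A small companion sketch for verifying the result: it reuses the same device walk as `change_disk_mode` to print each disk's label and current mode, given a connected `vm_obj` as in `main`:
```python
from pyVmomi import vim

def list_disk_modes(vm_obj):
    """Print every virtual disk's label and its current disk mode."""
    for dev in vm_obj.config.hardware.device:
        if isinstance(dev, vim.vm.device.VirtualDisk):
            print('{}: {}'.format(dev.deviceInfo.label, dev.backing.diskMode))
```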
#### File: pyvmomi-community-samples/samples/del_vswitch_from_host.py
```python
from __future__ import print_function
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim
import atexit
import sys
import argparse
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
                        help='vSphere service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use')
parser.add_argument('-p', '--password',
required=True,
action='store',
help='Password to use')
parser.add_argument('-v', '--vswitch',
required=True,
action='store',
help='vSwitch to delete')
args = parser.parse_args()
return args
def GetVMHosts(content):
host_view = content.viewManager.CreateContainerView(content.rootFolder,
[vim.HostSystem],
True)
obj = [host for host in host_view.view]
host_view.Destroy()
return obj
def DelHostsSwitch(hosts, vswitchName):
for host in hosts:
DelHostSwitch(host, vswitchName)
def DelHostSwitch(host, vswitchName):
host.configManager.networkSystem.RemoveVirtualSwitch(vswitchName)
def main():
args = get_args()
serviceInstance = SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
                                   port=args.port)
atexit.register(Disconnect, serviceInstance)
content = serviceInstance.RetrieveContent()
hosts = GetVMHosts(content)
DelHostsSwitch(hosts, args.vswitch)
# Main section
if __name__ == "__main__":
sys.exit(main())
```
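For reference, the inverse operation goes through the same `networkSystem` manager. A minimal sketch of adding a standard vSwitch back to one host (the port count is an arbitrary placeholder):
```python
from pyVmomi import vim

def add_host_switch(host, vswitch_name, num_ports=1024):
    """Create a standard vSwitch on a single host; counterpart to DelHostSwitch above."""
    spec = vim.host.VirtualSwitch.Specification()
    spec.numPorts = num_ports
    host.configManager.networkSystem.AddVirtualSwitch(vswitchName=vswitch_name, spec=spec)
```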
#### File: pyvmomi-community-samples/samples/deploy_ova.py
```python
import atexit
import os
import os.path
import ssl
import sys
import tarfile
import time
from threading import Timer
from argparse import ArgumentParser
from getpass import getpass
from six.moves.urllib.request import Request, urlopen
from tools import cli
from pyVim.connect import SmartConnectNoSSL, Disconnect
from pyVmomi import vim, vmodl
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('--ova-path',
help='Path to the OVA file, can be local or a URL.')
parser.add_argument('-d', '--datacenter',
help='Name of datacenter to search on. '
'Defaults to first.')
parser.add_argument('-r', '--resource-pool',
help='Name of resource pool to use. '
'Defaults to largest memory free.')
parser.add_argument('-ds', '--datastore',
help='Name of datastore to use. '
'Defaults to largest free space in datacenter.')
return cli.prompt_for_password(parser.parse_args())
def main():
args = setup_args()
try:
si = SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
atexit.register(Disconnect, si)
except:
print("Unable to connect to %s" % args.host)
return 1
if args.datacenter:
dc = get_dc(si, args.datacenter)
else:
dc = si.content.rootFolder.childEntity[0]
if args.resource_pool:
rp = get_rp(si, dc, args.resource_pool)
else:
rp = get_largest_free_rp(si, dc)
if args.datastore:
ds = get_ds(dc, args.datastore)
else:
ds = get_largest_free_ds(dc)
ovf_handle = OvfHandler(args.ova_path)
ovfManager = si.content.ovfManager
# CreateImportSpecParams can specify many useful things such as
# diskProvisioning (thin/thick/sparse/etc)
# networkMapping (to map to networks)
# propertyMapping (descriptor specific properties)
cisp = vim.OvfManager.CreateImportSpecParams()
cisr = ovfManager.CreateImportSpec(ovf_handle.get_descriptor(),
rp, ds, cisp)
# These errors might be handleable by supporting the parameters in
# CreateImportSpecParams
if len(cisr.error):
print("The following errors will prevent import of this OVA:")
for error in cisr.error:
print("%s" % error)
return 1
ovf_handle.set_spec(cisr)
lease = rp.ImportVApp(cisr.importSpec, dc.vmFolder)
while lease.state == vim.HttpNfcLease.State.initializing:
print("Waiting for lease to be ready...")
time.sleep(1)
if lease.state == vim.HttpNfcLease.State.error:
print("Lease error: %s" % lease.error)
return 1
if lease.state == vim.HttpNfcLease.State.done:
return 0
print("Starting deploy...")
return ovf_handle.upload_disks(lease, args.host)
def get_dc(si, name):
"""
Get a datacenter by its name.
"""
for dc in si.content.rootFolder.childEntity:
if dc.name == name:
return dc
raise Exception('Failed to find datacenter named %s' % name)
def get_rp(si, dc, name):
"""
Get a resource pool in the datacenter by its names.
"""
viewManager = si.content.viewManager
containerView = viewManager.CreateContainerView(dc, [vim.ResourcePool],
True)
try:
for rp in containerView.view:
if rp.name == name:
return rp
finally:
containerView.Destroy()
raise Exception("Failed to find resource pool %s in datacenter %s" %
(name, dc.name))
def get_largest_free_rp(si, dc):
"""
Get the resource pool with the largest unreserved memory for VMs.
"""
viewManager = si.content.viewManager
containerView = viewManager.CreateContainerView(dc, [vim.ResourcePool],
True)
largestRp = None
unreservedForVm = 0
try:
for rp in containerView.view:
if rp.runtime.memory.unreservedForVm > unreservedForVm:
largestRp = rp
unreservedForVm = rp.runtime.memory.unreservedForVm
finally:
containerView.Destroy()
if largestRp is None:
raise Exception("Failed to find a resource pool in dc %s" % dc.name)
return largestRp
def get_ds(dc, name):
"""
Pick a datastore by its name.
"""
for ds in dc.datastore:
try:
if ds.name == name:
return ds
except: # Ignore datastores that have issues
pass
raise Exception("Failed to find %s on datacenter %s" % (name, dc.name))
def get_largest_free_ds(dc):
"""
Pick the datastore that is accessible with the largest free space.
"""
largest = None
largestFree = 0
for ds in dc.datastore:
try:
freeSpace = ds.summary.freeSpace
if freeSpace > largestFree and ds.summary.accessible:
largestFree = freeSpace
largest = ds
except: # Ignore datastores that have issues
pass
if largest is None:
raise Exception('Failed to find any free datastores on %s' % dc.name)
return largest
def get_tarfile_size(tarfile):
"""
Determine the size of a file inside the tarball.
If the object has a size attribute, use that. Otherwise seek to the end
and report that.
"""
if hasattr(tarfile, 'size'):
return tarfile.size
size = tarfile.seek(0, 2)
tarfile.seek(0, 0)
return size
class OvfHandler(object):
"""
OvfHandler handles most of the OVA operations.
It processes the tarfile, matches disk keys to files and
uploads the disks, while keeping the progress up to date for the lease.
"""
def __init__(self, ovafile):
"""
Performs necessary initialization, opening the OVA file,
processing the files and reading the embedded ovf file.
"""
self.handle = self._create_file_handle(ovafile)
self.tarfile = tarfile.open(fileobj=self.handle)
ovffilename = list(filter(lambda x: x.endswith(".ovf"),
self.tarfile.getnames()))[0]
ovffile = self.tarfile.extractfile(ovffilename)
self.descriptor = ovffile.read().decode()
def _create_file_handle(self, entry):
"""
A simple mechanism to pick whether the file is local or not.
This is not very robust.
"""
if os.path.exists(entry):
return FileHandle(entry)
else:
return WebHandle(entry)
def get_descriptor(self):
return self.descriptor
def set_spec(self, spec):
"""
The import spec is needed for later matching disks keys with
file names.
"""
self.spec = spec
def get_disk(self, fileItem, lease):
"""
Does translation for disk key to file name, returning a file handle.
"""
ovffilename = list(filter(lambda x: x == fileItem.path,
self.tarfile.getnames()))[0]
return self.tarfile.extractfile(ovffilename)
def get_device_url(self, fileItem, lease):
for deviceUrl in lease.info.deviceUrl:
if deviceUrl.importKey == fileItem.deviceId:
return deviceUrl
raise Exception("Failed to find deviceUrl for file %s" % fileItem.path)
def upload_disks(self, lease, host):
"""
Uploads all the disks, with a progress keep-alive.
"""
self.lease = lease
try:
self.start_timer()
for fileItem in self.spec.fileItem:
self.upload_disk(fileItem, lease, host)
lease.Complete()
print("Finished deploy successfully.")
return 0
except vmodl.MethodFault as e:
print("Hit an error in upload: %s" % e)
lease.Abort(e)
except Exception as e:
print("Lease: %s" % lease.info)
print("Hit an error in upload: %s" % e)
lease.Abort(vmodl.fault.SystemError(reason=str(e)))
raise
return 1
def upload_disk(self, fileItem, lease, host):
"""
Upload an individual disk. Passes the file handle of the
disk directly to the urlopen request.
"""
ovffile = self.get_disk(fileItem, lease)
if ovffile is None:
return
deviceUrl = self.get_device_url(fileItem, lease)
url = deviceUrl.url.replace('*', host)
headers = {'Content-length': get_tarfile_size(ovffile)}
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
else:
sslContext = None
req = Request(url, ovffile, headers)
urlopen(req, context=sslContext)
def start_timer(self):
"""
A simple way to keep updating progress while the disks are transferred.
"""
Timer(5, self.timer).start()
def timer(self):
"""
Update the progress and reschedule the timer if not complete.
"""
try:
prog = self.handle.progress()
self.lease.Progress(prog)
if self.lease.state not in [vim.HttpNfcLease.State.done,
vim.HttpNfcLease.State.error]:
self.start_timer()
sys.stderr.write("Progress: %d%%\r" % prog)
except: # Any exception means we should stop updating progress.
pass
class FileHandle(object):
def __init__(self, filename):
self.filename = filename
self.fh = open(filename, 'rb')
self.st_size = os.stat(filename).st_size
self.offset = 0
def __del__(self):
self.fh.close()
def tell(self):
return self.fh.tell()
def seek(self, offset, whence=0):
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.st_size - offset
return self.fh.seek(offset, whence)
def seekable(self):
return True
def read(self, amount):
self.offset += amount
result = self.fh.read(amount)
return result
# A slightly more accurate percentage
def progress(self):
return int(100.0 * self.offset / self.st_size)
class WebHandle(object):
def __init__(self, url):
self.url = url
r = urlopen(url)
if r.code != 200:
raise FileNotFoundError(url)
self.headers = self._headers_to_dict(r)
if 'accept-ranges' not in self.headers:
raise Exception("Site does not accept ranges")
self.st_size = int(self.headers['content-length'])
self.offset = 0
def _headers_to_dict(self, r):
result = {}
if hasattr(r, 'getheaders'):
for n, v in r.getheaders():
result[n.lower()] = v.strip()
else:
for line in r.info().headers:
if line.find(':') != -1:
n, v = line.split(': ', 1)
result[n.lower()] = v.strip()
return result
def tell(self):
return self.offset
def seek(self, offset, whence=0):
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.st_size - offset
return self.offset
def seekable(self):
return True
def read(self, amount):
start = self.offset
end = self.offset + amount - 1
req = Request(self.url,
headers={'Range': 'bytes=%d-%d' % (start, end)})
r = urlopen(req)
self.offset += amount
result = r.read(amount)
r.close()
return result
# A slightly more accurate percentage
def progress(self):
return int(100.0 * self.offset / self.st_size)
if __name__ == "__main__":
exit(main())
```
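The `OvfHandler` class can be exercised on its own before attempting a deploy. A short sketch, assuming the script is importable as `deploy_ova` and using a placeholder path for a real appliance:
```python
from deploy_ova import OvfHandler

# Hypothetical local OVA; a URL to an OVA works too (WebHandle is chosen automatically).
handler = OvfHandler("/tmp/appliance.ova")
descriptor = handler.get_descriptor()
print("OVF descriptor is %d characters long" % len(descriptor))
```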
#### File: pyvmomi-community-samples/samples/destroy_vm.py
```python
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
from tools import cli
from tools import tasks
def setup_args():
"""Adds additional ARGS to allow the vm name or uuid to
be set.
"""
parser = cli.build_arg_parser()
# using j here because -u is used for user
parser.add_argument('-j', '--uuid',
help='BIOS UUID of the VirtualMachine you want '
'to destroy.')
parser.add_argument('-n', '--name',
help='DNS Name of the VirtualMachine you want to '
'destroy.')
parser.add_argument('-i', '--ip',
help='IP Address of the VirtualMachine you want to '
'destroy')
parser.add_argument('-v', '--vm',
help='VM name of the VirtualMachine you want '
'to destroy.')
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vimtype, name):
"""Create contrainer view and search for object in it"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
ARGS = setup_args()
SI = None
try:
SI = connect.SmartConnectNoSSL(host=ARGS.host,
user=ARGS.user,
                                   pwd=ARGS.password,
port=ARGS.port)
atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
pass
if not SI:
raise SystemExit("Unable to connect to host with supplied credentials.")
VM = None
if ARGS.vm:
VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
True,
False)
elif ARGS.name:
VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
True)
elif ARGS.ip:
VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
raise SystemExit(
"Unable to locate VirtualMachine. Arguments given: "
"vm - {0} , uuid - {1} , name - {2} , ip - {3}"
.format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
)
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
if format(VM.runtime.powerState) == "poweredOn":
print("Attempting to power off {0}".format(VM.name))
TASK = VM.PowerOffVM_Task()
tasks.wait_for_tasks(SI, [TASK])
print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
```
#### File: pyvmomi-community-samples/samples/renamer.py
```python
from __future__ import print_function
import atexit
import argparse
import getpass
import sys
import time
from pyVim import connect
from pyVmomi import vim
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
required=True,
action='store',
help='Remote host to connect to')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-o', '--port',
required=False,
action='store',
help="port to use, default 443", default=443)
parser.add_argument('-n', '--name',
required=True,
action='store',
help='Name of the entity to look for.')
parser.add_argument('-r', '--new_name',
required=False,
action='store',
help='New name of the entity.')
args = parser.parse_args()
if args.password is None:
        args.password = getpass.getpass(
            prompt='Enter password for host %s and user %s: ' %
            (args.host, args.user))
return args
args = get_args()
# form a connection...
si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password,
port=args.port)
# doing this means you don't need to remember to disconnect your script/objects
atexit.register(connect.Disconnect, si)
# search the whole inventory tree recursively... a brutish but effective tactic
root_folder = si.content.rootFolder
entity_stack = root_folder.childEntity
name = args.name
obj = None
while entity_stack:
entity = entity_stack.pop()
if entity.name == name:
obj = entity
break
elif isinstance(entity, vim.Datacenter):
# add this vim.DataCenter's folders to our search
# we don't know the entity's type so we have to scan
# each potential folder...
entity_stack.append(entity.datastoreFolder)
entity_stack.append(entity.hostFolder)
entity_stack.append(entity.networkFolder)
entity_stack.append(entity.vmFolder)
elif isinstance(entity, vim.Folder):
# add all child entities from this folder to our search
entity_stack.extend(entity.childEntity)
if obj is None:
print "A object named %s could not be found" % args.name
exit()
if args.new_name:
new_name = args.new_name
else:
# just because we want the script to do *something*
new_name = args.name + "0"
print()
print("name : %s" % obj.name)
print()
print(" renaming from %s to %s" % (args.name, new_name))
print()
# rename creates a task...
task = obj.Rename(new_name)
# Did you know that task objects in pyVmomi get updates automatically?
# Check this out... it's not super efficient but here's how you could
# have a script that looped waiting on a task but still had the
# chance to periodically check other things or do other actions...
print "rename task state:"
count = 0
state = task.info.state
while task.info.state != vim.TaskInfo.State.success:
sys.stdout.write("\r\t" + str(time.time()) + "\t: " + task.info.state)
sys.stdout.flush()
count += 1
print()
print("rename finished")
print()
```
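The wait loop above spins without pausing and never exits if the rename task fails. A hedged alternative that polls with a delay and surfaces task errors, using only standard pyVmomi task fields rather than anything specific to this sample:
```python
import time
from pyVmomi import vim

def wait_for_task(task, poll_seconds=1):
    """Poll a pyVmomi task until it succeeds or fails, sleeping between checks."""
    while task.info.state not in (vim.TaskInfo.State.success,
                                  vim.TaskInfo.State.error):
        time.sleep(poll_seconds)
    if task.info.state == vim.TaskInfo.State.error:
        raise task.info.error          # task faults are exceptions and can be re-raised
    return task.info.result
```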
#### File: pyvmomi-community-samples/samples/upgrade_vm.py
```python
from __future__ import print_function
import atexit
from pyVim import connect, task
from pyVmomi import vim
from tools import cli
def get_args():
""" Get commandline arguments from the user. """
parser = cli.build_arg_parser()
parser.add_argument('-v', '--version',
required=False,
action='store',
default=None,
help='Virtual machine hardware version')
parser.add_argument('-n', '--name',
required=True,
action='store',
help='Name of the virtual machine to upgrade '
'(case sensitive!)')
parser.add_argument('-S', '--use-ssl',
required=False,
action='store_true',
default=False, # Test setups are usually self-signed
help='Enable SSL host certificate verification')
args = parser.parse_args()
cli.prompt_for_password(args)
return args
def get_vm(content, name):
""" Gets a named virtual machine. """
virtual_machine = None
container = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
for item in container.view:
if item.name == name:
virtual_machine = item
break
container.Destroy() # Best practice. Frees up resources on host.
return virtual_machine
def connect_vsphere(username, password, hostname, port, use_ssl):
""" Connects to a ESXi host or vCenter server. """
server = None
try:
if use_ssl: # Connect to server using SSL certificate verification
server = connect.SmartConnect(host=hostname, user=username,
pwd=password, port=port)
else:
server = connect.SmartConnectNoSSL(host=hostname, user=username,
pwd=password, port=port)
except vim.fault.InvalidLogin:
print("ERROR: Invalid login credentials for user '%s'" % username)
exit(1)
    except vim.fault.VimFault as message:
print("Error connecting to vSphere: %s" % str(message))
exit(1)
# Ensures clean disconnect upon program termination
atexit.register(connect.Disconnect, server)
return server
def main():
""" Upgrades the hardware version of a Virtual Machine. """
args = get_args()
service_instance = connect_vsphere(args.user, args.password,
args.host, int(args.port), args.use_ssl)
content = service_instance.RetrieveContent()
virtual_machine = get_vm(content, args.name)
if not virtual_machine:
print("Could not find VM %s" % args.name)
else:
print("Upgrading VM %s" % args.name)
# Set the hardware version to use if specified
if args.version is not None:
print("New version will be %s" % args.version)
version = "vmx-{:02d}".format(args.version)
else:
version = None
# Upgrade the VM
try:
task.WaitForTask(task=virtual_machine.UpgradeVM_Task(version),
si=service_instance)
except vim.fault.AlreadyUpgraded:
print("VM is already upgraded")
# Start the script
if __name__ == '__main__':
main()
``` |
{
"source": "jm833/Flodding-warning-system",
"score": 4
} |
#### File: jm833/Flodding-warning-system/DEtest.py
```python
from floodsystem.geo import rivers_with_station
from floodsystem.geo import stations_by_river
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
def test_rivers_with_station():
# get the data for testing
stations = build_station_list()
r = rivers_with_station(stations)
# check less or equal number of items is in r
assert len(r) <= len(stations)
# check for no repetition and sorted in alphabetical order
for i in range(len(r) - 1):
assert r[i] < r[i + 1]
def test_stations_by_river():
stations = build_station_list()
d = stations_by_river(stations)
# check the list of station is sorted in alphabetical order
# check the list of stations has no repetitions
keys = []
for key, value in d.items():
keys.append(key)
for i in range(len(value) - 1):
assert value[i] < value[i + 1]
# produce a list of (river, station) pair recorded
pair = []
for item in stations:
        if item.river is not None and item.name is not None:
pair.append((item.river, item.name))
# remove recorded data from a list of all (river, station) pairs
for k, v in d.items():
for i in range(len(v)):
if (k, v[i]) in pair:
pair.remove((k, v[i]))
# check whether all the data is recorded and sorted
for item in pair:
assert item[0] not in keys
def test_rivers_by_station_number():
"""Test 1E, rivers by station number"""
N = 9
stations = build_station_list()
# get data for testing
r = rivers_by_station_number(stations, N)
x = len(r)
# check enough items in the list is printed
assert x >= N
# if more is printed than required,
# check the extra rivers have the same number of stations compared to the last required river
if x >= N:
for i in range(x - N):
assert r[x - 1 - i][1] == r[N - 1][1]
# check the result is sorted by number of stations
for i in range(x - 1):
assert r[i][1] >= r[i + 1][1]
```
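For orientation, `rivers_with_station` as exercised here only needs to return a sorted, duplicate-free list of river names. One plausible implementation consistent with these assertions (the repository's own version may differ):
```python
def rivers_with_station(stations):
    """Return a sorted list of the rivers that have at least one monitoring station."""
    return sorted({station.river for station in stations if station.river is not None})
```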
#### File: jm833/Flodding-warning-system/Task2E.py
```python
from floodsystem.plot import *
from floodsystem.flood import *
from floodsystem.stationdata import *
def task2E():
stations = build_station_list()
#produce the x and y variable: the 5 most risky rivers and their levels
risky_stations = stations_highest_rel_level(stations,5)
risky_station_objects = [i[0] for i in risky_stations]
#plots the water levels over the past 10 days for the 5 stations at which the current relative water level is greatest.
for station in risky_station_objects:
dates, levels = fetch_measure_levels(station.measure_id, dt = datetime.timedelta(days=10))
if len(dates) == 0 or len(levels) == 0:
continue # Deal with empty lists appearing
plot_water_levels(station,dates,levels)
plt.show()
if __name__ == "__main__":
task2E()
```
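`stations_highest_rel_level` is used here as returning `(station, relative level)` pairs sorted from most to least at risk. A sketch consistent with that usage, assuming `MonitoringStation` exposes a `relative_water_level()` helper (an assumption, not shown in this file):
```python
def stations_highest_rel_level(stations, N):
    """Return the N (station, relative level) pairs with the highest relative water level."""
    levels = []
    for station in stations:
        rel = station.relative_water_level()   # assumed helper on MonitoringStation
        if rel is not None:
            levels.append((station, rel))
    levels.sort(key=lambda pair: pair[1], reverse=True)
    return levels[:N]
```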
#### File: jm833/Flodding-warning-system/test_analysis.py
```python
from floodsystem.analysis import polyfit
from matplotlib.dates import num2date
import numpy as np
"""
def test_analysis():
times1 = num2date([0,2,4,6])
times2 = num2date([50,52,54,56])
times3 = num2date([10000,10002,10004,10006])
levels = [10,15,20,25]
p = 3
poly1 = polyfit(times1, levels, p)
poly2 = polyfit(times2, levels, p)
poly3 = polyfit(times3, levels, p)
return poly1
test_analysis()
"""
def test_stations_polyfit():
"""test_analysis"""
#(x-1)^2
dates = num2date([7,6,5,4,3,2,1])
levels = [36,25,16,9,4,1,0]
p, d0 = polyfit(dates,levels,2)
assert round(p[2]) == 1
assert d0 == 1
#(2x-1)^3
dates = num2date([6,4,3,2])
levels = [1331,343,125,27]
p, d0 = polyfit(dates,levels,3)
assert round(p[3]) == 8
assert d0 == 2
```
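The tests pin down the contract of `polyfit`: it returns a `numpy.poly1d` fitted against date numbers shifted by an offset `d0`, where indexing `p[i]` gives the coefficient of the degree-`i` term. A sketch consistent with both test cases (the real implementation may choose its offset differently):
```python
import numpy as np
from matplotlib.dates import date2num

def polyfit(dates, levels, p):
    """Fit a degree-p polynomial to levels against date numbers shifted by their minimum."""
    x = date2num(dates)
    d0 = x.min()                               # offset keeps the fit numerically stable
    coeffs = np.polyfit(x - d0, levels, p)
    return np.poly1d(coeffs), d0
```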
#### File: jm833/Flodding-warning-system/test_geo.py
```python
from floodsystem.geo import *
from floodsystem.station import MonitoringStation
from unittest import result
from floodsystem.stationdata import build_station_list
def test_stations_by_distance():
station1 = MonitoringStation("s id", "m id","A station", (3.0,4.0),(0.0,1.0),"A river","A town")
station2 = MonitoringStation("s id", "m id","B station", (6.0,8.0),(0.0,1.0),"B river","B town")
stations = [station1,station2]
sorted_station_by_distance = stations_by_distance(stations,(0.0,0.0))
assert sorted_station_by_distance[0][0]== station1
test_stations_by_distance()
def test_stations_within_radius():
station1 = MonitoringStation("s id", "m id","A station", (0.0,0.1),(0.0,1.0),"A river","A town")
station2 = MonitoringStation("s id", "m id","B station", (0.1,0.2),(0.0,1.0),"B river","B town")
station3 = MonitoringStation("s id", "m id","C station", (9.0,9.0),(0.0,1.0),"C river","C town")
stations_within_r = stations_within_radius([station1,station2,station3],(0.0,0.0),100)
assert stations_within_r == [station1,station2] or [station2,station1]
test_stations_within_radius()
def test_rivers_with_station():
# get the data for testing
stations = build_station_list()
r = rivers_with_station(stations)
# check less or equal number of items is in r
assert len(r) <= len(stations)
# check for no repetition and sorted in alphabetical order
for i in range(len(r) - 1):
assert r[i] < r[i + 1]
def test_stations_by_river():
stations = build_station_list()
d = stations_by_river(stations)
# check the list of station is sorted in alphabetical order
# check the list of stations has no repetitions
keys = []
for key, value in d.items():
keys.append(key)
for i in range(len(value)-1):
assert value[i] < value[i + 1]
# produce a list of (river, station) pair recorded
pair = []
for item in stations:
        if item.river is not None and item.name is not None:
pair.append((item.river, item.name))
# remove recorded data from a list of all (river, station) pairs
for k, v in d.items():
for i in range(len(v)):
if (k, v[i]) in pair:
pair.remove((k,v[i]))
# check whether all the data is recorded and sorted
for item in pair:
assert item[0] not in keys
def test_rivers_by_station_number():
"""Test 1E, rivers by station number"""
N = 9
stations = build_station_list()
# get data for testing
r = rivers_by_station_number(stations, N)
x = len(r)
# check enough items in the list is printed
assert x >= N
# if more is printed than required,
# check the extra rivers have the same number of stations compared to the last required river
if x >= N:
for i in range(x - N):
assert r[x - 1 - i][1] == r[N-1][1]
# check the result is sorted by number of stations
for i in range(x-1):
assert r[i][1] >= r[i+1][1]
```
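`rivers_by_station_number` is pinned down by the last test: at least `N` `(river, count)` pairs, sorted by count in descending order, with any rivers tied with the Nth kept. One implementation that satisfies those checks, shown only as a reference sketch:
```python
from collections import Counter

def rivers_by_station_number(stations, N):
    """Return (river, station count) pairs for the N busiest rivers, keeping ties with the Nth."""
    counts = Counter(s.river for s in stations if s.river is not None)
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    cutoff = ranked[N - 1][1]
    return [item for item in ranked if item[1] >= cutoff]
```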
#### File: jm833/Flodding-warning-system/test_station.py
```python
from floodsystem.station import *
def test_create_monitoring_station():
# Create a station
s_id = "test-s-id"
m_id = "test-m-id"
label = "some station"
coord = (-2.0, 4.0)
trange = (-2.3, 3.4445)
river = "River X"
town = "My Town"
s = MonitoringStation(s_id, m_id, label, coord, trange, river, town)
assert s.station_id == s_id
assert s.measure_id == m_id
assert s.name == label
assert s.coord == coord
assert s.typical_range == trange
assert s.river == river
assert s.town == town
def test_typical_range_consistent():
#Create 3 stations, with last two having inconsistent data
station1 = MonitoringStation("s id", "m id","some station", (1.0,2.2),(-2.3,3.4),"river x","my town")
station2 = MonitoringStation("s id", "m id","some station", (1.0,2.2),(0.0,0.0),"river x","my town")
station3 = MonitoringStation("s id", "m id","some station", (1.0,2.2),(1.0,0.5),"river x","my town")
assert station1.typical_range_consistent() is True
assert station2.typical_range_consistent() is False
assert station3.typical_range_consistent() is False
def test_inconsistent_typical_range_stations():
#Create 3 stations, with last two having inconsistent data
station1 = MonitoringStation("s id", "m id","A station", (1.0,2.2),(-2.3,3.4),"river x","my town")
station2 = MonitoringStation("s id", "m id","B station", (1.0,2.2),(0.0,0.0),"river x","my town")
station3 = MonitoringStation("s id", "m id","C station", (1.0,2.2),(1.0,0.5),"river x","my town")
station_list = [station1,station2,station3]
assert inconsistent_typical_range_stations(station_list)==[station2.name,station3.name]
test_create_monitoring_station()
test_typical_range_consistent()
test_inconsistent_typical_range_stations()
``` |
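The two behaviours under test are small enough to restate as standalone sketches: a typical range is consistent only when it exists and the low value is strictly below the high one, and the inconsistent-station helper collects the names that fail that check. The real code implements the first as a method on `MonitoringStation`; these are free functions purely for illustration:
```python
def typical_range_consistent(station):
    """Sketch of the method: True when typical_range exists and low < high."""
    if station.typical_range is None:
        return False
    low, high = station.typical_range
    return low < high

def inconsistent_typical_range_stations(stations):
    """Names of the stations whose typical range data is missing or inverted."""
    return [s.name for s in stations if not typical_range_consistent(s)]
```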
{
"source": "JM88888/AWS-foryou",
"score": 3
} |
#### File: AWS-foryou/awsforyou/aws_metadata.py
```python
import requests
def get_instance():
"""
Generate the EC2 instance type
"""
req = requests.get(
"http://169.254.169.254/latest/dynamic/instance-identity/document")
response_json = req.json()
instancetype = response_json.get('instanceType')
region = response_json.get('region')
return {'instancetype': instancetype, 'region': region}
```
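The metadata endpoint above answers plain GETs only when IMDSv1 is allowed; on instances that enforce IMDSv2, the same identity document has to be fetched with a session token. A hedged variant using the documented IMDSv2 headers:
```python
import requests

def get_instance_imdsv2():
    """Same lookup as get_instance(), but through an IMDSv2 session token."""
    token = requests.put(
        "http://169.254.169.254/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    ).text
    req = requests.get(
        "http://169.254.169.254/latest/dynamic/instance-identity/document",
        headers={"X-aws-ec2-metadata-token": token},
    )
    doc = req.json()
    return {'instancetype': doc.get('instanceType'), 'region': doc.get('region')}
```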
#### File: awsforyou/tests/test_reccomender.py
```python
import unittest
import pandas as pd
from awsforyou import recommender as rc
from awsforyou import benchmark_runner as br
from awsforyou import total_time_component as tt
from awsforyou import aws_pricing as ap
class TestRecommender(unittest.TestCase):
"""
this class contains tests
"""
def test_for_benchmark_df_empty(self):
"""
test to see if benchmark dataframe is empty
"""
benchmark_df = rc.get_benchmark_data()
self.assertGreater(benchmark_df.shape[0], 0)
def test_for_time_df_empty(self):
"""
test to see if added times dataframe is empty
"""
benchmark_df = rc.get_benchmark_data()
times, percents = [2, 4, 6], [1, 5, 10]
est_time_user = tt.find_total_time(times, percents)
user_benchmark = br.run_benchmark()
est_time_aws = benchmark_df[['runtime']] / \
user_benchmark * est_time_user[0]
benchmark_df["estimated_time_aws"] = est_time_aws
self.assertGreater(benchmark_df.shape[0], 0)
def test_for_complete_df_empty(self):
"""
test to see if added times dataframe is empty
"""
benchmark_df = rc.get_benchmark_data()
times, percents = [2, 4, 6], [1, 5, 10]
est_time_user = tt.find_total_time(times, percents)
user_benchmark = br.run_benchmark()
est_time_aws = benchmark_df[['runtime']] \
/ user_benchmark * est_time_user[0]
benchmark_df["estimated_time_aws"] = est_time_aws
self.assertGreater(benchmark_df.shape[0], 0)
def test_add_estimated_price(self):
"""
This function tests adding the spot and on-demand pricing
to the dataframe
"""
benchmark_df = rc.get_benchmark_data()
times, percents = [2, 4, 6], [1, 5, 10]
est_time_user = tt.find_total_time(times, percents)
user_benchmark = br.run_benchmark()
est_time_aws = benchmark_df[['runtime']] \
/ user_benchmark * est_time_user[0]
benchmark_df["estimated_time_aws"] = est_time_aws
instance_types = benchmark_df["instance_type"].tolist()
price = ap.get_instance_pricing(instance_types)
complete_df = pd.merge(benchmark_df, price, on="instance_type")
complete_df["est_cost_spot_price"] = \
complete_df["estimated_time_aws"] \
* complete_df["spot_price"] / 3600
complete_df["est_cost_on_demand_price"] = \
complete_df["estimated_time_aws"] \
* complete_df["on_demand_price"] / 3600
self.assertGreater(complete_df.shape[0], 0)
``` |
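The cost arithmetic these tests exercise is easy to check by hand: scale the user's estimated runtime by the instance's benchmark ratio, then convert seconds times an hourly price into dollars. A worked example with made-up numbers:
```python
est_time_user = 3600.0                           # estimated seconds on the user's machine
benchmark_ratio = 0.5                            # aws_runtime / user_benchmark (instance is 2x faster)
est_time_aws = est_time_user * benchmark_ratio   # 1800 s on the instance
spot_price = 0.30                                # hypothetical USD per hour
est_cost = est_time_aws * spot_price / 3600      # = $0.15
print("estimated cost: $%.2f" % est_cost)
```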
{
"source": "jm9498/customers",
"score": 2
} |
#### File: customers/tests/test_models.py
```python
import os
import logging
import unittest
from werkzeug.exceptions import NotFound
from service.models import Customer, DataValidationError, db
from service import app
from .factories import CustomerFactory
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgresql://postgres:postgres@localhost:5432/testdb"
)
######################################################################
# C U S T O M E R M O D E L T E S T C A S E S
######################################################################
# pylint: disable=too-many-public-methods
class TestCustomerModel(unittest.TestCase):
"""Test Cases for Customer Model"""
@classmethod
def setUpClass(cls):
"""This runs once before the entire test suite"""
app.config["TESTING"] = True
app.config["DEBUG"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
app.logger.setLevel(logging.CRITICAL)
Customer.init_db(app)
@classmethod
def tearDownClass(cls):
"""This runs once after the entire test suite"""
db.session.close()
def setUp(self):
"""This runs before each test"""
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
def tearDown(self):
"""This runs after each test"""
db.session.remove()
db.drop_all()
######################################################################
# H E L P E R M E T H O D S
######################################################################
def _create_customer(self):
""" Creates a Customer from a Factory """
fake_customer = CustomerFactory()
customer = Customer(
first_name = fake_customer.first_name,
last_name = fake_customer.last_name,
email = fake_customer.email,
phone_number = fake_customer.phone_number
)
        self.assertIsNotNone(customer)
self.assertEqual(customer.id, None)
return customer
######################################################################
# T E S T C A S E S
######################################################################
def test_create_a_customer(self):
""" Create a Customer and assert that it exists """
fake_customer = CustomerFactory()
customer = Customer(
first_name = fake_customer.first_name,
last_name = fake_customer.last_name,
email = fake_customer.email,
phone_number = fake_customer.phone_number
)
        self.assertIsNotNone(customer)
self.assertEqual(customer.id, None)
self.assertEqual(customer.first_name, fake_customer.first_name)
self.assertEqual(customer.last_name, fake_customer.last_name)
self.assertEqual(customer.email, fake_customer.email)
self.assertEqual(customer.phone_number, fake_customer.phone_number)
def test_add_a_customer(self):
""" Creates a customer and adds it to the database """
customers = Customer.all()
self.assertEqual(customers, [])
customer = self._create_customer()
customer.create()
# Assert that it was assigned an id and shows up in the database
self.assertEqual(customer.id, 1)
customers = Customer.all()
self.assertEqual(len(customers),1)
def test_update_customer(self):
""" Update a customer """
customer = self._create_customer()
customer.create()
# Assert that it was assigned an id and shows in the database
self.assertEqual(customer.id, 1)
# Fetch it back
customer = Customer.find(customer.id)
customer.email = "<EMAIL>"
customer.save()
# Fetch it back again
customer = Customer.find(customer.id)
self.assertEqual(customer.email, "<EMAIL>")
def test_delete_a_customer(self):
""" Delete an account from the database """
customers = Customer.all()
self.assertEqual(customers, [])
customer = self._create_customer()
customer.create()
# Assert that it was assigned an id and shows up in the database
self.assertEqual(customer.id, 1)
customers = Customer.all()
self.assertEqual(len(customers), 1)
customer = customers[0]
customer.delete()
customers = Customer.all()
self.assertEqual(len(customers), 0)
def test_find_or_404(self):
""" Find or throw 404 error """
customer = self._create_customer()
customer.create()
# Assert that it was assigned an id and shows up in the database
self.assertEqual(customer.id, 1)
# Fetch it back
customer = Customer.find_or_404(customer.id)
self.assertEqual(customer.id, 1)
def test_find_by_first_name(self):
""" Find by first name """
customer = self._create_customer()
customer.create()
# Fetch it back by name
same_customer = Customer.find_by_first_name(customer.first_name)[0]
self.assertEqual(same_customer.id, customer.id)
self.assertEqual(same_customer.first_name, customer.first_name)
def test_find_by_last_name(self):
""" Find by last name """
customer = self._create_customer()
customer.create()
# Fetch it back by name
same_customer = Customer.find_by_last_name(customer.last_name)[0]
self.assertEqual(same_customer.id, customer.id)
self.assertEqual(same_customer.last_name, customer.last_name)
def test_serialize_a_customer(self):
""" Serialize a customer """
customer = self._create_customer()
serial_customer = customer.serialize()
self.assertEqual(serial_customer['id'], customer.id)
self.assertEqual(serial_customer['first_name'], customer.first_name)
self.assertEqual(serial_customer['last_name'], customer.last_name)
self.assertEqual(serial_customer['email'], customer.email)
self.assertEqual(serial_customer['phone_number'], customer.phone_number)
def test_deserialize_a_customer(self):
""" Deserialize a customer """
customer = self._create_customer()
serial_customer = customer.serialize()
new_customer = Customer()
new_customer.deserialize(serial_customer)
self.assertEqual(new_customer.id, customer.id)
self.assertEqual(new_customer.first_name, customer.first_name)
self.assertEqual(new_customer.last_name, customer.last_name)
self.assertEqual(new_customer.email, customer.email)
self.assertEqual(new_customer.phone_number, customer.phone_number)
def test_deserialize_with_key_error(self):
""" Deserialize a customer with a KeyError """
customer = Customer()
self.assertRaises(DataValidationError, customer.deserialize, {})
def test_deserialize_with_type_error(self):
""" Deserialize a customer with a TypeError """
customer = Customer()
self.assertRaises(DataValidationError, customer.deserialize, [])
``` |
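The serialize/deserialize tests pin down the expected dictionary shape and the error handling. A sketch of what those two `Customer` methods might look like in order to satisfy them; the real model is presumably a SQLAlchemy class, and only the behaviour asserted above is reproduced here:
```python
def serialize(self):
    """Represent a Customer as a dictionary."""
    return {
        "id": self.id,
        "first_name": self.first_name,
        "last_name": self.last_name,
        "email": self.email,
        "phone_number": self.phone_number,
    }

def deserialize(self, data):
    """Populate a Customer from a dictionary, raising DataValidationError on bad input."""
    try:
        self.first_name = data["first_name"]
        self.last_name = data["last_name"]
        self.email = data["email"]
        self.phone_number = data["phone_number"]
        self.id = data.get("id")
    except KeyError as error:
        # DataValidationError is the exception class imported at the top of the tests
        raise DataValidationError("Invalid Customer: missing " + error.args[0]) from error
    except TypeError as error:
        raise DataValidationError("Invalid Customer: body contained bad or no data") from error
    return self
```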