repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
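Each row below is one source file, pipe-separated in the column order above. A minimal sketch of filtering such rows on the numeric quality columns, assuming they have been loaded into a pandas DataFrame (the file name and load call are placeholders, not part of this dump):

    import pandas as pd

    # Hypothetical load; the dump's actual storage format is not given here.
    df = pd.read_parquet("rows.parquet")

    # Keep hand-written files within the ranges listed in the header above.
    keep = df[(~df["autogenerated"])
              & (df["alpha_frac"].between(0.25, 0.97))
              & (df["line_max"] <= 997)]
    print(keep[["repo_name", "path", "license", "size"]].head())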
amolenaar/gaphor | gaphor/core/modeling/coremodel.py | 1 | 2200 | # This file is generated by codegen.py. DO NOT EDIT!
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Callable, List, Optional
from gaphor.core.modeling.element import Element
from gaphor.core.modeling.properties import (
association,
attribute,
derived,
derivedunion,
enumeration,
redefine,
relation_many,
relation_one,
)
if TYPE_CHECKING:
from gaphor.UML import Dependency, Namespace
# 8: override Element
# defined above
# 11: override NamedElement
# Define extra attributes defined in UML model
class NamedElement(Element):
name: attribute[str]
qualifiedName: derived[List[str]]
namespace: relation_one[Namespace]
clientDependency: relation_many[Dependency]
supplierDependency: relation_many[Dependency]
class PackageableElement(NamedElement):
pass
# 55: override Diagram
# defined in gaphor.core.modeling.diagram
# 46: override Presentation
# defined in gaphor.core.modeling.presentation
class Comment(Element):
body: attribute[str]
annotatedElement: relation_many[Element]
# 40: override StyleSheet
# defined in gaphor.core.modeling.presentation
NamedElement.name = attribute("name", str)
Comment.body = attribute("body", str)
# 43: override StyleSheet.styleSheet
# defined in gaphor.core.modeling.presentation
# 52: override Presentation.subject
# defined in gaphor.core.modeling.presentation
# 49: override Element.presentation
# defined in gaphor.core.modeling.presentation
Comment.annotatedElement = association(
"annotatedElement", Element, opposite="ownedComment"
)
Element.ownedComment = association("ownedComment", Comment, opposite="annotatedElement")
# 20: override NamedElement.qualifiedName(NamedElement.namespace): derived[List[str]]
def _namedelement_qualifiedname(self) -> List[str]:
"""
    Returns the qualified name of the element as a list of names
"""
if self.namespace:
return _namedelement_qualifiedname(self.namespace) + [self.name]
else:
return [self.name]
NamedElement.qualifiedName = derived(
NamedElement,
"qualifiedName",
List[str],
0,
1,
lambda obj: [_namedelement_qualifiedname(obj)],
)
| lgpl-2.1 | 8,562,233,402,867,397,000 | 22.913043 | 88 | 0.742273 | false |
rackerlabs/deuce-client | deuceclient/auth/rackspaceauth.py | 1 | 2721 | """
Deuce Rackspace Authentication API
"""
import logging
import deuceclient.auth
import deuceclient.auth.openstackauth
def get_identity_apihost(datacenter):
if datacenter in ('us', 'uk', 'lon', 'iad', 'dfw', 'ord'):
return 'https://identity.api.rackspacecloud.com/v2.0'
elif datacenter in ('hkg', 'syd'):
        return 'https://{0:}.identity.api.rackspacecloud.com/v2.0'.\
format(datacenter)
else:
raise deuceclient.auth.AuthenticationError(
'Unknown Data Center: {0:}'.format(datacenter))
class RackspaceAuthentication(
deuceclient.auth.openstackauth.OpenStackAuthentication):
"""Rackspace Identity Authentication Support
Only difference between this and OpenStackAuthentication is that this
can know the servers without one being specified.
"""
def __init__(self, userid=None, usertype=None,
credentials=None, auth_method=None,
datacenter=None, auth_url=None):
# If an authentication url is not provided then create one using
# Rackspace's Identity Service for the specified datacenter
if auth_url is None:
if datacenter is None:
raise deuceclient.auth.AuthenticationError(
'Required Parameter, datacenter, not specified.')
auth_url = get_identity_apihost(datacenter)
log = logging.getLogger(__name__)
log.debug('No AuthURL specified. Using {0:}'.format(auth_url))
super(RackspaceAuthentication, self).__init__(userid=userid,
usertype=usertype,
credentials=credentials,
auth_method=auth_method,
datacenter=datacenter,
auth_url=auth_url)
@staticmethod
def _management_url(*args, **kwargs):
# NOTE(TheSriram): kwarg region_name is the datacenter supplied
# when instantiating RackspaceAuthentication class
return get_identity_apihost(kwargs['region_name'])
@staticmethod
def patch_management_url():
from keystoneclient.service_catalog import ServiceCatalog
ServiceCatalog.url_for = RackspaceAuthentication._management_url
def get_client(self):
"""Retrieve the Rackspace Client
"""
# NOTE(TheSriram): The exceptions thrown if any, would still
# bear OpenstackAuthentication class in the message.
RackspaceAuthentication.patch_management_url()
return super(RackspaceAuthentication, self).get_client()
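# Illustrative sketch (not part of the original module): the datacenter
# shorthand above is what lets RackspaceAuthentication be built without an
# explicit auth_url, since get_identity_apihost() derives the Identity
# endpoint from the datacenter name.
if __name__ == '__main__':  # pragma: no cover
    # 'us', 'uk', 'lon', 'iad', 'dfw' and 'ord' resolve to the global
    # endpoint; 'hkg' and 'syd' resolve to region-prefixed endpoints.
    print(get_identity_apihost('iad'))
    print(get_identity_apihost('syd'))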
| apache-2.0 | -272,386,779,934,035,970 | 39.014706 | 78 | 0.609335 | false |
lambdaloop/CIT-biosignals | pygame/present_images_pygame.py | 1 | 1565 | import pygame
from pygame.locals import *
from constants import *
from generate_images import *
import time
import pandas as pd
from pylsl import StreamInfo, StreamOutlet
import random
pygame.init()
#pygame.mouse.set_visible(False)
from screen import screen
from drawstuff import *
study_time = int(time.time())
print(study_time)
info = StreamInfo('Ganglion_EEG', 'Markers', 1, 0.0, 'int32',
'marker')
outlet = StreamOutlet(info)
images = gen_images()
def check_for_key(key=K_ESCAPE):
while True:
event = pygame.event.poll()
if event.type == 0:
return False
elif event.dict.get('key', -1) == key:
return True
def check_for_escape():
return check_for_key(K_ESCAPE)
def finish_stuff(early=False):
return
text_slide("""Start recording and
press space to continue""")
while not check_for_key(K_SPACE):
pass
focus_slide()
outlet.push_sample([-1], time.time())
time.sleep(0.5)
images = [(path, pygame.image.load(path)) for path in images]
t = time.time()
for image_path, img in images:
# if d['is_shown'] != 1:
# continue
# word = d['word']
print(time.time() - t)
t = time.time()
print(image_path, hash(image_path))
image_slide(img)
outlet.push_sample([hash(image_path)], time.time())
time.sleep(4)
if check_for_escape():
finish_stuff(early=True)
exit()
focus_slide()
outlet.push_sample([-1], time.time())
time.sleep(2.0)
if check_for_escape():
finish_stuff(early=True)
exit()
| mit | 3,029,757,093,882,281,000 | 19.324675 | 61 | 0.630671 | false |
Metruption/hophacks17project | src/analysis.py | 1 | 2022 | """
analysis.py: A wrapper for the Bark Partner API
Copyright (C) 2016 Aaron Thomas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import pybark
from bark_config import BARK_TOKEN
def check_message(message):
'''
preconditions:
@param message is a string
postconditions:
returns a boolean
        True if the message is abusive
False if the message is not abusive
'''
resp = pybark.woof(BARK_TOKEN, message)
resp = json.loads(resp)
power_level = [resp['abusive'],resp['results']['sentiment'] in ["VERY_NEGATIVE", "NEGATIVE"]]
bad_varname = ["profanity", "cyberbullying"]
power_level = power_level + [resp['results'][i]['abusive'] for i in bad_varname]
return sum(power_level) >=2
def find_handle(message):
'''
preconditions:
@param message is a string
postconditions:
        returns a generator yielding all of the twitter @handles found in
        the text of the direct message
    example outputs:
        () - none given
        (@aaron_the_king, @hack, @hateishate_) - three given
        (@aaron_the_king, @jack) - two given
        (@aaron_the_king,) - one given
'''
words = message.split(" ")
handles = (word for word in words if word.startswith('@'))
return handles
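# Illustrative sketch (not part of the original module). find_handle() is a
# pure string helper, so it can be exercised directly; check_message() needs
# a valid BARK_TOKEN and network access, so it is only shown commented out.
if __name__ == '__main__':  # pragma: no cover
    dm = "@aaron_the_king @jack this talk is great"
    print(list(find_handle(dm)))  # -> ['@aaron_the_king', '@jack']
    # print(check_message(dm))  # True when Bark flags the text as abusive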
| gpl-3.0 | -570,306,720,088,601,800 | 35.763636 | 113 | 0.670129 | false |
yadt/yadt-config-rpm-maker | src/config_rpm_maker/token/treenode.py | 1 | 1209 | # yadt-config-rpm-maker
# Copyright (C) 2011-2013 Immobilien Scout GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class NameNotAcceptedException(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "name '%s' is not accepted, must not be empty or null" % self.name
class TreeNode:
def __init__(self, name, children=None):
if children is None:
children=[]
if name is None or len(name) == 0:
raise NameNotAcceptedException(name)
self.name = name
self.children = set(children)
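# Illustrative sketch (not part of the original module): building a small
# tree and showing that an empty name is rejected.
if __name__ == '__main__':  # pragma: no cover
    root = TreeNode('services', children=[TreeNode('web'), TreeNode('db')])
    print(root.name, len(root.children))  # -> services 2
    try:
        TreeNode('')
    except NameNotAcceptedException as err:
        print(err)  # -> name '' is not accepted, must not be empty or null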
| gpl-3.0 | 7,911,371,813,833,091,000 | 35.636364 | 81 | 0.679901 | false |
cfobel/camip | camip/bin/vpr_net_to_df.py | 1 | 2970 | import sys
from collections import OrderedDict
import numpy as np
import pandas as pd
from path_helpers import path
from vpr_netfile_parser.VprNetParser import cVprNetFileParser
try:
profile
except NameError:
profile = lambda f: f
INPUT_DRIVER_PIN = 0
LOGIC_DRIVER_PIN = 4
LOGIC_BLOCK = 0
INPUT_BLOCK = 1
OUTPUT_BLOCK = 2
CLOCK_PIN = 5
CONNECTION_CLOCK = 5
CONNECTION_DRIVER = 200
CONNECTION_SINK = 100
CONNECTION_CLOCK_DRIVER = 30
# Connection type = DRIVER_TYPE + 10 * SINK_TYPE
DELAY_IO_TO_IO = INPUT_BLOCK + 10 * OUTPUT_BLOCK
DELAY_FB_TO_FB = LOGIC_BLOCK + 10 * LOGIC_BLOCK
DELAY_IO_TO_FB = INPUT_BLOCK + 10 * LOGIC_BLOCK
DELAY_FB_TO_IO = LOGIC_BLOCK + 10 * OUTPUT_BLOCK
@profile
def vpr_net_to_df(net_file_path):
parser = cVprNetFileParser(net_file_path)
block_labels = pd.Series(parser.block_labels)
net_labels = pd.Series(parser.net_labels)
type_labels = pd.Series(['.clb', '.input', '.output'],
index=[LOGIC_BLOCK, INPUT_BLOCK,
OUTPUT_BLOCK])
type_keys = pd.DataFrame(range(type_labels.shape[0]), dtype='uint32',
index=type_labels, columns=['type_key'])
block_type_keys = type_keys.loc[parser.block_type,
'type_key'].reset_index(drop=True)
block_to_net_ids = parser.block_to_net_ids()
net_key = np.concatenate(block_to_net_ids).astype('uint32')
block_key = np.concatenate([[i] * len(v)
for i, v in
enumerate(block_to_net_ids)]).astype('uint32')
pin_key = np.concatenate(parser.block_used_pins).astype('uint32')
connections = pd.DataFrame(OrderedDict([('net_key', net_key),
('block_key', block_key),
('pin_key', pin_key)]))
connections.insert(2, 'block_type',
block_type_keys.iloc[connections.block_key].values)
connections['net_label'] = net_labels.iloc[connections.net_key].values
connections['block_label'] = block_labels.iloc[connections.block_key].values
return connections.sort(['net_key', 'block_key']).reset_index(drop=True)
def parse_args(argv=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if argv is None:
argv = sys.argv
parser = ArgumentParser(description='Convert VPR netlist `.net` file to HDF '
'connections `.h5` format.')
parser.add_argument(dest='vpr_net_file', type=path)
parser.add_argument(dest='hdf_file', type=path)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
df_netlist = vpr_net_to_df(args.vpr_net_file)
df_netlist.to_hdf(str(args.hdf_file), '/connections', format='table',
data_columns=df_netlist.columns, complib='zlib',
complevel=6)
| gpl-2.0 | -9,034,308,781,177,645,000 | 33.137931 | 81 | 0.606734 | false |
khchine5/xl | lino_xl/lib/ledger/fixtures/demo_bookings.py | 1 | 4927 | # -*- coding: UTF-8 -*-
# Copyright 2012-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""
Creates fictive demo bookings with monthly purchases.
See also:
- :mod:`lino_xl.lib.finan.fixtures.demo_bookings`
- :mod:`lino_xl.lib.sales.fixtures.demo_bookings`
- :mod:`lino_xl.lib.invoicing.fixtures.demo_bookings`
"""
from __future__ import unicode_literals
import datetime
from dateutil.relativedelta import relativedelta as delta
from decimal import Decimal
from django.conf import settings
from lino.utils import Cycler
from lino.utils.dates import AMONTH
from lino.api import dd, rt
from lino_xl.lib.vat.mixins import myround
# from lino.core.requests import BaseRequest
REQUEST = settings.SITE.login() # BaseRequest()
MORE_THAN_A_MONTH = datetime.timedelta(days=40)
from lino_xl.lib.vat.choicelists import VatAreas, VatRules
from lino_xl.lib.ledger.choicelists import TradeTypes
def objects():
Journal = rt.models.ledger.Journal
PaymentTerm = rt.models.ledger.PaymentTerm
Company = rt.models.contacts.Company
USERS = Cycler(settings.SITE.user_model.objects.all())
def func():
# qs = Company.objects.filter(sepa_accounts__iban__isnull=False)
qs = Company.objects.exclude(vat_regime='').filter(
country__isnull=False)
for p in qs.order_by('id'):
# if Journal.objects.filter(partner=p).exists():
# continue
# if not p.vat_regime:
# continue
va = VatAreas.get_for_country(p.country)
if va is None:
continue
rule = VatRules.get_vat_rule(
va, TradeTypes.purchases, p.vat_regime, default=False)
if rule:
yield p
PROVIDERS = Cycler(func())
if len(PROVIDERS) == 0:
raise Exception("No providers.")
JOURNAL_P = Journal.objects.get(ref="PRC")
if dd.is_installed('ana'):
ANA_ACCS = Cycler(rt.models.ana.Account.objects.all())
ACCOUNTS = Cycler(JOURNAL_P.get_allowed_accounts())
AMOUNTS = Cycler([Decimal(x) for x in
"20 29.90 39.90 99.95 199.95 599.95 1599.99".split()])
AMOUNT_DELTAS = Cycler([Decimal(x)
for x in "0 0.60 1.10 1.30 2.50".split()])
DATE_DELTAS = Cycler((1, 2, 3, 4, 5, 6, 7))
INFLATION_RATE = Decimal("0.02")
""""purchase stories" : each story represents a provider who sends
monthly invoices.
"""
PURCHASE_STORIES = []
for i in range(7):
# provider, (account,amount)
story = (PROVIDERS.pop(), [])
story[1].append((ACCOUNTS.pop(), AMOUNTS.pop()))
if i % 3:
story[1].append((ACCOUNTS.pop(), AMOUNTS.pop()))
PURCHASE_STORIES.append(story)
START_YEAR = dd.plugins.ledger.start_year
date = datetime.date(START_YEAR, 1, 1)
end_date = settings.SITE.demo_date(-10) # + delta(years=-2)
# end_date = datetime.date(START_YEAR+1, 5, 1)
# print(20151216, START_YEAR, settings.SITE.demo_date(), end_date - date)
PAYMENT_TERMS = Cycler(PaymentTerm.objects.all())
if len(PAYMENT_TERMS) == 0:
raise Exception("No PAYMENT_TERMS.")
while date < end_date:
for story in PURCHASE_STORIES:
vd = date + delta(days=DATE_DELTAS.pop())
if dd.is_installed('ana'):
cl = rt.models.ana.AnaAccountInvoice
else:
cl = rt.models.vat.VatAccountInvoice
invoice = cl(
journal=JOURNAL_P, partner=story[0], user=USERS.pop(),
voucher_date=vd,
payment_term=PAYMENT_TERMS.pop(),
entry_date=vd + delta(days=1))
yield invoice
for account, amount in story[1]:
kwargs = dict()
if dd.is_installed('ana'):
if account.needs_ana:
kwargs.update(ana_account=ANA_ACCS.pop())
model = rt.models.ana.InvoiceItem
else:
model = rt.models.vat.InvoiceItem
amount += amount + \
(amount * INFLATION_RATE * (date.year - START_YEAR))
item = model(voucher=invoice,
account=account,
total_incl=myround(amount) +
AMOUNT_DELTAS.pop(), **kwargs)
try:
item.total_incl_changed(REQUEST)
except Exception as e:
msg = "20171006 {} in ({} {!r})".format(
e, invoice.partner, invoice.vat_regime)
# raise Exception(msg)
dd.logger.warning(msg)
else:
item.before_ui_save(REQUEST)
yield item
invoice.register(REQUEST)
invoice.save()
date += AMONTH
| bsd-2-clause | -259,979,154,959,346,780 | 33.697183 | 77 | 0.56505 | false |
AeroNotix/django-timetracker | tracker/management/commands/approval_reminders.py | 1 | 1376 | '''
Simple module to aid in command-line debugging of notification related issues.
'''
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.mail import EmailMessage
from timetracker.overtime.models import PendingApproval, Tbluser
def send_approval_digest(market):
approvals = PendingApproval.objects.filter(closed=False, approver__market=market)
if not len(approvals):
return
if len({entry.approver for entry in approvals}) > 1:
error_log.critical(
"Cannot send e-mails as a clear approval chain cannot be established."
)
return
message = "Hi,\n\n" \
"You have %d approvals pending in the timetracker." \
"\n\n" \
"Kind Regards,\n" \
"Timetracker team"
message = message % len(approvals)
email = EmailMessage(from_email='[email protected]')
email.body = message
email.to = approvals[0].entry.user.get_manager_email()
email.subject = "Pending Approvals in the Timetracker."
email.send()
class Command(BaseCommand):
def handle(self, *args, **options):
for market in Tbluser.MARKET_CHOICES:
if settings.SENDING_APPROVAL_DIGESTS.get(market[0]):
send_approval_digest(market[0])
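# Illustrative usage sketch (not part of the original module): as a Django
# management command this file is invoked through manage.py, e.g.
#
#     ./manage.py approval_reminders
#
# which sends one digest e-mail per market that has SENDING_APPROVAL_DIGESTS
# enabled in settings.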
| bsd-3-clause | 6,090,599,434,145,423,000 | 31.761905 | 86 | 0.646802 | false |
digitalhealthhack/is_it_good_for_me | data_scripts/get_studies.py | 1 | 3019 | from requests import get as get_page
from bs4 import BeautifulSoup
import csv
import codecs
import cStringIO
TOPIC = 'chocolate'
def _get_study_url(url):
# Receives the url of a cochrane search result and returns the url for the
# study
result_page = get_page(url)
result_soup = BeautifulSoup(result_page.text)
study_url = result_soup \
.find(id='node_review_full_group_research') \
.find_all('a')[0] \
.get('href')
return study_url
def _get_info_for_study(study_url):
study_page = get_page(study_url)
soup = BeautifulSoup(study_page.text)
study_title = soup.find(class_='articleTitle').span.text
abstract_html = soup.find(id='mrwFulltext').div.find_all(['p', 'div'])
abstract_text = u''
authors_conclusions = u''
is_capturing = False
for html in abstract_html:
if is_capturing and html.name != 'p' and html.text != 'Authors\' conclusions':
is_capturing = False
break
abstract_text += unicode(html.text)
if is_capturing:
authors_conclusions += unicode(html.text)
if html.name != 'p' and html.text == 'Authors\' conclusions':
is_capturing = True
return (study_title, authors_conclusions, abstract_text)
def main(search_query=''):
req = get_page(
'http://summaries.cochrane.org/search/site/{}'.format(search_query),
)
soup = BeautifulSoup(req.text)
results = soup.find_all(class_='search-result')
studies = []
for result in results:
result_url = result.a.get('href')
if result_url:
study_url = _get_study_url(result_url)
study_title, study_conclusion, study_abstract = \
_get_info_for_study(study_url)
studies.append([study_title, study_conclusion, study_abstract])
filename = 'studies.csv'
with open(filename, 'w') as csv_file:
for study in studies:
spamwriter = UnicodeWriter(csv_file)
spamwriter.writerow(study)
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
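# Illustrative sketch (not part of the original script): UnicodeWriter wraps
# csv.writer so that unicode rows are UTF-8 encoded before reaching the
# target stream. The file name below is a placeholder.
#
#     with open('example.csv', 'w') as f:
#         writer = UnicodeWriter(f)
#         writer.writerow([u'Schokolade', u'Kakao', u'Studie'])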
main(TOPIC)
| mit | -463,493,121,653,917,700 | 27.752381 | 86 | 0.613448 | false |
EliCDavis/PyChart | src/ColorSelector.py | 1 | 1167 | import math
import random
class ColorSelector:
def __init__(self):
self._colors_for_selection = ['F94F48', 'FF6A41', 'B4B4B4', 'D5D5D5', 'E973F5', '237FEA',
'F2B838', '19EC5A', '2395DE', 'D4B57F', 'FFD700']
self._colors_already_selected = []
def get_random_color(self):
index = math.floor(random.random()*len(self._colors_for_selection))
index_has_been_found = False
# Keep trying to find an index until we're successful
# TODO this needs to be way more efficient
while index_has_been_found is False:
if index not in self._colors_already_selected:
index_has_been_found = True
else:
index = math.floor(random.random()*len(self._colors_for_selection))
# Finally get our color
color = self._colors_for_selection[index]
self._colors_already_selected.append(index)
# If we've used all the colors then start all over
if len(self._colors_already_selected) == len(self._colors_for_selection):
self._colors_already_selected = []
return color
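# Sketch of an alternative to the rejection loop flagged by the TODO above
# (not part of the original class): shuffle the palette once per pass and
# hand colors out in that order, reshuffling when the pool is exhausted.
def _shuffled_color_gen(colors):
    """Yield colors in random order, reshuffling after each full pass."""
    pool = list(colors)
    while True:
        random.shuffle(pool)
        for color in pool:
            yield color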
| mit | -6,649,144,329,297,534,000 | 28.175 | 97 | 0.59126 | false |
justb4/GeoHealthCheck | GeoHealthCheck/app.py | 1 | 38830 | # =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Just van den Broecke <[email protected]>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import base64
import csv
import json
import logging
from io import StringIO
from flask import (abort, flash, g, jsonify, redirect,
render_template, request, url_for)
from flask_babel import gettext
from flask_login import (LoginManager, login_user, logout_user,
current_user, login_required)
from flask_migrate import Migrate
from itertools import chain
import views
from __init__ import __version__
from enums import RESOURCE_TYPES
from factory import Factory
from init import App
from models import Resource, Run, ProbeVars, CheckVars, Tag, User, Recipient
from resourceauth import ResourceAuth
from util import send_email, geocode, format_checked_datetime, \
format_run_status, format_obj_value
# Module globals for convenience
LOGGER = logging.getLogger(__name__)
APP = App.get_app()
CONFIG = App.get_config()
DB = App.get_db()
BABEL = App.get_babel()
MIGRATE = Migrate(APP, DB)
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.init_app(APP)
LANGUAGES = (
('en', 'English'),
('fr', 'Français'),
('de', 'German'),
('nl_NL', 'Nederlands (Nederland)'),
('es_BO', 'Español (Bolivia)'),
('hr_HR', 'Croatian (Croatia)')
)
# Should GHC Runner be run within GHC webapp?
if CONFIG['GHC_RUNNER_IN_WEBAPP'] is True:
LOGGER.info('Running GHC Scheduler in WebApp')
from scheduler import start_schedule
# Start scheduler
start_schedule()
else:
LOGGER.info('NOT Running GHC Scheduler in WebApp')
# commit or rollback shorthand
def db_commit():
err = None
try:
DB.session.commit()
except Exception:
DB.session.rollback()
# finally:
# DB.session.close()
return err
@APP.before_request
def before_request():
g.user = current_user
if request.args and 'lang' in request.args and request.args['lang'] != '':
g.current_lang = request.args['lang']
if not hasattr(g, 'current_lang'):
g.current_lang = 'en'
if CONFIG['GHC_REQUIRE_WEBAPP_AUTH'] is True:
# Login is required to access GHC Webapp.
# We need to pass-through static resources like CSS.
if any(['/static/' in request.path,
request.path.endswith('.ico'),
g.user.is_authenticated(), # This is from Flask-Login
(request.endpoint is not None
and getattr(APP.view_functions[request.endpoint],
'is_public', False))]):
return # Access granted
else:
return redirect(url_for('login'))
# Marks (endpoint-) function as always to be accessible
# (used for GHC_REQUIRE_WEBAPP_AUTH)
def public_route(decorated_function):
decorated_function.is_public = True
return decorated_function
@APP.teardown_appcontext
def shutdown_session(exception=None):
DB.session.remove()
@BABEL.localeselector
def get_locale():
return g.get('current_lang', 'en')
# return request.accept_languages.best_match(LANGUAGES.keys())
@LOGIN_MANAGER.user_loader
def load_user(identifier):
return User.query.get(int(identifier))
@LOGIN_MANAGER.unauthorized_handler
def unauthorized_callback():
if request.query_string:
url = '%s%s?%s' % (request.script_root, request.path,
request.query_string)
else:
url = '%s%s' % (request.script_root, request.path)
return redirect(url_for('login', lang=g.current_lang, next=url))
@LOGIN_MANAGER.request_loader
def load_user_from_request(request):
# Try to login using Basic Auth
# Inspiration: https://flask-login.readthedocs.io
# /en/latest/#custom-login-using-request-loader
basic_auth_val = request.headers.get('Authorization')
if basic_auth_val:
basic_auth_val = basic_auth_val.replace('Basic ', '', 1)
authenticated = False
try:
username, password = base64.b64decode(basic_auth_val).split(':')
user = User.query.filter_by(username=username).first()
if user:
authenticated = user.authenticate(password)
finally:
# Ignore errors, they should all fail the auth attempt
pass
if not authenticated:
LOGGER.warning('Unauthorized access for user=%s' % username)
abort(401)
else:
return user
# TODO: may add login via api-key or token here
# finally, return None if both methods did not login the user
return None
@APP.template_filter('cssize_reliability')
def cssize_reliability(value, css_type=None):
"""returns CSS button class snippet based on score"""
number = int(value)
if CONFIG['GHC_RELIABILITY_MATRIX']['red']['min'] <= number <= \
CONFIG['GHC_RELIABILITY_MATRIX']['red']['max']:
score = 'danger'
panel = 'red'
elif (CONFIG['GHC_RELIABILITY_MATRIX']['orange']['min'] <= number <=
CONFIG['GHC_RELIABILITY_MATRIX']['orange']['max']):
score = 'warning'
panel = 'yellow'
elif (CONFIG['GHC_RELIABILITY_MATRIX']['green']['min'] <= number <=
CONFIG['GHC_RELIABILITY_MATRIX']['green']['max']):
score = 'success'
panel = 'green'
else: # should never really get here
score = 'info'
panel = 'blue'
if css_type is not None and css_type == 'panel':
return panel
else:
return score
@APP.template_filter('cssize_reliability2')
def cssize_reliability2(value):
"""returns CSS panel class snippet based on score"""
return cssize_reliability(value, 'panel')
@APP.template_filter('round2')
def round2(value):
"""rounds a number to 2 decimal places except for values of 0 or 100"""
if value in [0.0, 100.0]:
return int(value)
return round(value, 2)
@APP.context_processor
def context_processors():
"""global context processors for templates"""
rtc = views.get_resource_types_counts()
tags = views.get_tag_counts()
return {
'app_version': __version__,
'resource_types': RESOURCE_TYPES,
'resource_types_counts': rtc['counts'],
'resources_total': rtc['total'],
'languages': LANGUAGES,
'tags': tags,
'tagnames': list(tags.keys())
}
@APP.route('/')
def home():
"""homepage"""
response = views.get_health_summary()
return render_template('home.html', response=response)
@APP.route('/csv', endpoint='csv')
@APP.route('/json', endpoint='json')
def export():
"""export resource list as JSON"""
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
query = request.args.get('q')
response = views.list_resources(resource_type, query)
if request.url_rule.rule == '/json':
json_dict = {'total': response['total'], 'resources': []}
for r in response['resources']:
try:
ghc_url = '%s/resource/%s' % \
(CONFIG['GHC_SITE_URL'], r.identifier)
last_run_report = '-'
if r.last_run:
last_run_report = r.last_run.report
json_dict['resources'].append({
'resource_type': r.resource_type,
'title': r.title,
'url': r.url,
'ghc_url': ghc_url,
'ghc_json': '%s/json' % ghc_url,
'ghc_csv': '%s/csv' % ghc_url,
'first_run': format_checked_datetime(r.first_run),
'last_run': format_checked_datetime(r.last_run),
'status': format_run_status(r.last_run),
'min_response_time': round(r.min_response_time, 2),
'average_response_time': round(r.average_response_time, 2),
'max_response_time': round(r.max_response_time, 2),
'reliability': round(r.reliability, 2),
'last_report': format_obj_value(last_run_report)
})
except Exception as e:
LOGGER.warning(
'JSON error resource id=%d: %s' % (r.identifier, str(e)))
return jsonify(json_dict)
elif request.url_rule.rule == '/csv':
output = StringIO()
writer = csv.writer(output)
header = [
'resource_type', 'title', 'url', 'ghc_url', 'ghc_json', 'ghc_csv',
'first_run', 'last_run', 'status', 'min_response_time',
'average_response_time', 'max_response_time', 'reliability'
]
writer.writerow(header)
for r in response['resources']:
try:
ghc_url = '%s%s' % (CONFIG['GHC_SITE_URL'],
url_for('get_resource_by_id',
identifier=r.identifier))
writer.writerow([
r.resource_type,
r.title,
r.url,
ghc_url,
'%s/json' % ghc_url,
'%s/csv' % ghc_url,
format_checked_datetime(r.first_run),
format_checked_datetime(r.last_run),
format_run_status(r.last_run),
round(r.min_response_time, 2),
round(r.average_response_time, 2),
round(r.max_response_time, 2),
round(r.reliability, 2)
])
except Exception as e:
LOGGER.warning(
'CSV error resource id=%d: %s' % (r.identifier, str(e)))
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/opensearch')
def opensearch():
"""generate OpenSearch description document"""
content = render_template('opensearch_description.xml')
return content, 200, {'Content-type': 'text/xml'}
@APP.route('/resource/<identifier>/csv', endpoint='csv-resource')
@APP.route('/resource/<identifier>/json', endpoint='json-resource')
def export_resource(identifier):
"""export resource as JSON or CSV"""
resource = views.get_resource_by_id(identifier)
history_csv = '%s/resource/%s/history/csv' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
history_json = '%s/resource/%s/history/json' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
if 'json' in request.url_rule.rule:
last_run_report = '-'
if resource.last_run:
last_run_report = resource.last_run.report
json_dict = {
'identifier': resource.identifier,
'title': resource.title,
'url': resource.url,
'resource_type': resource.resource_type,
'owner': resource.owner.username,
'min_response_time': resource.min_response_time,
'average_response_time': resource.average_response_time,
'max_response_time': resource.max_response_time,
'reliability': resource.reliability,
'status': format_run_status(resource.last_run),
'first_run': format_checked_datetime(resource.first_run),
'last_run': format_checked_datetime(resource.last_run),
'history_csv': history_csv,
'history_json': history_json,
'last_report': format_obj_value(last_run_report)
}
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'identifier', 'title', 'url', 'resource_type', 'owner',
'min_response_time', 'average_response_time', 'max_response_time',
'reliability', 'status', 'first_run', 'last_run', 'history_csv',
'history_json'
]
writer.writerow(header)
writer.writerow([
resource.identifier,
resource.title,
resource.url,
resource.resource_type,
resource.owner.username,
resource.min_response_time,
resource.average_response_time,
resource.max_response_time,
resource.reliability,
format_run_status(resource.last_run),
format_checked_datetime(resource.first_run),
format_checked_datetime(resource.last_run),
history_csv,
history_json
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/resource/<identifier>/history/csv',
endpoint='csv-resource-history')
@APP.route('/resource/<identifier>/history/json',
endpoint='json-resource-history')
def export_resource_history(identifier):
"""export resource history as JSON or CSV"""
resource = views.get_resource_by_id(identifier)
if 'json' in request.url_rule.rule:
json_dict = {'runs': []}
for run in resource.runs:
json_dict['runs'].append({
'owner': resource.owner.username,
'resource_type': resource.resource_type,
'checked_datetime': format_checked_datetime(run),
'title': resource.title,
'url': resource.url,
'response_time': round(run.response_time, 2),
'status': format_run_status(run)
})
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'owner', 'resource_type', 'checked_datetime', 'title', 'url',
'response_time', 'status'
]
writer.writerow(header)
for run in resource.runs:
writer.writerow([
resource.owner.username,
resource.resource_type,
format_checked_datetime(run),
resource.title,
resource.url,
round(run.response_time, 2),
format_run_status(run),
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/settings')
def settings():
"""settings"""
pass
@APP.route('/resources')
def resources():
"""lists resources with optional filter"""
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
tag = request.args.get('tag')
query = request.args.get('q')
response = views.list_resources(resource_type, query, tag)
return render_template('resources.html', response=response)
@APP.route('/resource/<identifier>')
def get_resource_by_id(identifier):
"""show resource"""
response = views.get_resource_by_id(identifier)
return render_template('resource.html', resource=response)
@APP.route('/register', methods=['GET', 'POST'])
def register():
"""register a new user"""
if not CONFIG['GHC_SELF_REGISTER']:
msg1 = gettext('This site is not configured for self-registration')
msg2 = gettext('Please contact')
msg = '%s. %s %s' % (msg1, msg2,
CONFIG['GHC_ADMIN_EMAIL'])
flash('%s' % msg, 'danger')
return render_template('register.html', errmsg=msg)
if request.method == 'GET':
return render_template('register.html')
# Check for existing user or email
user = User.query.filter_by(username=request.form['username']).first()
email = User.query.filter_by(email=request.form['email']).first()
if user or email:
flash('%s' % gettext('Invalid username or email'), 'danger')
return render_template('register.html')
user = User(request.form['username'],
request.form['password'], request.form['email'])
DB.session.add(user)
try:
DB.session.commit()
except Exception as err:
DB.session.rollback()
bad_column = err.message.split()[2]
bad_value = request.form[bad_column]
msg = gettext('already registered')
flash('%s %s %s' % (bad_column, bad_value, msg), 'danger')
return redirect(url_for('register', lang=g.current_lang))
return redirect(url_for('login', lang=g.current_lang))
@APP.route('/add', methods=['GET', 'POST'])
@login_required
def add():
"""add resource"""
if not g.user.is_authenticated():
return render_template('add.html')
if request.method == 'GET':
return render_template('add.html')
resource_type = request.form['resource_type']
tags = request.form.getlist('tags')
url = request.form['url'].strip()
resources_to_add = []
from healthcheck import sniff_test_resource, run_test_resource
sniffed_resources = sniff_test_resource(CONFIG, resource_type, url)
if not sniffed_resources:
msg = gettext("No resources detected")
        LOGGER.exception(msg)
flash(msg, 'danger')
for (resource_type, resource_url,
title, success, response_time,
message, start_time, resource_tags,) in sniffed_resources:
tags_to_add = []
for tag in chain(tags, resource_tags):
tag_obj = tag
if not isinstance(tag, Tag):
tag_obj = Tag.query.filter_by(name=tag).first()
if tag_obj is None:
tag_obj = Tag(name=tag)
tags_to_add.append(tag_obj)
resource_to_add = Resource(current_user,
resource_type,
title,
resource_url,
tags=tags_to_add)
resources_to_add.append(resource_to_add)
probe_to_add = None
checks_to_add = []
# Always add a default Probe and Check(s)
# from the GHC_PROBE_DEFAULTS conf
if resource_type in CONFIG['GHC_PROBE_DEFAULTS']:
resource_settings = CONFIG['GHC_PROBE_DEFAULTS'][resource_type]
probe_class = resource_settings['probe_class']
if probe_class:
# Add the default Probe
probe_obj = Factory.create_obj(probe_class)
probe_to_add = ProbeVars(
resource_to_add, probe_class,
probe_obj.get_default_parameter_values())
# Add optional default (parameterized)
# Checks to add to this Probe
checks_info = probe_obj.get_checks_info()
checks_param_info = probe_obj.get_plugin_vars()['CHECKS_AVAIL']
for check_class in checks_info:
check_param_info = checks_param_info[check_class]
if 'default' in checks_info[check_class]:
if checks_info[check_class]['default']:
# Filter out params for Check with fixed values
param_defs = check_param_info['PARAM_DEFS']
param_vals = {}
for param in param_defs:
if param_defs[param]['value']:
param_vals[param] = \
param_defs[param]['value']
check_vars = CheckVars(
probe_to_add, check_class, param_vals)
checks_to_add.append(check_vars)
result = run_test_resource(resource_to_add)
run_to_add = Run(resource_to_add, result)
DB.session.add(resource_to_add)
# prepopulate notifications for current user
resource_to_add.set_recipients('email', [g.user.email])
if probe_to_add:
DB.session.add(probe_to_add)
for check_to_add in checks_to_add:
DB.session.add(check_to_add)
DB.session.add(run_to_add)
try:
DB.session.commit()
msg = gettext('Services registered')
flash('%s (%s, %s)' % (msg, resource_type, url), 'success')
except Exception as err:
DB.session.rollback()
flash(str(err), 'danger')
return redirect(url_for('home', lang=g.current_lang))
if len(resources_to_add) == 1:
return edit_resource(resources_to_add[0].identifier)
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/resource/<int:resource_identifier>/update', methods=['POST'])
@login_required
def update(resource_identifier):
"""update a resource"""
update_counter = 0
status = 'success'
try:
resource_identifier_dict = request.get_json()
resource = Resource.query.filter_by(
identifier=resource_identifier).first()
for key, value in resource_identifier_dict.items():
if key == 'tags':
resource_tags = [t.name for t in resource.tags]
tags_to_add = set(value) - set(resource_tags)
tags_to_delete = set(resource_tags) - set(value)
# Existing Tags: create relation else add new Tag
all_tag_objs = Tag.query.all()
for tag in tags_to_add:
tag_add_obj = None
for tag_obj in all_tag_objs:
if tag == tag_obj.name:
# use existing
tag_add_obj = tag_obj
break
if not tag_add_obj:
# add new
tag_add_obj = Tag(name=tag)
DB.session.add(tag_add_obj)
resource.tags.append(tag_add_obj)
for tag in tags_to_delete:
tag_to_delete = Tag.query.filter_by(name=tag).first()
resource.tags.remove(tag_to_delete)
update_counter += 1
elif key == 'probes':
# Remove all existing ProbeVars for Resource
for probe_var in resource.probe_vars:
resource.probe_vars.remove(probe_var)
# Add ProbeVars anew each with optional CheckVars
for probe in value:
LOGGER.info('adding Probe class=%s parms=%s' %
(probe['probe_class'], str(probe)))
probe_vars = ProbeVars(resource, probe['probe_class'],
probe['parameters'])
for check in probe['checks']:
check_vars = CheckVars(
probe_vars, check['check_class'],
check['parameters'])
probe_vars.check_vars.append(check_vars)
resource.probe_vars.append(probe_vars)
update_counter += 1
elif key == 'notify_emails':
resource.set_recipients('email',
[v for v in value if v.strip()])
elif key == 'notify_webhooks':
resource.set_recipients('webhook',
[v for v in value if v.strip()])
elif key == 'auth':
resource.auth = value
elif getattr(resource, key) != resource_identifier_dict[key]:
# Update other resource attrs, mainly 'name'
setattr(resource, key, resource_identifier_dict[key])
min_run_freq = CONFIG['GHC_MINIMAL_RUN_FREQUENCY_MINS']
if int(resource.run_frequency) < min_run_freq:
resource.run_frequency = min_run_freq
update_counter += 1
# Always update geo-IP: maybe failure on creation or
# IP-address of URL may have changed.
latitude, longitude = geocode(resource.url)
if latitude != 0.0 and longitude != 0.0:
# Only update for valid lat/lon
resource.latitude = latitude
resource.longitude = longitude
update_counter += 1
except Exception as err:
LOGGER.error("Cannot update resource: %s", err, exc_info=err)
DB.session.rollback()
status = str(err)
update_counter = 0
# finally:
# DB.session.close()
if update_counter > 0:
err = db_commit()
if err:
status = str(err)
return jsonify({'status': status})
@APP.route('/resource/<int:resource_identifier>/test', methods=['GET', 'POST'])
@login_required
def test(resource_identifier):
"""test a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
from healthcheck import run_test_resource
result = run_test_resource(
resource)
if request.method == 'GET':
if result.message == 'Skipped':
msg = gettext('INFO')
flash('%s: %s' % (msg, result.message), 'info')
elif result.message not in ['OK', None, 'None']:
msg = gettext('ERROR')
flash('%s: %s' % (msg, result.message), 'danger')
else:
flash(gettext('Resource tested successfully'), 'success')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
elif request.method == 'POST':
return jsonify(result.get_report())
@APP.route('/resource/<int:resource_identifier>/edit')
@login_required
def edit_resource(resource_identifier):
"""edit a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
probes_avail = views.get_probes_avail(resource.resource_type, resource)
suggestions = json.dumps(Recipient.get_suggestions('email',
g.user.username))
return render_template('edit_resource.html',
lang=g.current_lang,
resource=resource,
suggestions=suggestions,
auths_avail=ResourceAuth.get_auth_defs(),
probes_avail=probes_avail)
@APP.route('/resource/<int:resource_identifier>/delete')
@login_required
def delete(resource_identifier):
"""delete a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if g.user.role != 'admin' and g.user.username != resource.owner.username:
msg = gettext('You do not have access to delete this resource')
flash(msg, 'danger')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(url_for('home', lang=g.current_lang))
resource.clear_recipients()
DB.session.delete(resource)
try:
DB.session.commit()
flash(gettext('Resource deleted'), 'success')
return redirect(url_for('home', lang=g.current_lang))
except Exception as err:
DB.session.rollback()
flash(str(err), 'danger')
return redirect(url_for(request.referrer))
@APP.route('/probe/<string:probe_class>/<int:resource_identifier>/edit_form')
@APP.route('/probe/<string:probe_class>/edit_form')
@login_required
def get_probe_edit_form(probe_class, resource_identifier=None):
"""get the form to edit a Probe"""
probe_obj = Factory.create_obj(probe_class)
if resource_identifier:
resource = views.get_resource_by_id(resource_identifier)
if resource:
probe_obj._resource = resource
probe_obj.expand_params(resource)
probe_info = probe_obj.get_plugin_vars()
probe_vars = ProbeVars(
None, probe_class, probe_obj.get_default_parameter_values())
# Get only the default Checks for this Probe class
checks_avail = probe_obj.get_checks_info_defaults()
checks_avail = probe_obj.expand_check_vars(checks_avail)
for check_class in checks_avail:
check_obj = Factory.create_obj(check_class)
check_params = check_obj.get_default_parameter_values()
probe_check_param_defs = \
probe_info['CHECKS_AVAIL'][check_class]['PARAM_DEFS']
for param in probe_check_param_defs:
if 'value' in probe_check_param_defs[param]:
check_params[param] = probe_check_param_defs[param]['value']
# Appends 'check_vars' to 'probe_vars' (SQLAlchemy)
CheckVars(probe_vars, check_class, check_params)
return render_template('includes/probe_edit_form.html',
lang=g.current_lang,
probe=probe_vars, probe_info=probe_info)
@APP.route('/check/<string:check_class>/edit_form')
@login_required
def get_check_edit_form(check_class):
"""get the form to edit a Check"""
check_obj = Factory.create_obj(check_class)
check_info = check_obj.get_plugin_vars()
check_vars = CheckVars(
None, check_class, check_obj.get_default_parameter_values())
return render_template('includes/check_edit_form.html',
lang=g.current_lang,
check=check_vars, check_info=check_info)
@APP.route('/login', methods=['GET', 'POST'])
@public_route
def login():
"""login"""
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
registered_user = User.query.filter_by(username=username).first()
authenticated = False
if registered_user:
# May not have upgraded to pw encryption: warn
if len(registered_user.password) < 80:
msg = 'Please upgrade GHC to encrypted passwords first, see docs!'
flash(gettext(msg), 'danger')
return redirect(url_for('login', lang=g.current_lang))
try:
authenticated = registered_user.authenticate(password)
finally:
pass
if not authenticated:
flash(gettext('Invalid username and / or password'), 'danger')
return redirect(url_for('login', lang=g.current_lang))
# Login ok
login_user(registered_user)
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/logout')
def logout():
"""logout"""
logout_user()
flash(gettext('Logged out'), 'success')
if request.referrer:
return redirect(request.referrer)
else:
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset_req', methods=['GET', 'POST'])
@public_route
def reset_req():
"""
Reset password request handling.
"""
if request.method == 'GET':
return render_template('reset_password_request.html')
# Reset request form with email
email = request.form['email']
registered_user = User.query.filter_by(email=email).first()
if registered_user is None:
LOGGER.warn('Invalid email for reset_req: %s' % email)
flash(gettext('Invalid email'), 'danger')
return redirect(url_for('reset_req', lang=g.current_lang))
# Generate reset url using user-specific token
token = registered_user.get_token()
reset_url = '%s/reset/%s' % (CONFIG['GHC_SITE_URL'], token)
# Create message body with reset link
msg_body = render_template('reset_password_email.txt',
lang=g.current_lang, config=CONFIG,
reset_url=reset_url,
username=registered_user.username)
try:
from email.mime.text import MIMEText
from email.utils import formataddr
msg = MIMEText(msg_body, 'plain', 'utf-8')
msg['From'] = formataddr((CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL']))
msg['To'] = registered_user.email
msg['Subject'] = '[%s] %s' % (CONFIG['GHC_SITE_TITLE'],
gettext('reset password'))
from_addr = '%s <%s>' % (CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL'])
to_addr = registered_user.email
msg_text = msg.as_string()
send_email(CONFIG['GHC_SMTP'], from_addr, to_addr, msg_text)
except Exception as err:
msg = 'Cannot send email. Contact admin: '
LOGGER.warn(msg + ' err=' + str(err))
flash(gettext(msg) + CONFIG['GHC_ADMIN_EMAIL'], 'danger')
return redirect(url_for('login', lang=g.current_lang))
flash(gettext('Password reset link sent via email'), 'success')
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset/<token>', methods=['GET', 'POST'])
@public_route
def reset(token=None):
"""
Reset password submit form handling.
"""
# Must have at least a token to proceed.
if token is None:
return redirect(url_for('reset_req', lang=g.current_lang))
# Token received: verify if ok, may also time-out.
registered_user = User.verify_token(token)
if registered_user is None:
LOGGER.warn('Cannot find User from token: %s' % token)
flash(gettext('Invalid token'), 'danger')
return redirect(url_for('login', lang=g.current_lang))
# Token and user ok: return reset form.
if request.method == 'GET':
return render_template('reset_password_form.html')
# Valid token and user: change password from form-value
password = request.form['password']
if not password:
flash(gettext('Password required'), 'danger')
return redirect(url_for('reset/%s' % token, lang=g.current_lang))
registered_user.set_password(password)
DB.session.add(registered_user)
try:
DB.session.commit()
flash(gettext('Update password OK'), 'success')
except Exception as err:
msg = 'Update password failed!'
LOGGER.warn(msg + ' err=' + str(err))
DB.session.rollback()
flash(gettext(msg), 'danger')
# Finally redirect user to login page
return redirect(url_for('login', lang=g.current_lang))
#
# REST Interface Calls
#
@APP.route('/api/v1.0/summary')
@APP.route('/api/v1.0/summary/')
@APP.route('/api/v1.0/summary.<content_type>')
def api_summary(content_type='json'):
"""
Get health summary for all Resources within this instance.
"""
health_summary = views.get_health_summary()
# Convert Runs to dict-like structure
for run in ['first_run', 'last_run']:
run_obj = health_summary.get(run, None)
if run_obj:
health_summary[run] = run_obj.for_json()
# Convert Resources failing to dict-like structure
failed_resources = []
for resource in health_summary['failed_resources']:
failed_resources.append(resource.for_json())
health_summary['failed_resources'] = failed_resources
if content_type == 'json':
result = jsonify(health_summary)
else:
result = '<pre>\n%s\n</pre>' % \
render_template('status_report_email.txt',
lang=g.current_lang, summary=health_summary)
return result
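# Illustrative client-side sketch (not part of this module): fetching the
# summary endpoint defined above. Host, port and the Basic-Auth credentials
# are placeholder assumptions (credentials only matter when
# GHC_REQUIRE_WEBAPP_AUTH is enabled).
#
#     import requests
#     resp = requests.get('http://localhost:8000/api/v1.0/summary',
#                         auth=('admin', 'secret'))
#     print(resp.json()['failed_resources'])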
@APP.route('/api/v1.0/probes-avail/')
@APP.route('/api/v1.0/probes-avail/<resource_type>')
@APP.route('/api/v1.0/probes-avail/<resource_type>/<int:resource_id>')
def api_probes_avail(resource_type=None, resource_id=None):
"""
Get available (configured) Probes for this
installation, optional for resource type
"""
resource = None
if resource_id:
resource = views.get_resource_by_id(resource_id)
probes = views.get_probes_avail(resource_type=resource_type,
resource=resource)
return jsonify(probes)
@APP.route('/api/v1.0/runs/<int:resource_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>.<content_type>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>.<content_type>')
def api_runs(resource_id, run_id=None, content_type='json'):
"""
Get Runs (History of results) for Resource.
"""
if run_id:
runs = [views.get_run_by_id(run_id)]
else:
runs = views.get_run_by_resource_id(resource_id)
run_arr = []
for run in runs:
run_dict = {
'id': run.identifier,
'success': run.success,
'response_time': run.response_time,
'checked_datetime': run.checked_datetime,
'message': run.message,
'report': run.report
}
run_arr.append(run_dict)
runs_dict = {'total': len(run_arr), 'runs': run_arr}
result = 'unknown'
if content_type == 'json':
result = jsonify(runs_dict)
elif content_type == 'html':
result = render_template('includes/runs.html',
lang=g.current_lang, runs=runs_dict['runs'])
return result
if __name__ == '__main__': # run locally, for fun
import sys
HOST = '0.0.0.0'
PORT = 8000
if len(sys.argv) > 1:
HOST, PORT = sys.argv[1].split(':')
APP.run(host=HOST, port=int(PORT), use_reloader=True, debug=True)
| mit | 1,309,314,165,383,930,400 | 34.3303 | 79 | 0.579298 | false |
openstates/openstates | openstates/az/__init__.py | 1 | 15461 | import lxml.html
import re
import requests
from openstates.utils import State
from .people import AZPersonScraper
from .bills import AZBillScraper
# from .committees import AZCommitteeScraper
# from .events import AZEventScraper
class Arizona(State):
scrapers = {
"people": AZPersonScraper,
# 'committees': AZCommitteeScraper,
# 'events': AZEventScraper,
"bills": AZBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2009-07-01",
"identifier": "49th-1st-regular",
"name": "49th Legislature, 1st Regular Session (2009)",
"start_date": "2009-01-12",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Special Session",
"classification": "special",
"end_date": "2009-01-31",
"identifier": "49th-1st-special",
"name": "49th Legislature, 1st Special Session (2009)",
"start_date": "2009-01-28",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Second Regular Session",
"classification": "primary",
"end_date": "2010-04-29",
"identifier": "49th-2nd-regular",
"name": "49th Legislature, 2nd Regular Session (2010)",
"start_date": "2010-01-11",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Second Special Session",
"classification": "special",
"end_date": "2009-05-27",
"identifier": "49th-2nd-special",
"name": "49th Legislature, 2nd Special Session (2009)",
"start_date": "2009-05-21",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Third Special Session",
"classification": "special",
"end_date": "2009-08-25",
"identifier": "49th-3rd-special",
"name": "49th Legislature, 3rd Special Session (2009)",
"start_date": "2009-07-06",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2009-11-23",
"identifier": "49th-4th-special",
"name": "49th Legislature, 4th Special Session (2009)",
"start_date": "2009-11-17",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fifth Special Session",
"classification": "special",
"end_date": "2009-12-19",
"identifier": "49th-5th-special",
"name": "49th Legislature, 5th Special Session (2009)",
"start_date": "2009-12-17",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Sixth Special Session",
"classification": "special",
"end_date": "2010-02-11",
"identifier": "49th-6th-special",
"name": "49th Legislature, 6th Special Session (2010)",
"start_date": "2010-02-01",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Seventh Special Session",
"classification": "special",
"end_date": "2010-03-16",
"identifier": "49th-7th-special",
"name": "49th Legislature, 7th Special Session (2010)",
"start_date": "2010-03-08",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Eighth Special Session",
"classification": "special",
"end_date": "2010-04-01",
"identifier": "49th-8th-special",
"name": "49th Legislature, 8th Special Session (2010)",
"start_date": "2010-03-29",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Ninth Special Session",
"classification": "special",
"end_date": "2010-08-11",
"identifier": "49th-9th-special",
"name": "49th Legislature, 9th Special Session (2010)",
"start_date": "2010-08-09",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2011-04-20",
"identifier": "50th-1st-regular",
"name": "50th Legislature, 1st Regular Session (2011)",
"start_date": "2011-01-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Special Session",
"classification": "special",
"end_date": "2011-01-20",
"identifier": "50th-1st-special",
"name": "50th Legislature, 1st Special Session (2011)",
"start_date": "2011-01-19",
},
{
"_scraped_name": "2012 - Fiftieth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "50th-2nd-regular",
"name": "50th Legislature, 2nd Regular Session (2012)",
"start_date": "2012-01-09",
"end_date": "2012-05-03",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Second Special Session",
"classification": "special",
"end_date": "2011-02-16",
"identifier": "50th-2nd-special",
"name": "50th Legislature, 2nd Special Session (2011)",
"start_date": "2011-02-14",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Third Special Session",
"classification": "special",
"end_date": "2011-06-13",
"identifier": "50th-3rd-special",
"name": "50th Legislature, 3rd Special Session (2011)",
"start_date": "2011-06-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2011-11-01",
"identifier": "50th-4th-special",
"name": "50th Legislature, 4th Special Session (2011)",
"start_date": "2011-11-01",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Regular Session",
"classification": "primary",
"identifier": "51st-1st-regular",
"name": "51st Legislature - 1st Regular Session (2013)",
"start_date": "2013-01-14",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Special Session",
"classification": "primary",
"identifier": "51st-1st-special",
"name": "51st Legislature - 1st Special Session (2013)",
"start_date": "2013-06-11",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Regular Session",
"classification": "primary",
"identifier": "51st-2nd-regular",
"name": "51st Legislature - 2nd Regular Session",
"start_date": "2014-01-13",
"end_date": "2014-04-24",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Special Session",
"classification": "special",
"identifier": "51st-2nd-special",
"name": "51st Legislature - 2nd Special Session",
"start_date": "2014-05-27",
"end_date": "2014-05-29",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Regular Session",
"classification": "primary",
"identifier": "52nd-1st-regular",
"name": "52nd Legislature - 1st Regular Session",
"start_date": "2015-01-12",
"end_date": "2015-04-02",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Special Session",
"classification": "special",
"identifier": "52nd-1st-special",
"name": "52nd Legislature - 1st Special Session",
"start_date": "2015-10-28",
"end_date": "2015-10-30",
},
{
"_scraped_name": "2016 - Fifty-second Legislature - Second Regular Session",
"classification": "primary",
"identifier": "52nd-2nd-regular",
"name": "52nd Legislature - 2nd Regular Session",
"start_date": "2016-01-11",
"end_date": "2016-05-07",
},
{
"_scraped_name": "2017 - Fifty-third Legislature - First Regular Session",
"classification": "primary",
"end_date": "2017-05-03",
"identifier": "53rd-1st-regular",
"name": "53rd Legislature - 1st Regular Session",
"start_date": "2017-01-09",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - First Special Session",
"classification": "special",
"identifier": "53rd-1st-special",
"name": "53rd Legislature - 1st Special Session",
"start_date": "2018-01-22",
"end_date": "2018-01-26",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - Second Regular Session",
"classification": "primary",
"identifier": "53rd-2nd-regular",
"name": "53rd Legislature - 2nd Regular Session",
"start_date": "2018-01-08",
"end_date": "2018-05-03",
},
{
"_scraped_name": "2019 - Fifty-fourth Legislature - First Regular Session",
"classification": "primary",
"identifier": "54th-1st-regular",
"name": "54th Legislature - 1st Regular Session",
"start_date": "2019-01-14",
"end_date": "2019-03-29",
},
{
"_scraped_name": "2020 - Fifty-fourth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "54th-2nd-regular",
"name": "54th Legislature - 2nd Regular Session",
"start_date": "2020-01-13",
},
]
ignored_scraped_sessions = [
"2008 - Forty-eighth Legislature - Second Regular Session",
"2007 - Forty-eighth Legislature - First Regular Session",
"2006 - Forty-seventh Legislature - First Special Session",
"2006 - Forty-seventh Legislature - Second Regular Session",
"2005 - Forty-seventh Legislature - First Regular Session",
"2004 - Forty-sixth Legislature - Second Regular Session",
"2003 - Forty-sixth Legislature - Second Special Session",
"2003 - Forty-sixth Legislature - First Special Session",
"2003 - Forty-sixth Legislature - First Regular Session",
"2002 - Forty-fifth Legislature - Sixth Special Session",
"2002 - Forty-fifth Legislature - Fifth Special Session",
"2002 - Forty-fifth Legislature - Fourth Special Session",
"2002 - Forty-fifth Legislature - Third Special Session",
"2002 - Forty-fifth Legislature - Second Regular Session",
"2001 - Forty-fifth Legislature - Second Special Session",
"2001 - Forty-fifth Legislature - First Special Session",
"2001 - Forty-fifth Legislature - First Regular Session",
"2000 - Forty-fourth Legislature - Seventh Special Session",
"2000 - Forty-fourth Legislature - Sixth Special Session",
"2000 - Forty-fourth Legislature - Fifth Special Session",
"2000 - Forty-fourth Legislature - Fourth Special Session",
"2000 - Forty-fourth Legislature - Second Regular Session",
"1999 - Forty-fourth Legislature - Third Special Session",
"1999 - Forty-fourth Legislature - Second Special Session",
"1999 - Forty-fourth Legislature - First Special Session",
"1999 - Forty-fourth Legislature - First Regular Session",
"1998 - Forty-third Legislature - Sixth Special Session",
"1998 - Forty-third Legislature - Fifth Special Session",
"1998 - Forty-third Legislature - Fourth Special Session",
"1998 - Forty-third Legislature - Third Special Session",
"1998 - Forty-third Legislature - Second Regular Session",
"1997 - Forty-third Legislature - Second Special Session",
"1997 - Forty-third Legislature - First Special Session",
"1997 - Forty-third Legislature - First Regular Session",
"1996 - Forty-second Legislature - Seventh Special Session",
"1996 - Forty-second Legislature - Sixth Special Session",
"1996 - Forty-second Legislature - Fifth Special Session",
"1996 - Forty-second Legislature - Second Regular Session",
"1995 - Forty-second Legislature - Fourth Special Session",
"1995 - Forty-second Legislature - Third Special Session",
"1995 - Forty-Second Legislature - Second Special Session",
"1995 - Forty-Second Legislature - First Special Session",
"1995 - Forty-second Legislature - First Regular Session",
"1994 - Forty-first Legislature - Ninth Special Session",
"1994 - Forty-first Legislature - Eighth Special Session",
"1994 - Forty-first Legislature - Second Regular Session",
"1993 - Forty-first Legislature - Seventh Special Session",
"1993 - Forty-first Legislature - Sixth Special Session",
"1993 - Forty-first Legislature - Fifth Special Session",
"1993 - Forty-first Legislature - Fourth Special Session",
"1993 - Forty-first Legislature - Third Special Session",
"1993 - Forty-first Legislature - Second Special Session",
"1993 - Forty-first Legislature - First Special Session",
"1993 - Forty-first Legislature - First Regular Session",
"1992 - Fortieth Legislature - Ninth Special Session",
"1992 - Fortieth Legislature - Eighth Special Session",
"1992 - Fortieth Legislature - Seventh Special Session",
"1992 - Fortieth Legislature - Fifth Special Session",
"1992 - Fortieth Legislature - Sixth Special Session",
"1992 - Fortieth Legislature - Second Regular Session",
"1991 - Fortieth Legislature - Fourth Special Session",
"1991 - Fortieth Legislature - Third Special Session",
"1991 - Fortieth Legislature - Second Special Session",
"1991 - Fortieth Legislature - First Special Session",
"1991 - Fortieth Legislature - First Regular Session",
"1990 - Thirty-ninth Legislature - Fifth Special Session",
"1990 - Thirty-ninth Legislature - Fourth Special Session",
"1990 - Thirty-ninth Legislature - Third Special Session",
"1990 - Thirty-ninth Legislature - Second Regular Session",
"1989 - Thirty-ninth Legislature - Second Special Session",
"1989 - Thirty-ninth Legislature - First Special Session",
"1989 - Thirty-ninth Legislature - First Regular Session",
]
def get_session_list(self):
session = requests.Session()
data = session.get("https://www.azleg.gov/")
# TODO: JSON at https://apps.azleg.gov/api/Session/
doc = lxml.html.fromstring(data.text)
sessions = doc.xpath("//select/option/text()")
sessions = [re.sub(r"\(.+$", "", x).strip() for x in sessions]
return sessions
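    # Note: the re.sub above strips a trailing parenthetical from each scraped
    # option, e.g. a hypothetical option string such as
    #   "2019 - Fifty-fourth Legislature - First Regular Session (current)"
    # becomes
    #   "2019 - Fifty-fourth Legislature - First Regular Session"
    # which matches the _scraped_name values listed above.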
| gpl-3.0 | -4,138,875,049,610,144,300 | 44.878338 | 88 | 0.563935 | false |
jcsalterego/pynsour | src/bot.py | 1 | 5843 | """Bot class"""
import os
import socket
from parser import Parser
from logger import Logger
from sandbox import Sandbox
import botcode
MAX_CONSOLE_LEN = 50
BUFFER_SIZE = 1024
STATE_DISCONNECTED = 0
STATE_CONNECTING = 1
STATE_HANDSHAKE = 2
STATE_CONNECTED = 3
STATE_ONLINE = 4
class Bot:
def __init__(self):
"""Constructor
"""
self.__state = STATE_DISCONNECTED
self.__load_defaults()
self.parser = Parser()
self.logger = Logger()
self.sandbox = Sandbox()
def __load_defaults(self):
"""Loads default settings
"""
self.username = os.getlogin()
self.password = None
self.nicks = ["nick", "altnick"]
self.realname = "Default pynsour user"
self.handlers = []
self.localhost = 'localhost'
self.on_connect = []
self.ops = []
self.name = ""
    def asDict(self):
        """Return the object as a dictionary
        Ignores private attributes and bound methods
"""
info = {}
for attr in dir(self):
if attr[0] == "_" or attr[:2] == "__":
continue
i = getattr(self, attr)
if type(i).__name__ == "instancemethod":
continue
else:
info[attr] = i
return info
def connect(self):
"""Connect the bot to the IRC server
"""
self.__state = STATE_CONNECTING
self.__connection = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.logger.console("+++ Connecting to %s:%s" %
(self.hostname, self.port))
self.__connection.connect((self.hostname, self.port))
def event(self):
"""Event fire
"""
if self.__state == STATE_DISCONNECTED:
return
elif self.__state == STATE_CONNECTING:
if self.password:
self.write("PASS %s" % self.password)
self.write("NICK %s" % self.nicks[0])
self.write("USER %s %s %s :%s" %
(self.username,
self.localhost,
self.hostname,
self.realname))
self.__state = STATE_HANDSHAKE
elif self.__state == STATE_HANDSHAKE:
pass
self.read()
self.ops += self.parser.parse()
self.execute()
def execute(self):
"""Execute botcode
"""
# Expand meta-ops, e.g. connect events
new_ops = []
for operation in self.ops:
if operation[0] == botcode.OP_EVENT_CONNECT:
new_ops += self.on_connect
self.__state = STATE_ONLINE
elif operation[0] == botcode.OP_EVENT_PRIVMSG:
sandbox_ops = self.filter_eval(operation[1])
if sandbox_ops:
new_ops += self.sandbox.execute(sandbox_ops)
else:
new_ops.append(operation)
self.ops = new_ops
while len(self.ops) > 0:
new_ops = []
for operation in self.ops:
if operation[0] == botcode.OP_PONG:
self.write("PONG :%s" % operation[1])
elif operation[0] == botcode.OP_JOIN:
if len(operation) == 2:
self.write("JOIN %s :%s" % operation[1])
elif len(operation) == 1:
self.write("JOIN %s" % operation[1])
elif operation[0] == botcode.OP_MODE:
self.write("MODE %s" % operation[1])
elif operation[0] == botcode.OP_PRIVMSG:
self.write("PRIVMSG %s :%s" % operation[1:3])
elif operation[0] == botcode.OP_ERROR:
self.logger.console("ERR\n"
"%s" % operation[1])
self.ops = new_ops
# self.ops will be empty by here
def filter_eval(self, line):
"""Filter based on channel
"""
ops = []
words = line.split(":", 1)
if len(words) == 1:
return ops
args, msg = words
argv = args.split(" ")
if len(argv) < 4:
return ops
sender, action, recipient = argv[:3]
path = "%s/%s" % (self.name, recipient)
for handler in self.handlers:
re = handler['channel_re']
if re.match(path):
# self.logger.console("F: %s %s" % (path, argv))
script_path = re.sub(handler['script'].replace("$", "\\"),
path)
ops += (botcode.OP_EVENT_SCRIPT,
script_path,
(sender, action, recipient, msg)),
return ops
def read(self):
"""Reading from connection
"""
if self.__state > STATE_DISCONNECTED:
incoming = self.__connection.recv(BUFFER_SIZE)
self.parser.append(incoming)
read_bytes = len(incoming)
first_line = incoming.split("\n")[0]
if len(first_line) > MAX_CONSOLE_LEN:
first_line = "%s..." % first_line[:MAX_CONSOLE_LEN]
self.logger.console(" IN [%4d] %s" % (read_bytes,
first_line))
def write(self, outgoing):
"""Writing to connection
"""
first_line = outgoing
outgoing = "".join((outgoing, "\r\n"))
write_bytes = len(outgoing)
if len(first_line) > MAX_CONSOLE_LEN:
first_line = "%s..." % first_line[:MAX_CONSOLE_LEN]
self.logger.console("OUT [%4d] %s" % (write_bytes,
first_line))
self.__connection.send(outgoing)
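# Rough usage sketch (illustrative only; the pynsour runner that reads the
# configuration and sets hostname/port on each Bot instance lives outside this
# module):
#
#   bot = Bot()
#   bot.hostname, bot.port = "irc.example.org", 6667  # assumed to come from config
#   bot.connect()
#   while True:
#       bot.event()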
| bsd-2-clause | -995,107,099,016,354,200 | 30.079787 | 74 | 0.475441 | false |
mlecours/netman | tests/adapters/unified_tests/configured_test_case.py | 1 | 3935 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import unittest
from functools import wraps
from unittest import SkipTest
from netman import raw_or_json
from netman.main import app
class ConfiguredTestCase(unittest.TestCase):
switch_specs = None
def setUp(self):
tested_switch = type(self).switch_specs
self.switch_hostname = tested_switch["hostname"]
self.switch_port = tested_switch["port"]
self.switch_type = tested_switch["model"]
self.switch_username = tested_switch["username"]
self.switch_password = tested_switch["password"]
self.test_port = tested_switch["test_port_name"]
def get(self, relative_url, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.get(**self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return json.loads(r.data)
def post(self, relative_url, data=None, raw_data=None, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.post(data=raw_or_json(raw_data, data), **self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def put(self, relative_url, data=None, raw_data=None, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.put(data=raw_or_json(raw_data, data), **self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def delete(self, relative_url, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.delete(**self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def request(self, relative_url):
logging.info("Querying " + ("http://netman.example.org%s" % relative_url.format(switch=self.switch_hostname, port=self.test_port)))
headers = {
'Netman-Model': self.switch_type,
'Netman-Username': self.switch_username,
'Netman-Password': self.switch_password,
'Netman-Port': self.switch_port
}
return {
"path": relative_url.format(switch=self.switch_hostname, port=self.test_port),
"headers": headers
}
def get_vlan(self, number):
data = self.get("/switches/{switch}/vlans")
vlan = next((vlan for vlan in data if vlan["number"] == number), None)
if not vlan:
raise AssertionError("Vlan #{} not found".format(number))
return vlan
def skip_on_switches(*to_skip):
def resource_decorator(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.switch_type in to_skip:
return fn(self, *args, **kwargs)
else:
raise SkipTest('Test not executed on Switch model %s' % self.switch_type)
return wrapper
return resource_decorator
| apache-2.0 | 8,079,829,964,718,263,000 | 37.203883 | 139 | 0.637865 | false |
plucena24/OpenClos | jnpr/openclos/cli_parser.py | 1 | 16382 | #------------------------------------------------------------------------------
# cli_parser.py
#------------------------------------------------------------------------------
'''
@author : rgiyer
Date : October 20th, 2014
This module is responsible for parsing the command model defined in
cliCommands.yaml and providing functions for:
    - validation of user input
    - invoking the execution handle for CLI commands or macro expansions
    - determining possible argument matches for command auto-completion,
      based on the current context
'''
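# Illustrative usage sketch (not part of the original module): a surrounding
# interactive shell is assumed to drive CLIUtil roughly as follows.
#
#   cli = CLIUtil ()
#   # tab / '?' keypress: list completions for a partial command
#   cli.print_results ( cli.get_match ( "create cabling" ) )
#   # <enter> keypress: validate the full command and invoke its handler
#   cli.validate_command_and_execute ( "create cabling-plan pod pod_2" )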
# Standard Python libraries
import os
import re
import inspect
import subprocess
# Packages required for openclos
import yaml
# openclos classes
import util
# cli related classes
from cli_handle_impl import CLIImplementor
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLICommand:
def __init__ ( self, cmd_access, cmd_handle, cmd_macro, cmd_desc ):
self.cmd_access = cmd_access
self.cmd_handle = cmd_handle
self.cmd_macro = cmd_macro
self.cmd_desc = cmd_desc
# end class CLICommand
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLIUtil:
def __init__ ( self ):
commandConfFile = os.path.join ( util.configLocation,
'cliCommands.yaml' )
self.yaml_file_stream = open ( commandConfFile, 'r' )
raw_graph = yaml.load ( self.yaml_file_stream )
self.cmd_graph = {}
self.indentation = 8
self.dump_cmd ( raw_graph )
self.yaml_file_stream.close ()
#------------------------------------------------------------------------------
def get_implementor_handle ( self, class_instance, handle_name ):
handles = inspect.getmembers ( class_instance,
predicate = inspect.ismethod )
for function_tuple in handles:
if ( handle_name == function_tuple [ 0 ] ):
return function_tuple [ 1 ]
# no match found
return 0
#------------------------------------------------------------------------------
    # Walk the command dictionary recursively, flattening nested "Args" into compound command keys:
def dump_cmd ( self,
cmds,
cmd_root="",
cmd_access="READ",
cmd_handle="",
cmd_macro="",
cmd_desc="" ):
for cmd in cmds:
if ( cmd_root == "" ):
cmd_compound = cmd
else:
cmd_compound = cmd_root + "_" + cmd
cmd_data = cmds [ cmd ]
# Get command access
if cmd_data.has_key ( "Access" ):
cmd_access = cmd_data [ "Access" ]
# Get command handler
if cmd_data.has_key ( "Handle" ):
cmd_handle = cmd_data [ "Handle" ]
elif ( cmd_handle != "" ):
cmd_handle = ""
# Get command macro
if cmd_data.has_key ( "Macro" ):
cmd_macro = cmd_data [ "Macro" ]
elif ( cmd_macro != "" ):
cmd_macro = ""
# Get command description
if cmd_data.has_key ( "Desc" ):
cmd_desc = cmd_data [ "Desc" ]
elif ( cmd_desc != "" ):
cmd_desc = ""
# Parse the arguments
if cmd_data.has_key ( "Args" ):
cmd_args = cmd_data [ "Args" ]
self.dump_cmd ( cmd_args,
cmd_compound,
cmd_access,
cmd_handle,
cmd_macro,
cmd_desc )
if cmd_data.has_key ( "Handle" ):
self.cmd_graph [ cmd_compound ] = CLICommand ( cmd_access,
cmd_handle,
cmd_macro,
cmd_desc )
if ( len ( cmd_compound ) > self.indentation ):
self.indentation = len ( cmd_compound )
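    # Hypothetical sketch of the YAML layout dump_cmd walks (the real
    # cliCommands.yaml shipped with OpenClos may differ):
    #
    #   create:
    #       Desc: "create resources"
    #       Args:
    #           cabling-plan:
    #               Handle: handle_create_cabling_plan
    #               Desc: "cabling plan for a pod"
    #
    # Nested Args keys are joined with underscores, so the entry above would be
    # registered in self.cmd_graph under the key "create_cabling-plan".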
#------------------------------------------------------------------------------
def normalize_command ( self, cmd ):
return cmd.replace ( " ", "_" )
#------------------------------------------------------------------------------
def get_indentation ( self, cmd ):
return ( self.indentation + 8 - len ( cmd ) )
#------------------------------------------------------------------------------
def suffix_macro_to_cmd ( self, macro_list, cmd ):
ret_cmd = []
for macro in macro_list:
ret_cmd.append ( self.normalize_command ( cmd + "_" + macro ) )
return ret_cmd
#------------------------------------------------------------------------------
def get_macro_list ( self, class_instance, macro_txt, add_help=None ):
fn_macro = self.get_implementor_handle ( class_instance, macro_txt )
return fn_macro ( add_help )
#------------------------------------------------------------------------------
def include_macro ( self, macro_list, ret_list ):
for item in macro_list:
ret_list.append ( item )
#------------------------------------------------------------------------------
def string_has_enter ( self, string ):
if ( re.search ( "<enter>", string ) != None ):
return 1
else:
return 0
#------------------------------------------------------------------------------
def add_enter_instruction ( self, result_list ):
if ( len ( result_list ) ):
string = result_list [ 0 ]
if ( self.string_has_enter ( string ) == 1 ):
return 0
        result_list.insert ( 0, " <enter>" + " " * self.get_indentation ( "<enter>" ) + "Execute the current command" )
#------------------------------------------------------------------------------
def match_macro ( self, macro_list, needle, ret_list ):
for haystack in macro_list:
if ( len ( needle ) == len ( haystack ) ):
if ( re.match ( needle, haystack ) != None ):
self.add_enter_instruction ( ret_list )
elif ( len ( needle ) < len ( haystack ) ):
if ( re.match ( needle, haystack ) != None ):
ret_list.append ( haystack )
else:
print ""
#------------------------------------------------------------------------------
def option_exists ( self, consider_option, ret_list ):
for option in ret_list:
if ( re.match ( option, consider_option ) != None ):
return 1
return 0
#------------------------------------------------------------------------------
def complete_command ( self,
part_cmd,
full_cmd,
end_index,
cmd_helper,
ret_list ):
unmatched_string = full_cmd [ end_index: ]
# This is an adjustment for "<space>" before tab / ? keypress
if ( part_cmd [ -1 ] == "_" ):
part_cmd = part_cmd [ 0:-1 ]
unmatched_string = "_" + unmatched_string
if ( unmatched_string [ 0 ] == "_" ):
# attach possible matches
possible_option = unmatched_string.replace ( "_", " " ) + ( " " * self.get_indentation ( full_cmd ) )
possible_option = possible_option + "<" + cmd_helper.cmd_desc + ">"
ret_list.append ( possible_option )
else:
# Get part of the command from part_cmd
match_object = re.search ( "_", part_cmd )
while ( match_object != None ):
part_cmd = part_cmd [ match_object.end (): ]
match_object = re.search ( "_", part_cmd )
# Get rest of the command from unmatched_string
match_object = re.search ( "_", unmatched_string )
if ( match_object != None ):
unmatched_string = unmatched_string [ :(match_object.end()-1)]
complete_word = part_cmd + unmatched_string
if ( self.option_exists ( complete_word, ret_list ) == 0 ):
ret_list.append ( complete_word )
return ret_list
#------------------------------------------------------------------------------
def get_all_cmds ( self ):
ret_list = []
for cmd in self.cmd_graph:
cmd_str = cmd.replace ( "_", " " )
cmd_str = cmd_str + ( " " * self.get_indentation ( cmd ) ) + "<" + self.cmd_graph [ cmd ].cmd_desc + ">"
ret_list.append ( cmd_str )
return ret_list
#------------------------------------------------------------------------------
    # There are many references below to a needle and a haystack: the needle is
    # the current command context typed at the CLI, and the haystack is the
    # command model dict built during CLIUtil instantiation.
#------------------------------------------------------------------------------
def get_match ( self, cmd ):
if ( len ( cmd ) == 0 or re.search ( "[a-z|A-Z|0-9]", cmd ) == None ):
return self.get_all_cmds ()
# chomp input string
if ( cmd [ -1 ] == " " ):
cmd = cmd [ 0:-1 ]
needle = self.normalize_command ( cmd )
ret_list = []
for haystack in self.cmd_graph:
len_haystack = len ( haystack )
len_needle = len ( needle )
cmd_helper = self.cmd_graph [ haystack ]
# Case 1: Full command is provided, without macro expansion
if ( len_needle == len_haystack ):
# check if we have a match
if ( re.match ( needle, haystack ) != None ):
if ( cmd_helper.cmd_macro != "" ):
self.include_macro ( self.get_macro_list ( CLIImplementor (), cmd_helper.cmd_macro, "add help" ), ret_list )
else:
self.add_enter_instruction ( ret_list )
# Case 2: Full command is provided with macro expansion
elif ( len_needle > len_haystack ):
match_object = re.match ( haystack, needle )
if ( match_object != None ):
# Match exists - so get the macro
cmd_macro = needle [ match_object.end (): ]
if ( cmd_macro [ 0 ] == "_" and len ( cmd_macro ) > 1 ):
cmd_macro = cmd_macro [ 1: ]
if ( cmd_helper.cmd_macro != "" ):
cmd_macro_list = self.get_macro_list ( CLIImplementor(),
cmd_helper.cmd_macro )
self.match_macro ( cmd_macro_list, cmd_macro, ret_list )
# Case 3: Part command is provided
elif ( len_needle < len_haystack ):
match_object = re.match ( needle, haystack )
if ( match_object != None ):
# Match exists - get rest of the command
balance_cmd = haystack [ match_object.end (): ]
self.complete_command ( needle,
haystack,
match_object.end (),
self.cmd_graph [ haystack ],
ret_list )
return ret_list
#------------------------------------------------------------------------------
def chomp ( self, token ):
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
return token
#------------------------------------------------------------------------------
def validate_command_and_execute ( self, full_cmd_context ):
# We will do the validation again in case this function is called
# outside the CLI context
best_cmd_match = ""
best_cmd_args = ""
best_cmd_handle = None
for command in self.cmd_graph:
match_object = re.match ( command,
self.normalize_command ( full_cmd_context ) )
if ( match_object != None ):
# Okay - we found a match. Get macros if included
command_args = ""
# TODO - different impl here for multiple args support
if ( len ( full_cmd_context ) > len ( command ) ):
command_args = self.chomp ( full_cmd_context [ match_object.end (): ] )
if ( len ( best_cmd_match ) < len ( command ) ):
best_cmd_match = command
best_cmd_args = command_args
best_cmd_handle = self.get_implementor_handle ( CLIImplementor (), self.cmd_graph [ command ].cmd_handle )
if ( best_cmd_handle != 0 ):
return best_cmd_handle ( best_cmd_args )
else:
print self.cmd_graph [ best_cmd_match ].cmd_handle + " not implemented"
#------------------------------------------------------------------------------
def print_results ( self, result_list ):
for result in result_list:
print "\t" + result
#------------------------------------------------------------------------------
def print_command_graph ( self, cmd_dict ):
for keys in cmd_dict:
print keys + "=>"
cmd = cmd_dict [ keys ]
if ( cmd.cmd_desc != "" ):
print " " + cmd.cmd_desc
print " " + cmd.cmd_access
if ( cmd.cmd_macro != "" ):
fn_macro = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_macro )
if ( fn_macro != 0 ):
print fn_macro ()
else:
print " Macro not implemented"
if ( cmd.cmd_handle != "" ):
fn_handle = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_handle )
if ( fn_handle != 0 ):
fn_handle ()
else:
print " Handler not implemented"
# end class CLIUtil
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
cli_util = CLIUtil ()
match_options = [ "create cabling",
# "create cabling-plan",
# "create cabling-",
# "create cabling",
# "create cabling-plan pod",
# "create cabling-plan pod pod_2",
# "create",
# "create dev",
# "create device-config",
# "create device-config p",
# "create device-config pod",
# "create device-config pod pod_1",
# "run",
# "update password",
# "run r",
# "run RE",
# "create cab",
"create pods",
"create pods from",
"create pods from-file",
"" ]
if __name__ == '__main__':
for match in match_options:
print "Matching results for " + match + " is:"
cli_util.print_results ( cli_util.get_match ( match ) )
print "------------------------------------------------------"
| apache-2.0 | -8,635,042,734,460,953,000 | 39.751244 | 132 | 0.398608 | false |
xapple/plumbing | plumbing/scraping/blockers.py | 1 | 1536 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
# First party modules #
from autopaths.file_path import FilePath
################################################################################
def check_blocked_request(tree):
"""
Check if the request was denied by the server.
And raise an exception if it was.
"""
# Modules #
from lxml import etree
# Did we get a filepath? #
if isinstance(tree, FilePath):
if tree.count_bytes > 1000000: return
tree = tree.contents
# Did we get a tree or raw text? #
if isinstance(tree, str): tree = etree.HTML(tree)
# By default we are good #
blocked = False
# Try Incapsula #
blocked = blocked or check_incapsula(tree)
# If we were indeed blocked, we can stop here #
if blocked: raise Exception("The request was flagged and blocked by the server.")
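# Typical call pattern (sketch only; "response_text" stands in for whatever
# HTML the caller just fetched):
#
#   check_blocked_request(response_text)  # raises if e.g. Incapsula served a block page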
################################################################################
def check_incapsula(tree):
# By default we are good #
blocked = False
# Result type 1 from Incapsula #
meta = tree.xpath("//head/meta[@name='ROBOTS']")
if meta and 'NOINDEX' in meta[0].get('content'): blocked = True
# Result type 2 from Incapsula #
meta = tree.xpath("//head/meta[@name='robots']")
if meta and 'noindex' in meta[0].get('content'): blocked = True
# If we were indeed blocked, we can stop here #
return blocked | mit | -2,501,178,769,367,362,000 | 30.367347 | 85 | 0.580729 | false |
OCA/account-invoicing | purchase_stock_picking_return_invoicing/tests/test_purchase_stock_picking_return_invoicing.py | 1 | 7543 | # Copyright 2019 Eficent Business and IT Consulting Services
# Copyright 2017-2018 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields
from odoo.tests.common import Form, SavepointCase
class TestPurchaseStockPickingReturnInvoicing(SavepointCase):
at_install = False
post_install = True
@classmethod
def setUpClass(cls):
"""Add some defaults to let the test run without an accounts chart."""
super(TestPurchaseStockPickingReturnInvoicing, cls).setUpClass()
cls.journal = cls.env["account.journal"].create(
{"name": "Test journal", "type": "purchase", "code": "TEST_J"}
)
cls.account_payable_type = cls.env["account.account.type"].create(
{
"name": "Payable account type",
"type": "payable",
"internal_group": "liability",
}
)
cls.account_expense_type = cls.env["account.account.type"].create(
{
"name": "Expense account type",
"type": "other",
"internal_group": "expense",
}
)
cls.payable_account = cls.env["account.account"].create(
{
"name": "Payable Account",
"code": "PAY",
"user_type_id": cls.account_payable_type.id,
"reconcile": True,
}
)
cls.expense_account = cls.env["account.account"].create(
{
"name": "Expense Account",
"code": "EXP",
"user_type_id": cls.account_expense_type.id,
"reconcile": False,
}
)
cls.partner = cls.env["res.partner"].create(
{"name": "Test partner", "is_company": True}
)
cls.partner.property_account_payable_id = cls.payable_account
cls.product_categ = cls.env["product.category"].create(
{"name": "Test category"}
)
cls.product = cls.env["product.product"].create(
{
"name": "test product",
"categ_id": cls.product_categ.id,
"uom_id": cls.env.ref("uom.product_uom_unit").id,
"uom_po_id": cls.env.ref("uom.product_uom_unit").id,
"default_code": "tpr1",
}
)
cls.product.property_account_expense_id = cls.expense_account
cls.po = cls.env["purchase.order"].create(
{
"partner_id": cls.partner.id,
"order_line": [
(
0,
0,
{
"name": cls.product.name,
"product_id": cls.product.id,
"product_qty": 5.0,
"product_uom": cls.product.uom_id.id,
"price_unit": 10,
"date_planned": fields.Datetime.now(),
},
)
],
}
)
cls.po_line = cls.po.order_line
cls.po.button_confirm()
def check_values(
self,
po_line,
qty_returned,
qty_received,
qty_refunded,
qty_invoiced,
invoice_status,
):
self.assertAlmostEqual(po_line.qty_returned, qty_returned, 2)
self.assertAlmostEqual(po_line.qty_received, qty_received, 2)
self.assertAlmostEqual(po_line.qty_refunded, qty_refunded, 2)
self.assertAlmostEqual(po_line.qty_invoiced, qty_invoiced, 2)
self.assertEqual(po_line.order_id.invoice_status, invoice_status)
def test_initial_state(self):
self.check_values(self.po_line, 0, 0, 0, 0, "no")
def test_purchase_stock_return_1(self):
"""Test a PO with received, invoiced, returned and refunded qty.
Receive and invoice the PO, then do a return of the picking.
Check that the invoicing status of the purchase, and quantities
received and billed are correct throughout the process.
"""
# receive completely
pick = self.po.picking_ids
pick.move_lines.write({"quantity_done": 5})
pick.button_validate()
self.check_values(self.po_line, 0, 5, 0, 0, "to invoice")
# Make invoice
ctx = self.po.action_view_invoice()["context"]
active_model = self.env["account.move"].with_context(ctx)
view_id = "account.view_move_form"
with Form(active_model, view=view_id) as f:
f.partner_id = self.partner
f.purchase_id = self.po
inv_1 = f.save()
self.check_values(self.po_line, 0, 5, 0, 5, "invoiced")
self.assertAlmostEqual(inv_1.amount_untaxed_signed, -50, 2)
# Return some items, after PO was invoiced
return_wizard = self.env["stock.return.picking"].create({"picking_id": pick.id})
return_wizard._onchange_picking_id()
return_wizard.product_return_moves.write({"quantity": 2, "to_refund": True})
return_pick = pick.browse(return_wizard.create_returns()["res_id"])
return_pick.move_lines.write({"quantity_done": 2})
return_pick.button_validate()
self.check_values(self.po_line, 2, 3, 0, 5, "to invoice")
# Make refund
ctx = self.po.action_view_invoice_refund()["context"]
active_model = self.env["account.move"].with_context(ctx)
view_id = "account.view_move_form"
with Form(active_model, view=view_id) as f:
f.partner_id = self.partner
f.purchase_id = self.po
inv_2 = f.save()
self.check_values(self.po_line, 2, 3, 2, 3, "invoiced")
self.assertAlmostEqual(inv_2.amount_untaxed_signed, 20, 2)
action = self.po.action_view_invoice()
self.assertEqual(action["res_id"], inv_1.id)
action2 = self.po.action_view_invoice_refund()
self.assertEqual(action2["res_id"], inv_2.id)
def test_purchase_stock_return_2(self):
"""Test a PO with received and returned qty, and invoiced after.
Receive the PO, then do a partial return of the picking.
Create a new invoice to get the bill for the remaining qty.
Check that the invoicing status of the purchase, and quantities
received and billed are correct throughout the process.
"""
pick = self.po.picking_ids
pick.move_lines.write({"quantity_done": 5})
pick.button_validate()
# Return some items before PO was invoiced
return_wizard = self.env["stock.return.picking"].create({"picking_id": pick.id})
return_wizard._onchange_picking_id()
return_wizard.product_return_moves.write({"quantity": 2, "to_refund": True})
return_pick = pick.browse(return_wizard.create_returns()["res_id"])
return_pick.move_lines.write({"quantity_done": 2})
return_pick.button_validate()
self.check_values(self.po_line, 2, 3, 0, 0, "to invoice")
# Make invoice
ctx = self.po.action_view_invoice()["context"]
active_model = self.env["account.move"].with_context(ctx)
view_id = "account.view_move_form"
with Form(active_model, view=view_id) as f:
f.partner_id = self.partner
f.purchase_id = self.po
inv_1 = f.save()
self.check_values(self.po_line, 2, 3, 0, 3, "invoiced")
self.assertAlmostEqual(inv_1.amount_untaxed_signed, -30, 2)
| agpl-3.0 | -1,908,722,734,729,567,500 | 40.445055 | 88 | 0.558796 | false |
juja256/tasks_manager | des/perm.py | 1 | 1503 | from bitarray import *
class Permutation:
def __init__(self, l):
# if sorted(l) != range(1, len(l)+1):
# raise ValueError("List is not valid!")
self.__bare = [i - 1 for i in l]
def Get(self):
return self.__bare
def Reverse(self):
rev = [0] * len(self.__bare)
for i in range(0, len(self.__bare)):
rev[self.__bare[i]] = i + 1
return Permutation(rev)
def Substitude(self, msg):
"""
        Substitutes all bits of the input message according to the permutation
"""
bits = bitarray()
if type(msg) == str or type(msg) == bytes:
bits.frombytes(msg)
elif type(msg) == bitarray:
bits = msg
else:
raise ValueError("Not valid type of input data")
res = bitarray(bits.length() * [0])
size = len(self.__bare)
for i in range(0, bits.length()):
res[i] = bits[(i // size) * size + self.__bare[i % size]]
return res
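    # Example (sanity check, not from the original source): with the 1-indexed
    # permutation [2, 1, 3], output bit i is taken from input position perm[i],
    # so the first two bits are swapped:
    #   Permutation([2, 1, 3]).Substitude(bitarray('011'))  ->  bitarray('101')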
def Reduce(self, block, size):
"""
Shrinks or extends block to specified size with permutation
"""
bits = bitarray()
if type(block) == str or type(block) == bytes:
bits.frombytes(block)
elif type(block) == bitarray:
bits = block
else:
raise ValueError("Not valid type of input data")
res = bitarray(size * [0])
for i in range(0, size):
res[i] = bits[self.__bare[i]]
return res | gpl-2.0 | 2,248,312,554,323,554,300 | 25.857143 | 69 | 0.503659 | false |
lzkelley/sne | scripts/import.py | 1 | 293386 | #!/usr/local/bin/python3.5
import csv
import os
import re
import urllib
import requests
import calendar
import sys
import json
import codecs
import resource
import argparse
import gzip
import io
import shutil
import statistics
import warnings
from datetime import timedelta, datetime
from glob import glob
from hashlib import md5
from html import unescape
from cdecimal import Decimal
from astroquery.vizier import Vizier
from astroquery.simbad import Simbad
from astroquery.irsa_dust import IrsaDust
from copy import deepcopy
from astropy import constants as const
from astropy import units as un
from astropy.io import fits
from astropy.time import Time as astrotime
from astropy.cosmology import Planck15 as cosmo, z_at_value
from collections import OrderedDict, Sequence
from math import log10, floor, sqrt, isnan, ceil
from bs4 import BeautifulSoup, Tag, NavigableString
from string import ascii_letters
from photometry import *
from tq import *
from digits import *
from repos import *
from events import *
parser = argparse.ArgumentParser(description='Generate a catalog JSON file and plot HTML files from SNE data.')
parser.add_argument('--update', '-u', dest='update', help='Only update catalog using live sources.', default=False, action='store_true')
parser.add_argument('--verbose', '-v', dest='verbose', help='Print more messages to the screen.', default=False, action='store_true')
parser.add_argument('--refresh', '-r', dest='refresh', help='Ignore most task caches.', default=False, action='store_true')
parser.add_argument('--full-refresh', '-f', dest='fullrefresh', help='Ignore all task caches.', default=False, action='store_true')
parser.add_argument('--archived', '-a', dest='archived', help='Always use task caches.', default=False, action='store_true')
parser.add_argument('--travis', '-tr', dest='travis', help='Run import script in test mode for Travis.', default=False, action='store_true')
parser.add_argument('--refreshlist', '-rl', dest='refreshlist', help='Comma-delimited list of caches to clear.', default='')
args = parser.parse_args()
tasks = OrderedDict([
("deleteoldevents", {"nicename":"Deleting old events", "update": False}),
("internal", {"nicename":"%pre metadata and photometry", "update": False}),
("radio", {"nicename":"%pre radio data", "update": False}),
("xray", {"nicename":"%pre X-ray data", "update": False}),
("simbad", {"nicename":"%pre SIMBAD", "update": False}),
("vizier", {"nicename":"%pre VizieR", "update": False}),
("donations", {"nicename":"%pre donations", "update": False}),
("pessto-dr1", {"nicename":"%pre PESSTO DR1", "update": False}),
("scp", {"nicename":"%pre SCP", "update": False}),
("ascii", {"nicename":"%pre ASCII", "update": False}),
("cccp", {"nicename":"%pre CCCP", "update": False, "archived": True}),
("suspect", {"nicename":"%pre SUSPECT", "update": False}),
("cfa", {"nicename":"%pre CfA archive photometry", "update": False}),
("ucb", {"nicename":"%pre UCB photometry", "update": False, "archived": True}),
("sdss", {"nicename":"%pre SDSS photometry", "update": False}),
("csp", {"nicename":"%pre CSP photometry", "update": False}),
("itep", {"nicename":"%pre ITEP", "update": False}),
("asiago", {"nicename":"%pre Asiago metadata", "update": False}),
("tns", {"nicename":"%pre TNS metadata", "update": True, "archived": True}),
("rochester", {"nicename":"%pre Latest Supernovae", "update": True, "archived": False}),
("lennarz", {"nicename":"%pre Lennarz", "update": False}),
("fermi", {"nicename":"%pre Fermi", "update": False}),
("gaia", {"nicename":"%pre GAIA", "update": True, "archived": False}),
("ogle", {"nicename":"%pre OGLE", "update": True, "archived": False}),
("snls", {"nicename":"%pre SNLS", "update": False}),
("psthreepi", {"nicename":"%pre Pan-STARRS 3π", "update": True, "archived": False}),
("psmds", {"nicename":"%pre Pan-STARRS MDS", "update": False}),
("crts", {"nicename":"%pre CRTS", "update": True, "archived": False}),
("snhunt", {"nicename":"%pre SNhunt", "update": True, "archived": False}),
("nedd", {"nicename":"%pre NED-D", "update": False}),
("cpcs", {"nicename":"%pre CPCS", "update": True, "archived": False}),
("ptf", {"nicename":"%pre PTF", "update": False, "archived": False}),
("des", {"nicename":"%pre DES", "update": False, "archived": False}),
("asassn", {"nicename":"%pre ASASSN", "update": True }),
#("asiagospectra", {"nicename":"%pre Asiago spectra", "update": True }),
#("wiserepspectra", {"nicename":"%pre WISeREP spectra", "update": False}),
#("cfaspectra", {"nicename":"%pre CfA archive spectra", "update": False}),
#("snlsspectra", {"nicename":"%pre SNLS spectra", "update": False}),
#("cspspectra", {"nicename":"%pre CSP spectra", "update": False}),
#("ucbspectra", {"nicename":"%pre UCB spectra", "update": True, "archived": True}),
#("suspectspectra", {"nicename":"%pre SUSPECT spectra", "update": False}),
#("snfspectra", {"nicename":"%pre SNH spectra", "update": False}),
#("superfitspectra", {"nicename":"%pre Superfit spectra", "update": False}),
#("mergeduplicates", {"nicename":"Merging duplicates", "update": False}),
#("setprefnames", {"nicename":"Setting preferred names", "update": False}),
("writeevents", {"nicename":"Writing events", "update": True })
])
oscbibcode = '2016arXiv160501054G'
oscname = 'The Open Supernova Catalog'
oscurl = 'https://sne.space'
cfaack = ("This research has made use of the CfA Supernova Archive, "
"which is funded in part by the National Science Foundation "
"through grant AST 0907903.")
clight = const.c.cgs.value
km = (1.0 * un.km).cgs.value
planckh = const.h.cgs.value
keV = (1.0 * un.keV).cgs.value
travislimit = 10
currenttask = ''
eventnames = []
events = OrderedDict()
warnings.filterwarnings('ignore', r'Warning: converting a masked element to nan.')
with open('type-synonyms.json', 'r') as f:
typereps = json.loads(f.read(), object_pairs_hook=OrderedDict)
with open('source-synonyms.json', 'r') as f:
sourcereps = json.loads(f.read(), object_pairs_hook=OrderedDict)
with open('non-sne-types.json', 'r') as f:
nonsnetypes = json.loads(f.read(), object_pairs_hook=OrderedDict)
nonsnetypes = [x.upper() for x in nonsnetypes]
repbetterquantity = {
'redshift',
'ebv',
'velocity',
'lumdist',
'discoverdate',
'maxdate'
}
maxbands = [
['B', 'b', 'g'], # B-like bands first
['V', 'G'], # if not, V-like bands
['R', 'r'] # if not, R-like bands
]
def uniq_cdl(values):
return ','.join(list(OrderedDict.fromkeys(values).keys()))
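# e.g. uniq_cdl(['2', '5', '2', '3']) -> '2,5,3'; OrderedDict.fromkeys drops
# duplicate source aliases while preserving their first-seen order.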
def event_attr_priority(attr):
if attr == 'photometry':
return 'zzzzzzzy'
if attr == 'spectra':
return 'zzzzzzzz'
if attr == 'name':
return 'aaaaaaaa'
if attr == 'sources':
return 'aaaaaaab'
if attr == 'alias':
return 'aaaaaaac'
return attr
prefkinds = ['heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host', 'cluster', '']
def frame_priority(attr):
if 'kind' in attr:
if attr['kind'] in prefkinds:
return prefkinds.index(attr['kind'])
else:
return len(prefkinds)
return len(prefkinds)
def alias_priority(name, attr):
if name == attr:
return 0
return 1
def ct_priority(name, attr):
aliases = attr['source'].split(',')
max_source_year = -10000
vaguetypes = ['CC', 'I']
if attr['value'] in vaguetypes:
return -max_source_year
for alias in aliases:
if alias == 'D':
continue
source = get_source_by_alias(name, alias)
if 'bibcode' in source:
source_year = get_source_year(source)
if source_year > max_source_year:
max_source_year = source_year
return -max_source_year
def get_source_year(source):
if 'bibcode' in source:
if is_number(source['bibcode'][:4]):
return int(source['bibcode'][:4])
else:
return -10000
raise(ValueError('No bibcode available for source!'))
def name_clean(name):
newname = name.strip(' ;,*')
if newname.startswith('MASJ'):
newname = newname.replace('MASJ', 'MASTER OT J', 1)
if newname.startswith('MASTER') and is_number(newname[7]):
newname = newname.replace('MASTER', 'MASTER OT J', 1)
if newname.startswith('MASTER OT J '):
newname = newname.replace('MASTER OT J ', 'MASTER OT J', 1)
if newname.startswith('Psn'):
newname = newname.replace('Psn', 'PSN', 1)
if newname.startswith('PSNJ'):
newname = newname.replace('PSNJ', 'PSN J', 1)
if newname.startswith('TCPJ'):
newname = newname.replace('TCPJ', 'TCP J', 1)
if newname.startswith('SMTJ'):
newname = newname.replace('SMTJ', 'SMT J', 1)
if newname.startswith('PSN20J'):
newname = newname.replace('PSN20J', 'PSN J', 1)
if newname.startswith('ASASSN') and newname[6] != '-':
newname = newname.replace('ASASSN', 'ASASSN-', 1)
if newname.startswith('ROTSE3J'):
newname = newname.replace('ROTSE3J', 'ROTSE3 J', 1)
if newname.startswith('SNHunt'):
newname = newname.replace('SNHunt', 'SNhunt', 1)
if newname.startswith('ptf'):
newname = newname.replace('ptf', 'PTF', 1)
if newname.startswith('PTF '):
newname = newname.replace('PTF ', 'PTF', 1)
if newname.startswith('iPTF '):
newname = newname.replace('iPTF ', 'iPTF', 1)
if newname.startswith('SNHunt'):
newname = newname.replace('SNHunt', 'SNhunt', 1)
if newname.startswith('PESSTOESO'):
newname = newname.replace('PESSTOESO', 'PESSTO ESO ', 1)
if newname.startswith('snf'):
newname = newname.replace('snf', 'SNF', 1)
if newname.startswith('SNF') and is_number(newname[3:]) and len(newname) >= 12:
newname = 'SNF' + newname[3:11] + '-' + newname[11:]
if newname.startswith(('MASTER OT J', 'ROTSE3 J')):
prefix = newname.split('J')[0]
coords = newname.split('J')[-1].strip()
decsign = '+' if '+' in coords else '-'
coordsplit = coords.replace('+','-').split('-')
if '.' not in coordsplit[0] and len(coordsplit[0]) > 6 and '.' not in coordsplit[1] and len(coordsplit[1]) > 6:
newname = (prefix + 'J' + coordsplit[0][:6] + '.' + coordsplit[0][6:] +
decsign + coordsplit[1][:6] + '.' + coordsplit[1][6:])
if newname.startswith('Gaia ') and is_number(newname[3:4]) and len(newname) > 5:
newname = newname.replace('Gaia ', 'Gaia', 1)
if len(newname) <= 4 and is_number(newname):
newname = 'SN' + newname + 'A'
if len(newname) > 4 and is_number(newname[:4]) and not is_number(newname[4:]):
newname = 'SN' + newname
if newname.startswith('sn') and is_number(newname[2:6]) and len(newname) > 6:
newname = newname.replace('sn', 'SN', 1)
if newname.startswith('SN ') and is_number(newname[3:7]) and len(newname) > 7:
newname = newname.replace('SN ', 'SN', 1)
if newname.startswith('SN') and is_number(newname[2:6]) and len(newname) == 7 and newname[6].islower():
newname = 'SN' + newname[2:6] + newname[6].upper()
elif (newname.startswith('SN') and is_number(newname[2:6]) and
(len(newname) == 8 or len(newname) == 9) and newname[6:].isupper()):
newname = 'SN' + newname[2:6] + newname[6:].lower()
newname = (' '.join(newname.split())).strip()
return newname
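# A couple of illustrative normalizations, traced from the rules above (the
# input strings are arbitrary examples, not taken from the catalog):
#   name_clean('1987a')    -> 'SN1987A'
#   name_clean('ptf10xyz') -> 'PTF10xyz'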
def get_aliases(name, includename = True):
if 'alias' in events[name]:
aliases = [x['value'] for x in events[name]['alias']]
if includename and name not in aliases:
return [name] + aliases
return aliases
if includename:
return [name]
return []
def add_event(name, load = True, delete = True, source = '', loadifempty = True):
if loadifempty and args.update and not len(events):
load_stubs()
newname = name_clean(name)
if newname not in events or 'stub' in events[newname]:
match = ''
if newname not in events:
for event in events:
aliases = get_aliases(event)
if (len(aliases) > 1 and newname in aliases and
('distinctfrom' not in events[event] or newname not in events[event]['distinctfrom'])):
match = event
break
if match:
newname = match
if load:
loadedname = load_event_from_file(name = newname, delete = delete)
if loadedname:
if 'stub' in events[loadedname]:
raise(ValueError('Failed to find event file for stubbed event'))
return loadedname
if match:
return match
events[newname] = OrderedDict()
events[newname]['name'] = newname
if source:
add_quantity(newname, 'alias', newname, source)
if args.verbose and 'stub' not in events[newname]:
tprint('Added new event ' + newname)
return newname
else:
return newname
def event_exists(name):
if name in events:
return True
for ev in events:
if name in get_aliases(ev):
return True
return False
def get_preferred_name(name):
if name not in events:
matches = []
for event in events:
aliases = get_aliases(event)
if len(aliases) > 1 and name in aliases:
return event
return name
else:
return name
def snname(string):
newstring = string.replace(' ', '').upper()
if (newstring[:2] == "SN"):
head = newstring[:6]
tail = newstring[6:]
if len(tail) >= 2 and tail[1] != '?':
tail = tail.lower()
newstring = head + tail
return newstring
def add_source(name, refname = '', reference = '', url = '', bibcode = '', secondary = '', acknowledgment = ''):
nsources = len(events[name]['sources']) if 'sources' in events[name] else 0
if not refname:
if not bibcode:
raise(ValueError('Bibcode must be specified if name is not.'))
if bibcode and len(bibcode) != 19:
raise(ValueError('Bibcode "' + bibcode + '" must be exactly 19 characters long'))
refname = bibcode
if refname.upper().startswith('ATEL') and not bibcode:
refname = refname.replace('ATEL', 'ATel').replace('Atel', 'ATel').replace('ATel #', 'ATel ').replace('ATel#', 'ATel').replace('ATel', 'ATel ')
refname = ' '.join(refname.split())
atelnum = refname.split()[-1]
if is_number(atelnum) and atelnum in atelsdict:
bibcode = atelsdict[atelnum]
if refname.upper().startswith('CBET') and not bibcode:
refname = refname.replace('CBET', 'CBET ')
refname = ' '.join(refname.split())
cbetnum = refname.split()[-1]
if is_number(cbetnum) and cbetnum in cbetsdict:
bibcode = cbetsdict[cbetnum]
if refname.upper().startswith('IAUC') and not bibcode:
refname = refname.replace('IAUC', 'IAUC ')
refname = ' '.join(refname.split())
iaucnum = refname.split()[-1]
if is_number(iaucnum) and iaucnum in iaucsdict:
bibcode = iaucsdict[iaucnum]
for rep in sourcereps:
if refname in sourcereps[rep]:
refname = rep
break
if 'sources' not in events[name] or (refname not in [x['name'] for x in events[name]['sources']] and
(not bibcode or bibcode not in [x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']])):
source = str(nsources + 1)
newsource = OrderedDict()
newsource['name'] = refname
if url:
newsource['url'] = url
if reference:
newsource['reference'] = reference
if bibcode:
newsource['bibcode'] = bibcode
if acknowledgment:
newsource['acknowledgment'] = acknowledgment
newsource['alias'] = source
if secondary:
newsource['secondary'] = True
events[name].setdefault('sources',[]).append(newsource)
else:
if refname in [x['name'] for x in events[name]['sources']]:
source = [x['alias'] for x in events[name]['sources']][
[x['name'] for x in events[name]['sources']].index(refname)]
elif bibcode and bibcode in [x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']]:
source = [x['alias'] for x in events[name]['sources']][
[x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']].index(bibcode)]
else:
raise(ValueError("Couldn't find source that should exist!"))
return source
def get_source_by_alias(name, alias):
for source in events[name]['sources']:
if source['alias'] == alias:
return source
raise(ValueError('Source alias not found!'))
def same_tag_str(photo, val, tag):
issame = ((tag not in photo and not val) or (tag in photo and not val) or (tag in photo and photo[tag] == val))
return issame
def same_tag_num(photo, val, tag, canbelist = False):
issame = ((tag not in photo and not val) or (tag in photo and not val) or (tag in photo and
((not canbelist and Decimal(photo[tag]) == Decimal(val)) or
(canbelist and
((isinstance(photo[tag], str) and isinstance(val, str) and Decimal(photo[tag]) == Decimal(val)) or
(isinstance(photo[tag], list) and isinstance(val, list) and photo[tag] == val))))))
return issame
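# Note: both helpers treat an empty incoming value as matching whatever is
# already stored, e.g. same_tag_str({'band': 'B'}, '', 'band') is True while
# same_tag_str({'band': 'B'}, 'V', 'band') is False; the duplicate check in
# add_photometry below relies on this behaviour.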
def add_photometry(name, time = "", u_time = "MJD", e_time = "", telescope = "", instrument = "", band = "",
magnitude = "", e_magnitude = "", source = "", upperlimit = False, system = "",
observatory = "", observer = "", host = False, includeshost = False, survey = "",
flux = "", fluxdensity = "", e_flux = "", e_fluxdensity = "", u_flux = "", u_fluxdensity = "", frequency = "",
u_frequency = "", counts = "", e_counts = "", nhmw = "", photonindex = "", unabsorbedflux = "",
e_unabsorbedflux = "", energy = "", u_energy = "", e_lower_magnitude = "", e_upper_magnitude = ""):
if (not time and not host) or (not magnitude and not flux and not fluxdensity and not counts and not unabsorbedflux):
warnings.warn('Time or brightness not specified when adding photometry, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB magnitude: "' + magnitude + '"')
return
if (not host and not is_number(time)) or (not is_number(magnitude) and not is_number(flux) and not is_number(fluxdensity) and not is_number(counts)):
warnings.warn('Time or brightness not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB magnitude: "' + magnitude + '"')
return
if ((e_magnitude and not is_number(e_magnitude)) or (e_flux and not is_number(e_flux)) or
(e_fluxdensity and not is_number(e_fluxdensity)) or (e_counts and not is_number(e_counts))):
warnings.warn('Brightness error not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB error: "' + e_magnitude + '"')
return
if e_time and not is_number(e_time):
warnings.warn('Time error not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Time error: "' + e_time + '"')
return
if (flux or fluxdensity) and ((not u_flux and not u_fluxdensity) or (not frequency and not band and not energy)):
warnings.warn('Unit and band/frequency must be set when adding photometry by flux or flux density, not adding.')
tprint('Name : "' + name + '", Time: "' + time)
return
    if not source:
        raise(ValueError('Photometry must have source before being added!'))
if is_erroneous(name, 'photometry', source):
return
# Do some basic homogenization
sband = bandrepf(band)
sinstrument = instrument
ssystem = system
stelescope = telescope
if not sinstrument:
sinstrument = bandmetaf(sband, 'instrument')
if not stelescope:
stelescope = bandmetaf(sband, 'telescope')
if not ssystem:
ssystem = bandmetaf(sband, 'system')
# Look for duplicate data and don't add if duplicate
if 'photometry' in events[name]:
for photo in events[name]['photometry']:
if (same_tag_str(photo, sband, 'band') and
same_tag_str(photo, u_time, 'u_time') and
same_tag_num(photo, time, 'time', canbelist = True) and
same_tag_num(photo, magnitude, 'magnitude') and
(('host' not in photo and not host) or ('host' in photo and host)) and
same_tag_num(photo, flux, 'flux') and
same_tag_num(photo, unabsorbedflux, 'unabsorbedflux') and
same_tag_num(photo, fluxdensity, 'fluxdensity') and
same_tag_num(photo, counts, 'counts') and
same_tag_num(photo, energy, 'energy') and
same_tag_num(photo, frequency, 'frequency') and
same_tag_num(photo, photonindex, 'photonindex') and
same_tag_num(photo, e_magnitude, 'e_magnitude') and
same_tag_num(photo, e_lower_magnitude, 'e_lower_magnitude') and
same_tag_num(photo, e_upper_magnitude, 'e_upper_magnitude') and
same_tag_num(photo, e_flux, 'e_flux') and
same_tag_num(photo, e_unabsorbedflux, 'e_unabsorbedflux') and
same_tag_num(photo, e_fluxdensity, 'e_fluxdensity') and
same_tag_num(photo, e_counts, 'e_counts') and
same_tag_num(photo, u_flux, 'u_flux') and
same_tag_num(photo, u_fluxdensity, 'u_fluxdensity') and
same_tag_num(photo, u_frequency, 'u_frequency') and
same_tag_num(photo, u_energy, 'u_energy') and
same_tag_str(photo, ssystem, 'system')
):
return
photoentry = OrderedDict()
if time:
photoentry['time'] = time if isinstance(time, list) or isinstance(time, str) else str(time)
if e_time:
photoentry['e_time'] = str(e_time)
if u_time:
photoentry['u_time'] = u_time
if sband:
photoentry['band'] = sband
if ssystem:
photoentry['system'] = ssystem
if magnitude:
photoentry['magnitude'] = str(magnitude)
if e_magnitude:
photoentry['e_magnitude'] = str(e_magnitude)
if e_lower_magnitude:
photoentry['e_lower_magnitude'] = str(e_lower_magnitude)
if e_upper_magnitude:
photoentry['e_upper_magnitude'] = str(e_upper_magnitude)
if frequency:
photoentry['frequency'] = frequency if isinstance(frequency, list) or isinstance(frequency, str) else str(frequency)
if u_frequency:
photoentry['u_frequency'] = u_frequency
if energy:
photoentry['energy'] = energy if isinstance(energy, list) or isinstance(energy, str) else str(energy)
if u_energy:
photoentry['u_energy'] = u_energy
if flux:
photoentry['flux'] = str(flux)
if e_flux:
photoentry['e_flux'] = str(e_flux)
if unabsorbedflux:
photoentry['unabsorbedflux'] = str(unabsorbedflux)
if e_unabsorbedflux:
photoentry['e_unabsorbedflux'] = str(e_unabsorbedflux)
if u_flux:
photoentry['u_flux'] = str(u_flux)
if photonindex:
photoentry['photonindex'] = str(photonindex)
if fluxdensity:
photoentry['fluxdensity'] = str(fluxdensity)
if e_fluxdensity:
photoentry['e_fluxdensity'] = str(e_fluxdensity)
if u_fluxdensity:
photoentry['u_fluxdensity'] = str(u_fluxdensity)
if counts:
photoentry['counts'] = str(counts)
if e_counts:
photoentry['e_counts'] = str(e_counts)
if upperlimit:
photoentry['upperlimit'] = upperlimit
if host:
photoentry['host'] = host
if includeshost:
photoentry['includeshost'] = includeshost
if observer:
photoentry['observer'] = observer
if survey:
photoentry['survey'] = survey
if observatory:
photoentry['observatory'] = observatory
if stelescope:
photoentry['telescope'] = stelescope
if sinstrument:
photoentry['instrument'] = sinstrument
if nhmw:
photoentry['nhmw'] = nhmw
if source:
photoentry['source'] = source
events[name].setdefault('photometry',[]).append(photoentry)
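# Round overly long numeric strings in an array to `length` significant digits,
# keeping the original string whenever rounding would not actually shorten it.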
def trim_str_arr(arr, length = 10):
return [str(round_sig(float(x), length)) if (len(x) > length and len(str(round_sig(float(x), length))) < len(x)) else x for x in arr]
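# Add a spectrum to an event. Requires wave and flux units plus either a pre-built
# 'data' table or separate wavelength/flux (and optional error) arrays; an existing
# spectrum with the same filename is replaced unless it already contains data.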
def add_spectrum(name, waveunit, fluxunit, wavelengths = "", fluxes = "", u_time = "", time = "", instrument = "",
deredshifted = "", dereddened = "", errorunit = "", errors = "", source = "", snr = "", telescope = "",
observer = "", reducer = "", filename = "", observatory = "", data = ""):
if is_erroneous(name, 'spectra', source):
return
spectrumentry = OrderedDict()
if 'spectra' in events[name]:
for si, spectrum in enumerate(events[name]['spectra']):
if 'filename' in spectrum and spectrum['filename'] == filename:
# Copy exclude info
if 'exclude' in spectrum:
spectrumentry['exclude'] = spectrum['exclude']
# Don't add duplicate spectra
if 'data' in spectrum:
return
del(events[name]['spectra'][si])
break
if not waveunit:
        warnings.warn('No wave unit specified, not adding spectrum.')
return
if not fluxunit:
warnings.warn('No flux unit specified, not adding spectrum.')
return
    if not data and (not wavelengths or not fluxes):
        raise ValueError('Spectrum must have wavelengths and fluxes set, or data set.')
if not source:
        raise ValueError('Spectrum must have source before being added!')
if deredshifted != '':
spectrumentry['deredshifted'] = deredshifted
if dereddened != '':
spectrumentry['dereddened'] = dereddened
if instrument:
spectrumentry['instrument'] = instrument
if telescope:
spectrumentry['telescope'] = telescope
if observatory:
spectrumentry['observatory'] = observatory
if u_time:
spectrumentry['u_time'] = u_time
if time:
spectrumentry['time'] = time
if snr:
spectrumentry['snr'] = snr
if observer:
spectrumentry['observer'] = observer
if reducer:
spectrumentry['reducer'] = reducer
if filename:
spectrumentry['filename'] = filename
spectrumentry['waveunit'] = waveunit
spectrumentry['fluxunit'] = fluxunit
if data:
spectrumentry['data'] = data
else:
if errors and max([float(x) for x in errors]) > 0.:
if not errorunit:
warnings.warn('No error unit specified, not adding spectrum.')
return
spectrumentry['errorunit'] = errorunit
data = [trim_str_arr(wavelengths), trim_str_arr(fluxes), trim_str_arr(errors)]
else:
data = [trim_str_arr(wavelengths), trim_str_arr(fluxes)]
spectrumentry['data'] = [list(i) for i in zip(*data)]
if source:
spectrumentry['source'] = source
events[name].setdefault('spectra',[]).append(spectrumentry)
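# Return True if any of the comma-delimited source aliases is flagged in the event's
# 'errors' list for the given field, meaning data from that source should be ignored.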
def is_erroneous(name, field, sources):
if 'errors' in events[name]:
for alias in sources.split(','):
source = get_source_by_alias(name, alias)
if ('bibcode' in source and source['bibcode'] in
[x['value'] for x in events[name]['errors'] if x['kind'] == 'bibcode' and x['extra'] == field]):
return True
if ('name' in source and source['name'] in
[x['value'] for x in events[name]['errors'] if x['kind'] == 'name' and x['extra'] == field]):
return True
return False
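# Add a generic quantity (redshift, host, claimed type, coordinates, dates, etc.) to
# an event, normalizing the value, attaching sources, and merging with or replacing
# existing entries where appropriate. Example usage (hypothetical values):
#   add_quantity(name, 'redshift', '0.021', source, kind = 'heliocentric')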
def add_quantity(name, quantity, value, sources, forcereplacebetter = False,
lowerlimit = '', upperlimit = '', error = '', unit = '', kind = '', extra = ''):
if not quantity:
raise(ValueError('Quantity must be specified for add_quantity.'))
if not sources:
raise(ValueError('Source must be specified for quantity before it is added.'))
if not isinstance(value, str) and (not isinstance(value, list) or not isinstance(value[0], str)):
raise(ValueError('Quantity must be a string or an array of strings.'))
if is_erroneous(name, quantity, sources):
return
svalue = value.strip()
serror = error.strip()
skind = kind.strip()
sunit = ''
if not svalue or svalue == '--' or svalue == '-':
return
if serror and (not is_number(serror) or float(serror) < 0.):
raise(ValueError('Quanta error value must be a number and positive.'))
#Set default units
if not unit and quantity == 'velocity':
unit = 'km/s'
if not unit and quantity == 'ra':
unit = 'hours'
if not unit and quantity == 'dec':
unit = 'degrees'
if not unit and quantity in ['lumdist', 'comovingdist']:
unit = 'Mpc'
#Handle certain quantity
if quantity == 'alias':
svalue = name_clean(svalue)
if 'distinctfrom' in events[name]:
if svalue in [x['value'] for x in events[name]['distinctfrom']]:
return
if quantity in ['velocity', 'redshift', 'ebv', 'lumdist', 'comovingdist']:
if not is_number(svalue):
return
if quantity == 'host':
if is_number(svalue):
return
if svalue.lower() in ['anonymous', 'anon.', 'anon', 'intergalactic']:
return
if svalue.startswith('M ') and is_number(svalue[2:]):
            svalue = svalue.replace('M ', 'M', 1)
svalue = svalue.strip("()").replace(' ', ' ', 1)
svalue = svalue.replace("Abell", "Abell ", 1)
svalue = svalue.replace("APMUKS(BJ)", "APMUKS(BJ) ", 1)
svalue = svalue.replace("ARP", "ARP ", 1)
svalue = svalue.replace("CGCG", "CGCG ", 1)
svalue = svalue.replace("HOLM", "HOLM ", 1)
svalue = svalue.replace("IC", "IC ", 1)
svalue = svalue.replace("Intergal.", "Intergalactic", 1)
svalue = svalue.replace("MCG+", "MCG +", 1)
svalue = svalue.replace("MCG-", "MCG -", 1)
svalue = svalue.replace("M+", "MCG +", 1)
svalue = svalue.replace("M-", "MCG -", 1)
svalue = svalue.replace("MGC ", "MCG ", 1)
svalue = svalue.replace("Mrk", "MRK", 1)
svalue = svalue.replace("MRK", "MRK ", 1)
svalue = svalue.replace("NGC", "NGC ", 1)
svalue = svalue.replace("PGC", "PGC ", 1)
svalue = svalue.replace("SDSS", "SDSS ", 1)
svalue = svalue.replace("UGC", "UGC ", 1)
if len(svalue) > 4 and svalue.startswith("PGC "):
svalue = svalue[:4] + svalue[4:].lstrip(" 0")
if len(svalue) > 4 and svalue.startswith("UGC "):
svalue = svalue[:4] + svalue[4:].lstrip(" 0")
if len(svalue) > 5 and svalue.startswith(("MCG +", "MCG -")):
svalue = svalue[:5] + '-'.join([x.zfill(2) for x in svalue[5:].strip().split("-")])
if len(svalue) > 5 and svalue.startswith("CGCG "):
svalue = svalue[:5] + '-'.join([x.zfill(3) for x in svalue[5:].strip().split("-")])
if (len(svalue) > 1 and svalue.startswith("E")) or (len(svalue) > 3 and svalue.startswith('ESO')):
if svalue[0] == "E":
esplit = svalue[1:].split("-")
else:
esplit = svalue[3:].split("-")
if len(esplit) == 2 and is_number(esplit[0].strip()):
if esplit[1].strip()[0] == 'G':
parttwo = esplit[1][1:].strip()
else:
parttwo = esplit[1].strip()
if is_number(parttwo.strip()):
svalue = 'ESO ' + esplit[0].lstrip('0') + '-G' + parttwo.lstrip('0')
svalue = ' '.join(svalue.split())
if (not skind and ((svalue.lower().startswith('abell') and is_number(svalue[5:].strip())) or
'cluster' in svalue.lower())):
skind = 'cluster'
elif quantity == 'claimedtype':
isq = False
svalue = svalue.replace('young', '')
if '?' in svalue:
isq = True
svalue = svalue.strip(' ?')
for rep in typereps:
if svalue in typereps[rep]:
svalue = rep
break
if isq:
svalue = svalue + '?'
elif quantity in ['ra', 'dec', 'hostra', 'hostdec']:
if unit == 'floatdegrees':
deg = float('%g' % Decimal(svalue))
sig = get_sig_digits(svalue)
if 'ra' in quantity:
flhours = deg / 360.0 * 24.0
hours = floor(flhours)
minutes = floor((flhours - hours) * 60.0)
seconds = (flhours * 60.0 - (hours * 60.0 + minutes)) * 60.0
if seconds > 60.0:
raise(ValueError('Invalid seconds value for ' + quantity))
svalue = str(hours).zfill(2) + ':' + str(minutes).zfill(2) + ':' + zpad(pretty_num(seconds, sig = sig - 1))
elif 'dec' in quantity:
fldeg = abs(deg)
degree = floor(fldeg)
minutes = floor((fldeg - degree) * 60.0)
seconds = (fldeg * 60.0 - (degree * 60.0 + minutes)) * 60.0
if seconds > 60.0:
raise(ValueError('Invalid seconds value for ' + quantity))
svalue = (('+' if deg >= 0.0 else '-') + str(degree).strip('+-').zfill(2) + ':' +
str(minutes).zfill(2) + ':' + zpad(pretty_num(seconds, sig = sig - 1)))
elif unit == 'nospace' and 'ra' in quantity:
svalue = svalue[:2] + ':' + svalue[2:4] + ((':' + zpad(svalue[4:])) if len(svalue) > 4 else '')
elif unit == 'nospace' and 'dec' in quantity:
if svalue.startswith(('+', '-')):
svalue = svalue[:3] + ':' + svalue[3:5] + ((':' + zpad(svalue[5:])) if len(svalue) > 5 else '')
else:
svalue = '+' + svalue[:2] + ':' + svalue[2:4] + ((':' + zpad(svalue[4:])) if len(svalue) > 4 else '')
else:
svalue = svalue.replace(' ', ':')
if 'dec' in quantity:
valuesplit = svalue.split(':')
svalue = (('-' if valuesplit[0].startswith('-') else '+') + valuesplit[0].strip('+-').zfill(2) +
(':' + valuesplit[1].zfill(2) if len(valuesplit) > 1 else '') +
(':' + zpad(valuesplit[2]) if len(valuesplit) > 2 else ''))
if 'ra' in quantity:
sunit = 'hours'
elif 'dec' in quantity:
sunit = 'degrees'
# Correct case of arcseconds = 60.0.
valuesplit = svalue.split(':')
if len(valuesplit) == 3 and valuesplit[-1] in ["60.0", "60.", "60"]:
svalue = valuesplit[0] + ':' + str(Decimal(valuesplit[1]) + Decimal(1.0)) + ':' + "00.0"
# Strip trailing dots.
svalue = svalue.rstrip('.')
elif quantity == 'maxdate' or quantity == 'discoverdate':
# Make sure month and day have leading zeroes
sparts = svalue.split('/')
if len(sparts) >= 2:
svalue = sparts[0] + '/' + sparts[1].zfill(2)
if len(sparts) == 3:
svalue = svalue + '/' + sparts[2].zfill(2)
if quantity in events[name]:
for i, ct in enumerate(events[name][quantity]):
# Only add dates if they have more information
if len(ct['value'].split('/')) > len(svalue.split('/')):
return
if is_number(svalue):
svalue = '%g' % Decimal(svalue)
if serror:
serror = '%g' % Decimal(serror)
if quantity in events[name]:
for i, ct in enumerate(events[name][quantity]):
if ct['value'] == svalue and sources:
if 'kind' in ct and skind and ct['kind'] != skind:
return
for source in sources.split(','):
if source not in events[name][quantity][i]['source'].split(','):
events[name][quantity][i]['source'] += ',' + source
if serror and 'error' not in events[name][quantity][i]:
events[name][quantity][i]['error'] = serror
return
if not sunit:
sunit = unit
quantaentry = OrderedDict()
quantaentry['value'] = svalue
if serror:
quantaentry['error'] = serror
if sources:
quantaentry['source'] = sources
if skind:
quantaentry['kind'] = skind
if sunit:
quantaentry['unit'] = sunit
if lowerlimit:
quantaentry['lowerlimit'] = lowerlimit
if upperlimit:
quantaentry['upperlimit'] = upperlimit
if extra:
quantaentry['extra'] = extra
if (forcereplacebetter or quantity in repbetterquantity) and quantity in events[name]:
newquantities = []
isworse = True
if quantity in ['discoverdate', 'maxdate']:
for ct in events[name][quantity]:
ctsplit = ct['value'].split('/')
svsplit = svalue.split('/')
if len(ctsplit) < len(svsplit):
isworse = False
continue
                elif len(ctsplit) == len(svsplit) and len(svsplit) == 3:
if max(2,get_sig_digits(ctsplit[-1].lstrip('0'))) < max(2,get_sig_digits(svsplit[-1].lstrip('0'))):
isworse = False
continue
newquantities.append(ct)
else:
newsig = get_sig_digits(svalue)
for ct in events[name][quantity]:
if 'error' in ct:
if serror:
if float(serror) < float(ct['error']):
isworse = False
continue
newquantities.append(ct)
else:
if serror:
isworse = False
continue
oldsig = get_sig_digits(ct['value'])
if oldsig >= newsig:
newquantities.append(ct)
if newsig >= oldsig:
isworse = False
if not isworse:
newquantities.append(quantaentry)
events[name][quantity] = newquantities
else:
events[name].setdefault(quantity,[]).append(quantaentry)
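# Fetch a URL with a local file cache: on any download failure the cached text is
# returned, in update mode False is returned when the remote MD5 matches the cached
# copy, and fresh downloads are written back to disk when `write` is True.
# Example (hypothetical paths):
#   csvtxt = load_cached_url('http://example.org/sne.csv', '../sne-external/sne.csv')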
def load_cached_url(url, filepath, timeout = 120, write = True):
filemd5 = ''
filetxt = ''
if not args.refresh and os.path.isfile(filepath):
with codecs.open(filepath, 'r', encoding='utf8') as f:
filetxt = f.read()
if args.update:
filemd5 = md5(filetxt.encode('utf-8')).hexdigest()
try:
session = requests.Session()
response = session.get(url, timeout = timeout)
        # A 307 (temporary redirect) usually means the remote resource is unavailable; fall back to the cache.
        if any([x.status_code == 307 for x in response.history]):
            raise Exception('Temporary redirect encountered.')
txt = response.text
newmd5 = md5(txt.encode('utf-8')).hexdigest()
#tprint(filemd5 + ":" + newmd5)
if args.update and newmd5 == filemd5:
tprint('Skipping file in "' + currenttask + '," local and remote copies identical [' + newmd5 + '].')
return False
except:
return filetxt
else:
if write:
with codecs.open(filepath, 'w', encoding='utf8') as f:
f.write(txt)
return txt
def make_date_string(year, month = '', day = ''):
if not year:
raise ValueError('At least the year must be specified when constructing date string')
datestring = str(year)
if month:
datestring = datestring + '/' + str(month).zfill(2)
if day:
datestring = datestring + '/' + str(day).zfill(2)
return datestring
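# Return (date, magnitude, band, source) for the brightest photometric point of an
# event, preferring bands listed in `maxbands`; a datetime is only returned when the
# time unit is MJD.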
def get_max_light(name):
if 'photometry' not in events[name]:
return (None, None, None, None)
eventphoto = [(x['u_time'], x['time'], Decimal(x['magnitude']), x['band'] if 'band' in x else '', x['source']) for x in events[name]['photometry'] if
('magnitude' in x and 'time' in x and 'u_time' in x and 'upperlimit' not in x)]
if not eventphoto:
return (None, None, None, None)
mlmag = None
for mb in maxbands:
leventphoto = [x for x in eventphoto if x[3] in mb]
if leventphoto:
mlmag = min([x[2] for x in leventphoto])
eventphoto = leventphoto
break
if not mlmag:
mlmag = min([x[2] for x in eventphoto])
mlindex = [x[2] for x in eventphoto].index(mlmag)
mlband = eventphoto[mlindex][3]
mlsource = eventphoto[mlindex][4]
if eventphoto[mlindex][0] == 'MJD':
mlmjd = float(eventphoto[mlindex][1])
return (astrotime(mlmjd, format='mjd').datetime, mlmag, mlband, mlsource)
else:
return (None, mlmag, mlband, mlsource)
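# Return (date, source) of the earliest MJD-dated photometric detection of an event,
# excluding upper limits.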
def get_first_light(name):
if 'photometry' not in events[name]:
return (None, None)
eventphoto = [(Decimal(x['time']) if isinstance(x['time'], str) else Decimal(min(float(y) for y in x['time'])),
x['source']) for x in events[name]['photometry'] if 'upperlimit' not in x
and 'time' in x and 'u_time' in x and x['u_time'] == 'MJD']
if not eventphoto:
return (None, None)
flmjd = min([x[0] for x in eventphoto])
flindex = [x[0] for x in eventphoto].index(flmjd)
flmjd = float(flmjd)
flsource = eventphoto[flindex][1]
return (astrotime(flmjd, format='mjd').datetime, flsource)
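# Fill in maxdate/maxappmag/maxband from the photometry, and discoverdate from the
# first detection or, failing that, the earliest spectrum, with the catalog attached
# as a secondary source.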
def set_first_max_light(name):
if 'maxappmag' not in events[name]:
(mldt, mlmag, mlband, mlsource) = get_max_light(name)
if mldt:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxdate', make_date_string(mldt.year, mldt.month, mldt.day), uniq_cdl([source,mlsource]))
if mlmag:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxappmag', pretty_num(mlmag), uniq_cdl([source,mlsource]))
if mlband:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxband', mlband, uniq_cdl([source,mlsource]))
if 'discoverdate' not in events[name] or max([len(x['value'].split('/')) for x in events[name]['discoverdate']]) < 3:
(fldt, flsource) = get_first_light(name)
if fldt:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', make_date_string(fldt.year, fldt.month, fldt.day), uniq_cdl([source,flsource]))
if 'discoverdate' not in events[name] and 'spectra' in events[name]:
minspecmjd = float("+inf")
for spectrum in events[name]['spectra']:
if 'time' in spectrum and 'u_time' in spectrum:
if spectrum['u_time'] == 'MJD':
mjd = float(spectrum['time'])
elif spectrum['u_time'] == 'JD':
mjd = float(jd_to_mjd(Decimal(spectrum['time'])))
else:
continue
if mjd < minspecmjd:
minspecmjd = mjd
minspecsource = spectrum['source']
if minspecmjd < float("+inf"):
fldt = astrotime(minspecmjd, format='mjd').datetime
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', make_date_string(fldt.year, fldt.month, fldt.day), 'D,' + minspecsource)
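# Choose the "best" redshift for an event, favoring values with more significant
# digits while never switching to a less-preferred measurement kind (ordering given
# by `prefkinds`).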
def get_best_redshift(name):
bestsig = -1
bestkind = 10
for z in events[name]['redshift']:
kind = prefkinds.index(z['kind'] if 'kind' in z else '')
sig = get_sig_digits(z['value'])
if sig > bestsig and kind <= bestkind:
bestz = z['value']
bestkind = kind
bestsig = sig
return (bestz, bestkind, bestsig)
def jd_to_mjd(jd):
return jd - Decimal(2400000.5)
def utf8(x):
return str(x, 'utf-8')
def convert_aq_output(row):
return OrderedDict([(x, str(row[x]) if is_number(row[x]) else row[x]) for x in row.colnames])
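# Rename events to a preferred designation: an 'SN'-prefixed IAU name if one exists
# among the aliases, otherwise a survey name consistent with the recorded discoverer,
# and avoid keeping 'PSN' designations when an alternative alias exists.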
def set_preferred_names():
if not len(events):
load_stubs()
for name in list(sorted(list(events.keys()))):
if name not in events:
continue
newname = ''
aliases = get_aliases(name)
if len(aliases) <= 1:
continue
if (name.startswith('SN') and ((is_number(name[2:6]) and not is_number(name[6:])) or
(is_number(name[2:5]) and not is_number(name[5:])))):
continue
for alias in aliases:
if (alias[:2] == 'SN' and ((is_number(alias[2:6]) and not is_number(alias[6:])) or
(is_number(alias[2:5]) and not is_number(alias[5:])))):
newname = alias
break
if not newname and 'discoverer' in events[name]:
discoverer = ','.join([x['value'].upper() for x in events[name]['discoverer']])
if 'ASAS' in discoverer:
for alias in aliases:
if 'ASASSN' in alias.upper():
newname = alias
break
if not newname and 'OGLE' in discoverer:
for alias in aliases:
if 'OGLE' in alias.upper():
newname = alias
break
if not newname and 'CRTS' in discoverer:
for alias in aliases:
if True in [x in alias.upper() for x in ['CSS', 'MLS', 'SSS', 'SNHUNT']]:
newname = alias
break
if not newname and 'PS1' in discoverer:
for alias in aliases:
if 'PS1' in alias.upper():
newname = alias
break
if not newname and 'PTF' in discoverer:
for alias in aliases:
if 'PTF' in alias.upper():
newname = alias
break
if not newname and 'GAIA' in discoverer:
for alias in aliases:
if 'GAIA' in alias.upper():
newname = alias
break
if not newname:
for alias in aliases:
# Always prefer another alias over PSN
if name.startswith('PSN'):
newname = alias
break
if newname and name != newname:
# Make sure new name doesn't already exist
if load_event_from_file(newname):
continue
if load_event_from_file(name, delete = True):
tprint('Changing event name (' + name + ') to preferred name (' + newname + ').')
events[newname] = events[name]
events[newname]['name'] = newname
del(events[name])
journal_events()
# Merge and remove duplicate events
def merge_duplicates():
if not len(events):
load_stubs()
currenttask = 'Merging duplicate events'
keys = list(sorted(list(events.keys())))
for n1, name1 in enumerate(tq(keys[:], currenttask)):
if name1 not in events:
continue
allnames1 = get_aliases(name1) + (['AT' + name1[2:]] if (name1.startswith('SN') and is_number(name1[2:6])) else [])
for name2 in keys[n1+1:]:
if name2 not in events or name1 == name2:
continue
allnames2 = get_aliases(name2) + (['AT' + name2[2:]] if (name2.startswith('SN') and is_number(name2[2:6])) else [])
if any(i in allnames1 for i in allnames2):
tprint('Found single event with multiple entries (' + name1 + ' and ' + name2 + '), merging.')
load1 = load_event_from_file(name1, delete = True)
load2 = load_event_from_file(name2, delete = True)
if load1 and load2:
priority1 = 0
priority2 = 0
for an in allnames1:
if len(an) >= 2 and an.startswith(('SN', 'AT')):
priority1 = priority1 + 1
for an in allnames2:
if len(an) >= 2 and an.startswith(('SN', 'AT')):
priority2 = priority2 + 1
if priority1 > priority2:
copy_to_event(name2, name1)
keys.append(name1)
del(events[name2])
else:
copy_to_event(name1, name2)
keys.append(name2)
del(events[name1])
else:
print ('Duplicate already deleted')
journal_events()
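# Derive additional quantities for every event (discovery dates and coordinates
# parsed from names, Milky Way extinction, redshift/velocity/distance/absolute
# magnitude conversions), sort their fields, and sanitize source bibcodes via ADS.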
def derive_and_sanitize():
biberrordict = {
"2012Sci..337..942D":"2012Sci...337..942D",
"2012MNRAS.420.1135":"2012MNRAS.420.1135S",
"2014MNRAS.438,368":"2014MNRAS.438..368T",
"2006ApJ...636...400Q":"2006ApJ...636..400Q",
"0609268":"2007AJ....133...58K",
"2004MNRAS.tmp..131P":"2004MNRAS.352..457P",
"2013MNRAS.tmp.1499F":"2013MNRAS.433.1312F",
"1991MNRAS.247P.410B":"1991A&A...247..410B",
"2011Sci.333..856S":"2011Sci...333..856S"
}
# Calculate some columns based on imported data, sanitize some fields
for name in events:
aliases = get_aliases(name, includename = False)
if name not in aliases:
if 'sources' in events[name]:
add_quantity(name, 'alias', name, '1')
else:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'alias', name, source)
if (name.startswith('SN') and is_number(name[2:6]) and 'discoverdate' in events[name] and
int(events[name]['discoverdate'][0]['value'].split('/')[0]) >= 2016 and not any(['AT' in x for x in aliases])):
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'alias', 'AT' + name[2:], source)
events[name]['alias'] = list(sorted(events[name]['alias'], key=lambda key: alias_priority(name, key)))
aliases = get_aliases(name)
set_first_max_light(name)
if 'claimedtype' in events[name]:
events[name]['claimedtype'] = list(sorted(events[name]['claimedtype'], key=lambda key: ct_priority(name, key)))
if 'discoverdate' not in events[name]:
prefixes = ['MLS', 'SSS', 'CSS']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:2]):
discoverdate = '/'.join(['20' + alias.replace(prefix, '')[:2],
alias.replace(prefix, '')[2:4], alias.replace(prefix, '')[4:6]])
if args.verbose:
tprint ('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:2]):
discoverdate = '20' + alias.replace(prefix, '')[:2]
if args.verbose:
tprint ('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['SNF']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:4]):
discoverdate = '/'.join([alias.replace(prefix, '')[:4],
alias.replace(prefix, '')[4:6], alias.replace(prefix, '')[6:8]])
if args.verbose:
tprint ('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['AT', 'SN']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:4]):
discoverdate = alias.replace(prefix, '')[:4]
if args.verbose:
tprint ('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'ra' not in events[name] or 'dec' not in events[name]:
prefixes = ['PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:6]):
noprefix = alias.split(':')[-1].replace(prefix, '').replace('.', '')
decsign = '+' if '+' in noprefix else '-'
noprefix = noprefix.replace('+','|').replace('-','|')
nops = noprefix.split('|')
if len(nops) < 2:
continue
rastr = nops[0]
decstr = nops[1]
ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + ('.' + rastr[6:] if len(rastr) > 6 else '')
dec = decsign + ':'.join([decstr[:2], decstr[2:4], decstr[4:6]]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
if args.verbose:
tprint ('Added ra/dec from name: ' + ra + ' ' + dec)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'ra', ra, source)
add_quantity(name, 'dec', dec, source)
break
if 'ra' in events[name]:
break
if ('ra' in events[name] and 'dec' in events[name] and
(not 'host' in events[name] or not any([x['value'] == 'Milky Way' for x in events[name]['host']]))):
if name not in extinctionsdict:
try:
result = IrsaDust.get_query_table(events[name]['ra'][0]['value'] + " " + events[name]['dec'][0]['value'], section = 'ebv')
except:
warnings.warn("Coordinate lookup for " + name + " failed in IRSA.")
else:
ebv = result['ext SandF mean'][0]
ebverr = result['ext SandF std'][0]
extinctionsdict[name] = [ebv, ebverr]
if name in extinctionsdict:
source = add_source(name, bibcode = '2011ApJ...737..103S')
add_quantity(name, 'ebv', str(extinctionsdict[name][0]), source, error = str(extinctionsdict[name][1]))
if 'claimedtype' in events[name]:
events[name]['claimedtype'][:] = [ct for ct in events[name]['claimedtype'] if (ct['value'] != '?' and ct['value'] != '-')]
if 'claimedtype' not in events[name] and name.startswith('AT'):
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'claimedtype', 'Candidate', source)
if 'redshift' not in events[name] and 'velocity' in events[name]:
# Find the "best" velocity to use for this
bestsig = 0
for hv in events[name]['velocity']:
sig = get_sig_digits(hv['value'])
if sig > bestsig:
besthv = hv['value']
bestsig = sig
if bestsig > 0 and is_number(besthv):
voc = float(besthv)*1.e5/clight
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'redshift', pretty_num(sqrt((1. + voc)/(1. - voc)) - 1., sig = bestsig), source, kind = 'heliocentric')
if 'redshift' not in events[name] and has_task('nedd') and 'host' in events[name]:
reference = "NED-D"
refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
for host in events[name]['host']:
if host['value'] in nedddict:
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
meddist = statistics.median(nedddict[host['value']])
redshift = pretty_num(z_at_value(cosmo.comoving_distance, float(meddist) * un.Mpc), sig = get_sig_digits(str(meddist)))
add_quantity(name, 'redshift', redshift, secondarysource, kind = 'host')
if 'maxabsmag' not in events[name] and 'maxappmag' in events[name] and 'lumdist' in events[name]:
# Find the "best" distance to use for this
bestsig = 0
for ld in events[name]['lumdist']:
sig = get_sig_digits(ld['value'])
if sig > bestsig:
bestld = ld['value']
bestsig = sig
if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
5.0*(log10(float(bestld)*1.0e6) - 1.0), sig = bestsig), source)
if 'redshift' in events[name]:
# Find the "best" redshift to use for this
(bestz, bestkind, bestsig) = get_best_redshift(name)
if bestsig > 0:
bestz = float(bestz)
if 'velocity' not in events[name]:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'velocity', pretty_num(clight/km*((bestz + 1.)**2. - 1.)/
((bestz + 1.)**2. + 1.), sig = bestsig), source, kind = prefkinds[bestkind])
if bestz > 0.:
if 'lumdist' not in events[name]:
dl = cosmo.luminosity_distance(bestz)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'lumdist', pretty_num(dl.value, sig = bestsig), source, kind = prefkinds[bestkind])
if 'maxabsmag' not in events[name] and 'maxappmag' in events[name]:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
5.0*(log10(dl.to('pc').value) - 1.0), sig = bestsig), source)
if 'comovingdist' not in events[name]:
dl = cosmo.comoving_distance(bestz)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'comovingdist', pretty_num(dl.value, sig = bestsig), source)
if 'photometry' in events[name]:
events[name]['photometry'].sort(key=lambda x: ((float(x['time']) if isinstance(x['time'], str) else
min([float(y) for y in x['time']])) if 'time' in x else 0.0,
x['band'] if 'band' in x else '', float(x['magnitude']) if 'magnitude' in x else ''))
if 'spectra' in events[name] and list(filter(None, ['time' in x for x in events[name]['spectra']])):
events[name]['spectra'].sort(key=lambda x: (float(x['time']) if 'time' in x else 0.0))
if 'sources' in events[name]:
for source in events[name]['sources']:
if 'bibcode' in source:
#First sanitize the bibcode
if len(source['bibcode']) != 19:
source['bibcode'] = urllib.parse.unquote(unescape(source['bibcode'])).replace('A.A.', 'A&A')
if source['bibcode'] in biberrordict:
source['bibcode'] = biberrordict[source['bibcode']]
if source['bibcode'] not in bibauthordict:
bibcode = source['bibcode']
adsquery = ('http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?db_key=ALL&version=1&bibcode=' +
urllib.parse.quote(bibcode) + '&data_type=Custom&format=%253m%20%25(y)')
response = urllib.request.urlopen(adsquery)
html = response.read().decode('utf-8')
hsplit = html.split("\n")
if len(hsplit) > 5:
bibcodeauthor = hsplit[5]
else:
bibcodeauthor = ''
if not bibcodeauthor:
warnings.warn("Bibcode didn't return authors, not converting this bibcode.")
bibauthordict[bibcode] = unescape(bibcodeauthor).strip()
for source in events[name]['sources']:
if 'bibcode' in source and source['bibcode'] in bibauthordict and bibauthordict[source['bibcode']]:
source['reference'] = bibauthordict[source['bibcode']]
if 'name' not in source and source['bibcode']:
source['name'] = source['bibcode']
if 'redshift' in events[name]:
events[name]['redshift'] = list(sorted(events[name]['redshift'], key=lambda key: frame_priority(key)))
if 'velocity' in events[name]:
events[name]['velocity'] = list(sorted(events[name]['velocity'], key=lambda key: frame_priority(key)))
if 'claimedtype' in events[name]:
events[name]['claimedtype'] = list(sorted(events[name]['claimedtype'], key=lambda key: ct_priority(name, key)))
events[name] = OrderedDict(sorted(events[name].items(), key=lambda key: event_attr_priority(key[0])))
def delete_old_event_files():
# Delete all old event JSON files
files = repo_file_list()
for f in files:
os.remove(f)
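# Write every loaded event to a JSON file in the repository matching its discovery
# year, optionally gzipping oversized files and burying likely non-supernovae in the
# boneyard repository.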
def write_all_events(empty = False, gz = False, bury = False):
# Write it all out!
for name in events:
if 'stub' in events[name]:
if not empty:
continue
else:
del(events[name]['stub'])
if args.verbose and not args.travis:
tprint('Writing ' + name)
filename = get_event_filename(name)
outdir = '../'
if 'discoverdate' in events[name]:
for r, year in enumerate(repoyears):
if int(events[name]['discoverdate'][0]['value'].split('/')[0]) <= year:
outdir += repofolders[r]
break
else:
outdir += str(repofolders[0])
# Delete non-SN events here without IAU designations (those with only banned types)
if bury:
buryevent = False
nonsneprefixes = ('PNVJ', 'PNV J', 'OGLE-2013-NOVA')
if name.startswith(nonsneprefixes):
tprint('Burying ' + name + ', non-SNe prefix.')
continue
if 'claimedtype' in events[name] and not (name.startswith('SN') and is_number(name[2:6])):
for ct in events[name]['claimedtype']:
if ct['value'].upper() not in nonsnetypes and ct['value'].upper() != 'CANDIDATE':
buryevent = False
break
if ct['value'].upper() in nonsnetypes:
buryevent = True
if buryevent:
tprint('Burying ' + name + ' (' + ct['value'] + ').')
outdir = '../sne-boneyard'
jsonstring = json.dumps({name:events[name]}, indent='\t', separators=(',', ':'), ensure_ascii=False)
path = outdir + '/' + filename + '.json'
with codecs.open(path, 'w', encoding='utf8') as f:
f.write(jsonstring)
if gz:
if os.path.getsize(path) > 90000000:
if not args.travis:
tprint('Compressing ' + name)
with open(path, 'rb') as f_in, gzip.open(path + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(path)
os.system('cd ' + outdir + '; git rm ' + filename + '.json; git add -f ' + filename + '.json.gz; cd ' + '../scripts')
def null_field(obj, field):
return obj[field] if field in obj else ''
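# Copy all quantities, photometry, and spectra from one event into another,
# re-adding the sources on the destination and remapping source aliases accordingly.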
def copy_to_event(fromname, destname):
tprint('Copying ' + fromname + ' to event ' + destname)
newsourcealiases = {}
keys = list(sorted(events[fromname].keys(), key=lambda key: event_attr_priority(key)))
if 'sources' in events[fromname]:
for source in events[fromname]['sources']:
newsourcealiases[source['alias']] = (add_source(destname,
bibcode = source['bibcode'] if 'bibcode' in source else '',
refname = source['name'] if 'name' in source else '',
reference = source['reference'] if 'reference' in source else '',
url = source['url'] if 'url' in source else ''))
for key in keys:
if key not in ['name', 'sources']:
for item in events[fromname][key]:
isd = False
sources = []
if 'source' not in item:
                    raise ValueError("Item has no source!")
for sid in item['source'].split(','):
if sid == 'D':
sources.append('D')
elif sid in newsourcealiases:
sources.append(newsourcealiases[sid])
else:
                        raise ValueError("Couldn't find source alias!")
sources = uniq_cdl(sources)
if key == 'photometry':
add_photometry(destname, u_time = null_field(item, "u_time"), time = null_field(item, "time"),
e_time = null_field(item, "e_time"), telescope = null_field(item, "telescope"),
instrument = null_field(item, "instrument"), band = null_field(item, "band"),
magnitude = null_field(item, "magnitude"), e_magnitude = null_field(item, "e_magnitude"),
source = sources, upperlimit = null_field(item, "upperlimit"), system = null_field(item, "system"),
observatory = null_field(item, "observatory"), observer = null_field(item, "observer"),
host = null_field(item, "host"), survey = null_field(item, "survey"))
elif key == 'spectra':
add_spectrum(destname, null_field(item, "waveunit"), null_field(item, "fluxunit"), data = null_field(item, "data"),
u_time = null_field(item, "u_time"), time = null_field(item, "time"),
instrument = null_field(item, "instrument"), deredshifted = null_field(item, "deredshifted"),
dereddened = null_field(item, "dereddened"), errorunit = null_field(item, "errorunit"),
source = sources, snr = null_field(item, "snr"),
telescope = null_field(item, "telescope"), observer = null_field(item, "observer"),
reducer = null_field(item, "reducer"), filename = null_field(item, "filename"),
observatory = null_field(item, "observatory"))
elif key == 'errors':
add_quantity(destname, key, item['value'], sources,
kind = null_field(item, "kind"), extra = null_field(item, "extra"))
else:
add_quantity(destname, key, item['value'], sources, error = null_field(item, "error"),
unit = null_field(item, "unit"), kind = null_field(item, "kind"))
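# Load an event from disk by name and/or explicit path; optionally clean legacy
# fields, delete the on-disk copy after loading, or append to an event file that
# already exists under the same name.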
def load_event_from_file(name = '', location = '', clean = False, delete = True, append = False):
if not name and not location:
raise ValueError('Either event name or location must be specified to load event')
path = ''
namepath = ''
if location:
path = location
if name:
indir = '../'
for rep in repofolders:
filename = get_event_filename(name)
newpath = indir + rep + '/' + filename + '.json'
if os.path.isfile(newpath):
namepath = newpath
if not path and not namepath:
return False
else:
newevent = ''
newevent2 = ''
if path or namepath:
if name in events:
del events[name]
if path and namepath:
with open(path, 'r') as f, open(namepath, 'r') as nf:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
newevent2 = json.loads(nf.read(), object_pairs_hook=OrderedDict)
elif path:
with open(path, 'r') as f:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
elif namepath:
with open(namepath, 'r') as f:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
if newevent:
if clean:
newevent = clean_event(newevent)
name = next(reversed(newevent))
if append:
indir = '../'
for rep in repofolders:
filename = get_event_filename(name)
newpath = indir + rep + '/' + filename + '.json'
if os.path.isfile(newpath):
namepath = newpath
if namepath:
with open(namepath, 'r') as f:
newevent2 = json.loads(f.read(), object_pairs_hook=OrderedDict)
namename = next(reversed(newevent2))
if newevent2:
# Needs to be fixed
newevent = OrderedDict([['temp',newevent[name]]])
copy_to_event('temp', namename)
else:
events.update(newevent)
if args.verbose and not args.travis:
tprint('Loaded ' + name)
if 'writeevents' in tasks and delete and namepath:
os.remove(namepath)
return name
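# Normalize a freshly loaded event dictionary: rebuild its source list, convert
# legacy alias/distinctfrom/errors structures, convert JD photometry times to MJD,
# and make sure every quantity carries a source.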
def clean_event(dirtyevent):
bibcodes = []
name = next(reversed(dirtyevent))
# This is very hacky and is only necessary because we don't have a proper 'Event' object yet.
events['temp'] = dirtyevent[name]
if 'name' not in events['temp']:
events['temp']['name'] = name
if 'sources' in events['temp']:
# Rebuild the sources
newsources = []
oldsources = events['temp']['sources']
del(events['temp']['sources'])
for s, source in enumerate(oldsources):
if 'bibcode' in source:
bibcodes.append(source['bibcode'])
add_source('temp', bibcode = source['bibcode'])
else:
add_source('temp', refname = source['name'], url = source['url'])
# Clean some legacy fields
if 'aliases' in events['temp'] and isinstance(events['temp']['aliases'], list):
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for alias in events['temp']['aliases']:
add_quantity('temp', 'alias', alias, source)
del(events['temp']['aliases'])
if ('distinctfrom' in events['temp'] and isinstance(events['temp']['distinctfrom'], list) and
isinstance(events['temp']['distinctfrom'][0], str)):
distinctfroms = [x for x in events['temp']['distinctfrom']]
del(events['temp']['distinctfrom'])
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for df in distinctfroms:
add_quantity('temp', 'distinctfrom', df, source)
if ('errors' in events['temp'] and isinstance(events['temp']['errors'], list) and
'sourcekind' in events['temp']['errors'][0]):
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for err in events['temp']['errors']:
add_quantity('temp', 'error', err['quantity'], source, kind = err['sourcekind'], extra = err['id'])
del(events['temp']['errors'])
if not bibcodes:
add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
bibcodes = [oscbibcode]
for key in list(events['temp'].keys()):
if key in ['name', 'sources']:
pass
elif key == 'photometry':
for p, photo in enumerate(events['temp']['photometry']):
if photo['u_time'] == 'JD':
events['temp']['photometry'][p]['u_time'] = 'MJD'
events['temp']['photometry'][p]['time'] = str(jd_to_mjd(Decimal(photo['time'])))
if bibcodes and 'source' not in photo:
source = add_source('temp', bibcode = bibcodes[0])
events['temp']['photometry'][p]['source'] = source
else:
for qi, quantity in enumerate(events['temp'][key]):
if bibcodes and 'source' not in quantity:
source = add_source('temp', bibcode = bibcodes[0])
events['temp'][key][qi]['source'] = source
cleanevent = events['temp']
del (events['temp'])
return OrderedDict([[name,cleanevent]])
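# Task helpers: has_task checks whether a task is enabled for this run, archived_task
# whether its archived data should be used, and do_task whether the current loop
# iteration should execute it (also setting the progress description).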
def has_task(task):
return task in tasks and (not args.update or tasks[task]['update'])
def archived_task(task):
if 'archived' in tasks[task] and args.archived:
return True
if ('archived' in tasks[task] and tasks[task]['archived'] and
task not in args.refreshlist.split(',') and not args.fullrefresh):
return True
return False
def do_task(checktask, task, quiet = False):
global currenttask
dotask = has_task(task) and checktask == task
if dotask and not quiet:
currenttask = (tasks[task]['nicename'] if tasks[task]['nicename'] else task).replace('%pre', 'Updating' if args.update else 'Loading')
return dotask
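# Journaling helpers: write the in-memory events to disk (when the 'writeevents' task
# is enabled) and replace them with lightweight name/alias stubs between tasks;
# load_stubs re-creates those stubs from the JSON repositories.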
def journal_events(clear = True):
if 'writeevents' in tasks:
write_all_events()
if clear:
clear_events()
def clear_events():
global events
events = OrderedDict((k, OrderedDict([['name', events[k]['name']]] + ([['alias', events[k]['alias']]] if 'alias' in events[k] else []) + [['stub', True]])) for k in events)
def load_stubs():
global currenttask
currenttask = 'Loading event stubs'
files = repo_file_list()
#try:
# namepath = '../names.min.json'
# with open(namepath, 'r') as f:
# names = json.loads(f.read(), object_pairs_hook=OrderedDict)
# for fi in tq(files):
# name = os.path.basename(os.path.splitext(fi)[0])
# if name not in names:
# name = name.replace("_", "/")
# events[name] = OrderedDict(([['name', name], ['alias', [OrderedDict(([['value', x]])) for x in names[name]]], ['stub', True]]))
#except:
# events = OrderedDict()
for fi in tq(files, currenttask):
fname = fi
if '.gz' in fi:
fname = fi.replace('.gz', '')
with gzip.open(fi, 'rb') as f_in, open(fname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(fi)
name = os.path.basename(os.path.splitext(fname)[0]).replace('.json', '')
name = add_event(name, delete = False, loadifempty = False)
events[name] = OrderedDict(([['name', events[name]['name']]] + ([['alias', events[name]['alias']]] if 'alias' in events[name] else []) + [['stub', True]]))
path = '../atels.json'
if os.path.isfile(path):
with open(path, 'r') as f:
atelsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
atelsdict = OrderedDict()
path = '../cbets.json'
if os.path.isfile(path):
with open(path, 'r') as f:
cbetsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
cbetsdict = OrderedDict()
path = '../iaucs.json'
if os.path.isfile(path):
with open(path, 'r') as f:
iaucsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
iaucsdict = OrderedDict()
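# Main import loop: each enabled task below loads events from one data source (local
# files, Vizier tables, external catalogs), adds its quantities and photometry, and
# then journals the result.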
for task in tasks:
if do_task(task, 'deleteoldevents'):
currenttask = 'Deleting old events'
delete_old_event_files()
# Import data provided directly to OSC
if do_task(task, 'internal'):
for datafile in tq(sorted(glob("../sne-internal/*.json"), key=lambda s: s.lower()), currenttask):
if args.update:
if not load_event_from_file(location = datafile, clean = True, delete = False, append = True):
raise IOError('Failed to find specified file.')
else:
if not load_event_from_file(location = datafile, clean = True, delete = False):
raise IOError('Failed to find specified file.')
journal_events()
if do_task(task, 'radio'):
for datafile in tq(sorted(glob("../sne-external-radio/*.txt"), key=lambda s: s.lower()), currenttask):
name = add_event(os.path.basename(datafile).split('.')[0])
radiosourcedict = OrderedDict()
with open(datafile, 'r') as f:
for li, line in enumerate([x.strip() for x in f.read().splitlines()]):
if line.startswith('(') and li <= len(radiosourcedict):
radiosourcedict[line.split()[0]] = add_source(name, bibcode = line.split()[-1])
elif li in [x + len(radiosourcedict) for x in range(3)]:
continue
else:
cols = list(filter(None, line.split()))
source = radiosourcedict[cols[6]]
add_photometry(name, time = cols[0], frequency = cols[2], u_frequency = 'GHz', fluxdensity = cols[3],
e_fluxdensity = cols[4], u_fluxdensity = 'µJy', instrument = cols[5], source = source)
add_quantity(name, 'alias', name, source)
journal_events()
if do_task(task, 'xray'):
for datafile in tq(sorted(glob("../sne-external-xray/*.txt"), key=lambda s: s.lower()), currenttask):
name = add_event(os.path.basename(datafile).split('.')[0])
with open(datafile, 'r') as f:
for li, line in enumerate(f.read().splitlines()):
if li == 0:
source = add_source(name, bibcode = line.split()[-1])
elif li in [1,2,3]:
continue
else:
cols = list(filter(None, line.split()))
add_photometry(name, time = cols[:2],
energy = cols[2:4], u_energy = 'keV', counts = cols[4], flux = cols[6],
unabsorbedflux = cols[8], u_flux = 'ergs/s/cm^2',
photonindex = cols[15], instrument = cols[17], nhmw = cols[11],
upperlimit = (float(cols[5]) < 0), source = source)
add_quantity(name, 'alias', name, source)
journal_events()
#if do_task(task, 'simbad'):
# Simbad.list_votable_fields()
# customSimbad = Simbad()
# customSimbad.add_votable_fields('otype', 'id(opt)')
# result = customSimbad.query_object('SN 20[0-9][0-9]*', wildcard=True)
# for r, row in enumerate(result):
# if row['OTYPE'].decode() != "SN":
# continue
# name = row["MAIN_ID"].decode()
# aliases = Simbad.query_objectids(name)
# print(aliases)
# if name[:3] == 'SN ':
# name = 'SN' + name[3:]
# if name[:2] == 'SN' and is_number(name[2:]):
# name = name + 'A'
# name = add_event(name)
# journal_events()
# Import primary data sources from Vizier
if do_task(task, 'vizier'):
Vizier.ROW_LIMIT = -1
# 2012ApJS..200...12H
result = Vizier.get_catalogs("J/ApJS/200/12/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['SN']
if is_number(name[:4]):
name = 'SN' + name
name = add_event(name)
source = add_source(name, bibcode = "2012ApJS..200...12H")
add_quantity(name, 'alias', name, source)
if '[' not in row['Gal']:
add_quantity(name, 'host', row['Gal'].replace('_', ' '), source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric')
add_quantity(name, 'redshift', str(row['zCMB']), source, kind = 'cmb')
add_quantity(name, 'ebv', str(row['E_B-V_']), source, error = str(row['e_E_B-V_']) if row['e_E_B-V_'] else '')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
# 2012ApJ...746...85S
result = Vizier.get_catalogs("J/ApJ/746/85/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['Name'].replace('SCP', 'SCP-')
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...746...85S")
add_quantity(name, 'alias', name, source)
if row['f_Name']:
add_quantity(name, 'claimedtype', 'Ia', source)
if row['z']:
add_quantity(name, 'redshift', str(row['z']), source, kind = 'spectroscopic')
else:
add_quantity(name, 'redshift', str(row['zCl']), source, kind = 'cluster')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
result = Vizier.get_catalogs("J/ApJ/746/85/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['Name'].replace('SCP', 'SCP-')
flux = Decimal(float(row['Flux']))
if flux <= 0.0:
continue
err = Decimal(float(row['e_Flux']))
zp = Decimal(float(row['Zero']))
sig = get_sig_digits(str(row['Flux']))+1
magnitude = pretty_num(zp-Decimal(2.5)*(flux.log10()), sig = sig)
e_magnitude = pretty_num(Decimal(2.5)*(Decimal(1.0) + err/flux).log10(), sig = sig)
if float(e_magnitude) > 5.0:
continue
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...746...85S")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(row['MJD']), band = row['Filter'], instrument = row['Inst'],
magnitude = magnitude, e_magnitude = e_magnitude, source = source)
# 2004ApJ...602..571B
result = Vizier.get_catalogs("J/ApJ/602/571/table8")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = 'SN'+row['SN']
flux = Decimal(float(row['Flux']))
if flux <= 0.0:
continue
err = Decimal(float(row['e_Flux']))
sig = get_sig_digits(str(row['Flux']))+1
magnitude = pretty_num(Decimal(25.0)-Decimal(2.5)*(flux.log10()), sig = sig)
e_magnitude = pretty_num(Decimal(2.5)*(Decimal(1.0) + err/flux).log10(), sig = sig)
if float(e_magnitude) > 5.0:
continue
name = add_event(name)
source = add_source(name, bibcode = "2004ApJ...602..571B")
add_quantity(name, 'alias', name, source)
band = row['Filt']
system = ''
telescope = ''
if band in ['R', 'I']:
system = 'Cousins'
if band == 'Z':
telescope = 'Subaru'
add_photometry(name, time = str(row['MJD']), band = band, system = system, telescope = telescope,
magnitude = magnitude, e_magnitude = e_magnitude, source = source)
# 2014MNRAS.444.3258M
result = Vizier.get_catalogs("J/MNRAS/444/3258/SNe")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['SN']
if name == oldname:
continue
oldname = name
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.444.3258M')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric', error = str(row['e_z']))
add_quantity(name, 'ra', str(row['_RA']), source, unit = 'floatdegrees')
add_quantity(name, 'dec', str(row['_DE']), source, unit = 'floatdegrees')
journal_events()
# 2014MNRAS.438.1391P
result = Vizier.get_catalogs("J/MNRAS/438/1391/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.438.1391P')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['zh']), source, kind = 'heliocentric')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2012ApJ...749...18B
result = Vizier.get_catalogs("J/ApJ/749/18/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['Name'].replace(' ','')
name = add_event(name)
source = add_source(name, bibcode = '2012ApJ...749...18B')
add_quantity(name, 'alias', name, source)
mjd = str(astrotime(2450000.+row['JD'], format='jd').mjd)
band = row['Filt']
magnitude = str(row['mag'])
e_magnitude = str(row['e_mag'])
e_magnitude = '' if e_magnitude == '--' else e_magnitude
upperlimit = True if row['l_mag'] == '>' else False
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, instrument = 'UVOT',
source = source, upperlimit = upperlimit, telescope = 'Swift', system = 'Swift')
journal_events()
# 2010A&A...523A...7G
result = Vizier.get_catalogs("J/A+A/523/A7/table9")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SNLS-' + row['SNLS']
name = add_event(name)
source = add_source(name, bibcode = '2010A&A...523A...7G')
add_quantity(name, 'alias', name, source)
astrot = astrotime(2450000.+row['Date1'], format='jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric')
add_quantity(name, 'claimedtype', (row['Type'].replace('*', '?').replace('SN','')
.replace('(pec)',' P').replace('Ia? P?', 'Ia P?')), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2004A&A...415..863G
result = Vizier.get_catalogs("J/A+A/415/863/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2004A&A...415..863G')
add_quantity(name, 'alias', name, source)
datesplit = row['Date'].split('-')
add_quantity(name, 'discoverdate', make_date_string(datesplit[0], datesplit[1].lstrip('0'), datesplit[2].lstrip('0')), source)
add_quantity(name, 'host', 'Abell ' + str(row['Abell']), source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
if row['zSN']:
add_quantity(name, 'redshift', str(row['zSN']), source, kind = 'spectroscopic')
else:
add_quantity(name, 'redshift', str(row['zCl']), source, kind = 'cluster')
journal_events()
# 2008AJ....136.2306H
result = Vizier.get_catalogs("J/AJ/136/2306/sources")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SDSS-II ' + str(row['SNID'])
name = add_event(name)
source = add_source(name, bibcode = '2008AJ....136.2306H')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['SpType'].replace('SN.', '').strip(':'), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
# 2010ApJ...708..661D
result = Vizier.get_catalogs("J/ApJ/708/661/sn")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
if not name:
name = 'SDSS-II ' + str(row['SDSS-II'])
else:
name = 'SN' + name
name = add_event(name)
source = add_source(name, bibcode = '2010ApJ...708..661D')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'SDSS-II ' + str(row['SDSS-II']), source)
add_quantity(name, 'claimedtype', 'II P', source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
result = Vizier.get_catalogs("J/ApJ/708/661/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
if row['f_SN'] == 'a':
name = 'SDSS-II ' + str(row['SN'])
else:
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2010ApJ...708..661D')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['z']), source, error = str(row['e_z']))
journal_events()
# 2014ApJ...795...44R
result = Vizier.get_catalogs("J/ApJ/795/44/ps1_snIa")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...795...44R')
add_quantity(name, 'alias', name, source)
astrot = astrotime(row['tdisc'], format='mjd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'redshift', str(row['z']), source, error = str(row['e_z']), kind = 'heliocentric')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/ApJ/795/44/table6")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...795...44R')
add_quantity(name, 'alias', name, source)
if row['mag'] != '--':
add_photometry(name, time = str(row['MJD']), band = row['Filt'], magnitude = str(row['mag']),
e_magnitude = str(row['e_mag']), source = source, system = 'AB', telescope = 'PS1', instrument = 'PS1')
journal_events()
# 1990A&AS...82..145C
result = Vizier.get_catalogs("II/189/mag")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
with open('../sne-external/II_189_refs.csv') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
ii189bibdict = {}
ii189refdict = {}
for r, row in enumerate(tsvin):
if row[0] != '0':
ii189bibdict[r+1] = row[1]
else:
ii189refdict[r+1] = row[2]
for row in tq(table, currenttask):
if row['band'][0] == '(':
continue
name = 'SN' + row['SN']
name = add_event(name)
source = ''
secsource = add_source(name, bibcode = '1990A&AS...82..145C', secondary = True)
mjd = str(jd_to_mjd(Decimal(row['JD'])))
mag = str(row['m'])
band = row['band'].strip("'")
if row['r_m'] in ii189bibdict:
source = add_source(name, bibcode = ii189bibdict[row['r_m']])
else:
source = add_source(name, refname = ii189refdict[row['r_m']])
add_quantity(name, 'alias', name, source)
add_photometry(name, time = mjd, band = band, magnitude = mag, source = uniq_cdl([source,secsource]))
journal_events()
# 2014yCat.7272....0G
result = Vizier.get_catalogs("VII/272/snrs")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = ''
if row["Names"]:
names = row["Names"].split(',')
for nam in names:
if nam.strip()[:2] == 'SN':
name = nam.strip()
if is_number(name[2:]):
name = name + 'A'
if not name:
for nam in names:
if nam.strip('()') == nam:
name = nam.strip()
break
if not name:
name = row["SNR"].strip()
name = add_event(name)
source = (add_source(name, bibcode = '2014BASI...42...47G') + ',' +
add_source(name, refname = 'Galactic SNRs', url = 'https://www.mrao.cam.ac.uk/surveys/snrs/snrs.data.html'))
add_quantity(name, 'alias', name, source)
add_quantity(name, "alias", row["SNR"].strip(), source)
add_quantity(name, "alias", 'MWSNR '+row["SNR"].strip('G '), source)
if row["Names"]:
names = row["Names"].split(',')
for nam in names:
add_quantity(name, "alias", nam.replace('Vela (XYZ)', 'Vela').strip('()').strip(), source)
if nam.strip()[:2] == 'SN':
add_quantity(name, 'discoverdate', nam.strip()[2:], source)
add_quantity(name, 'host', 'Milky Way', source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2014MNRAS.442..844F
result = Vizier.get_catalogs("J/MNRAS/442/844/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.442..844F')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['zhost']), source, kind = 'host')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
journal_events()
result = Vizier.get_catalogs("J/MNRAS/442/844/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + str(row['SN'])
name = add_event(name)
source = add_source(name, bibcode = "2014MNRAS.442..844F")
add_quantity(name, 'alias', name, source)
for band in ['B', 'V', 'R', 'I']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row['MJD'], band = band, magnitude = row[bandtag],
e_magnitude = row['e_' + bandtag], source = source, telescope = 'KAIT', instrument = 'KAIT')
journal_events()
# 2012MNRAS.425.1789S
result = Vizier.get_catalogs("J/MNRAS/425/1789/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = ''.join(row['SimbadName'].split(' '))
name = add_event(name)
source = add_source(name, bibcode = '2012MNRAS.425.1789S')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'SN' + row['SN'], source)
add_quantity(name, 'host', row['Gal'], source)
if is_number(row['cz']):
add_quantity(name, 'redshift', str(round_sig(float(row['cz'])*km/clight, sig = get_sig_digits(str(row['cz'])))), source, kind = 'heliocentric')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
journal_events()
# 2015ApJS..219...13W
result = Vizier.get_catalogs("J/ApJS/219/13/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = u'LSQ' + str(row['LSQ'])
name = add_event(name)
source = add_source(name, bibcode = "2015ApJS..219...13W")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
add_quantity(name, 'redshift', row['z'], source, error = row['e_z'], kind = 'heliocentric')
add_quantity(name, 'ebv', row['E_B-V_'], source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/ApJS/219/13/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'LSQ' + row['LSQ']
name = add_event(name)
source = add_source(name, bibcode = "2015ApJS..219...13W")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), instrument = 'QUEST', telescope = 'ESO Schmidt',
observatory = 'La Silla', band = row['Filt'],
magnitude = row['mag'], e_magnitude = row['e_mag'], system = "Swope", source = source)
journal_events()
# 2012Natur.491..228C
result = Vizier.get_catalogs("J/other/Nat/491.228/tablef1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN2213-1745'
name = add_event(name)
source = add_source(name, bibcode = "2012Natur.491..228C")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'SLSN-R', source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['g', 'r', 'i']:
bandtag = band + '_mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row["MJD" + band + "_"], band = band + "'", magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
result = Vizier.get_catalogs("J/other/Nat/491.228/tablef2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN1000+0216'
name = add_event(name)
source = add_source(name, bibcode = "2012Natur.491..228C")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'SLSN-II?', source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['g', 'r', 'i']:
bandtag = band + '_mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row["MJD" + band + "_"], band = band + "'", magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
journal_events()
# 2011Natur.474..484Q
result = Vizier.get_catalogs("J/other/Nat/474.484/tables1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2011Natur.474..484Q")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = row['Tel'],
magnitude = row['mag'], e_magnitude = row['e_mag'], source = source)
journal_events()
# 2011ApJ...736..159G
result = Vizier.get_catalogs("J/ApJ/736/159/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PTF10vdl'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...736..159G")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], telescope = row['Tel'], magnitude = row['mag'],
e_magnitude = row['e_mag'] if is_number(row['e_mag']) else '', upperlimit = (not is_number(row['e_mag'])), source = source)
journal_events()
# 2012ApJ...760L..33B
result = Vizier.get_catalogs("J/ApJ/760/L33/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PTF12gzk'
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...760L..33B")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
# Fixing a typo in VizieR table
if str(row['JD']) == '2455151.456':
row['JD'] = '2456151.456'
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], telescope = row['Inst'], magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
journal_events()
# 2013ApJ...769...39S
result = Vizier.get_catalogs("J/ApJ/769/39/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PS1-12sk'
name = add_event(name)
source = add_source(name, bibcode = "2013ApJ...769...39S")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
instrument = ''
telescope = ''
if row['Inst'] == 'RATCam':
instrument = row['Inst']
else:
telescope = row['Inst']
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = telescope, instrument = instrument, magnitude = row['mag'],
e_magnitude = row['e_mag'] if not row['l_mag'] else '', upperlimit = (row['l_mag'] == '>'), source = source)
journal_events()
# 2009MNRAS.394.2266P
# Note: Instrument info available via links in VizieR, can't auto-parse just yet.
name = 'SN2005cs'
name = add_event(name)
source = add_source(name, bibcode = "2009MNRAS.394.2266P")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/MNRAS/394/2266/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['U', 'B', 'V', 'R', 'I']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = (row["e_" + bandtag] if row['l_' + bandtag] != '>' else ''),
source = source, upperlimit = (row['l_' + bandtag] == '>'))
if "zmag" in row and is_number(row["zmag"]) and not isnan(float(row["zmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = "z", magnitude = row["zmag"],
e_magnitude = row["e_zmag"], source = source)
result = Vizier.get_catalogs("J/MNRAS/394/2266/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['B', 'V', 'R']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = (row["e_" + bandtag] if row['l_' + bandtag] != '>' else ''),
source = source, upperlimit = (row['l_' + bandtag] == '>'))
result = Vizier.get_catalogs("J/MNRAS/394/2266/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['J', 'H', 'K']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
journal_events()
# 2013AJ....145...99A
result = Vizier.get_catalogs("J/AJ/145/99/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN2003ie'
name = add_event(name)
source = add_source(name, bibcode = "2013AJ....145...99A")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = row["MJD"], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"] if not row["l_Bmag"] else '',
upperlimit = (row['l_Bmag'] == '>'), source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = row["MJD"], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"] if is_number(row["e_Vmag"]) else '',
upperlimit = (not is_number(row["e_Vmag"])), source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = row["MJD"], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"] if not row["l_Rmag"] else '',
upperlimit = (row['l_Rmag'] == '>'), source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = row["MJD"], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2011ApJ...729..143C
name = 'SN2008am'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...729..143C")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/729/143/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = 'ROTSE', telescope = 'ROTSE', magnitude = row['mag'],
e_magnitude = row['e_mag'] if not row['l_mag'] else '', upperlimit = (row['l_mag'] == '<'), source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Jmag" in row and is_number(row["Jmag"]) and not isnan(float(row["Jmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "J", magnitude = row["Jmag"],
e_magnitude = row["e_Jmag"], source = source)
if "Hmag" in row and is_number(row["Hmag"]) and not isnan(float(row["Hmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "H", magnitude = row["Hmag"],
e_magnitude = row["e_Hmag"], source = source)
if "Ksmag" in row and is_number(row["Ksmag"]) and not isnan(float(row["Ksmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "Ks", magnitude = row["Ksmag"],
e_magnitude = row["e_Ksmag"], source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = 'P60', magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = row['Filt'], instrument = 'UVOT', telescope = 'Swift', magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
journal_events()
# 2011ApJ...728...14P
name = 'SN2009bb'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...728...14P")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/728/14/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
result = Vizier.get_catalogs("J/ApJ/728/14/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "u_mag" in row and is_number(row["u_mag"]) and not isnan(float(row["u_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "u'", magnitude = row["u_mag"],
e_magnitude = row["e_u_mag"], source = source)
if "g_mag" in row and is_number(row["g_mag"]) and not isnan(float(row["g_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "g'", magnitude = row["g_mag"],
e_magnitude = row["e_g_mag"], source = source)
if "r_mag" in row and is_number(row["r_mag"]) and not isnan(float(row["r_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "r'", magnitude = row["r_mag"],
e_magnitude = row["e_r_mag"], source = source)
if "i_mag" in row and is_number(row["i_mag"]) and not isnan(float(row["i_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "i'", magnitude = row["i_mag"],
e_magnitude = row["e_i_mag"], source = source)
if "z_mag" in row and is_number(row["z_mag"]) and not isnan(float(row["z_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "z'", magnitude = row["z_mag"],
e_magnitude = row["e_z_mag"], source = source)
result = Vizier.get_catalogs("J/ApJ/728/14/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Ymag" in row and is_number(row["Ymag"]) and not isnan(float(row["Ymag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "Y", magnitude = row["Ymag"],
e_magnitude = row["e_Ymag"], source = source)
if "Jmag" in row and is_number(row["Jmag"]) and not isnan(float(row["Jmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "J", magnitude = row["Jmag"],
e_magnitude = row["e_Jmag"], source = source)
if "Hmag" in row and is_number(row["Hmag"]) and not isnan(float(row["Hmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "H", magnitude = row["Hmag"],
e_magnitude = row["e_Hmag"], source = source)
journal_events()
# 2011PAZh...37..837T
name = 'SN2009nr'
name = add_event(name)
source = add_source(name, bibcode = "2011PAZh...37..837T")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/PAZh/37/837/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2455000))
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2013MNRAS.433.1871B
name = 'SN2012aw'
name = add_event(name)
source = add_source(name, bibcode = "2013MNRAS.433.1871B")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/MNRAS/433/1871/table3a")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2456000))
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rcmag" in row and is_number(row["Rcmag"]) and not isnan(float(row["Rcmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "Rc", magnitude = row["Rcmag"],
e_magnitude = row["e_Rcmag"], source = source)
if "Icmag" in row and is_number(row["Icmag"]) and not isnan(float(row["Icmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "Ic", magnitude = row["Icmag"],
e_magnitude = row["e_Icmag"], source = source)
result = Vizier.get_catalogs("J/MNRAS/433/1871/table3b")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2456000))
if "gmag" in row and is_number(row["gmag"]) and not isnan(float(row["gmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "g", magnitude = row["gmag"],
e_magnitude = row["e_gmag"], source = source)
if "rmag" in row and is_number(row["rmag"]) and not isnan(float(row["rmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "r", magnitude = row["rmag"],
e_magnitude = row["e_rmag"], source = source)
if "imag" in row and is_number(row["imag"]) and not isnan(float(row["imag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "i", magnitude = row["imag"],
e_magnitude = row["e_imag"], source = source)
if "zmag" in row and is_number(row["zmag"]) and not isnan(float(row["zmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "z", magnitude = row["zmag"],
e_magnitude = row["e_zmag"], source = source)
journal_events()
# 2014AJ....148....1Z
name = 'SN2012fr'
name = add_event(name)
source = add_source(name, bibcode = "2014AJ....148....1Z")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/AJ/148/1/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
result = Vizier.get_catalogs("J/AJ/148/1/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "UVW1" in row and is_number(row["UVW1"]) and not isnan(float(row["UVW1"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "W1", magnitude = row["UVW1"],
e_magnitude = row["e_UVW1"], source = source)
if "UVW2" in row and is_number(row["UVW2"]) and not isnan(float(row["UVW2"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "W2", magnitude = row["UVW2"],
e_magnitude = row["e_UVW2"], source = source)
if "UVM2" in row and is_number(row["UVM2"]) and not isnan(float(row["UVM2"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "M2", magnitude = row["UVM2"],
e_magnitude = row["e_UVM2"], source = source)
result = Vizier.get_catalogs("J/AJ/148/1/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2015ApJ...805...74B
name = 'SN2014J'
name = add_event(name)
    source = add_source(name, bibcode = "2015ApJ...805...74B")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/805/74/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "mag" in row and is_number(row["mag"]) and not isnan(float(row["mag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = row["Filt"], magnitude = row["mag"],
e_magnitude = row["e_mag"], source = source)
elif "maglim" in row and is_number(row["maglim"]) and not isnan(float(row["maglim"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = row["Filt"], magnitude = row["maglim"],
upperlimit = True, source = source)
journal_events()
# 2011ApJ...741...97D
result = Vizier.get_catalogs("J/ApJ/741/97/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['SN'])
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...741...97D")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], magnitude = row['mag'],
e_magnitude = row['e_mag'] if is_number(row['e_mag']) else '', upperlimit = (not is_number(row['e_mag'])), source = source)
journal_events()
# 2015MNRAS.448.1206M
# Note: Photometry from two SN can also be added from this source.
result = Vizier.get_catalogs("J/MNRAS/448/1206/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zsp'], source, kind = 'spectroscopic')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zph'], source, error = row['e_zph'], kind = 'photometric')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Ia?', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zsp'], source, kind = 'spectroscopic')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Type'], source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table6")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Type'], source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/tablea2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Typesoft']+'?', source)
add_quantity(name, 'claimedtype', row['Typepsnid']+'?', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/tablea3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Candidate', source)
journal_events()
# 2012AJ....143..126B
result = Vizier.get_catalogs("J/AJ/143/126/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
if not row['Wcl'] or row['Wcl'] == 'N':
continue
row = convert_aq_output(row)
name = str(row['SN']).replace(' ', '')
name = add_event(name)
source = add_source(name, bibcode = "2012AJ....143..126B")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'Ia-' + row['Wcl'], source)
journal_events()
# 2015ApJS..220....9F
for viztab in ['1', '2']:
result = Vizier.get_catalogs("J/ApJS/220/9/table" + viztab)
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event(row['SN'])
source = add_source(name, bibcode = "2015ApJS..220....9F")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
if '?' not in row['Host']:
add_quantity(name, 'host', row['Host'].replace('_', ' '), source)
kind = ''
if 'Host' in row['n_z']:
kind = 'host'
elif 'Spectrum' in row['n_z']:
kind = 'spectroscopic'
add_quantity(name, 'redshift', row['z'], source, error = row['e_z'], kind = kind)
result = Vizier.get_catalogs("J/ApJS/220/9/table8")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event(row['SN'])
source = add_source(name, bibcode = "2015ApJS..220....9F")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_photometry(name, time = row['MJD'], band = row['Band'], magnitude = row['mag'],
e_magnitude = row["e_mag"], telescope = row["Tel"], source = source)
journal_events()
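    # 2008ApJ...673..999P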
result = Vizier.get_catalogs("J/ApJ/673/999/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event('SN'+row['SN'])
source = add_source(name, bibcode = "2008ApJ...673..999P")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['z'], source, kind = 'host')
add_quantity(name, 'hostra', row['RAGdeg'], source, unit = 'floatdegrees')
add_quantity(name, 'hostdec', row['DEGdeg'], source, unit = 'floatdegrees')
add_quantity(name, 'claimedtype', row['Type'].strip(':'), source)
journal_events()
if do_task(task, 'donations'):
# Nicholl 04-01-16 donation
with open("../sne-external/Nicholl-04-01-16/bibcodes.json", 'r') as f:
bcs = json.loads(f.read())
for datafile in sorted(glob("../sne-external/Nicholl-04-01-16/*.txt"), key=lambda s: s.lower()):
name = os.path.basename(datafile).split('_')[0]
name = add_event(name)
bibcode = ''
for bc in bcs:
if name in bcs[bc]:
bibcode = bc
if not bibcode:
raise(ValueError('Bibcode not found!'))
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
for r, rrow in enumerate(tsvin):
row = list(filter(None, rrow))
if not row:
continue
if row[0][0] == '#' and row[0] != '#MJD':
continue
if row[0] == '#MJD':
bands = [x for x in row[1:] if x and 'err' not in x]
continue
mjd = row[0]
if not is_number(mjd):
continue
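                    # Remaining columns alternate magnitude and error per band; a '>' prefix marks an upper limit.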
for v, val in enumerate(row[1::2]):
upperlimit = ''
if '>' in val:
upperlimit = True
mag = val.strip('>')
if not is_number(mag) or isnan(float(mag)) or float(mag) > 90.0:
continue
err = ''
if is_number(row[2*v+2]) and not isnan(float(row[2*v+2])):
err = row[2*v+2]
add_photometry(name, time = mjd, band = bands[v], magnitude = mag,
e_magnitude = err, upperlimit = upperlimit, source = source)
journal_events()
# Maggi 04-11-16 donation (MC SNRs)
with open('../sne-external/Maggi-04-11-16/LMCSNRs_OpenSNe.csv') as f:
tsvin = csv.reader(f, delimiter=',')
for row in tsvin:
name = 'MCSNR ' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2016A&A...585A.162M')
add_quantity(name, 'alias', name, source)
if row[1] != 'noname':
add_quantity(name, "alias", row[1], source)
add_quantity(name, 'ra', row[2], source)
add_quantity(name, 'dec', row[3], source)
add_quantity(name, 'host', 'LMC', source)
if row[4] == '1':
add_quantity(name, 'claimedtype', 'Ia', source)
elif row[4] == '2':
add_quantity(name, 'claimedtype', 'CC', source)
with open('../sne-external/Maggi-04-11-16/SMCSNRs_OpenSNe.csv') as f:
tsvin = csv.reader(f, delimiter=',')
for row in tsvin:
name = 'MCSNR ' + row[0]
name = add_event(name)
source = add_source(name, refname = 'Pierre Maggi')
add_quantity(name, 'alias', name, source)
add_quantity(name, "alias", row[1], source)
add_quantity(name, "alias", row[2], source)
add_quantity(name, 'ra', row[3], source)
add_quantity(name, 'dec', row[4], source)
add_quantity(name, 'host', 'SMC', source)
journal_events()
# Galbany 04-18-16 donation
folders = next(os.walk('../sne-external/galbany-04-18-16/'))[1]
bibcode = '2016AJ....151...33G'
for folder in folders:
infofiles = glob("../sne-external/galbany-04-18-16/" + folder + "/*.info")
photfiles = glob("../sne-external/galbany-04-18-16/" + folder + "/*.out*")
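        # Each .info file holds 'field: value' metadata; each .out* file holds one band's light curve
        # (band name on its second line, then time/magnitude/error/system/telescope columns).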
zhel = ''
zcmb = ''
zerr = ''
for path in infofiles:
with open(path, 'r') as f:
lines = f.read().splitlines()
for line in lines:
splitline = line.split(':')
field = splitline[0].strip().lower()
value = splitline[1].strip()
if field == 'name':
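                        # Normalize to uppercase 'SN' + year; a single trailing letter is uppercased,
                        # longer suffixes keep their original case.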
name = value[:6].upper() + (value[6].upper() if len(value) == 7 else value[6:])
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
elif field == 'type':
claimedtype = value.replace('SN', '')
add_quantity(name, 'claimedtype', claimedtype, source)
elif field == 'zhel':
zhel = value
elif field == 'redshift_error':
zerr = value
elif field == 'zcmb':
zcmb = value
elif field == 'ra':
add_quantity(name, 'ra', value, source, unit = 'floatdegrees')
elif field == 'dec':
add_quantity(name, 'dec', value, source, unit = 'floatdegrees')
elif field == 'host':
add_quantity(name, 'host', value.replace('- ', '-').replace('G ', 'G'), source)
elif field == 'e(b-v)_mw':
add_quantity(name, 'ebv', value, source)
add_quantity(name, 'redshift', zhel, source, error = zerr, kind = 'heliocentric')
add_quantity(name, 'redshift', zcmb, source, error = zerr, kind = 'cmb')
for path in photfiles:
with open(path, 'r') as f:
band = ''
lines = f.read().splitlines()
for li, line in enumerate(lines):
if li in [0, 2, 3]:
continue
if li == 1:
band = line.split(':')[-1].strip()
else:
cols = list(filter(None, line.split()))
if not cols:
continue
add_photometry(name, time = cols[0], magnitude = cols[1], e_magnitude = cols[2],
band = band, system = cols[3], telescope = cols[4], source = source)
journal_events()
# Brown 05-14-16
files = glob("../sne-external/brown-05-14-16/*.dat")
for fi in tq(files, currenttask):
name = os.path.basename(fi).split('_')[0]
name = add_event(name)
source = add_source(name, refname = 'Swift Supernovae', bibcode = '2014Ap&SS.354...89B',
url = 'http://people.physics.tamu.edu/pbrown/SwiftSN/swift_sn.html')
add_quantity(name, 'alias', name, source)
with open(fi, 'r') as f:
lines = f.read().splitlines()
for line in lines:
if not line or line[0] == '#':
continue
cols = list(filter(None, line.split()))
band = cols[0]
mjd = cols[1]
# Skip lower limit entries for now
if cols[2] == 'NULL' and cols[6] == 'NULL':
continue
isupp = cols[2] == 'NULL' and cols[6] != 'NULL'
mag = cols[2] if not isupp else cols[4]
e_mag = cols[3] if not isupp else ''
upp = '' if not isupp else True
add_photometry(name, time = mjd, magnitude = mag, e_magnitude = e_mag,
upperlimit = upp, band = band, source = source,
telescope = 'Swift', instrument = 'UVOT', system = 'Vega')
journal_events()
# Nicholl 05-03-16
files = glob("../sne-external/nicholl-05-03-16/*.txt")
name = add_event('SN2015bn')
source = add_source(name, bibcode = '2016arXiv160304748N')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'PS15ae', source)
for fi in tq(files, currenttask):
telescope = os.path.basename(fi).split('_')[1]
with open(fi, 'r') as f:
lines = f.read().splitlines()
for li, line in enumerate(lines):
if not line or (line[0] == '#' and li != 0):
continue
cols = list(filter(None, line.split()))
if not cols:
continue
if li == 0:
bands = cols[1:]
continue
mjd = cols[0]
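                # Columns after the MJD alternate magnitude and error per band; a non-numeric error marks an upper limit.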
for ci, col in enumerate(cols[1::2]):
if not is_number(col):
continue
emag = cols[2*ci+2]
upp = ''
if not is_number(emag):
emag = ''
upp = True
add_photometry(name, time = mjd, magnitude = col, e_magnitude = emag,
upperlimit = upp, band = bands[ci], source = source,
telescope = telescope, instrument = 'UVOT' if telescope == 'Swift' else '')
journal_events()
if do_task(task, 'pessto-dr1'):
with open("../sne-external/PESSTO_MPHOT.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
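            # Header row: columns from index 3 onward alternate '<band>_<system>' magnitude and error pairs.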
for ri, row in enumerate(tsvin):
if ri == 0:
bands = [x.split('_')[0] for x in row[3::2]]
systems = [x.split('_')[1].capitalize().replace('Ab', 'AB') for x in row[3::2]]
continue
name = row[1]
name = add_event(name)
source = add_source(name, bibcode = "2015A&A...579A..40S")
add_quantity(name, 'alias', name, source)
for hi, ci in enumerate(range(3,len(row)-1,2)):
if not row[ci]:
continue
add_photometry(name, time = row[2], magnitude = row[ci], e_magnitude = row[ci+1],
band = bands[hi], system = systems[hi], telescope = 'Swift' if systems[hi] == 'Swift' else '',
source = source)
journal_events()
if do_task(task, 'scp'):
with open("../sne-external/SCP09.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
if ri == 0:
continue
name = row[0].replace('SCP', 'SCP-')
name = add_event(name)
source = add_source(name, refname = 'Supernova Cosmology Project', url = 'http://supernova.lbl.gov/2009ClusterSurvey/')
add_quantity(name, 'alias', name, source)
if row[1]:
add_quantity(name, 'alias', row[1], source)
if row[2]:
add_quantity(name, 'redshift', row[2], source, kind = 'spectroscopic' if row[3] == 'sn' else 'host')
if row[4]:
add_quantity(name, 'redshift', row[2], source, kind = 'cluster')
if row[6]:
claimedtype = row[6].replace('SN ', '')
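                # row[7] flags how the type was determined: 'a' = spectroscopic, 'c' = light curve (both may appear).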
kind = ('spectroscopic/light curve' if 'a' in row[7] and 'c' in row[7] else
'spectroscopic' if 'a' in row[7] else 'light curve' if 'c' in row[7] else '')
if claimedtype != '?':
add_quantity(name, 'claimedtype', claimedtype, source, kind = kind)
journal_events()
if do_task(task, 'ascii'):
# 2006ApJ...645..841N
with open("../sne-external/2006ApJ...645..841N-table3.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
name = 'SNLS-' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2006ApJ...645..841N')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', row[1], source, kind = 'spectroscopic')
astrot = astrotime(float(row[4]) + 2450000., format = 'jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
journal_events()
# Anderson 2014
for datafile in tq(sorted(glob("../sne-external/SNII_anderson2014/*.dat"), key=lambda s: s.lower()), currenttask):
basename = os.path.basename(datafile)
if not is_number(basename[:2]):
continue
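            # Two-digit year prefixes below 50 map to 20xx, otherwise to 19xx.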
if basename == '0210_V.dat':
name = 'SN0210'
else:
name = ('SN20' if int(basename[:2]) < 50 else 'SN19') + basename.split('_')[0]
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...786...67A')
add_quantity(name, 'alias', name, source)
if name in ['SN1999ca','SN2003dq','SN2008aw']:
system = 'Swope'
else:
system = 'Landolt'
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
for row in tsvin:
if not row[0]:
continue
add_photometry(name, time = str(jd_to_mjd(Decimal(row[0]))), band = 'V', magnitude = row[1], e_magnitude = row[2], system = system, source = source)
journal_events()
# stromlo
stromlobands = ['B','V','R','I','VM','RM']
with open('../sne-external/J_A+A_415_863-1/photometry.csv', 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for row in tq(tsvin, currenttask):
name = row[0]
name = add_event(name)
source = add_source(name, bibcode = "2004A&A...415..863G")
add_quantity(name, 'alias', name, source)
mjd = str(jd_to_mjd(Decimal(row[1])))
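            # Each band spans three columns: magnitude, upper error, lower error; a missing upper error
            # with a lower error present marks an upper limit.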
for ri, ci in enumerate(range(2,len(row),3)):
if not row[ci]:
continue
band = stromlobands[ri]
upperlimit = True if (not row[ci+1] and row[ci+2]) else False
e_upper_magnitude = str(abs(Decimal(row[ci+1]))) if row[ci+1] else ''
e_lower_magnitude = str(abs(Decimal(row[ci+2]))) if row[ci+2] else ''
add_photometry(name, time = mjd, band = band, magnitude = row[ci],
e_upper_magnitude = e_upper_magnitude, e_lower_magnitude = e_lower_magnitude,
upperlimit = upperlimit, telescope = 'MSSSO 1.3m' if band in ['VM', 'RM'] else 'CTIO',
instrument = 'MaCHO' if band in ['VM', 'RM'] else '', source = source)
journal_events()
# 2015MNRAS.449..451W
with open("../sne-external/2015MNRAS.449..451W.dat", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if r == 0:
continue
namesplit = row[0].split('/')
name = namesplit[-1]
if name.startswith('SN'):
name = name.replace(' ', '')
name = add_event(name)
source = add_source(name, bibcode = '2015MNRAS.449..451W')
add_quantity(name, 'alias', name, source)
if len(namesplit) > 1:
add_quantity(name, 'alias', namesplit[0], source)
add_quantity(name, 'claimedtype', row[1], source)
add_photometry(name, time = row[2], band = row[4], magnitude = row[3], source = source)
journal_events()
# 2016MNRAS.459.1039T
with open("../sne-external/2016MNRAS.459.1039T.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
name = add_event('LSQ13zm')
source = add_source(name, bibcode = '2016MNRAS.459.1039T')
add_quantity(name, 'alias', name, source)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
bands = [x.replace('(err)', '') for x in row[3:-1]]
continue
mjd = row[1]
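            # Magnitudes carry their errors in parentheses; a '>' prefix marks an upper limit.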
mags = [re.sub(r'\([^)]*\)', '', x) for x in row[3:-1]]
upps = [True if '>' in x else '' for x in mags]
mags = [x.replace('>', '') for x in mags]
errs = [x[x.find("(")+1:x.find(")")] if "(" in x else '' for x in row[3:-1]]
for mi, mag in enumerate(mags):
if not is_number(mag):
continue
add_photometry(name, time = mjd, band = bands[mi], magnitude = mag, e_magnitude = errs[mi],
instrument = row[-1], upperlimit = upps[mi], source = source)
journal_events()
# 2015ApJ...804...28G
with open("../sne-external/2015ApJ...804...28G.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
name = add_event('PS1-13arp')
source = add_source(name, bibcode = '2015ApJ...804...28G')
add_quantity(name, 'alias', name, source)
for r, row in enumerate(tq(data, currenttask)):
if r == 0:
continue
mjd = row[1]
mag = row[3]
upp = True if '<' in mag else ''
mag = mag.replace('<', '')
err = row[4] if is_number(row[4]) else ''
ins = row[5]
add_photometry(name, time = mjd, band = row[0], magnitude = mag, e_magnitude = err,
instrument = ins, upperlimit = upp, source = source)
journal_events()
# 2016ApJ...819...35A
with open("../sne-external/2016ApJ...819...35A.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2016ApJ...819...35A')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row[1], source)
add_quantity(name, 'dec', row[2], source)
add_quantity(name, 'redshift', row[3], source)
add_quantity(name, 'discoverdate',
datetime.strptime(row[4], '%Y %b %d').isoformat().split('T')[0].replace('-', '/'), source)
journal_events()
# 2014ApJ...784..105W
with open("../sne-external/2014ApJ...784..105W.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2014ApJ...784..105W')
add_quantity(name, 'alias', name, source)
mjd = row[1]
band = row[2]
mag = row[3]
err = row[4]
add_photometry(name, time = mjd, band = row[2], magnitude = mag, e_magnitude = err,
instrument = 'WHIRC', telescope = 'WIYN 3.5 m', observatory = 'NOAO',
system = 'WHIRC', source = source)
journal_events()
# 2012MNRAS.425.1007B
with open("../sne-external/2012MNRAS.425.1007B.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
bands = row[2:]
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2012MNRAS.425.1007B')
add_quantity(name, 'alias', name, source)
mjd = row[1]
mags = [x.split('±')[0].strip() for x in row[2:]]
errs = [x.split('±')[1].strip() if '±' in x else '' for x in row[2:]]
if row[0] == 'PTF09dlc':
ins = 'HAWK-I'
tel = 'VLT 8.1m'
obs = 'ESO'
else:
ins = 'NIRI'
tel = 'Gemini North 8.2m'
obs = 'Gemini'
for mi, mag in enumerate(mags):
if not is_number(mag):
continue
add_photometry(name, time = mjd, band = bands[mi], magnitude = mag, e_magnitude = errs[mi],
instrument = ins, telescope = tel, observatory = obs,
system = 'Natural', source = source)
journal_events()
# CCCP
if do_task(task, 'cccp'):
cccpbands = ['B', 'V', 'R', 'I']
for datafile in sorted(glob("../sne-external/CCCP/apj407397*.txt"), key=lambda s: s.lower()):
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
for r, row in enumerate(tsvin):
if r == 0:
continue
elif r == 1:
name = 'SN' + row[0].split('SN ')[-1]
name = add_event(name)
source = add_source(name, bibcode = '2012ApJ...744...10K')
add_quantity(name, 'alias', name, source)
elif r >= 5:
mjd = str(Decimal(row[0]) + 53000)
for b, band in enumerate(cccpbands):
if row[2*b + 1]:
                                # A missing error column marks the '>'-prefixed magnitude as an upper limit.
                                add_photometry(name, time = mjd, band = band, magnitude = row[2*b + 1].strip('>'),
                                    e_magnitude = row[2*b + 2], upperlimit = (not row[2*b + 2]), source = source)
if archived_task('cccp'):
with open('../sne-external/CCCP/sc_cccp.html', 'r') as f:
html = f.read()
else:
session = requests.Session()
response = session.get("https://webhome.weizmann.ac.il/home/iair/sc_cccp.html")
html = response.text
with open('../sne-external/CCCP/sc_cccp.html', 'w') as f:
f.write(html)
soup = BeautifulSoup(html, "html5lib")
links = soup.body.findAll("a")
for link in tq(links, currenttask):
if 'sc_sn' in link['href']:
name = add_event(link.text.replace(' ', ''))
source = add_source(name, refname = 'CCCP', url = 'https://webhome.weizmann.ac.il/home/iair/sc_cccp.html')
add_quantity(name, 'alias', name, source)
if archived_task('cccp'):
with open('../sne-external/CCCP/' + link['href'].split('/')[-1], 'r') as f:
html2 = f.read()
else:
response2 = session.get("https://webhome.weizmann.ac.il/home/iair/" + link['href'])
html2 = response2.text
with open('../sne-external/CCCP/' + link['href'].split('/')[-1], 'w') as f:
f.write(html2)
soup2 = BeautifulSoup(html2, "html5lib")
links2 = soup2.body.findAll("a")
for link2 in links2:
if ".txt" in link2['href'] and '_' in link2['href']:
band = link2['href'].split('_')[1].split('.')[0].upper()
if archived_task('cccp'):
fname = '../sne-external/CCCP/' + link2['href'].split('/')[-1]
if not os.path.isfile(fname):
continue
with open(fname, 'r') as f:
html3 = f.read()
else:
response3 = session.get("https://webhome.weizmann.ac.il/home/iair/cccp/" + link2['href'])
if response3.status_code == 404:
continue
html3 = response3.text
with open('../sne-external/CCCP/' + link2['href'].split('/')[-1], 'w') as f:
f.write(html3)
table = [[str(Decimal(y.strip())).rstrip('0') for y in x.split(",")] for x in list(filter(None, html3.split("\n")))]
for row in table:
add_photometry(name, time = str(Decimal(row[0]) + 53000), band = band, magnitude = row[1], e_magnitude = row[2], source = source)
journal_events()
# Suspect catalog
if do_task(task, 'suspect'):
with open('../sne-external/suspectreferences.csv','r') as f:
tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
suspectrefdict = {}
for row in tsvin:
suspectrefdict[row[0]] = row[1]
for datafile in tq(sorted(glob("../sne-external/SUSPECT/*.html"), key=lambda s: s.lower()), currenttask):
basename = os.path.basename(datafile)
basesplit = basename.split('-')
name = basesplit[1]
            # Append 'A' to year-only SN designations before registering the event,
            # so that all subsequent quantities attach to the same event name.
            if name.startswith('SN') and is_number(name[2:]):
                name = name + 'A'
            name = add_event(name)
band = basesplit[3].split('.')[0]
ei = int(basesplit[2])
bandlink = 'file://' + os.path.abspath(datafile)
bandresp = urllib.request.urlopen(bandlink)
bandsoup = BeautifulSoup(bandresp, "html5lib")
bandtable = bandsoup.find('table')
names = bandsoup.body.findAll(text=re.compile("Name"))
reference = ''
for link in bandsoup.body.findAll('a'):
if 'adsabs' in link['href']:
reference = str(link).replace('"', "'")
bibcode = unescape(suspectrefdict[reference])
source = add_source(name, bibcode = bibcode)
secondaryreference = "SUSPECT"
secondaryrefurl = "https://www.nhn.ou.edu/~suspect/"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if ei == 1:
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
add_quantity(name, 'host', names[1].split(':')[1].strip(), secondarysource)
redshifts = bandsoup.body.findAll(text=re.compile("Redshift"))
if redshifts:
add_quantity(name, 'redshift', redshifts[0].split(':')[1].strip(), secondarysource, kind = 'heliocentric')
hvels = bandsoup.body.findAll(text=re.compile("Heliocentric Velocity"))
#if hvels:
# add_quantity(name, 'velocity', hvels[0].split(':')[1].strip().split(' ')[0],
# secondarysource, kind = 'heliocentric')
types = bandsoup.body.findAll(text=re.compile("Type"))
add_quantity(name, 'claimedtype', types[0].split(':')[1].strip().split(' ')[0], secondarysource)
for r, row in enumerate(bandtable.findAll('tr')):
if r == 0:
continue
col = row.findAll('td')
mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
mag = col[3].contents[0]
if mag.isspace():
mag = ''
else:
mag = str(mag)
e_magnitude = col[4].contents[0]
if e_magnitude.isspace():
e_magnitude = ''
else:
e_magnitude = str(e_magnitude)
add_photometry(name, time = mjd, band = band, magnitude = mag, e_magnitude = e_magnitude, source = secondarysource + ',' + source)
journal_events()
# CfA data
if do_task(task, 'cfa'):
for fname in tq(sorted(glob("../sne-external/cfa-input/*.dat"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
csv_data = []
for r, row in enumerate(tsvin):
new = []
for item in row:
new.extend(item.split("\t"))
csv_data.append(new)
for r, row in enumerate(csv_data):
for c, col in enumerate(row):
csv_data[r][c] = col.strip()
csv_data[r] = [_f for _f in csv_data[r] if _f]
eventname = os.path.basename(os.path.splitext(fname)[0])
eventparts = eventname.split('_')
name = snname(eventparts[0])
name = add_event(name)
secondaryname = 'CfA Supernova Archive'
secondaryurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
secondarysource = add_source(name, refname = secondaryname, url = secondaryurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, secondarysource)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
eventbands = list(eventparts[1])
tu = 'MJD'
jdoffset = Decimal(0.)
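            # Header lines set the time system: '#JD-<offset>' gives a JD zero point, '#Julian' means full JD,
            # and an 'HJD' column header switches to heliocentric JD; all times are converted to MJD below.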
for rc, row in enumerate(csv_data):
if len(row) > 0 and row[0][0] == "#":
if len(row[0]) > 2 and row[0][:3] == "#JD":
tu = 'JD'
rowparts = row[0].split('-')
jdoffset = Decimal(rowparts[1])
elif len(row[0]) > 6 and row[0][:7] == "#Julian":
tu = 'JD'
jdoffset = Decimal(0.)
elif len(row) > 1 and row[1].lower() == "photometry":
for ci, col in enumerate(row[2:]):
if col[0] == "(":
refstr = ' '.join(row[2+ci:])
refstr = refstr.replace('(','').replace(')','')
bibcode = unescape(refstr)
source = add_source(name, bibcode = bibcode)
elif len(row) > 1 and row[1] == "HJD":
tu = "HJD"
continue
elif len(row) > 0:
mjd = row[0]
for v, val in enumerate(row):
if v == 0:
if tu == 'JD':
mjd = str(jd_to_mjd(Decimal(val) + jdoffset))
tuout = 'MJD'
elif tu == 'HJD':
mjd = str(jd_to_mjd(Decimal(val)))
tuout = 'MJD'
else:
mjd = val
tuout = tu
elif v % 2 != 0:
if float(row[v]) < 90.0:
add_photometry(name, u_time = tuout, time = mjd, band = eventbands[(v-1)//2], magnitude = row[v], e_magnitude = row[v+1], source = secondarysource + ',' + source)
f.close()
# Hicken 2012
f = open("../sne-external/hicken-2012-standard.dat", 'r')
tsvin = csv.reader(f, delimiter='|', skipinitialspace=True)
for r, row in enumerate(tq(tsvin, currenttask)):
if r <= 47:
continue
if row[0][:2] != 'sn':
name = 'SN' + row[0].strip()
else:
name = row[0].strip()
name = add_event(name)
source = add_source(name, bibcode = '2012ApJS..200...12H')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'Ia', source)
add_photometry(name, u_time = 'MJD', time = row[2].strip(), band = row[1].strip(),
magnitude = row[6].strip(), e_magnitude = row[7].strip(), source = source)
# Bianco 2014
f2 = open("../sne-external/bianco-2014-standard.dat", 'r')
tsvin = csv.reader(f2, delimiter=' ', skipinitialspace=True)
for row in tq(tsvin, currenttask):
name = 'SN' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2014ApJS..213...19B')
add_quantity(name, 'alias', name, source)
add_photometry(name, u_time = 'MJD', time = row[2], band = row[1], magnitude = row[3],
e_magnitude = row[4], telescope = row[5], system = "Standard", source = source)
f.close()
f2.close()
journal_events()
# New UCB import
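# The SNDB "allpub" JSON fetched below is treated as a list of photometry entries carrying
# ObjName, Reference, Type, DiscDate, HostName, Filename and PhotID fields; each entry's
# light curve is downloaded (or read from the local archive) as a whitespace-delimited
# table with MJD, magnitude and error in the first three columns and band and telescope in
# columns 4 and 5. Magnitudes above 99 are skipped, presumably as placeholders.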
if do_task(task, 'ucb'):
secondaryreference = "UCB Filippenko Group's Supernova Database (SNDB)"
secondaryrefurl = "http://heracles.astro.berkeley.edu/sndb/info"
secondaryrefbib = "2012MNRAS.425.1789S"
jsontxt = load_cached_url("http://heracles.astro.berkeley.edu/sndb/download?id=allpubphot",
'../sne-external-spectra/UCB/allpub.json')
if not jsontxt:
continue
photom = json.loads(jsontxt)
photom = sorted(photom, key = lambda k: k['ObjName'])
for phot in tq(photom, currenttask = currenttask):
name = phot["ObjName"]
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondaryrefbib, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
sources = [secondarysource]
if phot["Reference"]:
sources += [add_source(name, bibcode = phot["Reference"])]
sources = uniq_cdl(sources)
if phot["Type"] and phot["Type"].strip() != "NoMatch":
for ct in phot["Type"].strip().split(','):
add_quantity(name, 'claimedtype', ct.replace('-norm', '').strip(), sources)
if phot["DiscDate"]:
add_quantity(name, 'discoverdate', phot["DiscDate"].replace('-', '/'), sources)
if phot["HostName"]:
add_quantity(name, 'host', urllib.parse.unquote(phot["HostName"]).replace('*', ''), sources)
filename = phot["Filename"] if phot["Filename"] else ''
if not filename:
raise(ValueError('Filename not found for SNDB phot!'))
if not phot["PhotID"]:
raise(ValueError('ID not found for SNDB phot!'))
filepath = '../sne-external/SNDB/' + filename
if archived_task('ucb') and os.path.isfile(filepath):
with open(filepath, 'r') as f:
phottxt = f.read()
else:
session = requests.Session()
response = session.get("http://heracles.astro.berkeley.edu/sndb/download?id=dp:" + str(phot["PhotID"]))
phottxt = response.text
with open(filepath, 'w') as f:
f.write(phottxt)
tsvin = csv.reader(phottxt.splitlines(), delimiter=' ', skipinitialspace=True)
for r, row in enumerate(tsvin):
if len(row) > 0 and row[0] == "#":
continue
mjd = row[0]
magnitude = row[1]
if magnitude and float(magnitude) > 99.0:
continue
e_magnitude = row[2]
band = row[4]
telescope = row[5]
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude,
e_magnitude = e_magnitude, source = sources)
journal_events()
# Import SDSS
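# Each SDSS .sum file is parsed positionally: the first row carries the SDSS-II candidate
# ID, the IAU name when one exists, and RA/Dec; the second row carries the heliocentric
# redshift and its error; rows from index 19 onward are photometry with a flag, MJD,
# filter index into sdssbands, magnitude and error. Flag values above 1024 are treated as
# bad measurements and skipped.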
if do_task(task, 'sdss'):
with open('../sne-external/SDSS/2010ApJ...708..661D.txt', 'r') as f:
bibcodes2010 = f.read().split("\n")
sdssbands = ['u', 'g', 'r', 'i', 'z']
for fname in tq(sorted(glob("../sne-external/SDSS/*.sum"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
basename = os.path.basename(fname)
if basename in bibcodes2010:
bibcode = '2010ApJ...708..661D'
else:
bibcode = '2008AJ....136.2306H'
for r, row in enumerate(tsvin):
if r == 0:
if row[5] == "RA:":
name = "SDSS-II " + row[3]
else:
name = "SN" + row[5]
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', "SDSS-II " + row[3], source)
if row[5] != "RA:":
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
add_quantity(name, 'ra', row[-4], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[-2], source, unit = 'floatdegrees')
if r == 1:
error = row[4] if float(row[4]) >= 0.0 else ''
add_quantity(name, 'redshift', row[2], source, error = error, kind = 'heliocentric')
if r >= 19:
# Skip bad measurements
if int(row[0]) > 1024:
continue
mjd = row[1]
band = sdssbands[int(row[2])]
magnitude = row[3]
e_magnitude = row[4]
telescope = "SDSS"
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude,
e_magnitude = e_magnitude, source = source, system = "SDSS")
f.close()
journal_events()
# Import GAIA
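# The Gaia alerts index (alerts.csv) is read positionally: name, date, RA, Dec, ..., the
# alert class in column 7 and a free-text comment in column 9, which is also mined for
# "aka" cross-identifications. Per-alert light curves are then fetched from the
# /alerts/alert/<name>/lightcurve.csv endpoint, which provides JD and G-band magnitude
# columns; 'null' magnitudes are skipped.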
if do_task(task, 'gaia'):
fname = '../sne-external/GAIA/alerts.csv'
csvtxt = load_cached_url('http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv', fname)
if not csvtxt:
continue
tsvin = csv.reader(csvtxt.splitlines(), delimiter=',', skipinitialspace=True)
reference = "Gaia Photometric Science Alerts"
refurl = "http://gsaweb.ast.cam.ac.uk/alerts/alertsindex"
for ri, row in enumerate(tq(tsvin, currenttask)):
if ri == 0 or not row:
continue
name = add_event(row[0])
source = add_source(name, refname = reference, url = refurl)
add_quantity(name, 'alias', name, source)
year = '20' + re.findall(r'\d+', row[0])[0]
add_quantity(name, 'discoverdate', year, source)
add_quantity(name, 'ra', row[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[3], source, unit = 'floatdegrees')
if row[7] and row[7] != 'unknown':
add_quantity(name, 'claimedtype', row[7].replace('SNe', '').replace('SN', '').strip(), source)
elif (True in [x in row[9].upper() for x in ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']]):
add_quantity(name, 'claimedtype', 'Candidate', source)
if 'aka' in row[9].replace('gakaxy','galaxy').lower() and 'AKARI' not in row[9]:
commentsplit = (row[9].replace('_', ' ').replace('MLS ', 'MLS').replace('CSS ', 'CSS').
replace('SN iPTF', 'iPTF').replace('SN ', 'SN').replace('AT ', 'AT').split())
for csi, cs in enumerate(commentsplit):
if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
alias = commentsplit[csi+1].strip('(),:.').replace('PSNJ', 'PSN J')
if alias[:6] == 'ASASSN' and alias[6] != '-':
alias = 'ASASSN-' + alias[6:]
add_quantity(name, 'alias', alias, source)
break
fname = '../sne-external/GAIA/' + row[0] + '.csv'
if not args.fullrefresh and archived_task('gaia') and os.path.isfile(fname):
with open(fname, 'r') as f:
csvtxt = f.read()
else:
response = urllib.request.urlopen("http://gsaweb.ast.cam.ac.uk/alerts/alert/" + row[0] + "/lightcurve.csv")
with open(fname, 'w') as f:
csvtxt = response.read().decode('utf-8')
f.write(csvtxt)
tsvin2 = csv.reader(csvtxt.splitlines())
for ri2, row2 in enumerate(tsvin2):
if ri2 <= 1 or not row2:
continue
mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
magnitude = row2[2].strip()
if magnitude == 'null':
continue
e_magnitude = 0.
telescope = 'GAIA'
band = 'G'
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = source)
if args.update:
journal_events()
journal_events()
# Import CSP
# VizieR catalogs exist for this: J/AJ/139/519, J/AJ/142/156. Should replace eventually.
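# Each CSP .dat file appears to consist of '#' comment rows, with the third one holding
# redshift, RA and Dec, followed by data rows of MJD plus alternating magnitude/error
# pairs in the cspbands order; magnitudes of 90 or more are treated as missing.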
if do_task(task, 'csp'):
cspbands = ['u', 'B', 'V', 'g', 'r', 'i', 'Y', 'J', 'H', 'K']
for fname in tq(sorted(glob("../sne-external/CSP/*.dat"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
eventname = os.path.basename(os.path.splitext(fname)[0])
eventparts = eventname.split('opt+')
name = snname(eventparts[0])
name = add_event(name)
reference = "Carnegie Supernova Project"
refbib = "2010AJ....139..519C"
refurl = "http://csp.obs.carnegiescience.edu/data"
source = add_source(name, bibcode = refbib, refname = reference, url = refurl)
add_quantity(name, 'alias', name, source)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
for r, row in enumerate(tsvin):
if len(row) > 0 and row[0][0] == "#":
if r == 2:
add_quantity(name, 'redshift', row[0].split(' ')[-1], source, kind = 'cmb')
add_quantity(name, 'ra', row[1].split(' ')[-1], source)
add_quantity(name, 'dec', row[2].split(' ')[-1], source)
continue
for v, val in enumerate(row):
if v == 0:
mjd = val
elif v % 2 != 0:
if float(row[v]) < 90.0:
add_photometry(name, time = mjd, observatory = 'LCO', band = cspbands[(v-1)//2],
system = 'CSP', magnitude = row[v], e_magnitude = row[v+1], source = source)
f.close()
journal_events()
# Import ITEP
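# The ITEP (Sternberg/SAI) light-curve catalogue is pipe-delimited: SN name, JD, band,
# magnitude, error and a reference string. itep-refs.txt maps those reference strings to
# bibcodes where known; anything unmatched is collected into itep-needsbib.txt below.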
if do_task(task, 'itep'):
itepbadsources = ['2004ApJ...602..571B']
needsbib = []
with open("../sne-external/itep-refs.txt",'r') as f:
refrep = f.read().splitlines()
refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
f = open("../sne-external/itep-lc-cat-28dec2015.txt",'r')
tsvin = csv.reader(f, delimiter='|', skipinitialspace=True)
curname = ''
for r, row in enumerate(tq(tsvin, currenttask)):
if r <= 1 or len(row) < 7:
continue
name = 'SN' + row[0].strip()
mjd = str(jd_to_mjd(Decimal(row[1].strip())))
band = row[2].strip()
magnitude = row[3].strip()
e_magnitude = row[4].strip()
reference = row[6].strip().strip(',')
if curname != name:
curname = name
name = add_event(name)
secondaryreference = "Sternberg Astronomical Institute Supernova Light Curve Catalogue"
secondaryrefurl = "http://dau.itep.ru/sn/node/72"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
if reference in refrepf:
bibcode = unescape(refrepf[reference])
source = add_source(name, bibcode = bibcode)
else:
# Reset bibcode so a value left over from a previous row isn't reused in the check below.
bibcode = ''
needsbib.append(reference)
source = add_source(name, refname = reference) if reference else ''
if bibcode not in itepbadsources:
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = secondarysource + ',' + source)
f.close()
# Write out references that could use a bibcode
needsbib = list(OrderedDict.fromkeys(needsbib))
with open('../itep-needsbib.txt', 'w') as f:
f.writelines(["%s\n" % i for i in needsbib])
journal_events()
# Now import the Asiago catalog
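# The cached sncat.php page is parsed as an HTML table; the record indices used below are
# 1: SN designation, 2-4: host name/RA/Dec, 5-6: SN RA/Dec, 11: redshift or velocity
# (whole numbers are interpreted as velocities), 17: type, 18: discovery/maximum date
# (an asterisk marks discovery dates) and 19: discoverer.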
if do_task(task, 'asiago'):
#response = urllib.request.urlopen('http://graspa.oapd.inaf.it/cgi-bin/sncat.php')
path = os.path.abspath('../sne-external/asiago-cat.php')
response = urllib.request.urlopen('file://' + path)
html = response.read().decode('utf-8')
html = html.replace("\r", "")
soup = BeautifulSoup(html, "html5lib")
table = soup.find("table")
records = []
for r, row in enumerate(table.findAll('tr')):
if r == 0:
continue
col = row.findAll('td')
records.append([utf8(x.renderContents()) for x in col])
for record in tq(records, currenttask):
if len(record) > 1 and record[1] != '':
name = snname("SN" + record[1]).strip('?')
name = add_event(name)
reference = 'Asiago Supernova Catalogue'
refurl = 'http://graspa.oapd.inaf.it/cgi-bin/sncat.php'
refbib = '1989A&AS...81..421B'
source = add_source(name, refname = reference, url = refurl, bibcode = refbib, secondary = True)
add_quantity(name, 'alias', name, source)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
hostname = record[2]
hostra = record[3]
hostdec = record[4]
ra = record[5].strip(':')
dec = record[6].strip(':')
redvel = record[11].strip(':')
discoverer = record[19]
datestring = year
monthday = record[18]
if "*" in monthday:
datekey = 'discover'
else:
datekey = 'max'
if monthday.strip() != '':
monthstr = ''.join(re.findall("[a-zA-Z]+", monthday))
monthstr = str(list(calendar.month_abbr).index(monthstr))
datestring = datestring + '/' + monthstr
dayarr = re.findall(r'\d+', monthday)
if dayarr:
daystr = dayarr[0]
datestring = datestring + '/' + daystr
add_quantity(name, datekey + 'date', datestring, source)
velocity = ''
redshift = ''
if redvel != '':
if round(float(redvel)) == float(redvel):
velocity = int(redvel)
else:
redshift = float(redvel)
redshift = str(redshift)
velocity = str(velocity)
claimedtype = record[17].replace(':', '').replace('*', '').strip()
if (hostname != ''):
add_quantity(name, 'host', hostname, source)
if (claimedtype != ''):
add_quantity(name, 'claimedtype', claimedtype, source)
if (redshift != ''):
add_quantity(name, 'redshift', redshift, source, kind = 'host')
if (velocity != ''):
add_quantity(name, 'velocity', velocity, source, kind = 'host')
if (hostra != ''):
add_quantity(name, 'hostra', hostra, source, unit = 'nospace')
if (hostdec != ''):
add_quantity(name, 'hostdec', hostdec, source, unit = 'nospace')
if (ra != ''):
add_quantity(name, 'ra', ra, source, unit = 'nospace')
if (dec != ''):
add_quantity(name, 'dec', dec, source, unit = 'nospace')
if (discoverer != ''):
add_quantity(name, 'discoverer', discoverer, source)
journal_events()
if do_task(task, 'lennarz'):
Vizier.ROW_LIMIT = -1
result = Vizier.get_catalogs("J/A+A/538/A120/usc")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
bibcode = "2012A&A...538A.120L"
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
if row['RAJ2000']:
add_quantity(name, 'ra', row['RAJ2000'], source)
if row['DEJ2000']:
add_quantity(name, 'dec', row['DEJ2000'], source)
if row['RAG']:
add_quantity(name, 'hostra', row['RAG'], source)
if row['DEG']:
add_quantity(name, 'hostdec', row['DEG'], source)
if row['Gal']:
add_quantity(name, 'host', row['Gal'], source)
if row['Type']:
claimedtypes = row['Type'].split('|')
for claimedtype in claimedtypes:
add_quantity(name, 'claimedtype', claimedtype.strip(' -'), source)
if row['z']:
if name not in ['SN1985D', 'SN2004cq']:
add_quantity(name, 'redshift', row['z'], source, kind = 'host')
if row['Dist']:
if row['e_Dist']:
add_quantity(name, 'lumdist', row['Dist'], source, error = row['e_Dist'], kind = 'host')
else:
add_quantity(name, 'lumdist', row['Dist'], source, kind = 'host')
if row['Ddate']:
datestring = row['Ddate'].replace('-', '/')
add_quantity(name, 'discoverdate', datestring, source)
if 'photometry' not in events[name]:
if 'Dmag' in row and is_number(row['Dmag']) and not isnan(float(row['Dmag'])):
datesplit = row['Ddate'].strip().split('-')
if len(datesplit) == 3:
datestr = row['Ddate'].strip()
elif len(datesplit) == 2:
datestr = row['Ddate'].strip() + '-01'
elif len(datesplit) == 1:
datestr = row['Ddate'].strip() + '-01-01'
mjd = str(astrotime(datestr).mjd)
add_photometry(name, time = mjd, band = row['Dband'], magnitude = row['Dmag'], source = source)
if row['Mdate']:
datestring = row['Mdate'].replace('-', '/')
add_quantity(name, 'maxdate', datestring, source)
if 'photometry' not in events[name]:
if 'Mmag' in row and is_number(row['Mmag']) and not isnan(float(row['Mmag'])):
datesplit = row['Mdate'].strip().split('-')
if len(datesplit) == 3:
datestr = row['Mdate'].strip()
elif len(datesplit) == 2:
datestr = row['Mdate'].strip() + '-01'
elif len(datesplit) == 1:
datestr = row['Mdate'].strip() + '-01-01'
mjd = str(astrotime(datestr).mjd)
add_photometry(name, time = mjd, band = row['Mband'], magnitude = row['Mmag'], source = source)
journal_events()
if do_task(task, 'fermi'):
with open("../sne-external/1SC_catalog_v01.asc", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
if row[0].startswith('#'):
if len(row) > 1 and 'UPPER_LIMITS' in row[1]:
break
continue
if 'Classified' not in row[1]:
continue
name = row[0].replace('SNR', 'G')
name = add_event(name)
source = add_source(name, bibcode = '2016ApJS..224....8A')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', row[0].replace('SNR', 'MWSNR'), source)
add_quantity(name, 'ra', row[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[3], source, unit = 'floatdegrees')
journal_events()
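# The TNS import below pages through the public CSV search (1000 rows per page); the
# highest object id from the index determines how many pages to fetch. Columns used are
# 1: name, 2-3: RA/Dec, 4: type, 5: redshift, 6-7: host name/redshift, 8: discoverer
# (also used as the survey for the discovery photometry), 10: internal name and
# 14-16: discovery magnitude, filter and date.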
if do_task(task, 'tns'):
session = requests.Session()
csvtxt = load_cached_url("https://wis-tns.weizmann.ac.il/search?&num_page=1&format=html&sort=desc&order=id&format=csv&page=0",
"../sne-external/TNS/index.csv")
if not csvtxt:
continue
maxid = csvtxt.splitlines()[1].split(",")[0].strip('"')
maxpages = ceil(int(maxid)/1000.)
for page in tq(range(maxpages), currenttask):
fname = '../sne-external/TNS/page-' + str(page).zfill(2) + '.csv'
if archived_task('tns') and os.path.isfile(fname) and page < 7:
with open(fname, 'r') as f:
csvtxt = f.read()
else:
with open(fname, 'w') as f:
session = requests.Session()
response = session.get("https://wis-tns.weizmann.ac.il/search?&num_page=1000&format=html&edit[type]=&edit[objname]=&edit[id]=&sort=asc&order=id&display[redshift]=1&display[hostname]=1&display[host_redshift]=1&display[source_group_name]=1&display[programs_name]=1&display[internal_name]=1&display[isTNS_AT]=1&display[public]=1&display[end_pop_period]=0&display[spectra_count]=1&display[discoverymag]=1&display[discmagfilter]=1&display[discoverydate]=1&display[discoverer]=1&display[sources]=1&display[bibcode]=1&format=csv&page=" + str(page))
csvtxt = response.text
f.write(csvtxt)
tsvin = csv.reader(csvtxt.splitlines(), delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask, leave = False)):
if ri == 0:
continue
if row[4] and 'SN' not in row[4]:
continue
name = row[1].replace(' ', '')
name = add_event(name)
source = add_source(name, refname = 'Transient Name Server', url = 'https://wis-tns.weizmann.ac.il')
add_quantity(name, 'alias', name, source)
if row[2] and row[2] != '00:00:00.00':
add_quantity(name, 'ra', row[2], source)
if row[3] and row[3] != '+00:00:00.00':
add_quantity(name, 'dec', row[3], source)
if row[4]:
add_quantity(name, 'claimedtype', row[4].replace('SN', '').strip(), source)
if row[5]:
add_quantity(name, 'redshift', row[5], source, kind = 'spectroscopic')
if row[6]:
add_quantity(name, 'host', row[6], source)
if row[7]:
add_quantity(name, 'redshift', row[7], source, kind = 'host')
if row[8]:
add_quantity(name, 'discoverer', row[8], source)
# Currently, all events list all possible observers; possibly a TNS bug?
#if row[9]:
# observers = row[9].split(',')
# for observer in observers:
# add_quantity(name, 'observer', observer.strip(), source)
if row[10]:
add_quantity(name, 'alias', row[10], source)
if row[8] and row[14] and row[15] and row[16]:
survey = row[8]
magnitude = row[14]
band = row[15].split('-')[0]
mjd = astrotime(row[16]).mjd
add_photometry(name, time = mjd, magnitude = magnitude, band = band, survey = survey, source = source)
if row[16]:
date = row[16].split()[0].replace('-', '/')
if date != '0000/00/00':
date = date.replace('/00', '')
time = row[16].split()[1]
if time != '00:00:00':
ts = time.split(':')
date += pretty_num(timedelta(hours = int(ts[0]), minutes = int(ts[1]), seconds = int(ts[2])).total_seconds()/(24*60*60), sig=6).lstrip('0')
add_quantity(name, 'discoverdate', date, source)
if args.update:
journal_events()
journal_events()
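# The Rochester "Latest Supernovae" pages are scraped as HTML tables; the column indices
# used below are 0: designation, 1: type, 2: host, 3-4: RA/Dec, 6: discovery JD, 7-8: the
# JD and magnitude of the listed observation, 11: redshift, 12: discovery reference link,
# 13: discoverer and 14: alternate designation. The vsnetfiles block afterwards reads
# latestsne.dat for additional unfiltered magnitudes.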
if do_task(task, 'rochester'):
rochesterpaths = ['http://www.rochesterastronomy.org/snimages/snredshiftall.html', 'http://www.rochesterastronomy.org/sn2016/snredshift.html']
rochesterupdate = [False, True]
for p, path in enumerate(tq(rochesterpaths, currenttask)):
if args.update and not rochesterupdate[p]:
continue
filepath = '../sne-external/rochester/' + os.path.basename(path)
html = load_cached_url(path, filepath)
if not html:
continue
soup = BeautifulSoup(html, "html5lib")
rows = soup.findAll('tr')
secondaryreference = "Latest Supernovae"
secondaryrefurl = "http://www.rochesterastronomy.org/snimages/snredshiftall.html"
for r, row in enumerate(tq(rows, currenttask)):
if r == 0:
continue
cols = row.findAll('td')
if not len(cols):
continue
name = ''
if cols[14].contents:
aka = str(cols[14].contents[0]).strip()
if is_number(aka.strip('?')):
aka = 'SN' + aka.strip('?') + 'A'
name = add_event(aka)
elif len(aka) >= 4 and is_number(aka[:4]):
aka = 'SN' + aka
name = add_event(aka)
ra = str(cols[3].contents[0]).strip()
dec = str(cols[4].contents[0]).strip()
sn = re.sub('<[^<]+?>', '', str(cols[0].contents[0])).strip()
if is_number(sn.strip('?')):
sn = 'SN' + sn.strip('?') + 'A'
elif len(sn) >= 4 and is_number(sn[:4]):
sn = 'SN' + sn
if not name:
if not sn:
continue
if sn[:8] == 'MASTER J':
sn = sn.replace('MASTER J', 'MASTER OT J').replace('SNHunt', 'SNhunt')
if 'POSSIBLE' in sn.upper() and ra and dec:
sn = 'PSN J' + ra.replace(':', '').replace('.', '') + dec.replace(':', '').replace('.', '')
name = add_event(sn)
reference = cols[12].findAll('a')[0].contents[0].strip()
refurl = cols[12].findAll('a')[0]['href'].strip()
source = add_source(name, refname = reference, url = refurl)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
sources = uniq_cdl(list(filter(None, [source, secondarysource])))
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'alias', sn, sources)
if cols[14].contents:
if aka == 'SNR G1.9+0.3':
aka = 'G001.9+00.3'
if aka[:4] == 'PS1 ':
aka = 'PS1-' + aka[4:]
if aka[:8] == 'MASTER J':
aka = aka.replace('MASTER J', 'MASTER OT J').replace('SNHunt', 'SNhunt')
if 'POSSIBLE' in aka.upper() and ra and dec:
aka = 'PSN J' + ra.replace(':', '').replace('.', '') + dec.replace(':', '').replace('.', '')
add_quantity(name, 'alias', aka, sources)
if str(cols[1].contents[0]).strip() != 'unk':
add_quantity(name, 'claimedtype', str(cols[1].contents[0]).strip(' :,'), sources)
if str(cols[2].contents[0]).strip() != 'anonymous':
add_quantity(name, 'host', str(cols[2].contents[0]).strip(), sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
if str(cols[6].contents[0]).strip() not in ['2440587', '2440587.292']:
astrot = astrotime(float(str(cols[6].contents[0]).strip()), format='jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), sources)
if str(cols[7].contents[0]).strip() not in ['2440587', '2440587.292']:
astrot = astrotime(float(str(cols[7].contents[0]).strip()), format='jd')
if (float(str(cols[8].contents[0]).strip()) <= 90.0 and
not any('GRB' in x for x in get_aliases(name))):
add_photometry(name, time = str(astrot.mjd), magnitude = str(cols[8].contents[0]).strip(), source = sources)
if cols[11].contents[0] != 'n/a':
add_quantity(name, 'redshift', str(cols[11].contents[0]).strip(), sources)
add_quantity(name, 'discoverer', str(cols[13].contents[0]).strip(), sources)
if args.update:
journal_events()
if not args.update:
vsnetfiles = ["latestsne.dat"]
for vsnetfile in vsnetfiles:
f = open("../sne-external/" + vsnetfile,'r',encoding='latin1')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
for r, row in enumerate(tsvin):
if not row or row[0][:4] in ['http', 'www.'] or len(row) < 3:
continue
name = row[0].strip()
if name[:4].isdigit():
name = 'SN' + name
if name.startswith('PSNJ'):
name = 'PSN J' + name[4:]
if name.startswith('MASTEROTJ'):
name = name.replace('MASTEROTJ', 'MASTER OT J')
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if not is_number(row[1]):
continue
year = row[1][:4]
month = row[1][4:6]
day = row[1][6:]
if '.' not in day:
day = day[:2] + '.' + day[2:]
mjd = astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day))
magnitude = row[2].rstrip(ascii_letters)
if not is_number(magnitude):
continue
if magnitude.isdigit():
if int(magnitude) > 100:
magnitude = magnitude[:2] + '.' + magnitude[2:]
# 'cols' here would refer to the stale Rochester table row; check the parsed magnitude instead.
if float(magnitude) >= 90.0:
continue
if len(row) >= 4:
if is_number(row[3]):
e_magnitude = row[3]
refind = 4
else:
e_magnitude = ''
refind = 3
if refind >= len(row):
sources = secondarysource
else:
reference = ' '.join(row[refind:])
source = add_source(name, refname = reference)
add_quantity(name, 'alias', name, secondarysource)
sources = uniq_cdl([source,secondarysource])
else:
sources = secondarysource
band = row[2].lstrip('1234567890.')
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = sources)
f.close()
journal_events()
if do_task(task, 'ogle'):
basenames = ['transients', 'transients/2014b', 'transients/2014', 'transients/2013', 'transients/2012']
oglenames = []
ogleupdate = [True, False, False, False, False]
for b, bn in enumerate(tq(basenames, currenttask)):
if args.update and not ogleupdate[b]:
continue
filepath = '../sne-external/OGLE-' + bn.replace('/', '-') + '-transients.html'
htmltxt = load_cached_url('http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html', filepath)
if not htmltxt:
continue
soup = BeautifulSoup(htmltxt, "html5lib")
links = soup.findAll('a')
breaks = soup.findAll('br')
datalinks = []
datafnames = []
for a in links:
if a.has_attr('href'):
if '.dat' in a['href']:
datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn + '/' + a['href'])
datafnames.append(bn.replace('/', '-') + '-' + a['href'].replace('/', '-'))
ec = -1
reference = 'OGLE-IV Transient Detection System'
refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
for br in tq(breaks, currenttask):
sibling = br.nextSibling
if 'Ra,Dec=' in sibling:
line = sibling.replace("\n", '').split('Ra,Dec=')
name = line[0].strip()
ec += 1
if 'NOVA' in name or 'dupl' in name:
continue
if name in oglenames:
continue
oglenames.append(name)
name = add_event(name)
mySibling = sibling.nextSibling
atelref = ''
claimedtype = ''
while 'Ra,Dec=' not in mySibling:
if isinstance(mySibling, NavigableString):
if 'Phot.class=' in str(mySibling):
claimedtype = re.sub(r'\([^)]*\)', '', str(mySibling).split('=')[-1]).replace('SN','').strip()
if isinstance(mySibling, Tag):
atela = mySibling
if atela and atela.has_attr('href') and 'astronomerstelegram' in atela['href']:
atelref = atela.contents[0].strip()
atelurl = atela['href']
mySibling = mySibling.nextSibling
if mySibling is None:
break
nextSibling = sibling.nextSibling
if isinstance(nextSibling, Tag) and nextSibling.has_attr('alt') and nextSibling.contents[0].strip() != 'NED':
radec = nextSibling.contents[0].strip().split()
else:
radec = line[-1].split()
ra = radec[0]
dec = radec[1]
fname = '../sne-external/OGLE/' + datafnames[ec]
if not args.fullrefresh and archived_task('ogle') and os.path.isfile(fname):
with open(fname, 'r') as f:
csvtxt = f.read()
else:
response = urllib.request.urlopen(datalinks[ec])
with open(fname, 'w') as f:
csvtxt = response.read().decode('utf-8')
f.write(csvtxt)
lcdat = csvtxt.splitlines()
sources = [add_source(name, refname = reference, url = refurl)]
add_quantity(name, 'alias', name, sources[0])
if atelref and atelref != 'ATel#----':
sources.append(add_source(name, refname = atelref, url = atelurl))
sources = uniq_cdl(sources)
if name.startswith('OGLE'):
if name[4] == '-':
if is_number(name[5:9]):
add_quantity(name, 'discoverdate', name[5:9], sources)
else:
if is_number(name[4:6]):
add_quantity(name, 'discoverdate', '20' + name[4:6], sources)
# RA and Dec from OGLE pages currently not reliable
#add_quantity(name, 'ra', ra, sources)
#add_quantity(name, 'dec', dec, sources)
if claimedtype and claimedtype != '-':
add_quantity(name, 'claimedtype', claimedtype, sources)
elif 'SN' not in name and 'claimedtype' not in events[name]:
add_quantity(name, 'claimedtype', 'Candidate', sources)
for row in lcdat:
row = row.split()
mjd = str(jd_to_mjd(Decimal(row[0])))
magnitude = row[1]
if float(magnitude) > 90.0:
continue
e_magnitude = row[2]
upperlimit = False
if e_magnitude == '-1' or float(e_magnitude) > 10.0:
e_magnitude = ''
upperlimit = True
add_photometry(name, time = mjd, band = 'I', magnitude = magnitude, e_magnitude = e_magnitude,
system = 'Vega', source = sources, upperlimit = upperlimit)
if args.update:
journal_events()
journal_events()
if do_task(task, 'snls'):
with open("../sne-external/SNLS-ugriz.dat", 'r') as f:
data = csv.reader(f, delimiter=' ', quotechar='"', skipinitialspace = True)
for row in data:
flux = row[3]
err = row[4]
# Being extra strict here with the flux constraint, see note below.
if float(flux) < 3.0*float(err):
continue
name = 'SNLS-' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2010A&A...523A...7G')
add_quantity(name, 'alias', name, source)
band = row[1]
mjd = row[2]
sig = get_sig_digits(flux.split('E')[0])+1
# Conversion comes from SNLS-Readme
# NOTE: Datafiles available for download suggest zeropoints different from 30; need to inquire.
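# With a zeropoint of 30, m = 30 - 2.5*log10(flux). The error term 2.5*log10(1 + err/flux)
# equals 2.5*(log10(flux + err) - log10(flux)), i.e. the magnitude change produced by a
# one-sigma increase in flux (the commented-out line below is the same expression).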
magnitude = pretty_num(30.0-2.5*log10(float(flux)), sig = sig)
e_magnitude = pretty_num(2.5*log10(1.0 + float(err)/float(flux)), sig = sig)
#e_magnitude = pretty_num(2.5*(log10(float(flux) + float(err)) - log10(float(flux))), sig = sig)
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, counts = flux,
e_counts = err, source = source)
journal_events()
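# The Pan-STARRS 3Pi import scrapes the public psdb pages: the listing's pagination div
# gives the page count, each candidate row provides name, RA/Dec, type and crossmatches,
# and the per-candidate page embeds light curves as jslcdata arrays. Of those arrays, the
# first len(labels) hold detections, the next block apparently holds blanks/non-detection
# markers, and the ones from index 2*len(labels) onward are treated as upper limits.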
if do_task(task, 'psthreepi'):
fname = '../sne-external/3pi/page00.html'
html = load_cached_url("http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/?page=1&sort=followup_flag_date", fname, write = False)
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
div = bs.find('div', {"class":"pagination"})
offline = False
if not div:
offline = True
else:
links = div.findAll('a')
if not links:
offline = True
if offline:
if args.update:
continue
warnings.warn("Pan-STARRS 3pi offline, using local files only.")
with open(fname, 'r') as f:
html = f.read()
bs = BeautifulSoup(html, "html5lib")
div = bs.find('div', {"class":"pagination"})
links = div.findAll('a')
else:
with open(fname, 'w') as f:
f.write(html)
numpages = int(links[-2].contents[0])
oldnumpages = len(glob('../sne-external/3pi/page*'))
for page in tq(range(1,numpages), currenttask):
fname = '../sne-external/3pi/page' + str(page).zfill(2) + '.html'
if not args.fullrefresh and archived_task('psthreepi') and os.path.isfile(fname) and page < oldnumpages:
with open(fname, 'r') as f:
html = f.read()
elif not offline:
response = urllib.request.urlopen("http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/?page=" + str(page) + "&sort=followup_flag_date")
with open(fname, 'w') as f:
html = response.read().decode('utf-8')
f.write(html)
else:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
if not tds:
continue
refs = []
aliases = []
ttype = ''
ctype = ''
for tdi, td in enumerate(tds):
if tdi == 0:
psname = td.contents[0]
pslink = psname['href']
psname = psname.text
elif tdi == 1:
ra = td.contents[0]
elif tdi == 2:
dec = td.contents[0]
elif tdi == 3:
ttype = td.contents[0]
if ttype != 'sn' and ttype != 'orphan':
break
elif tdi == 5:
if not td.contents:
continue
ctype = td.contents[0]
if ctype == 'Observed':
ctype = ''
elif tdi == 16:
if td.contents:
crossrefs = td.findAll('a')
for cref in crossrefs:
if 'atel' in cref.contents[0].lower():
refs.append([cref.contents[0], cref['href']])
elif is_number(cref.contents[0][:4]):
continue
else:
aliases.append(cref.contents[0])
if ttype != 'sn' and ttype != 'orphan':
continue
name = ''
for alias in aliases:
if alias[:2] == 'SN':
name = alias
if not name:
name = psname
name = add_event(name)
sources = [add_source(name, refname = 'Pan-STARRS 3Pi', url = 'http://psweb.mp.qub.ac.uk/ps1threepi/psdb/')]
add_quantity(name, 'alias', name, sources[0])
for ref in refs:
sources.append(add_source(name, refname = ref[0], url = ref[1]))
source = uniq_cdl(sources)
for alias in aliases:
newalias = alias
if alias[:3] in ['CSS', 'SSS', 'MLS']:
newalias = alias.replace('-', ':', 1)
newalias = newalias.replace('PSNJ', 'PSN J')
add_quantity(name, 'alias', newalias, source)
add_quantity(name, 'ra', ra, source)
add_quantity(name, 'dec', dec, source)
add_quantity(name, 'claimedtype', ctype, source)
fname2 = '../sne-external/3pi/candidate-' + pslink.rstrip('/').split('/')[-1] + '.html'
if archived_task('psthreepi') and os.path.isfile(fname2):
with open(fname2, 'r') as f:
html2 = f.read()
elif not offline:
pslink = 'http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/' + pslink
with open(fname2, 'w') as f:
response2 = urllib.request.urlopen(pslink)
html2 = response2.read().decode('utf-8')
f.write(html2)
else:
continue
bs2 = BeautifulSoup(html2, "html5lib")
scripts = bs2.findAll('script')
nslines = []
nslabels = []
for script in scripts:
if 'jslcdata.push' not in script.text:
continue
slines = script.text.splitlines()
for line in slines:
if 'jslcdata.push' in line:
nslines.append(json.loads(line.strip().replace('jslcdata.push(','').replace(');','')))
if 'jslabels.push' in line and 'blanks' not in line and 'non det' not in line:
nslabels.append(json.loads(line.strip().replace('jslabels.push(','').replace(');',''))['label'])
for li, line in enumerate(nslines[:len(nslabels)]):
if not line:
continue
for obs in line:
add_photometry(name, time = str(obs[0]), band = nslabels[li], magnitude = str(obs[1]), e_magnitude = str(obs[2]), source = source,
telescope = 'Pan-STARRS1')
for li, line in enumerate(nslines[2*len(nslabels):]):
if not line:
continue
for obs in line:
add_photometry(name, time = str(obs[0]), band = nslabels[li], magnitude = str(obs[1]), upperlimit = True, source = source,
telescope = 'Pan-STARRS1')
assoctab = bs2.find('table', {"class":"generictable"})
hostname = ''
redshift = ''
if assoctab:
trs = assoctab.findAll('tr')
headertds = [x.contents[0] for x in trs[1].findAll('td')]
tds = trs[1].findAll('td')
for tdi, td in enumerate(tds):
if tdi == 1:
hostname = td.contents[0].strip()
elif tdi == 4:
if 'z' in headertds:
redshift = td.contents[0].strip()
# Skip galaxies with just SDSS id
if is_number(hostname):
continue
add_quantity(name, 'host', hostname, source)
if redshift:
add_quantity(name, 'redshift', redshift, source, kind = 'host')
if args.update:
journal_events()
journal_events()
if do_task(task, 'psmds'):
with open('../sne-external/MDS/apj506838t1_mrt.txt') as f:
for ri, row in enumerate(tq(f.read().splitlines(), currenttask)):
if ri < 35:
continue
cols = [x.strip() for x in row.split(',')]
name = add_event(cols[0])
source = add_source(name, bibcode = '2015ApJ...799..208S')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', cols[2], source)
add_quantity(name, 'dec', cols[3], source)
astrot = astrotime(float(cols[4]), format='mjd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'redshift', cols[5], source, kind = 'spectroscopic')
add_quantity(name, 'claimedtype', 'II P', source)
journal_events()
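# The CRTS import scrapes the Catalina/MLS/SSS AllSN pages, pulling names, coordinates, an
# optional host magnitude and cross-identifications from the table, then parses each
# object's light-curve page by extracting the showx/showy/showz javascript values (time,
# magnitude, error). The 53249.0 offset added to the extracted times is evidently the zero
# point the CSS plots use to recover MJD; zero errors are treated as upper limits.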
if do_task(task, 'crts'):
crtsnameerrors = ['2011ax']
folders = ["catalina", "MLS", "SSS"]
for fold in tq(folders, currenttask):
html = load_cached_url("http://nesssi.cacr.caltech.edu/" + fold + "/AllSN.html", '../sne-external/CRTS/' + fold + '.html')
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
if not tds:
continue
refs = []
aliases = []
ttype = ''
ctype = ''
for tdi, td in enumerate(tds):
if tdi == 0:
crtsname = td.contents[0].text.strip()
elif tdi == 1:
ra = td.contents[0]
elif tdi == 2:
dec = td.contents[0]
elif tdi == 11:
lclink = td.find('a')['onclick']
lclink = lclink.split("'")[1]
elif tdi == 13:
aliases = re.sub('[()]', '', re.sub('<[^<]+?>', '', td.contents[-1].strip()))
aliases = [x.strip('; ') for x in list(filter(None, aliases.split(' ')))]
name = ''
hostmag = ''
hostupper = False
validaliases = []
for ai, alias in enumerate(aliases):
if alias in ['SN', 'SDSS']:
continue
if alias in crtsnameerrors:
continue
if alias == 'mag':
if ai < len(aliases) - 1:
ind = ai+1
if aliases[ai+1] in ['SDSS']:
ind = ai+2
elif aliases[ai+1] in ['gal', 'obj', 'object', 'source']:
ind = ai-1
if '>' in aliases[ind]:
hostupper = True
hostmag = aliases[ind].strip('>~').replace(',', '.')
continue
if is_number(alias[:4]) and alias[:2] == '20' and len(alias) > 4:
name = 'SN' + alias
lalias = alias.lower()
if (('asassn' in alias and len(alias) > 6) or ('ptf' in alias and len(alias) > 3) or
('ps1' in alias and len(alias) > 3) or 'snhunt' in alias or
('mls' in alias and len(alias) > 3) or 'gaia' in alias or ('lsq' in alias and len(alias) > 3)):
alias = alias.replace('SNHunt', 'SNhunt')
validaliases.append(alias)
if not name:
name = crtsname
name = add_event(name)
source = add_source(name, refname = 'Catalina Sky Survey', bibcode = '2009ApJ...696..870D',
url = 'http://nesssi.cacr.caltech.edu/catalina/AllSN.html')
add_quantity(name, 'alias', name, source)
for alias in validaliases:
add_quantity(name, 'alias', alias, source)
add_quantity(name, 'ra', ra, source, unit = 'floatdegrees')
add_quantity(name, 'dec', dec, source, unit = 'floatdegrees')
if hostmag:
# 1.0 magnitude error adopted based on the Drake et al. 2009 assertion that SNe are only considered real if they are 2 mags brighter than their host.
add_photometry(name, band = 'C', magnitude = hostmag, e_magnitude = 1.0, source = source, host = True,
telescope = 'Catalina Schmidt', upperlimit = hostupper)
fname2 = '../sne-external/' + fold + '/' + lclink.split('.')[-2].rstrip('p').split('/')[-1] + '.html'
if not args.fullrefresh and archived_task('crts') and os.path.isfile(fname2):
with open(fname2, 'r') as f:
html2 = f.read()
else:
with open(fname2, 'w') as f:
response2 = urllib.request.urlopen(lclink)
html2 = response2.read().decode('utf-8')
f.write(html2)
lines = html2.splitlines()
for line in lines:
if 'javascript:showx' in line:
mjdstr = re.search("showx\('(.*?)'\)", line).group(1).split('(')[0].strip()
if not is_number(mjdstr):
continue
mjd = str(Decimal(mjdstr) + Decimal(53249.0))
else:
continue
if 'javascript:showy' in line:
mag = re.search("showy\('(.*?)'\)", line).group(1)
if 'javascript:showz' in line:
err = re.search("showz\('(.*?)'\)", line).group(1)
add_photometry(name, time = mjd, band = 'C', magnitude = mag, source = source, includeshost = True,
telescope = 'Catalina Schmidt', e_magnitude = err if float(err) > 0.0 else '', upperlimit = (float(err) == 0.0))
if args.update:
journal_events()
journal_events()
if do_task(task, 'snhunt'):
html = load_cached_url('http://nesssi.cacr.caltech.edu/catalina/current.html', '../sne-external/SNhunt/current.html')
if not html:
continue
text = html.splitlines()
findtable = False
for ri, row in enumerate(text):
if 'Supernova Discoveries' in row:
findtable = True
if findtable and '<table' in row:
tstart = ri+1
if findtable and '</table>' in row:
tend = ri-1
tablestr = '<html><body><table>'
for row in text[tstart:tend]:
if row[:3] == 'tr>':
tablestr = tablestr + '<tr>' + row[3:]
else:
tablestr = tablestr + row
tablestr = tablestr + '</table></body></html>'
bs = BeautifulSoup(tablestr, 'html5lib')
trs = bs.find('table').findAll('tr')
for tr in tq(trs, currenttask):
cols = [str(x.text) for x in tr.findAll('td')]
if not cols:
continue
name = re.sub('<[^<]+?>', '', cols[4]).strip().replace(' ', '').replace('SNHunt', 'SNhunt')
name = add_event(name)
source = add_source(name, refname = 'Supernova Hunt', url = 'http://nesssi.cacr.caltech.edu/catalina/current.html')
add_quantity(name, 'alias', name, source)
host = re.sub('<[^<]+?>', '', cols[1]).strip().replace('_', ' ')
add_quantity(name, 'host', host, source)
add_quantity(name, 'ra', cols[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', cols[3], source, unit = 'floatdegrees')
dd = cols[0]
discoverdate = dd[:4] + '/' + dd[4:6] + '/' + dd[6:8]
add_quantity(name, 'discoverdate', discoverdate, source)
discoverers = cols[5].split('/')
# Credit CRTS once as the discovering survey rather than once per listed discoverer.
add_quantity(name, 'discoverer', 'CRTS', source)
for discoverer in discoverers:
add_quantity(name, 'discoverer', discoverer, source)
if args.update:
journal_events()
journal_events()
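# The NED-D distance compilation is a CSV with the host name in column 3, the distance
# modulus, its error and the metric distance in columns 4-6, and the source bibcode in
# column 8. Distances are cached per cleaned host name in nedddict, presumably for use by
# later tasks, and SN/SNLS-named hosts also get a comovingdist entry directly.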
if do_task(task, 'nedd'):
f = open("../sne-external/NED25.12.1-D-10.4.0-20151123.csv", 'r')
data = csv.reader(f, delimiter=',', quotechar='"')
reference = "NED-D"
refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
nedddict = OrderedDict()
oldhostname = ''
for r, row in enumerate(data):
if r <= 12:
continue
hostname = row[3]
if args.update and oldhostname != hostname:
journal_events()
distmod = row[4]
moderr = row[5]
dist = row[6]
bibcode = unescape(row[8])
name = ''
if hostname.startswith('SN '):
if is_number(hostname[3:7]):
name = 'SN' + hostname[3:]
else:
name = hostname[3:]
elif hostname.startswith('SNLS '):
name = 'SNLS-' + hostname[5:].split()[0]
else:
cleanhost = hostname.replace('MESSIER 0', 'M').replace('MESSIER ', 'M').strip()
if True in [x in cleanhost for x in ['UGC', 'PGC', 'IC']]:
cleanhost = ' '.join([x.lstrip('0') for x in cleanhost.split()])
if 'ESO' in cleanhost:
cleanhost = cleanhost.replace(' ', '').replace('ESO', 'ESO ')
nedddict.setdefault(cleanhost,[]).append(Decimal(dist))
if name:
name = add_event(name)
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if bibcode:
source = add_source(name, bibcode = bibcode)
sources = uniq_cdl([source, secondarysource])
else:
sources = secondarysource
add_quantity(name, 'comovingdist', dist, sources)
oldhostname = hostname
journal_events()
# Import CPCS
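# The Cambridge Photometric Calibration Server is queried through its JSON endpoints:
# list_of_alerts gives ivorn names and coordinates, and get_alert_lc_data returns parallel
# mjd/mag/magerr/filter/observatory arrays for each alert id. The hashtag parameter
# appears to be an access token for the service.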
if do_task(task, 'cpcs'):
jsontxt = load_cached_url("http://gsaweb.ast.cam.ac.uk/followup/list_of_alerts?format=json&num=100000&published=1&observed_only=1&hashtag=JG_530ad9462a0b8785bfb385614bf178c6",
"../sne-external/CPCS/index.json")
if not jsontxt:
continue
alertindex = json.loads(jsontxt, object_pairs_hook=OrderedDict)
ids = [x["id"] for x in alertindex]
for i, ai in enumerate(tq(ids, currenttask)):
name = alertindex[i]['ivorn'].split('/')[-1].strip()
# Skip a few weird entries
if name == 'ASASSNli':
continue
# Just use a whitelist for now since naming seems inconsistent
if True in [x in name.upper() for x in ['GAIA', 'OGLE', 'ASASSN', 'MASTER', 'OTJ', 'PS1', 'IPTF']]:
name = name.replace('Verif', '').replace('_', ' ')
if 'ASASSN' in name and name[6] != '-':
name = 'ASASSN-' + name[6:]
if 'MASTEROTJ' in name:
name = name.replace('MASTEROTJ', 'MASTER OT J')
if 'OTJ' in name:
name = name.replace('OTJ', 'MASTER OT J')
if name.upper().startswith('IPTF'):
name = 'iPTF' + name[4:]
# Only add data for events that already exist in the catalog (i.e. are classified as SNe).
if not event_exists(name):
continue
name = add_event(name)
else:
continue
secondarysource = add_source(name, refname = 'Cambridge Photometric Calibration Server', url = 'http://gsaweb.ast.cam.ac.uk/followup/', secondary = True)
add_quantity(name, 'alias', name, secondarysource)
add_quantity(name, 'ra', str(alertindex[i]['ra']), secondarysource, unit = 'floatdegrees')
add_quantity(name, 'dec', str(alertindex[i]['dec']), secondarysource, unit = 'floatdegrees')
alerturl = "http://gsaweb.ast.cam.ac.uk/followup/get_alert_lc_data?alert_id=" + str(ai)
source = add_source(name, refname = 'CPCS Alert ' + str(ai), url = alerturl)
fname = '../sne-external/CPCS/alert-' + str(ai).zfill(2) + '.json'
if archived_task('cpcs') and os.path.isfile(fname):
with open(fname, 'r') as f:
jsonstr = f.read()
else:
session = requests.Session()
response = session.get(alerturl + "&hashtag=JG_530ad9462a0b8785bfb385614bf178c6")
with open(fname, 'w') as f:
jsonstr = response.text
f.write(jsonstr)
try:
cpcsalert = json.loads(jsonstr)
except:
continue
mjds = [round_sig(x, sig=9) for x in cpcsalert['mjd']]
mags = [round_sig(x, sig=6) for x in cpcsalert['mag']]
errs = [round_sig(x, sig=6) if (is_number(x) and float(x) > 0.0) else '' for x in cpcsalert['magerr']]
bnds = cpcsalert['filter']
obs = cpcsalert['observatory']
for mi, mjd in enumerate(mjds):
add_photometry(name, time = mjd, magnitude = mags[mi], e_magnitude = errs[mi],
band = bnds[mi], observatory = obs[mi], source = uniq_cdl([source,secondarysource]))
if args.update:
journal_events()
journal_events()
if do_task(task, 'ptf'):
#response = urllib.request.urlopen("http://wiserep.weizmann.ac.il/objects/list")
#bs = BeautifulSoup(response, "html5lib")
#select = bs.find('select', {"name":"objid"})
#options = select.findAll('option')
#for option in options:
# print(option.text)
# name = option.text
# if ((name.startswith('PTF') and is_number(name[3:5])) or
# name.startswith('PTFS') or name.startswith('iPTF')):
# name = add_event(name)
if archived_task('ptf'):
with open('../sne-external/PTF/update.html', 'r') as f:
html = f.read()
else:
session = requests.Session()
response = session.get("http://wiserep.weizmann.ac.il/spectra/update")
html = response.text
with open('../sne-external/PTF/update.html', 'w') as f:
f.write(html)
bs = BeautifulSoup(html, "html5lib")
select = bs.find('select', {"name":"objid"})
options = select.findAll('option')
for option in options:
name = option.text
if ((name.startswith('PTF') and is_number(name[3:5])) or
name.startswith('PTFS') or name.startswith('iPTF')):
if '(' in name:
alias = name.split('(')[0].strip(' ')
name = name.split('(')[-1].strip(') ').replace('sn', 'SN')
name = add_event(name)
source = add_source(name, bibcode = '2012PASP..124..668Y')
add_quantity(name, 'alias', alias, source)
else:
name = add_event(name)
with open('../sne-external/PTF/old-ptf-events.csv') as f:
for suffix in f.read().splitlines():
name = add_event('PTF' + suffix)
with open('../sne-external/PTF/perly-2016.csv') as f:
for row in f.read().splitlines():
cols = [x.strip() for x in row.split(',')]
alias = ''
if cols[8]:
name = cols[8]
alias = 'PTF' + cols[0]
else:
name = 'PTF' + cols[0]
name = add_event(name)
source = add_source(name, bibcode = '2016arXiv160408207P')
add_quantity(name, 'alias', name, source)
if alias:
add_quantity(name, 'alias', alias, source)
add_quantity(name, 'ra', cols[1], source)
add_quantity(name, 'dec', cols[2], source)
add_quantity(name, 'claimedtype', 'SLSN-' + cols[3], source)
add_quantity(name, 'redshift', cols[4], source, kind = 'spectroscopic')
maxdate = cols[6].replace('-', '/')
add_quantity(name, 'maxdate', maxdate.lstrip('<'), source, upperlimit = maxdate.startswith('<'))
add_quantity(name, 'ebv', cols[7], source, kind = 'spectroscopic')
journal_events()
if do_task(task, 'des'):
html = load_cached_url("https://portal.nersc.gov/des-sn/transients/", "../sne-external/DES/transients.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.find('tbody').findAll('tr')
for tri, tr in enumerate(tq(trs, currenttask)):
name = ''
source = ''
if tri == 0:
continue
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if tdi == 0:
name = add_event(td.text.strip())
if tdi == 1:
(ra, dec) = [x.strip() for x in td.text.split('\xa0')]
if tdi == 6:
atellink = td.find('a')
if atellink:
atellink = atellink['href']
else:
atellink = ''
sources = [add_source(name, url = 'https://portal.nersc.gov/des-sn/', refname = 'DES Bright Transients',
acknowledgment = 'http://www.noao.edu/noao/library/NOAO_Publications_Acknowledgments.html#DESdatause')]
if atellink:
sources.append(add_source(name, refname = 'ATel ' + atellink.split('=')[-1], url = atellink))
sources += [add_source(name, bibcode = '2012ApJ...753..152B'),
add_source(name, bibcode = '2015AJ....150..150F'),
add_source(name, bibcode = '2015AJ....150...82G'),
add_source(name, bibcode = '2015AJ....150..172K')]
sources = ','.join(sources)
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
html2 = load_cached_url("https://portal.nersc.gov/des-sn/transients/" + name, "../sne-external/DES/" + name + ".html")
if not html2:
continue
lines = html2.splitlines()
for line in lines:
if 'var data = ' in line:
jsontxt = json.loads(line.split('=')[-1].rstrip(';'))
for i, band in enumerate(jsontxt['band']):
add_photometry(name, time = jsontxt['mjd'][i], magnitude = jsontxt['mag'][i], e_magnitude = jsontxt['mag_error'][i],
band = band, observatory = 'CTIO', telescope = 'Blanco 4m', instrument = 'DECam',
upperlimit = True if float(jsontxt['snr'][i]) <= 3.0 else '', source = sources)
journal_events()
if do_task(task, 'asassn'):
html = load_cached_url("http://www.astronomy.ohio-state.edu/~assassin/sn_list.html", "../sne-external/ASASSN/sn_list.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.find('table').findAll('tr')
for tri, tr in enumerate(tq(trs, currenttask)):
name = ''
source = ''
ra = ''
dec = ''
redshift = ''
hostoff = ''
claimedtype = ''
host = ''
atellink = ''
typelink = ''
if tri == 0:
continue
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if tdi == 1:
name = add_event(td.text.strip())
atellink = td.find('a')
if atellink:
atellink = atellink['href']
else:
atellink = ''
if tdi == 2:
discdate = td.text.replace('-', '/')
if tdi == 3:
ra = td.text
if tdi == 4:
dec = td.text
if tdi == 5:
redshift = td.text
if tdi == 8:
hostoff = td.text
if tdi == 9:
claimedtype = td.text
typelink = td.find('a')
if typelink:
typelink = typelink['href']
else:
typelink = ''
if tdi == 12:
host = td.text
sources = [add_source(name, url = 'http://www.astronomy.ohio-state.edu/~assassin/sn_list.html', refname = 'ASAS-SN Supernovae')]
typesources = sources[:]
if atellink:
sources.append(add_source(name, refname = 'ATel ' + atellink.split('=')[-1], url = atellink))
if typelink:
typesources.append(add_source(name, refname = 'ATel ' + typelink.split('=')[-1], url = typelink))
sources = ','.join(sources)
typesources = ','.join(typesources)
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'discoverdate', discdate, sources)
add_quantity(name, 'ra', ra, sources, unit = 'floatdegrees')
add_quantity(name, 'dec', dec, sources, unit = 'floatdegrees')
add_quantity(name, 'redshift', redshift, sources)
add_quantity(name, 'hostoffset', hostoff, sources, unit = 'arcseconds')
for ct in claimedtype.split('/'):
if ct != 'Unk':
add_quantity(name, 'claimedtype', ct, typesources)
if host != 'Uncatalogued':
add_quantity(name, 'host', host, sources)
journal_events()
if do_task(task, 'asiagospectra'):
html = load_cached_url("http://sngroup.oapd.inaf.it./cgi-bin/output_class.cgi?sn=1990", "../sne-external-spectra/Asiago/spectra.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
name = ''
host = ''
fitsurl = ''
source = ''
reference = ''
for tdi, td in enumerate(tds):
if tdi == 0:
butt = td.find('button')
if not butt:
break
alias = butt.text.strip()
alias = alias.replace('PSNJ', 'PSN J').replace('GAIA', 'Gaia')
elif tdi == 1:
name = td.text.strip().replace('PSNJ', 'PSN J').replace('GAIA', 'Gaia')
if name.startswith('SN '):
name = 'SN' + name[3:]
if not name:
name = alias
if is_number(name[:4]):
name = 'SN' + name
name = add_event(name)
reference = 'Asiago Supernova Catalogue'
refurl = 'http://graspa.oapd.inaf.it/cgi-bin/sncat.php'
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if alias != name:
add_quantity(name, 'alias', alias, secondarysource)
elif tdi == 2:
host = td.text.strip()
if host == 'anonymous':
host = ''
elif tdi == 3:
discoverer = td.text.strip()
elif tdi == 5:
ra = td.text.strip()
elif tdi == 6:
dec = td.text.strip()
elif tdi == 7:
claimedtype = td.text.strip()
elif tdi == 8:
redshift = td.text.strip()
elif tdi == 9:
epochstr = td.text.strip()
if epochstr:
mjd = (astrotime(epochstr[:4] + '-' + epochstr[4:6] + '-' + str(floor(float(epochstr[6:]))).zfill(2)).mjd +
float(epochstr[6:]) - floor(float(epochstr[6:])))
else:
mjd = ''
elif tdi == 10:
refs = td.findAll('a')
source = ''
reference = ''
refurl = ''
for ref in refs:
if ref.text != 'REF':
reference = ref.text
refurl = ref['href']
if reference:
source = add_source(name, refname = reference, url = refurl)
add_quantity(name, 'alias', name, secondarysource)
sources = uniq_cdl(list(filter(None, [source, secondarysource])))
elif tdi == 12:
fitslink = td.find('a')
if fitslink:
fitsurl = fitslink['href']
if name:
add_quantity(name, 'claimedtype', claimedtype, sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
add_quantity(name, 'redshift', redshift, sources)
add_quantity(name, 'discoverer', discoverer, sources)
add_quantity(name, 'host', host, sources)
#if fitsurl:
# response = urllib.request.urlopen("http://sngroup.oapd.inaf.it./" + fitsurl)
# compressed = io.BytesIO(response.read())
# decompressed = gzip.GzipFile(fileobj=compressed)
# hdulist = fits.open(decompressed)
# scidata = hdulist[0].data
# print(hdulist[0].header)
# print(scidata[3])
# sys.exit()
journal_events()
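# The WISeREP import walks locally mirrored object pages: each spectrum row supplies the
# object name, type, instrument, epoch, observer/reducer and the ascii spectrum filename,
# which is matched against the files in the same folder. Redshift and the publishing
# bibcode are recovered from the row's embedded query string, with known-bad bibcodes
# remapped through wiserepbibcorrectdict.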
if do_task(task, 'wiserepspectra'):
secondaryreference = 'WISeREP'
secondaryrefurl = 'http://wiserep.weizmann.ac.il/'
secondarybibcode = '2012PASP..124..668Y'
wiserepcnt = 0
# These are known to be in error on the WISeREP page, either fix or ignore them.
wiserepbibcorrectdict = {'2000AJ....120..367G]':'2000AJ....120..367G',
'Harutyunyan+et+al.+2008':'2008A&A...488..383H',
'0609268':'2007AJ....133...58K',
'2006ApJ...636...400Q':'2006ApJ...636..400Q',
'2011ApJ...741...76':'2011ApJ...741...76C',
'2016PASP...128...961':'2016PASP..128...961',
'2002AJ....1124..417H':'2002AJ....1124.417H',
'2013ApJ…774…58D':'2013ApJ...774...58D',
'2011Sci.333..856S':'2011Sci...333..856S',
'2014MNRAS.438,368':'2014MNRAS.438..368T',
'2012MNRAS.420.1135':'2012MNRAS.420.1135S',
'2012Sci..337..942D':'2012Sci...337..942D',
'stt1839':''}
oldname = ''
for folder in tq(sorted(next(os.walk("../sne-external-WISEREP"))[1], key=lambda s: s.lower()), currenttask):
files = glob("../sne-external-WISEREP/" + folder + '/*')
for fname in tq(files, currenttask):
if '.html' in fname:
lfiles = deepcopy(files)
with open(fname, 'r') as f:
path = os.path.abspath(fname)
response = urllib.request.urlopen('file://' + path)
bs = BeautifulSoup(response, "html5lib")
trs = bs.findAll('tr', {'valign': 'top'})
for tri, tr in enumerate(trs):
if "Click to show/update object" in str(tr.contents):
claimedtype = ''
instrument = ''
epoch = ''
observer = ''
reducer = ''
specfile = ''
produceoutput = True
specpath = ''
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if td.contents:
if tdi == 3:
name = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 5:
claimedtype = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if claimedtype == 'SN':
claimedtype = ''
continue
if claimedtype[:3] == 'SN ':
claimedtype = claimedtype[3:].strip()
claimedtype = claimedtype.replace('-like', '').strip()
elif tdi == 9:
instrument = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 11:
epoch = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 13:
observer = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if observer == 'Unknown' or observer == 'Other':
observer = ''
elif tdi == 17:
reducer = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if reducer == 'Unknown' or reducer == 'Other':
reducer = ''
elif tdi == 25:
speclinks = td.findAll('a')
try:
for link in speclinks:
if 'Ascii' in link['href']:
specfile = link.contents[0].strip()
tfiles = deepcopy(lfiles)
for fi, fname in enumerate(lfiles):
if specfile in fname:
specpath = fname
del(tfiles[fi])
lfiles = deepcopy(tfiles)
raise(StopIteration)
except StopIteration:
pass
if not specpath:
warnings.warn('Spectrum file not found, "' + specfile + '"')
else:
continue
if "Spec Type:</span>" in str(tr.contents) and produceoutput:
produceoutput = False
trstr = str(tr)
result = re.search('redshift=(.*?)&', trstr)
redshift = ''
if result:
redshift = result.group(1)
if not is_number(redshift) or float(redshift) > 100.:
redshift = ''
result = re.search('publish=(.*?)&', trstr)
bibcode = ''
if result:
bibcode = unescape(urllib.parse.unquote(urllib.parse.unquote(result.group(1))).split('/')[-1])
if not bibcode:
biblink = tr.find('a', {'title': 'Link to NASA ADS'})
if biblink:
bibcode = biblink.contents[0]
if name.startswith('sn'):
name = 'SN' + name[2:]
if name.startswith(('CSS', 'SSS', 'MLS')) and ':' not in name:
name = name.replace('-', ':', 1)
if name.startswith('MASTERJ'):
name = name.replace('MASTERJ', 'MASTER OT J')
if name.startswith('PSNJ'):
name = name.replace('PSNJ', 'PSN J')
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
#print(name + " " + claimedtype + " " + epoch + " " + observer + " " + reducer + " " + specfile + " " + bibcode + " " + redshift)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if bibcode:
newbibcode = bibcode
if bibcode in wiserepbibcorrectdict:
newbibcode = wiserepbibcorrectdict[bibcode]
if newbibcode:
source = add_source(name, bibcode = unescape(newbibcode))
else:
source = add_source(name, refname = unescape(bibcode))
sources = uniq_cdl([source, secondarysource])
else:
sources = secondarysource
if claimedtype not in ['Other']:
add_quantity(name, 'claimedtype', claimedtype, secondarysource)
add_quantity(name, 'redshift', redshift, secondarysource)
if not specpath:
continue
with open(specpath,'r') as f:
data = [x.split() for x in f]
skipspec = False
newdata = []
oldval = ''
for row in data:
if row and '#' not in row[0]:
if len(row) >= 2 and is_number(row[0]) and is_number(row[1]) and row[1] != oldval:
newdata.append(row)
oldval = row[1]
if skipspec or not newdata:
warnings.warn('Skipped adding spectrum file ' + specfile)
continue
data = [list(i) for i in zip(*newdata)]
wavelengths = data[0]
fluxes = data[1]
errors = ''
if len(data) == 3:
errors = data[2]
time = str(astrotime(epoch).mjd)
if max([float(x) for x in fluxes]) < 1.0e-5:
fluxunit = 'erg/s/cm^2/Angstrom'
else:
fluxunit = 'Uncalibrated'
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = fluxunit, errors = errors, errorunit = fluxunit, wavelengths = wavelengths,
fluxes = fluxes, u_time = 'MJD', time = time, instrument = instrument, source = sources, observer = observer, reducer = reducer,
filename = specfile)
wiserepcnt = wiserepcnt + 1
if args.travis and wiserepcnt % travislimit == 0:
break
tprint('Unadded files: ' + str(len(lfiles) - 1) + "/" + str(len(files)-1))
tprint('WISeREP spectrum count: ' + str(wiserepcnt))
journal_events()
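# CfA Supernova Archive spectra: SN Ia, SN Ibc and miscellaneous folders are
# processed separately; observation dates and instruments are parsed from the
# file names.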
if do_task(task, 'cfaspectra'):
# Ia spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_SNIa"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_SNIa/" + name
origname = name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
if name.startswith('snf') and is_number(name[3:7]):
name = 'SNF' + name[3:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
if origname.startswith("sn") and is_number(origname[2:6]):
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
instrument = fileparts[2].split('.')[0]
else:
year = fileparts[2][:4]
month = fileparts[2][4:6]
day = fileparts[2][6:]
instrument = fileparts[3].split('.')[0]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
errors = data[2]
sources = uniq_cdl([source, add_source(name, bibcode = '2012AJ....143..126B'), add_source(name, bibcode = '2008AJ....135.1598M')])
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', filename = filename,
wavelengths = wavelengths, fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument,
errorunit = "ergs/s/cm^2/Angstrom", errors = errors, source = sources, dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
# Ibc spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_SNIbc"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_SNIbc/" + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
instrument = ''
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:].split('.')[0]
if len(fileparts) > 2:
instrument = fileparts[-1].split('.')[0]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
sources = uniq_cdl([source, add_source(name, bibcode = '2014AJ....147...99M')])
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths, filename = filename,
fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument, source = sources,
dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
# Other spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_Extra"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_Extra/" + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
if not os.path.isfile(fname):
continue
filename = os.path.basename(fname)
if (not filename.startswith('sn') or not filename.endswith('flm') or
any(x in filename for x in ['-interp', '-z', '-dered', '-obj', '-gal'])):
continue
fileparts = filename.split('.')[0].split('-')
instrument = ''
time = ''
if len(fileparts) > 1:
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
if is_number(year) and is_number(month) and is_number(day):
if len(fileparts) > 2:
instrument = fileparts[-1]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = [str(Decimal(x)*Decimal(1.0e-15)) for x in data[1]]
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths, filename = filename,
fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument, source = source,
dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
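# SNLS spectra: observation dates come from Vizier table J/A+A/507/85/table1;
# fluxes and errors in the ASCII files are scaled by 1e-16.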
if do_task(task, 'snlsspectra'):
result = Vizier.get_catalogs("J/A+A/507/85/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
datedict = {}
for row in table:
datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)
oldname = ''
for fi, fname in enumerate(tq(sorted(glob('../sne-external-spectra/SNLS/*'), key=lambda s: s.lower()), currenttask = currenttask)):
filename = os.path.basename(fname)
fileparts = filename.split('_')
name = 'SNLS-' + fileparts[1]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
source = add_source(name, bibcode = "2009A&A...507...85B")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + fileparts[1][:2], source)
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
specdata = []
for r, row in enumerate(data):
if row[0] == '@TELESCOPE':
telescope = row[1].strip()
elif row[0] == '@REDSHIFT':
add_quantity(name, 'redshift', row[1].strip(), source)
if r < 14:
continue
specdata.append(list(filter(None, [x.strip(' \t') for x in row])))
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[1]
fluxes = [pretty_num(float(x)*1.e-16, sig = get_sig_digits(x)) for x in specdata[2]]
errors = [pretty_num(float(x)*1.e-16, sig = get_sig_digits(x)) for x in specdata[3]]
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths,
fluxes = fluxes, u_time = 'MJD' if name in datedict else '', time = datedict[name] if name in datedict else '', telescope = telescope, source = source,
filename = filename)
if args.travis and fi >= travislimit:
break
journal_events()
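# Carnegie Supernova Project spectra: epoch (JD) and redshift are read from
# the file headers; the spectra are stored as deredshifted.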
if do_task(task, 'cspspectra'):
oldname = ''
for fi, fname in enumerate(tq(sorted(glob('../sne-external-spectra/CSP/*'), key=lambda s: s.lower()), currenttask = currenttask)):
filename = os.path.basename(fname)
sfile = filename.split('.')
if sfile[1] == 'txt':
continue
sfile = sfile[0]
fileparts = sfile.split('_')
name = 'SN20' + fileparts[0][2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
telescope = fileparts[-2]
instrument = fileparts[-1]
source = add_source(name, bibcode = "2013ApJ...773...53F")
add_quantity(name, 'alias', name, source)
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
specdata = []
for r, row in enumerate(data):
if row[0] == '#JDate_of_observation:':
jd = row[1].strip()
time = str(jd_to_mjd(Decimal(jd)))
elif row[0] == '#Redshift:':
add_quantity(name, 'redshift', row[1].strip(), source)
if r < 7:
continue
specdata.append(list(filter(None, [x.strip(' ') for x in row])))
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths,
fluxes = fluxes, telescope = telescope, instrument = instrument, source = source, deredshifted = True, filename = filename)
if args.travis and fi >= travislimit:
break
journal_events()
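# UCB SNDB spectra: metadata comes from the public JSON listing; individual
# spectra are downloaded, or read from the local cache when the task is
# archived.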
if do_task(task, 'ucbspectra'):
secondaryreference = "UCB Filippenko Group's Supernova Database (SNDB)"
secondaryrefurl = "http://heracles.astro.berkeley.edu/sndb/info"
secondaryrefbib = "2012MNRAS.425.1789S"
ucbspectracnt = 0
jsontxt = load_cached_url("http://heracles.astro.berkeley.edu/sndb/download?id=allpubspec",
'../sne-external-spectra/UCB/allpub.json')
if not jsontxt:
continue
spectra = json.loads(jsontxt)
spectra = sorted(spectra, key = lambda k: k['ObjName'])
oldname = ''
for spectrum in tq(spectra, currenttask = currenttask):
name = spectrum["ObjName"]
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondaryrefbib, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
sources = [secondarysource]
if spectrum["Reference"]:
sources += [add_source(name, bibcode = spectrum["Reference"])]
sources = uniq_cdl(sources)
if spectrum["Type"] and spectrum["Type"].strip() != "NoMatch":
for ct in spectrum["Type"].strip().split(','):
add_quantity(name, 'claimedtype', ct.replace('-norm', '').strip(), sources)
if spectrum["DiscDate"]:
add_quantity(name, 'discoverdate', spectrum["DiscDate"].replace('-', '/'), sources)
if spectrum["HostName"]:
add_quantity(name, 'host', urllib.parse.unquote(spectrum["HostName"]).replace('*', ''), sources)
if spectrum["UT_Date"]:
epoch = str(spectrum["UT_Date"])
year = epoch[:4]
month = epoch[4:6]
day = epoch[6:]
sig = get_sig_digits(day) + 5
mjd = pretty_num(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)), sig = sig)
filename = spectrum["Filename"] if spectrum["Filename"] else ''
instrument = spectrum["Instrument"] if spectrum["Instrument"] else ''
reducer = spectrum["Reducer"] if spectrum["Reducer"] else ''
observer = spectrum["Observer"] if spectrum["Observer"] else ''
snr = str(spectrum["SNR"]) if spectrum["SNR"] else ''
if not filename:
raise(ValueError('Filename not found for SNDB spectrum!'))
if not spectrum["SpecID"]:
raise(ValueError('ID not found for SNDB spectrum!'))
filepath = '../sne-external-spectra/UCB/' + filename
if archived_task('ucbspectra') and os.path.isfile(filepath):
with open(filepath, 'r') as f:
spectxt = f.read()
else:
session = requests.Session()
response = session.get("http://heracles.astro.berkeley.edu/sndb/download?id=ds:" + str(spectrum["SpecID"]))
spectxt = response.text
with open(filepath, 'w') as f:
f.write(spectxt)
specdata = list(csv.reader(spectxt.splitlines(), delimiter=' ', skipinitialspace=True))
startrow = 0
for row in specdata:
if row[0][0] == '#':
startrow += 1
else:
break
specdata = specdata[startrow:]
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
if not list(filter(None, errors)):
errors = ''
add_spectrum(name = name, u_time = 'MJD', time = mjd, waveunit = 'Angstrom', fluxunit = 'Uncalibrated',
wavelengths = wavelengths, filename = filename, fluxes = fluxes, errors = errors, errorunit = 'Uncalibrated',
instrument = instrument, source = sources, snr = snr, observer = observer, reducer = reducer,
deredshifted = ('-noz' in filename))
ucbspectracnt = ucbspectracnt + 1
if args.travis and ucbspectracnt >= travislimit:
break
journal_events()
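# SUSPECT spectra: bibcodes are resolved through sources.json (with filename
# corrections from filename-changes.txt); consecutive duplicate flux rows are
# dropped before ingestion.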
if do_task(task, 'suspectspectra'):
with open('../sne-external-spectra/Suspect/sources.json', 'r') as f:
sourcedict = json.loads(f.read())
with open('../sne-external-spectra/Suspect/filename-changes.txt', 'r') as f:
rows = f.readlines()
changedict = {}
for row in rows:
if not row.strip() or row[0] == "#":
continue
items = row.strip().split(' ')
changedict[items[1]] = items[0]
suspectcnt = 0
folders = next(os.walk('../sne-external-spectra/Suspect'))[1]
for folder in tq(folders, currenttask):
eventfolders = next(os.walk('../sne-external-spectra/Suspect/'+folder))[1]
oldname = ''
for eventfolder in tq(eventfolders, currenttask):
name = eventfolder
if is_number(name[:4]):
name = 'SN' + name
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondaryreference = "SUSPECT"
secondaryrefurl = "https://www.nhn.ou.edu/~suspect/"
secondarybibcode = "2001AAS...199.8408R"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
eventspectra = next(os.walk('../sne-external-spectra/Suspect/'+folder+'/'+eventfolder))[2]
for spectrum in eventspectra:
sources = [secondarysource]
bibcode = ''
if spectrum in changedict:
specalias = changedict[spectrum]
else:
specalias = spectrum
if specalias in sourcedict:
bibcode = sourcedict[specalias]
elif name in sourcedict:
bibcode = sourcedict[name]
if bibcode:
source = add_source(name, bibcode = unescape(bibcode))
sources += source
sources = uniq_cdl(sources)
date = spectrum.split('_')[1]
year = date[:4]
month = date[4:6]
day = date[6:]
sig = get_sig_digits(day) + 5
time = pretty_num(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)), sig = sig)
with open('../sne-external-spectra/Suspect/'+folder+'/'+eventfolder+'/'+spectrum) as f:
specdata = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
specdata = list(filter(None, specdata))
newspec = []
oldval = ''
for row in specdata:
if row[1] == oldval:
continue
newspec.append(row)
oldval = row[1]
specdata = newspec
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'Uncalibrated', wavelengths = wavelengths,
fluxes = fluxes, errors = errors, errorunit = 'Uncalibrated', source = sources, filename = spectrum)
suspectcnt = suspectcnt + 1
if args.travis and suspectcnt % travislimit == 0:
break
journal_events()
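# Nearby Supernova Factory spectra: a few epochs are hard-coded, otherwise the
# observation time, observer and instrument are parsed from header keywords.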
if do_task(task, 'snfspectra'):
eventfolders = next(os.walk('../sne-external-spectra/SNFactory'))[1]
bibcodes = {'SN2005gj':'2006ApJ...650..510A', 'SN2006D':'2007ApJ...654L..53T', 'SN2007if':'2010ApJ...713.1073S', 'SN2011fe':'2013A&A...554A..27P'}
oldname = ''
snfcnt = 0
for eventfolder in eventfolders:
name = eventfolder
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondaryreference = "Nearby Supernova Factory"
secondaryrefurl = "http://snfactory.lbl.gov/"
secondarybibcode = "2002SPIE.4836...61A"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
bibcode = bibcodes[name]
source = add_source(name, bibcode = bibcode)
sources = uniq_cdl([source,secondarysource])
eventspectra = glob('../sne-external-spectra/SNFactory/'+eventfolder+'/*.dat')
for spectrum in eventspectra:
filename = os.path.basename(spectrum)
with open(spectrum) as f:
specdata = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
specdata = list(filter(None, specdata))
newspec = []
time = ''
telescope = ''
instrument = ''
observer = ''
observatory = ''
if 'Keck_20060202_R' in spectrum:
time = '53768.23469'
elif 'Spectrum05_276' in spectrum:
time = pretty_num(astrotime('2005-10-03').mjd, sig = 5)
elif 'Spectrum05_329' in spectrum:
time = pretty_num(astrotime('2005-11-25').mjd, sig = 5)
elif 'Spectrum05_336' in spectrum:
time = pretty_num(astrotime('2005-12-02').mjd, sig = 5)
for row in specdata:
if row[0][0] == '#':
joinrow = (' '.join(row)).split('=')
if len(joinrow) < 2:
continue
field = joinrow[0].strip('# ')
value = joinrow[1].split('/')[0].strip("' ")
if not time:
if field == 'JD':
time = str(jd_to_mjd(Decimal(value)))
elif field == 'MJD':
time = value
elif field == 'MJD-OBS':
time = value
if field == 'OBSERVER':
observer = value.capitalize()
if field == 'OBSERVAT':
observatory = value.capitalize()
if field == 'TELESCOP':
telescope = value.capitalize()
if field == 'INSTRUME':
instrument = value.capitalize()
else:
newspec.append(row)
if not time:
raise(ValueError('Time missing from spectrum.'))
specdata = newspec
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom',
wavelengths = wavelengths, fluxes = fluxes, errors = errors, observer = observer, observatory = observatory,
telescope = telescope, instrument = instrument,
errorunit = ('Variance' if name == 'SN2011fe' else 'erg/s/cm^2/Angstrom'), source = sources, filename = filename)
snfcnt = snfcnt + 1
if args.travis and snfcnt % travislimit == 0:
break
journal_events()
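# Superfit template spectra: only added for events without existing spectra;
# epochs are converted to MJD relative to maximum light when available.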
if do_task(task, 'superfitspectra'):
sfdirs = glob('../sne-external-spectra/superfit/*')
for sfdir in tq(sfdirs, currenttask = currenttask):
sffiles = sorted(glob(sfdir + "/*.dat"))
lastname = ''
oldname = ''
for sffile in tq(sffiles, currenttask = currenttask):
basename = os.path.basename(sffile)
name = basename.split('.')[0]
if name.startswith('sn'):
name = 'SN' + name[2:]
if len(name) == 7:
name = name[:6] + name[6].upper()
elif name.startswith('ptf'):
name = 'PTF' + name[3:]
if 'theory' in name:
continue
if event_exists(name):
prefname = get_preferred_name(name)
if 'spectra' in events[prefname] and lastname != prefname:
continue
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
epoch = basename.split('.')[1]
(mldt, mlmag, mlband, mlsource) = get_max_light(name)
if mldt:
epoff = Decimal(0.0) if epoch == 'max' else (Decimal(epoch[1:]) if epoch[0] == 'p' else -Decimal(epoch[1:]))
else:
epoff = ''
source = add_source(name, refname = 'Superfit', url = 'http://www.dahowell.com/superfit.html', secondary = True)
add_quantity(name, 'alias', name, source)
with open(sffile) as f:
rows = f.read().splitlines()
specdata = []
for row in rows:
if row.strip():
specdata.append(list(filter(None,re.split('\t+|\s+', row, maxsplit=0))))
specdata = [[x.replace('D','E') for x in list(i)] for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
mlmjd = str(Decimal(astrotime('-'.join([str(mldt.year), str(mldt.month), str(mldt.day)])).mjd) + epoff) if (epoff != '') else ''
add_spectrum(name, u_time = 'MJD' if mlmjd else '', time = mlmjd, waveunit = 'Angstrom', fluxunit = 'Uncalibrated',
wavelengths = wavelengths, fluxes = fluxes, source = source)
lastname = name
journal_events()
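# Final housekeeping tasks: merge duplicate events, set preferred names, then
# sanitize/derive quantities for each event file and write out the results.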
if do_task(task, 'mergeduplicates'):
if args.update and not len(events):
tprint('No sources changed, event files unchanged in update.')
sys.exit(1)
merge_duplicates()
if do_task(task, 'setprefnames'):
set_preferred_names()
files = repo_file_list()
path = '../bibauthors.json'
if os.path.isfile(path):
with open(path, 'r') as f:
bibauthordict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
bibauthordict = OrderedDict()
path = '../extinctions.json'
if os.path.isfile(path):
with open(path, 'r') as f:
extinctionsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
extinctionsdict = OrderedDict()
for fi in tq(files, 'Sanitizing and deriving quantities for events'):
events = OrderedDict()
name = os.path.basename(os.path.splitext(fi)[0]).replace('.json', '')
name = add_event(name, loadifempty = False)
derive_and_sanitize()
if has_task('writeevents'):
write_all_events(empty = True, gz = True, bury = True)
jsonstring = json.dumps(bibauthordict, indent='\t', separators=(',', ':'), ensure_ascii=False)
with codecs.open('../bibauthors.json', 'w', encoding='utf8') as f:
f.write(jsonstring)
jsonstring = json.dumps(extinctionsdict, indent='\t', separators=(',', ':'), ensure_ascii=False)
with codecs.open('../extinctions.json', 'w', encoding='utf8') as f:
f.write(jsonstring)
print("Memory used (MBs on Mac, GBs on Linux): " + "{:,}".format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024./1024.))
sys.exit(0)
| mit | 6,726,496,294,048,525,000 | 49.503873 | 561 | 0.497728 | false |
dparks1134/STAMP | stamp/plugins/samples/plots/SeqHistogram.py | 1 | 9822 | #=======================================================================
# Author: Donovan Parks
#
# Sequence histogram plot.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
import sys
import math
from PyQt4 import QtCore, QtGui
from stamp.plugins.samples.AbstractSamplePlotPlugin import AbstractSamplePlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.samples.plots.configGUI.seqHistogramUI import Ui_SeqHistogramDialog
class SeqHistogram(AbstractSamplePlotPlugin):
'''
Sequence histogram plot.
'''
def __init__(self, preferences, parent=None):
AbstractSamplePlotPlugin.__init__(self, preferences, parent)
self.preferences = preferences
self.name = 'Sequence histogram'
self.type = 'Exploratory'
self.settings = preferences['Settings']
self.figWidth = self.settings.value(self.name + '/width', 7.0).toDouble()[0]
self.figHeight = self.settings.value(self.name + '/height', 7.0).toDouble()[0]
self.bCustomBinWidth = self.settings.value(self.name + '/custom bin width', False).toBool()
self.binWidth = self.settings.value(self.name + '/bin width', 100.0).toDouble()[0]
self.yAxisLogScale = self.settings.value(self.name + '/log scale', False).toBool()
self.bCustomXaxis = self.settings.value(self.name + '/custom x-axis extents', False).toBool()
self.xLimitLeft = self.settings.value(self.name + '/min value', 0.0).toDouble()[0]
self.xLimitRight = self.settings.value(self.name + '/max value', 1.0).toDouble()[0]
self.legendPos = self.settings.value(self.name + '/legend position', 0).toInt()[0]
def mirrorProperties(self, plotToCopy):
self.name = plotToCopy.name
self.figWidth = plotToCopy.figWidth
self.figHeight = plotToCopy.figHeight
self.bCustomBinWidth = plotToCopy.bCustomBinWidth
self.binWidth = plotToCopy.binWidth
self.yAxisLogScale = plotToCopy.yAxisLogScale
self.bCustomXaxis = plotToCopy.bCustomXaxis
self.xLimitLeft = plotToCopy.xLimitLeft
self.xLimitRight = plotToCopy.xLimitRight
self.legendPos = plotToCopy.legendPos
def plot(self, profile, statsResults):
if len(profile.profileDict) <= 0:
self.emptyAxis()
return
# *** Colour of plot elements
axesColour = str(self.preferences['Axes colour'].name())
profile1Colour = str(self.preferences['Sample 1 colour'].name())
profile2Colour = str(self.preferences['Sample 2 colour'].name())
# *** Get sequence counts
seqs1 = profile.getSequenceCounts(0)
seqs2 = profile.getSequenceCounts(1)
# *** Set x-axis limit
self.xMin = min(min(seqs1),min(seqs2))
if self.xLimitLeft == None:
self.xLimitLeft = self.xMin
self.xMax = max(max(seqs1),max(seqs2))
if self.xLimitRight == None:
self.xLimitRight = self.xMax
# Set bin width
if not self.bCustomBinWidth:
self.binWidth = (self.xMax - self.xMin) / 40
# *** Set size of figure
self.fig.clear()
self.fig.set_size_inches(self.figWidth, self.figHeight)
heightBottomLabels = 0.4 # inches
widthSideLabel = 0.5 # inches
padding = 0.2 # inches
axesHist = self.fig.add_axes([widthSideLabel/self.figWidth,heightBottomLabels/self.figHeight,\
1.0-(widthSideLabel+padding)/self.figWidth,\
1.0-(heightBottomLabels+padding)/self.figHeight])
# *** Histogram plot
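# Bin edges run from 0 to one bin past xMax in steps of binWidth.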
bins = [0]
binEnd = self.binWidth
while binEnd <= self.xMax:
bins.append(binEnd)
binEnd += self.binWidth
bins.append(binEnd)
n, b, patches = axesHist.hist([seqs1, seqs2], bins=bins, log=self.yAxisLogScale)
for patch in patches[0]:
patch.set_facecolor(profile1Colour)
for patch in patches[1]:
patch.set_facecolor(profile2Colour)
if self.bCustomXaxis:
axesHist.set_xlim(self.xLimitLeft, self.xLimitRight)
axesHist.set_xlabel('Sequences')
axesHist.set_ylabel('Number of features')
# *** Prettify plot
if self.legendPos != -1:
legend = axesHist.legend([patches[0][0], patches[1][0]], (profile.sampleNames[0], profile.sampleNames[1]), loc=self.legendPos)
legend.get_frame().set_linewidth(0)
for a in axesHist.yaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for a in axesHist.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for line in axesHist.yaxis.get_ticklines():
line.set_color(axesColour)
for line in axesHist.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axesHist.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
self.profile = profile
self.configDlg = ConfigureDialog(Ui_SeqHistogramDialog)
self.connect(self.configDlg.ui.chkCustomBinWidth, QtCore.SIGNAL('toggled(bool)'), self.changeCustomBinWidth)
self.connect(self.configDlg.ui.chkCustomXaxis, QtCore.SIGNAL('toggled(bool)'), self.changeCustomXaxis)
self.connect(self.configDlg.ui.btnXmin, QtCore.SIGNAL('clicked()'), self.setXaxisMin)
self.connect(self.configDlg.ui.btnXmax, QtCore.SIGNAL('clicked()'), self.setXaxisMax)
self.configDlg.ui.spinFigWidth.setValue(self.figWidth)
self.configDlg.ui.spinFigHeight.setValue(self.figHeight)
self.configDlg.ui.chkCustomBinWidth.setChecked(self.bCustomBinWidth)
self.configDlg.ui.spinBinWidth.setValue(self.binWidth)
self.configDlg.ui.chkLogScale.setChecked(self.yAxisLogScale)
self.configDlg.ui.chkCustomXaxis.setChecked(self.bCustomXaxis)
self.configDlg.ui.spinXmin.setValue(self.xLimitLeft)
self.configDlg.ui.spinXmax.setValue(self.xLimitRight)
self.changeCustomBinWidth()
self.changeCustomXaxis()
# legend position
if self.legendPos == 0:
self.configDlg.ui.radioLegendPosBest.setChecked(True)
elif self.legendPos == 1:
self.configDlg.ui.radioLegendPosUpperRight.setChecked(True)
elif self.legendPos == 7:
self.configDlg.ui.radioLegendPosCentreRight.setChecked(True)
elif self.legendPos == 4:
self.configDlg.ui.radioLegendPosLowerRight.setChecked(True)
elif self.legendPos == 2:
self.configDlg.ui.radioLegendPosUpperLeft.setChecked(True)
elif self.legendPos == 6:
self.configDlg.ui.radioLegendPosCentreLeft.setChecked(True)
elif self.legendPos == 3:
self.configDlg.ui.radioLegendPosLowerLeft.setChecked(True)
else:
self.configDlg.ui.radioLegendPosNone.setChecked(True)
if self.configDlg.exec_() == QtGui.QDialog.Accepted:
self.figWidth = self.configDlg.ui.spinFigWidth.value()
self.figHeight = self.configDlg.ui.spinFigHeight.value()
self.bCustomBinWidth = self.configDlg.ui.chkCustomBinWidth.isChecked()
self.binWidth = self.configDlg.ui.spinBinWidth.value()
self.yAxisLogScale = self.configDlg.ui.chkLogScale.isChecked()
self.bCustomXaxis = self.configDlg.ui.chkCustomXaxis.isChecked()
self.xLimitLeft = self.configDlg.ui.spinXmin.value()
self.xLimitRight = self.configDlg.ui.spinXmax.value()
# legend position
if self.configDlg.ui.radioLegendPosBest.isChecked() == True:
self.legendPos = 0
elif self.configDlg.ui.radioLegendPosUpperRight.isChecked() == True:
self.legendPos = 1
elif self.configDlg.ui.radioLegendPosCentreRight.isChecked() == True:
self.legendPos = 7
elif self.configDlg.ui.radioLegendPosLowerRight.isChecked() == True:
self.legendPos = 4
elif self.configDlg.ui.radioLegendPosUpperLeft.isChecked() == True:
self.legendPos = 2
elif self.configDlg.ui.radioLegendPosCentreLeft.isChecked() == True:
self.legendPos = 6
elif self.configDlg.ui.radioLegendPosLowerLeft.isChecked() == True:
self.legendPos = 3
else:
self.legendPos = -1
self.settings.setValue(self.name + '/width', self.figWidth)
self.settings.setValue(self.name + '/height', self.figHeight)
self.settings.setValue(self.name + '/custom bin width', self.bCustomBinWidth)
self.settings.setValue(self.name + '/bin width', self.binWidth)
self.settings.setValue(self.name + '/log scale', self.yAxisLogScale)
self.settings.setValue(self.name + '/custom x-axis extents', self.bCustomXaxis)
self.settings.setValue(self.name + '/min value', self.xLimitLeft)
self.settings.setValue(self.name + '/max value', self.xLimitRight)
self.settings.setValue(self.name + '/legend position', self.legendPos)
self.plot(profile, statsResults)
def changeCustomBinWidth(self):
self.configDlg.ui.spinBinWidth.setEnabled(self.configDlg.ui.chkCustomBinWidth.isChecked())
def changeCustomXaxis(self):
self.configDlg.ui.spinXmin.setEnabled(self.configDlg.ui.chkCustomXaxis.isChecked())
self.configDlg.ui.spinXmax.setEnabled(self.configDlg.ui.chkCustomXaxis.isChecked())
def setXaxisMin(self):
seqs1 = self.profile.getSequenceCounts(0)
seqs2 = self.profile.getSequenceCounts(1)
self.configDlg.ui.spinXmin.setValue(min(min(seqs1), min(seqs2)))
def setXaxisMax(self):
seqs1 = self.profile.getSequenceCounts(0)
seqs2 = self.profile.getSequenceCounts(1)
self.configDlg.ui.spinXmax.setValue(max(max(seqs1), max(seqs2)))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(SeqHistogram)
testWindow.show()
sys.exit(app.exec_())
| gpl-3.0 | -6,319,789,311,616,871,000 | 36.492366 | 129 | 0.723376 | false |
jtladner/Scripts | BEAST_prep/beast_nexus_prep_v2.0.py | 1 | 4797 | #!/usr/bin/env python
from __future__ import division
import optparse, os
#This script uses an aligned fasta file and a tab deliminted file containing CDS coordinates to create a nexus input for BEAST
#In version 2.0, added a flag to throw if you only want coding sequence to be included in the nexus file
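# Example invocation (hypothetical file names):
#   python beast_nexus_prep_v2.0.py -f aligned.fasta -c cds_coords.txt -o out.nex [--onlyCDS]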
def main():
usage = '%prog [options]'
p = optparse.OptionParser()
p.add_option('-f', '--fasta', help='Aligned fasta. [None]')
p.add_option('-c', '--coords', help='Tab delimited file with coordinates of CDS. Should have at least 3 tab delimited columns. The first is not used, will probably have some sort of CDS name. The next two have start and stop base positions.[None]')
p.add_option('-o', '--out', help='Name for output nexus file. [None]')
p.add_option('--onlyCDS', default=False, action="store_true", help='Use this flag if you only want coding regions to be included in the output nexus file. [None]')
opts, args = p.parse_args()
make_beast_nexus(opts)
#----------------------End of main()
def make_beast_nexus(opts):
fout=open(opts.out, 'w')
#Read in seqs
names, seqs = read_fasta_lists(opts.fasta)
#Get coding coordinates
coding_coords=get_coords(opts.coords)
#Make concatenated coding seqs
coding_seqs=['']*len(seqs)
for start, end in coding_coords:
for i in range(len(seqs)):
coding_seqs[i]+=seqs[i][start-1:end]
if opts.onlyCDS:
fout.write("#NEXUS\n[File created using beast_nexus_prep.py using %s and %s]\n\nBEGIN TAXA;\n" % (opts.fasta, opts.coords))
fout.write("DIMENSIONS NTAX=%d;\n\nTAXLABELS\n%s\n;\n\nEND;\n" % (len(names), '\n'.join(names)))
fout.write("BEGIN CHARACTERS;\nDIMENSIONS NCHAR=%d;\nFORMAT DATATYPE=DNA MISSING=N GAP=-;\nMATRIX\n\n%s\n;\n\nEND;\n\n" % (len(coding_seqs[0]), '\n'.join(['%s %s' % (names[x], coding_seqs[x]) for x in range(len(names))])))
fout.write("BEGIN ASSUMPTIONS;\n\tcharset coding = 1-%d;\nend;\n" % (len(coding_seqs[0])))
else:
#Get non-coding coordinates
noncoding_coords=extrap_noncoding(coding_coords, len(seqs[0]))
#Make concatenated noncoding seqs
noncoding_seqs=['']*len(seqs)
for start, end in noncoding_coords:
for i in range(len(seqs)):
noncoding_seqs[i]+=seqs[i][start-1:end]
concat_seqs=[coding_seqs[i]+noncoding_seqs[i] for i in range(len(seqs))]
coding_start=1
coding_end=len(coding_seqs[0])
noncoding_start=coding_end+1
noncoding_end=len(concat_seqs[0])
fout.write("#NEXUS\n[File created using beast_nexus_prep.py using %s and %s]\n\nBEGIN TAXA;\n" % (opts.fasta, opts.coords))
fout.write("DIMENSIONS NTAX=%d;\n\nTAXLABELS\n%s\n;\n\nEND;\n" % (len(names), '\n'.join(names)))
fout.write("BEGIN CHARACTERS;\nDIMENSIONS NCHAR=%d;\nFORMAT DATATYPE=DNA MISSING=N GAP=-;\nMATRIX\n\n%s\n;\n\nEND;\n\n" % (len(concat_seqs[0]), '\n'.join(['%s %s' % (names[x], concat_seqs[x]) for x in range(len(names))])))
fout.write("BEGIN ASSUMPTIONS;\n\tcharset coding = %d-%d;\n\tcharset noncoding = %d-%d;\nend;\n" % (coding_start, coding_end, noncoding_start, noncoding_end ))
fout.close()
def extrap_noncoding(coding_coords, seq_len):
non_coords=[]
#To handle noncoding at the very beginning of the sequence
if coding_coords[0][0] != 1:
non_coords.append((1,coding_coords[0][0]-1))
#To handle noncoding regions in between coding seqs
coding_sorted=sorted(coding_coords[:])
for i in range(len(coding_sorted[:-1])):
if coding_sorted[i+1][0]-coding_sorted[i][1]>0:
non_coords.append((coding_sorted[i][1]+1,coding_sorted[i+1][0]-1))
#To handle non-coding at the very end of the sequence
if coding_coords[-1][1] != seq_len:
non_coords.append((coding_coords[-1][1]+1, seq_len))
print non_coords
return non_coords
def get_coords(c_file):
fin=open(c_file, 'r')
coords=[]
for line in fin:
cols=line.strip().split('\t')
coords.append((int(cols[1]), int(cols[2])))
return coords
# Extracts data from a fasta sequence file. Returns two lists, the first holds the names of the seqs (excluding the '>' symbol), and the second holds the sequences
def read_fasta_lists(file):
fin = open(file, 'r')
count=0
names=[]
seqs=[]
seq=''
for line in fin:
line=line.strip()
if line and line[0] == '>': #indicates the name of the sequence
count+=1
names.append(line[1:])
if count>1:
seqs.append(seq)
seq=''
else: seq +=line
seqs.append(seq)
return names, seqs
###------------------------------------->>>>
if __name__ == "__main__":
main()
| gpl-3.0 | 1,622,315,349,966,179,600 | 38.644628 | 252 | 0.62143 | false |
jhuapl-boss/intern | examples/cutout_timeseries_ex.py | 1 | 1548 | from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import numpy as np
from requests import HTTPError
rmt = BossRemote('neurodata.cfg')
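# BossRemote reads its connection settings (host and API token) from the given
# config file; 'neurodata.cfg' is assumed to exist next to this script.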
xmax = 8
ymax = 4
zmax = 5
tmax = 10
COLL_NAME = 'gray'
EXP_NAME = 'timeseries_test'
CHAN_NAME = 'Ch1'
COORD_FRAME = COLL_NAME + '_' + EXP_NAME
coord = CoordinateFrameResource(
COORD_FRAME, '', 0, xmax, 0, ymax, 0, zmax)
try:
coord_actual = rmt.get_project(coord)
except HTTPError:
coord_actual = rmt.create_project(coord)
# Create or get experiment
exp_setup = ExperimentResource(
EXP_NAME, COLL_NAME, coord_frame=COORD_FRAME,
num_time_samples=tmax, time_step=1)
try:
exp_actual = rmt.get_project(exp_setup)
except HTTPError:
exp_actual = rmt.create_project(exp_setup)
# Create or get a channel to write to
chan_setup = ChannelResource(
CHAN_NAME, COLL_NAME, EXP_NAME, 'image', '', datatype='uint16')
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
x_rng = [0, xmax]
y_rng = [0, ymax]
z_rng = [0, zmax]
t_rng = [0, tmax]
print('Data model setup.')
data = np.random.randint(1, 3000, (tmax, zmax, ymax, xmax))
data = data.astype(np.uint16)
# Upload the cutout to the channel.
rmt.create_cutout(chan_actual, 0, x_rng, y_rng, z_rng, data,
time_range=t_rng)
cutout_data = rmt.get_cutout(
chan_actual, 0, x_rng, y_rng, z_rng, time_range=t_rng)
np.testing.assert_array_equal(data, cutout_data)
print(np.shape(cutout_data))
# (10, 5, 4, 8)
| apache-2.0 | 8,872,616,388,416,469,000 | 22.815385 | 67 | 0.683463 | false |
Xicnet/radioflow-scheduler | project/icecast_stats/views.py | 1 | 4776 | import os.path
import datetime
import pytz
from django.conf import settings
from django.db.models import F
from datetime import timedelta
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.models import User
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from rest_framework import generics
from rest_framework import serializers
from timeslot.models import Program, Day, Config
from icecast_stats.models import IcecastLog, ProgramStat
from realtime_stats import StatsCollector
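# Views and REST endpoints for browsing Icecast listener logs, realtime
# listener stats and per-program statistics.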
@login_required
def index(request):
logs = IcecastLog.objects.all()[:50]
return render_to_response(
'icecast_stats/dashboard.html',
{
'logs': logs,
'weekly_programs': Program.get_weekly(request),
},
context_instance=RequestContext(request)
)
@login_required
def programacion(request):
return redirect('/program/')
@login_required
def realtime(request):
print settings.ICECAST_URL
stats = StatsCollector(
settings.ICECAST_URL,
settings.ICECAST_USER,
settings.ICECAST_PASS,
settings.ICECAST_REALM,
settings.ICECAST_MOUNT
)
stats_data = stats.run()
return render_to_response(
'icecast_stats/realtime.html',
{
'listeners': stats_data,
},
context_instance=RequestContext(request)
)
@login_required
def chat(request):
return render_to_response(
'icecast_stats/chat.html',
{
'weekly_programs': Program.get_weekly(request),
},
context_instance=RequestContext(request)
)
# Serializers define the API representation.
class IcecastLogSerializer(serializers.ModelSerializer):
class Meta:
model = IcecastLog
class IcecastLogViewSet( generics.ListAPIView):
#@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(IcecastLogViewSet, self).dispatch(*args, **kwargs)
serializer_class = IcecastLogSerializer
def get_queryset(self):
"""
Return Icecast log entries filtered by the requested mount and, optionally,
by a start/end date range and a result limit.
"""
mount = self.request.query_params.get('mount', None)
start = "%s 00:00:00" % self.request.query_params.get('start', None)
end = "%s 00:00:00" % self.request.query_params.get('end', None)
#end = datetime.date("%s 00:00:00" % self.request.query_params.get('end', None), tzinfo=pytz.UTC)
limit = self.request.query_params.get('limit', None)
if self.request.user.is_superuser:
#logs = IcecastLog.objects.all()
logs = IcecastLog.objects.filter(mount=mount)
else:
#mount = os.path.basename(User.objects.get(username=self.request.user.username).config.streamurl)
logs = IcecastLog.objects.filter(mount=mount)
if mount:
logs = logs.filter(mount=mount)
if start and end:
logs = logs.filter(datetime_start__gte=start, datetime_end__lte=end, datetime_end__gt=F('datetime_start') + timedelta(seconds=5) )
return logs[:limit]
class ProgramStatSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramStat
class ProgramStatViewSet( generics.ListAPIView):
#@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ProgramStatViewSet, self).dispatch(*args, **kwargs)
serializer_class = ProgramStatSerializer
def get_queryset(self):
"""
Return per-program statistics filtered by the requested mount and,
optionally, by a start/end date range and a result limit.
"""
mount = self.request.query_params.get('mount', None)
start = "%s 00:00:00" % self.request.query_params.get('start', None)
end = "%s 00:00:00" % self.request.query_params.get('end', None)
limit = self.request.query_params.get('limit', None)
if self.request.user.is_superuser:
#program_stat = ProgramStat.objects.all()
program_stat = ProgramStat.objects.filter(log_entry__mount=mount)
else:
program_stat = ProgramStat.objects.filter(log_entry__mount=mount)
if mount:
program_stat = program_stat.filter(log_entry__mount=mount)
if start and end:
program_stat = program_stat.filter(log_entry__datetime_start__gte=start, log_entry__datetime_end__lte=end)
return program_stat[:limit]
| agpl-3.0 | -1,338,487,912,704,370,200 | 31.27027 | 142 | 0.650126 | false |
jsharkey13/isaac-selenium-testing | isaactest/tests/back_to_board.py | 1 | 2871 | import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import get_hexagon_properties
from ..utils.i_selenium import assert_tab, image_div
from ..tests import TestWithDependency
from selenium.common.exceptions import NoSuchElementException
__all__ = ["back_to_board"]
#####
# Test : Back to Board Button
#####
@TestWithDependency("BACK_TO_BOARD")
def back_to_board(driver, ISAAC_WEB, WAIT_DUR, **kwargs):
"""Test whether the back to board button works.
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB)
driver.get(ISAAC_WEB)
log(INFO, "Got: %s" % ISAAC_WEB)
time.sleep(WAIT_DUR)
try:
questions_tab = driver.find_element_by_xpath("//a[@ui-sref='gameBoards({filter: true})']")
questions_tab.click()
time.sleep(WAIT_DUR)
except NoSuchElementException:
log(ERROR, "Can't find 'Questions' tab link; can't continue!")
return False
try:
url = str(driver.current_url).replace("?filter=true", "").replace("?filter", "") # Ignore whether the filter state is set!
log(INFO, "Currently on '%s', attempt to access 5 hexagons." % url)
for i in range(5):
hexagons = driver.find_elements_by_xpath("//a[@class='ru-hex-home-content']") # This has to be inside the loop
hexagon = get_hexagon_properties(hexagons[i]) # so that we don't get stale elements.
log(INFO, "Got hexagon %s." % (i + 1))
if hexagon["type"] == "Question":
hexagons[i].click()
log(INFO, "Hexagon is a question; clicked on it.")
time.sleep(WAIT_DUR)
back_to_board_button = driver.find_element_by_xpath("//a[@ng-click='backToBoard()']")
back_to_board_button.click()
log(INFO, "Clicked back to board button.")
time.sleep(WAIT_DUR)
new_url = str(driver.current_url).replace("?filter=true", "").replace("?filter", "")
assert new_url == url, "Expected to end on '%s', actually ended on '%s'!" % (url, new_url)
log(PASS, "Back to board button worked as expected.")
return True
except NoSuchElementException:
image_div(driver, "ERROR_back_to_board")
log(ERROR, "Couldn't find 'Back to Board' button; see 'ERROR_back_to_board.png'! Can't continue!")
return False
except AssertionError, e:
image_div(driver, "ERROR_back_to_board")
log(ERROR, "Back to board button failed! %s" % e.message)
return False
except IndexError:
log(ERROR, "Not enough hexagons to click; can't continue!")
return False
| mit | 4,891,062,847,319,937,000 | 45.306452 | 131 | 0.607106 | false |
ndp-systemes/odoo-addons | stock_specific_inventory/__openerp__.py | 1 | 1636 | # -*- coding: utf8 -*-
#
# Copyright (C) 2015 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
'name': 'Stock Specific Inventory',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Warehouse',
'depends': ['stock'],
'description': """
Stock Specific Inventory
==========================
This module adds the possibility to make inventories on a product selection.
It also improves the view of last inventories per product. Products can be selected directly from this view and a new
inventory on these products be made.
""",
'website': 'http://www.ndp-systemes.fr',
'data': [
'security/ir.model.access.csv',
'views/stock_specific_product_for_inventory.xml'
],
'demo': [
'test_stock_specific_inventory_demo.xml'
],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
| agpl-3.0 | 438,542,075,688,769,860 | 33.020833 | 117 | 0.660747 | false |
PaddoInWonderland/PaddoCogs | schmeckles/schmeckles.py | 1 | 2347 | import re
class Schmeckles:
def __init__(self, bot):
self.bot = bot
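# Regex capturing a numeric amount followed by "schmeckle...", plus up to 30
# characters of non-numeric context on either side of the match.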
self.p = re.compile('([^\n\.\,\r\d-]{0,30})(-?[\d|,]{0,300}\.{0,1}\d{1,300} schmeckle[\w]{0,80})([^\n\.\,\r\d-]{0,30})', re.IGNORECASE)
async def schmeckle2usd(self, schmeckle):
"""1 Schmeckle = $148 USD
https://www.reddit.com/r/IAmA/comments/202owt/we_are_dan_harmon_and_justin_roiland_creators_of/cfzfv79"""
return schmeckle * 148.0
async def schmeckle2eur(self, schmeckle):
return schmeckle * 139.25 # USD price converted at the then-current USD->EUR rate
async def schmeckle2yen(self, schmeckle):
return schmeckle * 139.25 # placeholder: reuses the EUR figure, not a real USD->JPY conversion
async def schmeckle2rub(self, schmeckle):
return schmeckle * 139.25 # placeholder: reuses the EUR figure, not a real USD->RUB conversion
async def searchForSchmeckles(self, content):
if any([x in content.lower() for x in ['?', 'how much', 'what is', 'how many', 'euro', 'usd', 'dollars', 'dollar', 'euros']]):
return self.p.search(content)
return None
async def getSchmeckles(self, content):
get_schmeckles = await self.searchForSchmeckles(content)
if get_schmeckles:
match = get_schmeckles.groups()
euro = any([x in match[-1].lower() for x in ['eur', 'euro', 'euros']])
dollar = any([x in match[-1].lower() for x in ['usd', 'dollar', 'dollars']])
if euro and not dollar:
value = await self.schmeckle2eur(float(match[1].split()[0])), 'EUR', match[1].split()[0]
elif dollar and not euro:
value = await self.schmeckle2usd(float(match[1].split()[0])), 'USD', match[1].split()[0]
elif not dollar and not euro:
value = await self.schmeckle2usd(float(match[1].split()[0])), 'USD', match[1].split()[0]
return value
return None
async def _on_message(self, message):
content = message.content
author = message.author
channel = message.channel
if author.id != self.bot.user.id:
schmeckles = await self.getSchmeckles(content)
if schmeckles:
await self.bot.send_message(channel, '{0[2]} SHM is about {0[0]:.2f} {0[1]}'.format(schmeckles))
def setup(bot):
cog = Schmeckles(bot)
bot.add_listener(cog._on_message, "on_message")
bot.add_cog(cog)
| gpl-3.0 | -4,986,179,851,531,409,000 | 40.910714 | 143 | 0.588411 | false |
gabstopper/smc-python | smc/core/general.py | 1 | 15630 | """
General configuration areas for an engine. This correlates to service level
settings such as DNSRelay, SNMP, etc.
"""
import collections
from smc.base.model import Element
from smc.base.util import element_resolver
from smc.elements.profiles import DNSRelayProfile
from smc.base.structs import NestedDict
from smc.policy.interface import InterfacePolicy
from smc.api.exceptions import LoadPolicyFailed
class SNMP(object):
"""
SNMP configuration details for applying SNMP on an engine.
SNMP requires at minimum an assigned SNMPAgent configuration
which defines the SNMP specific settings (version, community
string, etc). You can also define specific interfaces to enable
SNMP on. By default, if no addresses are specified, SNMP will
be defined on all interfaces.
.. seealso:: :class:`smc.elements.profiles.SNMPAgent`
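Example (sketch; assumes an existing SNMPAgent element named 'myagent' and
that the engine exposes this configuration as ``engine.snmp``)::
engine.snmp.enable(SNMPAgent('myagent'), snmp_location='helsinki-dc')
engine.update()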
"""
def __init__(self, engine):
self.engine = engine
@property
def agent(self):
"""
The SNMP agent profile used for this engine.
:rtype: SNMPAgent
"""
return self.engine.from_href(
getattr(self.engine, 'snmp_agent_ref', None))
@property
def status(self):
return bool(getattr(self.engine, 'snmp_agent_ref', False))
def disable(self):
"""
Disable SNMP on this engine. You must call `update` on the engine
for this to take effect.
:return: None
"""
self.engine.data.update(
snmp_agent_ref=None,
snmp_location='',
snmp_interface=[])
def enable(self, snmp_agent, snmp_location=None, snmp_interface=None):
"""
Enable SNMP on the engine. Specify a list of interfaces
by ID to enable only on those interfaces. Only interfaces
that have NDI's are supported.
:param str,Element snmp_agent: the SNMP agent reference for this engine
:param str snmp_location: the SNMP location identifier for the engine
:param list snmp_interface: list of interface IDs to enable SNMP
:raises ElementNotFound: unable to resolve snmp_agent
:raises InterfaceNotFound: specified interface by ID not found
"""
agent = element_resolver(snmp_agent)
snmp_interface = [] if not snmp_interface else snmp_interface
interfaces = self._iface_dict(snmp_interface)
self.engine.data.update(
snmp_agent_ref=agent,
snmp_location=snmp_location if snmp_location else '',
snmp_interface=interfaces)
def _iface_dict(self, snmp_interface):
return [values for interface in snmp_interface
for values in self.engine.interface.get(interface).ndi_interfaces]
@property
def _nicids(self):
return [str(nic.get('nicid'))
for nic in getattr(self.engine, 'snmp_interface', [])]
def update_configuration(self, **kwargs):
"""
Update the SNMP configuration using any kwargs supported in the
`enable` constructor. Return whether a change was made. You must call
update on the engine to commit any changes.
:param dict kwargs: keyword arguments supported by enable constructor
:rtype: bool
"""
updated = False
if 'snmp_agent' in kwargs:
kwargs.update(snmp_agent_ref=kwargs.pop('snmp_agent'))
snmp_interface = kwargs.pop('snmp_interface', None)
for name, value in kwargs.items():
_value = element_resolver(value)
if getattr(self.engine, name, None) != _value:
self.engine.data[name] = _value
updated = True
if snmp_interface is not None:
_snmp_interface = getattr(self.engine, 'snmp_interface', [])
if not len(snmp_interface) and len(_snmp_interface):
self.engine.data.update(snmp_interface=[])
updated = True
elif len(snmp_interface):
if set(self._nicids) ^ set(map(str, snmp_interface)):
self.engine.data.update(
snmp_interface=self._iface_dict(snmp_interface))
updated = True
return updated
@property
def location(self):
"""
Return the SNMP location string
:rtype: str
"""
return getattr(self.engine, 'snmp_location', None)
@property
def interface(self):
"""
Return a list of physical interfaces that the SNMP
agent is bound to.
:rtype: list(PhysicalInterface)
"""
nics = set([nic.get('nicid') for nic in \
getattr(self.engine, 'snmp_interface', [])])
return [self.engine.interface.get(nic) for nic in nics]
def __repr__(self):
return '{0}(enabled={1})'.format(
self.__class__.__name__, self.status)
class DNSRelay(object):
"""
DNS Relay allows the engine to provide DNS caching or specific
host, IP and domain replies to clients. It can also be used
to sinkhole specific DNS requests.
.. seealso:: :class:`smc.elements.profiles.DNSRelayProfile`
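Example (sketch; assumes the engine exposes this as ``engine.dns_relay`` and
that the default 'Cache Only' DNSRelayProfile exists)::
engine.dns_relay.enable(interface_id=0, dns_relay_profile=DNSRelayProfile('Cache Only'))
engine.update()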
"""
def __init__(self, engine):
self.engine = engine
@property
def status(self):
"""
Status of DNS Relay on this engine.
:rtype: bool
"""
return getattr(self.engine, 'dns_relay_profile_ref', False)
def enable(self, interface_id, dns_relay_profile=None):
"""
Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None
"""
if not dns_relay_profile: # Use default
href = DNSRelayProfile('Cache Only').href
else:
href = element_resolver(dns_relay_profile)
intf = self.engine.interface.get(interface_id)
self.engine.data.update(dns_relay_profile_ref=href)
self.engine.data.update(dns_relay_interface=intf.ndi_interfaces)
def disable(self):
"""
Disable DNS Relay on this engine
:return: None
"""
self.engine.data.update(dns_relay_interface=[])
self.engine.data.pop('dns_relay_profile_ref', None)
def __repr__(self):
return '{0}(enabled={1})'.format(
self.__class__.__name__, self.status)
class DefaultNAT(object):
"""
Default NAT on the engine is used to automatically create NAT
configurations based on internal routing. This simplifies the
need to create specific NAT rules, primarily for outbound traffic.
.. note:: You must call engine.update() to commit any changes.
"""
def __init__(self, engine):
self.engine = engine
@property
def status(self):
"""
Status of default nat on the engine.
:rtype: bool
"""
return self.engine.data['default_nat']
def enable(self):
"""
Enable default NAT on this engine
"""
self.engine.data['default_nat'] = True
def disable(self):
"""
Disable default NAT on this engine
"""
self.engine.data['default_nat'] = False
def __repr__(self):
return '{0}(enabled={1})'.format(
self.__class__.__name__, self.status)
class RankedDNSAddress(object):
"""
A RankedDNSAddress represents a list of DNS entries used as a ranked list to
provide an ordered way to perform DNS queries.
DNS entries can be added as raw IP addresses, or as elements of type
:class:`smc.elements.network.Host` or :class:`smc.elements.servers.DNSServer`
(or combination of both). This is an iterable class yielding namedtuples of
type :class:`.DNSEntry`.
Normal access is done through an engine reference::
>>> list(engine.dns)
[DNSEntry(rank=0,value=8.8.8.8,ne_ref=None),
DNSEntry(rank=1,value=None,ne_ref=DNSServer(name=mydnsserver))]
>>> engine.dns.append(['8.8.8.8', '9.9.9.9'])
>>> engine.dns.prepend(['1.1.1.1'])
>>> engine.dns.remove(['8.8.8.8', DNSServer('mydnsserver')])
.. note:: You must call engine.update() to commit any changes.
"""
def __init__(self, entries):
self.entries = entries
def __iter__(self):
for entry in self.entries:
yield DNSEntry(**entry)
def __len__(self):
return len(self.entries)
def __contains__(self, entry):
for e in self:
try:
if e.ne_ref == entry.href:
return True
except AttributeError:
if e.value == entry:
return True
return False
def _rank_dns(self, entry, prepend=False):
if prepend and len(self) or not len(self):
start_rank = 0
else:
start_rank = self.entries[-1].get('rank')+1
additions = []
for e in entry:
if e not in self and e not in additions:
additions.append(e)
if not additions:
return
if prepend: # Rerank
for e in self.entries:
e.update((k, v+1) for k, v in e.items() if k == 'rank')
for num, addr in enumerate(additions, int(start_rank)):
try:
self.entries.append({'rank': float(num), 'ne_ref': addr.href})
except AttributeError:
self.entries.append({'rank': float(num), 'value': addr})
def add(self, values):
return self.append(values)
def append(self, values):
"""
Add DNS entries to the engine at the end of the existing list (if any).
A DNS entry can be either a raw IP Address, or an element of type
:class:`smc.elements.network.Host` or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
.. note:: If the DNS entry added already exists, it will not be
added. It's not a valid configuration to enter the same DNS IP
multiple times. This is also true if the element is assigned the
same address as a raw IP address already defined.
"""
self._rank_dns(values)
def prepend(self, values):
"""
Prepend DNS entries to the engine at the beginning of the existing list
(if any). A DNS entry can be either a raw IP Address, or an element of type
:class:`smc.elements.network.Host` or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
"""
self._rank_dns(values, prepend=True)
def remove(self, values):
"""
Remove DNS entries from this ranked DNS list. A DNS entry can be either
a raw IP Address, or an element of type :class:`smc.elements.network.Host`
or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
"""
removables = []
for value in values:
if value in self:
removables.append(value)
if removables:
self.entries[:] = [entry._asdict() for entry in self
                if entry.value not in removables and entry.element not in removables]
# Rerank to maintain order
for i, entry in enumerate(self.entries):
entry.update(rank='{}'.format(i))
class DNSEntry(collections.namedtuple('DNSEntry', 'value rank ne_ref')):
"""
DNSEntry represents a single DNS entry within an engine
DNSAddress list.
:ivar str value: IP address value of this entry (None if type Element is used)
:ivar int rank: order rank for the entry
:ivar str ne_ref: network element href of entry. Use element property to resolve
to type Element.
:ivar Element element: If the DNS entry is an element type, this property
will returned a resolved version of the ne_ref field.
"""
__slots__ = ()
def __new__(cls, rank, value=None, ne_ref=None): # @ReservedAssignment
return super(DNSEntry, cls).__new__(cls, value, rank, ne_ref)
@property
def element(self):
return Element.from_href(self.ne_ref)
def __repr__(self):
return 'DNSEntry(rank={0},value={1},ne_ref={2})'\
.format(self.rank, self.value, self.element)
class Layer2Settings(NestedDict):
"""
Layer 2 Settings are only applicable on Layer 3 Firewall engines
that want to run specific interfaces in layer 2 mode. This
requires that a Layer 2 Interface Policy is applied to the engine.
You can also set connection tracking and bypass on overload
settings for these interfaces as well.
Set policy for the engine::
engine.l2fw_settings.enable(InterfacePolicy('mylayer2'))
:ivar bool bypass_overload_traffic: whether to bypass traffic on overload
:ivar str tracking_mode: connection tracking mode
.. note:: You must call engine.update() to commit any changes.
.. warning:: This feature requires SMC and engine version >= 6.3
"""
def __init__(self, engine):
l2 = engine.data['l2fw_settings']
super(Layer2Settings, self).__init__(data=l2)
def connection_tracking(self, mode):
"""
Set the connection tracking mode for these layer 2 settings.
:param str mode: normal, strict, loose
:return: None
"""
if mode in ('normal', 'strict', 'loose'):
self.update(tracking_mode=mode)
def bypass_on_overload(self, value):
"""
Set the l2fw settings to bypass on overload.
:param bool value: boolean to indicate bypass setting
:return: None
"""
self.update(bypass_overload_traffic=value)
def disable(self):
"""
Disable the layer 2 interface policy
"""
self.pop('l2_interface_policy_ref', None)
def enable(self, policy):
"""
Set a layer 2 interface policy.
:param str,Element policy: an InterfacePolicy or str href
:raises LoadPolicyFailed: Invalid policy specified
:raises ElementNotFound: InterfacePolicy not found
:return: None
"""
if hasattr(policy, 'href'):
if not isinstance(policy, InterfacePolicy):
                raise LoadPolicyFailed('Invalid policy type specified. The policy '
'type must be InterfacePolicy')
self.update(l2_interface_policy_ref=element_resolver(policy))
@property
def policy(self):
"""
Return the InterfacePolicy for this layer 3 firewall.
:rtype: InterfacePolicy
"""
return InterfacePolicy.from_href(self.get('l2_interface_policy_ref'))
def __repr__(self):
return '{0}(policy={1})'.format(
self.__class__.__name__, self.policy)
| apache-2.0 | 6,845,697,112,201,236,000 | 33.656319 | 86 | 0.591363 | false |
joshua-cogliati-inl/raven | framework/Optimizers/GradientBasedOptimizer.py | 1 | 52237 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Gradient Based Optimization strategy
Created on June 16, 2016
@author: chenj
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import sys
import os
import copy
import abc
import numpy as np
from numpy import linalg as LA
from sklearn.neighbors import NearestNeighbors
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .Optimizer import Optimizer
from Assembler import Assembler
from utils import utils,cached_ndarray,mathUtils
#Internal Modules End--------------------------------------------------------------------------------
class GradientBasedOptimizer(Optimizer):
"""
This is the base class for gradient based optimizer. The following methods need to be overridden by all derived class
self.localLocalInputAndChecks(self, xmlNode,paraminput)
self.localLocalInitialize(self, solutionExport)
self.localLocalGenerateInput(self,model,oldInput)
self.localEvaluateGradient(self, optVarsValues, gradient = None)
"""
##########################
# Initialization Methods #
##########################
def __init__(self):
"""
Default Constructor that will initialize member variables with reasonable
defaults or empty lists/dictionaries where applicable.
@ In, None
@ Out, None
"""
Optimizer.__init__(self)
self.ableToHandelFailedRuns = True # is this optimizer able to handle failed runs?
self.constraintHandlingPara = {} # Dict containing parameters for parameters related to constraints handling
self.gradientNormTolerance = 1.e-3 # tolerance on the L2 norm of the gradient
self.gradDict = {} # Dict containing information for gradient related operations
self.gradDict['numIterForAve' ] = 1 # Number of iterations for gradient estimation averaging, denoising number.
    self.gradDict['pertNeeded'    ] = 1                # Number of perturbations needed to evaluate the gradient (globally, considering denoising); for example, pertNeeded = dimension * 1 (if not central difference) * denoise in Finite Difference
    self.paramDict['pertSingleGrad'] = 1               # Number of perturbed points needed to evaluate a single (denoised) gradient, e.g. 1 for SPSA, dim for FD w/o central diff, 2*dim for central diff
self.gradDict['pertPoints' ] = {} # Dict containing normalized inputs sent to model for gradient evaluation
self.readyVarsUpdate = {} # Bool variable indicating the finish of gradient evaluation and the ready to update decision variables
self.counter['perturbation' ] = {} # Counter for the perturbation performed.
self.counter['gradientHistory' ] = {} # In this dict we store the gradient value (versor) for current and previous iterations {'trajectoryID':[{},{}]}
self.counter['gradNormHistory' ] = {} # In this dict we store the gradient norm for current and previous iterations {'trajectoryID':[float,float]}
self.counter['varsUpdate' ] = {}
self.counter['solutionUpdate' ] = {}
self.counter['lastStepSize' ] = {} # counter to track the last step size taken, by trajectory
# line search parameters used in dcsrch function inside minpack2 from Scipy
self.counter['iSave'] = {} # integer work array of dimension 2 for line search in scipy minpack2
# isave(1): whether a minimizer has been bracketed in an interval with endpoints
# isave(2): whether a lower function value has been obtained
self.counter['dSave'] = {} # double precision work array of dimension 13 for line search, this array store the previous line search results as:
# dsave(1): derivative of the problem at previous step
# dsave(2) nonnegative tolerance for the sufficient decrease condition on gradient calculation
# dsave(3) derivative at the best step on variables
# dsave(4) derivative at best residuals;
# dsave(5) value of the problem at step
# dsave(6) value of the problem at best step
# dsave(7) value of the problem at second best step
# dsave(8) best step obtained so far, endpoint of the interval that contains the minimizer.
# dsave(9) second endpoint of the interval that contains the minimizer.
# dsave(10) minimum step in line search
# dsave(11) maximum step in line search
# dsave(12) range of the step
# dsave(13) range to decide if a bisection step is needed
    self.counter['task'] = {}                        # byte string for the task in line search; the initial entry must be set to 'START', and at the end of each line search it exits with convergence, a warning, or an error
# Conjugate gradient parameters
self.counter['gtol'] = {} # specifies a nonnegative tolerance for the curvature condition in conjugate gradient calculation
self.counter['xk'] = {} # ndarray, best optimal point as an array for conjugate gradient calculation
self.counter['gfk'] = {} # ndarray, gradient value as an array for current point in searching the strong wolfe condition in conjugate calculation
self.counter['pk'] = {} # ndarray, search direction in searching the strong wolfe condition in conjugate calculation
self.counter['newFVal'] = {} # float, function value for a new optimal point
self.counter['oldFVal'] = {} # float, function value for last optimal point
self.counter['oldOldFVal'] = {} # float, function value for penultimate optimal point
self.counter['oldGradK'] = {} # ndarray, gradient value as an array for current best optimal point
self.counter['gNorm'] = {} # float, norm of the current gradient
self.counter['deltaK'] = {} # float, inner product of the current gradient for calculation of the Polak–Ribière stepsize
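    # NOTE: for reference, the standard Polak-Ribiere coefficient is beta_k = g_{k+1}^T (g_{k+1} - g_k) / (g_k^T g_k);
    #       'deltaK' above corresponds to the g_k^T g_k denominator.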
    self.counter['derPhi0'] = {}                     # float, objective function derivative at the beginning of each line search
    self.counter['alpha'] = {}                       # float, step size for the conjugate gradient method in the current direction
self.resampleSwitch = True # bool, resample switch
    self.resample = {}                               # bool per trajectory; if True, the next submitted point is a resample of the opt point with newly perturbed gradient points
self.convergeTraj = {}
self.convergenceProgress = {} # tracks the convergence progress, by trajectory
self.trajectoriesKilled = {} # by traj, store traj killed, so that there's no mutual destruction
self.recommendToGain = {} # recommended action to take in next step, by trajectory
self.gainGrowthFactor = 2. # max step growth factor
self.gainShrinkFactor = 2. # max step shrinking factor
self.optPointIndices = [] # in this list we store the indices that correspond to the opt point
self.perturbationIndices = [] # in this list we store the indices that correspond to the perturbation.
self.useCentralDiff = False # whether to use central differencing
self.useGradHist = False # whether to use Gradient history
# REWORK 2018-10 for simultaneous point-and-gradient evaluations
self.realizations = {} # by trajectory, stores the results obtained from the jobs running, see setupNewStorage for structure
# register metadata
self.addMetaKeys(['trajID','varsUpdate','prefix'])
def localInputAndChecks(self, xmlNode, paramInput):
"""
Method to read the portion of the xml input that belongs to all gradient based optimizer only
and initialize some stuff based on the inputs got
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ In, paramInput, InputData.ParameterInput, the parsed parameters
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == "initialization":
for grandchild in child.subparts:
tag = grandchild.getName()
if tag == "resample":
self.resampleSwitch = grandchild.value
if child.getName() == "convergence":
for grandchild in child.subparts:
tag = grandchild.getName()
if tag == "gradientThreshold":
self.gradientNormTolerance = grandchild.value
elif tag == "gainGrowthFactor":
self.gainGrowthFactor = grandchild.value
self.raiseADebug('Gain growth factor is set at',self.gainGrowthFactor)
elif tag == "gainShrinkFactor":
self.gainShrinkFactor = grandchild.value
self.raiseADebug('Gain shrink factor is set at',self.gainShrinkFactor)
elif tag == "centralDifference":
self.useCentralDiff = grandchild.value
elif tag == "useGradientHistory":
self.useGradHist = grandchild.value
self.gradDict['numIterForAve'] = int(self.paramDict.get('numGradAvgIterations', 1))
def localInitialize(self,solutionExport):
"""
Method to initialize settings that belongs to all gradient based optimizer
@ In, solutionExport, DataObject, a PointSet to hold the solution
@ Out, None
"""
for traj in self.optTraj:
self.gradDict['pertPoints'][traj] = {}
self.counter['perturbation'][traj] = 0
self.counter['varsUpdate'][traj] = 0
self.counter['solutionUpdate'][traj] = 0
self.counter['gradientHistory'][traj] = [{},{}]
self.counter['lastStepSize'][traj] = [{},{}]
self.counter['gradNormHistory'][traj] = [0.0,0.0]
self.counter['persistence'][traj] = 0
self.counter['iSave'][traj] = np.zeros((2,), np.intc)
self.counter['dSave'][traj] = np.zeros((13,), float)
self.counter['task'][traj] = b'START'
self.counter['gtol'][traj] = 1e-08
self.counter['xk'][traj] = None
self.counter['gfk'][traj] = None
self.counter['pk'][traj] = None
self.counter['newFVal'][traj] = None
self.counter['oldFVal'][traj] = None
self.counter['oldOldFVal'][traj] = None
self.counter['oldGradK'][traj] = None
self.counter['gNorm'][traj] = None
self.counter['deltaK'][traj] = None
self.counter['derPhi0'][traj] = None
self.counter['alpha'][traj] = None
self.resample[traj] = False
self.optVarsHist[traj] = {}
self.readyVarsUpdate[traj] = False
self.convergeTraj[traj] = False
self.status[traj] = {'process':'submitting new opt points', 'reason':'just started'}
self.counter['recentOptHist'][traj] = [{},{}]
self.trajectoriesKilled[traj] = []
# end job runnable equal to number of trajectory
self._endJobRunnable = len(self.optTraj)
# initialize index lists
## opt point evaluations are indices 0 through number of re-evaluation points
self.optPointIndices = list(range(0,self.gradDict['numIterForAve']+1))
## perturbation evaluations are indices starting at the end of optPoint and going through all the rest
self.perturbationIndices = list(range(self.gradDict['numIterForAve'],self.gradDict['numIterForAve']*(self.paramDict['pertSingleGrad']+1)))
#specializing the self.localLocalInitialize()
self.localLocalInitialize(solutionExport=solutionExport)
@abc.abstractmethod
def localLocalInitialize(self, solutionExport):
"""
Method to initialize local settings.
@ In, solutionExport, DataObject, a PointSet to hold the solution
@ Out, None
"""
pass
###############
# Run Methods #
###############
def evaluateGradient(self, traj):
"""
Method to evaluate gradient based on perturbed points and model evaluations.
@ In, traj, int, the trajectory id
@ Out, gradient, dict, dictionary containing gradient estimation. gradient should have the form {varName: gradEstimation}
"""
# let the local do the main gradient evaluation
gradient = self.localEvaluateGradient(traj)
# we intend for gradient to give direction only, so get the versor
## NOTE this assumes gradient vectors are 0 or 1 dimensional, not 2 or more! (vectors or scalars, not matrices)
gradientNorm = self.calculateMultivectorMagnitude(gradient.values())
# store this norm, infinite or not
self.counter['gradNormHistory'][traj][0] = gradientNorm
#fix inf
if gradientNorm == np.inf:
# if there are infinites, then only infinites should remain, and they are +-1
for v,var in enumerate(gradient.keys()):
# first, set all non-infinites to 0, since they can't compete with infinites
        gradient[var][np.isfinite(gradient[var])] = 0.0
# set +- infinites to +- 1 (arbitrary) since they're all equally important
gradient[var][gradient[var] == -np.inf] = -1.0
gradient[var][gradient[var] == np.inf] = 1.0
# set up the new grad norm
gradientNorm = self.calculateMultivectorMagnitude(gradient.values())
# normalize gradient (if norm is zero, skip this)
if gradientNorm != 0.0:
for var in gradient.keys():
gradient[var] = gradient[var]/gradientNorm
# if float coming in, make it a float going out
if len(gradient[var])==1:
gradient[var] = float(gradient[var])
# store gradient
try:
self.counter['gradientHistory'][traj][1] = self.counter['gradientHistory'][traj][0]
self.counter['lastStepSize'][traj][1] = self.counter['lastStepSize'][traj][0]
except IndexError:
pass # don't have a history on the first pass
self.counter['gradientHistory'][traj][0] = gradient
return gradient
def finalizeSampler(self, failedRuns):
"""
Method called at the end of the Step when no more samples will be taken. Closes out optimizer.
@ In, failedRuns, list, list of JobHandler.ExternalRunner objects
@ Out, None
"""
Optimizer.handleFailedRuns(self, failedRuns)
# get the most optimal point among the trajectories
bestValue = None
bestTraj = None
for traj in self.counter['recentOptHist'].keys():
value = self.counter['recentOptHist'][traj][0][self.objVar]
self.raiseADebug('For trajectory "{}" the best value was'.format(traj+1),value)
if bestTraj is None:
bestTraj = traj
bestValue = value
continue
if self.checkIfBetter(value,bestValue):
bestTraj = traj
bestValue = value
# now have the best trajectory, so write solution export
bestPoint = self.denormalizeData(self.counter['recentOptHist'][bestTraj][0])
self.raiseADebug('The best overall trajectory ending was for trajectory "{}".'.format(bestTraj+1))
self.raiseADebug(' The optimal location is at:')
for v in self.getOptVars():
self.raiseADebug(' {} = {}'.format(v,bestPoint[v]))
self.raiseADebug(' The objective value there: {}'.format(bestValue))
self.raiseADebug('====================')
self.raiseADebug('| END OPTIMIZATION |')
self.raiseADebug('====================')
# _always_ re-add the last point to the solution export, but use a new varsUpdate value
    overwrite = {'varsUpdate': self.counter['varsUpdate'][bestTraj]}
self.writeToSolutionExport(bestTraj, self.normalizeData(bestPoint), True, overwrite=overwrite)
def localEvaluateGradient(self, optVarsValues, gradient = None):
"""
Local method to evaluate gradient.
@ In, optVarsValues, dict, dictionary containing perturbed points.
optVarsValues should have the form {pertIndex: {varName: [varValue1 varValue2]}}
Therefore, each optVarsValues[pertIndex] should return a dict of variable values
that is sufficient for gradient evaluation for at least one variable
(depending on specific optimization algorithm)
@ In, gradient, dict, optional, dictionary containing gradient estimation by the caller.
gradient should have the form {varName: gradEstimation}
@ Out, gradient, dict, dictionary containing gradient estimation. gradient should have the form {varName: gradEstimation}
"""
return gradient
def localFinalizeActualSampling(self,jobObject,model,myInput):
"""
Overwrite only if you need something special at the end of each run....
This function is used by optimizers that need to collect information from the just ended run
@ In, jobObject, instance, an instance of a Runner
@ In, model, Model, instance of a RAVEN model
@ In, myInput, list, the generating input
@ Out, None
"""
# collect finished jobs
prefix = jobObject.getMetadata()['prefix']
traj, step, identifier = [int(x) for x in prefix.split('_')] # FIXME This isn't generic for any prefixing system
self.raiseADebug('Collected sample "{}"'.format(prefix))
failed = jobObject.getReturnCode() != 0
if failed:
self.raiseADebug(' ... sample "{}" FAILED. Cutting step and re-queueing.'.format(prefix))
# since run failed, cut the step and requeue
## cancel any further runs at this point
self.cancelJobs([self._createEvaluationIdentifier(traj, self.counter['varsUpdate'][traj], i) for i in range(1, self.perturbationIndices[-1]+1)])
self.recommendToGain[traj] = 'cut'
grad = self.counter['gradientHistory'][traj][0]
new = self._newOptPointAdd(grad, traj)
if new is not None:
self._createPerturbationPoints(traj, new)
else:
self.raiseADebug('After failing a point, trajectory {} is not adding new points!'.format(traj))
self._setupNewStorage(traj)
else:
# update self.realizations dictionary for the right trajectory
# category: is this point an "opt" or a "grad" evaluations?
# number is which variable is being perturbed, ie which dimension 0 indexed
category, number, _, cdId = self._identifierToLabel(identifier)
# done is whether the realization finished
# index: where is it in the dataObject
# find index of sample in the target evaluation data object
done, index = self._checkModelFinish(str(traj), str(step), str(identifier))
# sanity check
if not done:
self.raiseAnError(RuntimeError,'Trying to collect "{}" but identifies as not done!'.format(prefix))
# store index for future use
# number is the varID
number = number + (cdId * len(self.fullOptVars))
self.realizations[traj]['collect'][category][number].append(index)
# check if any further action needed because we have all the points we need for opt or grad
if len(self.realizations[traj]['collect'][category][number]) == self.realizations[traj]['need']:
# get the output space (input space included as well)
outputs = self._averageCollectedOutputs(self.realizations[traj]['collect'][category][number])
# store denoised results
self.realizations[traj]['denoised'][category][number] = outputs
# if we just finished "opt", check some acceptance and convergence checking
if category == 'opt':
converged = self._finalizeOptimalCandidate(traj,outputs)
else:
converged = False
# if both opts and grads are now done, then we can do an evaluation
## note that by now we've ALREADY accepted the point; if it was rejected, it would have been reset by now.
optDone = bool(len(self.realizations[traj]['denoised']['opt'][0]))
gradDone = all( len(self.realizations[traj]['denoised']['grad'][i]) for i in range(self.paramDict['pertSingleGrad']))
if not converged and optDone and gradDone:
optCandidate = self.normalizeData(self.realizations[traj]['denoised']['opt'][0])
# update solution export
## only write here if we want to write on EVERY optimizer iteration (each new optimal point)
if self.writeSolnExportOn == 'every':
self.writeToSolutionExport(traj, optCandidate, self.realizations[traj]['accepted'])
# whether we wrote to solution export or not, update the counter
self.counter['solutionUpdate'][traj] += 1
self.counter['varsUpdate'][traj] += 1
## since accepted, update history
try:
self.counter['recentOptHist'][traj][1] = copy.deepcopy(self.counter['recentOptHist'][traj][0])
except KeyError:
# this means we don't have an entry for this trajectory yet, so don't copy anything
pass
# store realization of most recent developments
self.counter['recentOptHist'][traj][0] = optCandidate
# find the new gradient for this trajectory at the new opt point
grad = self.evaluateGradient(traj)
# get a new candidate
new = self._newOptPointAdd(grad, traj)
if new is not None:
# add new gradient points
self._createPerturbationPoints(traj, new)
# reset storage
self._setupNewStorage(traj)
def localGenerateInput(self,model,oldInput):
"""
Method to generate input for model to run
@ In, model, model instance, it is the instance of a RAVEN model
@ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc. etc)
@ Out, None
"""
self.readyVarsUpdate = {traj:False for traj in self.optTrajLive}
def localStillReady(self,ready, convergence = False): #,lastOutput=None
"""
Determines if optimizer is ready to provide another input. If not, and if jobHandler is finished, this will end sampling.
@ In, ready, bool, variable indicating whether the caller is prepared for another input.
@ In, convergence, bool, optional, variable indicating whether the convergence criteria has been met.
@ Out, ready, bool, boolean variable indicating whether the caller is prepared for another input.
"""
#let this be handled at the local subclass level for now
return ready
###################
# Utility Methods #
###################
def _averageCollectedOutputs(self,collection):
"""
Averages the results of several realizations that are denoising evaluations of a single point
@ In, collection, list, list of indices of evaluations for a single point
@ Out, outputs, dict, dictionary of average values
"""
# make a place to store distinct evaluation values
outputs = dict((var,np.zeros(self.gradDict['numIterForAve'],dtype=object))
for var in self.solutionExport.getVars('output')
if var in self.mdlEvalHist.getVars('output'))
for i, index in enumerate(collection):
vals = self.mdlEvalHist.realization(index=index)
# store the inputs for later on first iteration
if i == 0:
inputs = dict((var,vals[var]) for var in self.getOptVars())
for var in outputs.keys():
# store values; cover vector variables as well as scalars, as well as vectors that should be scalars
if hasattr(vals[var],'__len__') and len(vals[var]) == 1:
outputs[var][i] = float(vals[var])
else:
outputs[var][i] = vals[var]
# average the collected outputs for the opt point
for var,vals in outputs.items():
outputs[var] = vals.mean()
outputs.update(inputs)
return outputs
def calculateMultivectorMagnitude(self,values):
"""
Calculates the magnitude of vector "values", where values might be a combination of scalars and vectors (but not matrices [yet]).
Calculates the magnitude as if "values" were flattened into a 1d array.
@ In, values, list, values for which the magnitude will be calculated
@ Out, mag, float, magnitude
"""
# use np.linalg.norm (Frobenius norm) to calculate magnitude
## pre-normalise vectors, this is mathematically equivalent to flattening the vector first
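    ##   e.g. values = [2.0, np.array([3.0, 6.0])] -> preMag = [2.0, 6.708...] and the result is
    ##   np.linalg.norm(preMag) = 7.0, identical to np.linalg.norm([2.0, 3.0, 6.0])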
## NOTE this assumes gradient vectors are 0 or 1 dimensional, not 2 or more! (vectors or scalars, not matrices)
# TODO this could be sped up if we could avoid calling np.atleast_1d twice, but net slower if we loop first
preMag = [np.linalg.norm(val) if len(np.atleast_1d(val))>1 else np.atleast_1d(val)[0] for val in values]
## then get the magnitude of the result, and return it
return np.linalg.norm(preMag)
def checkConvergence(self):
"""
Method to check whether the convergence criteria has been met.
@ In, None
@ Out, convergence, list, list of bool variable indicating whether the convergence criteria has been met for each trajectory.
"""
convergence = True
for traj in self.optTraj:
if not self.convergeTraj[traj]:
convergence = False
break
return convergence
def _checkModelFinish(self, traj, updateKey, evalID):
"""
Determines if the Model has finished running an input and returned the output
@ In, traj, int, traj on which the input is being checked
@ In, updateKey, int, the id of variable update on which the input is being checked
      @ In, evalID, int or string, indicating the id of the perturbation (int) or that it is a variable update (string 'v')
      @ Out, _checkModelFinish, tuple(bool, int), (whether the Model has finished the evaluation identified by traj+updateKey+evalID,
        the index of that realization in the data object, or -1 if it is not found)
"""
if len(self.mdlEvalHist) == 0:
return (False,-1)
lookFor = '{}_{}_{}'.format(traj,updateKey,evalID)
index,match = self.mdlEvalHist.realization(matchDict = {'prefix':lookFor})
# if no match, return False
if match is None:
return False,-1
# otherwise, return index of match
return True, index
def _createEvaluationIdentifier(self,trajID,iterID,evalType):
"""
Create evaluation identifier
@ In, trajID, integer, trajectory identifier
@ In, iterID, integer, iteration number (identifier)
@ In, evalType, integer or string, evaluation type (v for variable update; otherwise id for gradient evaluation)
@ Out, identifier, string, the evaluation identifier
"""
identifier = str(trajID) + '_' + str(iterID) + '_' + str(evalType)
return identifier
def _finalizeOptimalCandidate(self,traj,outputs):
"""
Once all the data for an opt point has been collected:
- determine convergence
- determine redundancy
- determine acceptability
- queue new points (if rejected)
@ In, traj, int, the trajectory we are currently considering
@ In, outputs, dict, denoised new optimal point
@ Out, converged, bool, if True then indicates convergence has been reached
"""
# check convergence and check if new point is accepted (better than old point)
if self.resample[traj]:
accepted = True
else:
accepted = self._updateConvergenceVector(traj, self.counter['solutionUpdate'][traj], outputs)
# if converged, we can wrap up this trajectory
if self.convergeTraj[traj]:
# end any excess gradient evaluation jobs
self.cancelJobs([self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) for i in self.perturbationIndices])
return True #converged
# if not accepted, we need to scrap this run and set up a new one
if accepted:
# store acceptance for later
if self.resample[traj]:
self.realizations[traj]['accepted'] = 'resample'
self.raiseADebug('This is a resample point')
else:
self.realizations[traj]['accepted'] = 'accepted'
self.resample[traj] = False
else:
self.resample[traj] = self.checkResampleOption(traj)
# cancel all gradient evaluations for the rejected candidate immediately
self.cancelJobs([self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) for i in self.perturbationIndices])
# update solution export
optCandidate = self.normalizeData(self.realizations[traj]['denoised']['opt'][0])
## only write here if we want to write on EVERY optimizer iteration (each new optimal point)
if self.writeSolnExportOn == 'every':
self.writeToSolutionExport(traj, optCandidate, self.realizations[traj]['accepted'])
# whether we wrote to solution export or not, update the counter
self.counter['solutionUpdate'][traj] += 1
self.counter['varsUpdate'][traj] += 1
# new point setup
## keep the old grad point
grad = self.counter['gradientHistory'][traj][0]
new = self._newOptPointAdd(grad, traj,resample = self.resample[traj])
if new is not None:
self._createPerturbationPoints(traj, new, resample = self.resample[traj])
self._setupNewStorage(traj)
return False #not converged
def fractionalStepChangeFromGradHistory(self,traj):
"""
Uses the dot product between two successive gradients to determine a fractional multiplier for the step size.
For instance, if the dot product is 1.0, we're consistently moving in a straight line, so increase step size.
If the dot product is -1.0, we've gone forward and then backward again, so cut the step size down before moving again.
If the dot product is 0.0, we're moving orthogonally, so don't change step size just yet.
@ In, traj, int, the trajectory for whom we are creating a fractional step size
@ Out, frac, float, the fraction by which to multiply the existing step size
"""
# if we have a recommendation from elsewhere, take that first
if traj in self.recommendToGain.keys():
recommend = self.recommendToGain.pop(traj)
if recommend == 'cut':
frac = 1./self.gainShrinkFactor
elif recommend == 'grow':
frac = self.gainGrowthFactor
else:
self.raiseAnError(RuntimeError,'unrecognized gain recommendation:',recommend)
self.raiseADebug('Based on recommendation "{}", step size multiplier is: {}'.format(recommend,frac))
return frac
# otherwise, no recommendation for this trajectory, so move on
#if we don't have two evaluated gradients, just return 1.0
grad1 = self.counter['gradientHistory'][traj][1]
if len(grad1) == 0: # aka if grad1 is empty dict
return 1.0
#otherwise, do the dot product between the last two gradients
grad0 = self.counter['gradientHistory'][traj][0]
# scalar product
## NOTE assumes scalar or vector, not matrix, values
prod = np.sum( [np.sum(grad0[key]*grad1[key]) for key in grad0.keys()] )
#rescale from [-1, 1] to [1/g, g]
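    # e.g. with gainGrowthFactor = gainShrinkFactor = 2: prod = 1 -> frac = 2, prod = 0 -> frac = 1, prod = -1 -> frac = 0.5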
if prod > 0:
frac = self.gainGrowthFactor**prod
else:
frac = self.gainShrinkFactor**prod
self.raiseADebug('Based on gradient history, step size multiplier is:',frac)
return frac
def _getJobsByID(self,traj):
"""
      Checks whether all opt point evaluations for the given trajectory have finished,
      and collects their locations within the target evaluation data object.
@ In, traj, int, ID of the trajectory for whom we collect jobs
@ Out, solutionExportUpdatedFlag, bool, True if the solutionExport needs updating
@ Out, solutionIndeces, list(int), location of updates within the full targetEvaluation data object
"""
solutionUpdateList = []
solutionIndeces = []
# get all the opt point results (these are the multiple evaluations of the opt point)
for i in range(self.gradDict['numIterForAve']):
identifier = i
solutionExportUpdatedFlag, index = self._checkModelFinish(traj, self.counter['solutionUpdate'][traj], str(identifier))
solutionUpdateList.append(solutionExportUpdatedFlag)
solutionIndeces.append(index)
solutionExportUpdatedFlag = all(solutionUpdateList)
return solutionExportUpdatedFlag,solutionIndeces
def getQueuedPoint(self,traj,denorm=True):
"""
Pops the first point off the submission queue (or errors if empty). By default denormalized the point before returning.
@ In, traj, int, the trajectory from whose queue we should obtain an entry
@ In, denorm, bool, optional, if True the input data will be denormalized before returning
      @ Out, prefix, str, the run identifier in the form <traj>_<varsUpdate>_<evalNumber> (e.g. 0_0_2)
@ Out, point, dict, {var:val}
"""
try:
entry = self.submissionQueue[traj].popleft()
except IndexError:
self.raiseAnError(RuntimeError,'Tried to get a point from submission queue of trajectory "{}" but it is empty!'.format(traj))
prefix = entry['prefix']
point = entry['inputs']
if denorm:
point = self.denormalizeData(point)
return prefix,point
def _identifierToLabel(self,identifier):
"""
      Maps identifiers (e.g. the last part of prefix = trajectory_step_identifier) to labels such as ("grad", 2, 0, 0) or ("opt", 0, 0, 0)
      @ In, identifier, int, number of evaluation within trajectory and step
      @ Out, label, tuple, (category, varId, denoId, cdId): category is "grad" or "opt", varId is which variable's perturbation it
        belongs to (always 0 for opt), denoId is the denoising repetition, and cdId is the central-difference side (0 or 1)
"""
if identifier in self.perturbationIndices:
category = 'grad'
if self.paramDict['pertSingleGrad'] == 1:
# no need to calculate the pertPerVar if pertSingleGrad is 1
pertPerVar = 1
else:
pertPerVar = self.paramDict['pertSingleGrad'] // (1+self.useCentralDiff)
varId = (identifier - self.gradDict['numIterForAve']) % pertPerVar
denoId = (identifier - self.gradDict['numIterForAve'])// self.paramDict['pertSingleGrad']
# for cdId 0 is the first cdID 1 is the second side of central Diff
if len(self.fullOptVars) == 1:
#expect 0 or 1 for cdID, but % len(self.fullOptVars) will always be 0 if len(self.fullOptVars)=1
cdId = (identifier - self.gradDict['numIterForAve']) % self.paramDict['pertSingleGrad']
else:
cdId = ((identifier - self.gradDict['numIterForAve'])// pertPerVar) % len(self.fullOptVars)
if not self.useCentralDiff:
cdId = 0
else:
category = 'opt'
varId = 0
denoId = identifier
cdId = 0
return category, varId, denoId, cdId
def localCheckConstraint(self, optVars, satisfaction = True):
"""
Local method to check whether a set of decision variables satisfy the constraint or not
@ In, optVars, dict, dictionary containing the value of decision variables to be checked, in form of {varName: varValue}
@ In, satisfaction, bool, optional, variable indicating how the caller determines the constraint satisfaction at the point optVars
@ Out, satisfaction, bool, variable indicating the satisfaction of constraints at the point optVars
"""
return satisfaction
def proposeNewPoint(self,traj,point):
"""
See base class. Used to set next recommended point to use for algorithm, overriding the gradient descent.
@ In, traj, int, trajectory who gets proposed point
@ In, point, dict, input space as dictionary {var:val}
@ Out, None
"""
Optimizer.proposeNewPoint(self,traj,point)
self.counter['varsUpdate'][traj] += 1 #usually done when evaluating gradient, but we're bypassing that
self.queueUpOptPointRuns(traj,self.recommendedOptPoint[traj])
def queueUpOptPointRuns(self,traj,point):
"""
Establishes a queue of runs, all on the point currently stored in "point", to satisfy stochastic denoising.
@ In, traj, int, the trajectory who needs the queue
@ In, point, dict, input space as {var:val} NORMALIZED
@ Out, None
"""
# TODO sanity check, this could be removed for efficiency later
for i in range(self.gradDict['numIterForAve']):
#entries into the queue are as {'inputs':{var:val}, 'prefix':runid} where runid is <traj>_<varUpdate>_<evalNumber> as 0_0_2
nPoint = {'inputs':copy.deepcopy(point)} #deepcopy to prevent simultaneous alteration
nPoint['prefix'] = self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) # from 0 to self.gradDict['numIterForAve'] are opt point evals
# this submission queue only have the denoise number of opt point
self.submissionQueue[traj].append(nPoint)
def _removeRedundantTraj(self, trajToRemove, currentInput):
"""
Local method to remove multiple trajectory
      @ In, trajToRemove, int, identifier of the trajectory to remove
@ In, currentInput, dict, the last variable on trajectory traj
@ Out, removed, bool, if True then trajectory was halted
"""
# TODO replace this with a kdtree search
removeFlag = False
def getRemoved(trajThatSurvived, fullList=None):
"""
          Collect list of all the trajectories removed by this one, or removed by trajectories removed by this one, and so on
@ In, trajThatSurvived, int, surviving trajectory that has potentially removed others
@ In, fullList, list, optional, if included is the partial list to add to
@ Out, fullList, list, list of all traj removed (explicitly or implicitly) by this one
"""
if fullList is None:
fullList = []
removed = self.trajectoriesKilled[trajThatSurvived]
fullList += removed
for rm in removed:
fullList = getRemoved(rm, fullList)
return fullList
#end function definition
notEligibleToRemove = [trajToRemove] + getRemoved(trajToRemove)
# determine if "trajToRemove" should be terminated because it is following "traj"
for traj in self.optTraj:
#don't consider removal if comparing against itself,
# or a trajectory removed by this one, or a trajectory removed by a trajectory removed by this one (recursive)
# -> this prevents mutual destruction cases
if traj not in notEligibleToRemove:
#FIXME this can be quite an expensive operation, looping through each other trajectory
for updateKey in self.optVarsHist[traj].keys():
inp = self.optVarsHist[traj][updateKey] #FIXME deepcopy needed? Used to be present, but removed for now.
if len(inp) < 1: #empty
continue
dist = self.calculateMultivectorMagnitude( [inp[var] - currentInput[var] for var in self.getOptVars()] )
if dist < self.thresholdTrajRemoval:
self.raiseADebug('Halting trajectory "{}" because it is following trajectory "{}"'.format(trajToRemove,traj))
# cancel existing jobs for trajectory
self.cancelJobs([self._createEvaluationIdentifier(traj, self.counter['varsUpdate'][traj]-1, i) for i in self.perturbationIndices])
self.trajectoriesKilled[traj].append(trajToRemove)
#TODO the trajectory to remove should be chosen more carefully someday, for example, the one that has the smallest steps or lower loss value currently
removeFlag = True
break
if removeFlag:
break
if removeFlag:
for trajInd, tr in enumerate(self.optTrajLive):
if tr == trajToRemove:
self.optTrajLive.pop(trajInd)
self.status[trajToRemove] = {'process':'following traj '+str(traj),'reason':'removed as redundant'}
break
return True
else:
return False
def _setupNewStorage(self,traj,keepOpt=False):
"""
Assures correct structure for receiving results from sample evaluations
@ In, traj, int, trajectory of interest
@ In, keepOpt, bool, optional, if True then don't reset the denoised opt
@ Out, None
"""
# store denoised opt if requested
if keepOpt:
den = self.realizations[traj]['denoised']['opt']
denoises = self.gradDict['numIterForAve']
self.realizations[traj] = {'collect' : {'opt' : [ [] ],
'grad': [ [] for _ in range(self.paramDict['pertSingleGrad']) ] },
'denoised': {'opt' : [ [] ],
'grad': [ [] for _ in range(self.paramDict['pertSingleGrad']) ] },
'need' : denoises,
'accepted': 'rejected',
}
# reset opt if requested
if keepOpt:
self.realizations[traj]['denoised']['opt'] = den
self.realizations[traj]['accepted'] = True
def _updateConvergenceVector(self, traj, varsUpdate, currentPoint, conj=False):
"""
Local method to update convergence vector.
      @ In, traj, int, identifier of the trajectory to update
      @ In, varsUpdate, int, current variables update iteration number
      @ In, currentPoint, float, candidate point for optimization path
      @ In, conj, bool, optional, indicates whether conjugate gradient is used to check convergence; if True, do not reset the persistence counter
      @ Out, accepted, bool, True if the new point was accepted, otherwise False
"""
# first, check if we're at varsUpdate 0 (first entry); if so, we are at our first point
if varsUpdate == 0:
# we don't have enough points to decide to accept or reject the new point, so accept it as the initial point
self.raiseADebug('Accepting first point, since we have no rejection criteria.')
return True
## first, determine if we want to keep the new point
# obtain the loss values for comparison
currentLossVal = currentPoint[self.objVar]
oldPoint = self.counter['recentOptHist'][traj][0]
oldLossVal = oldPoint[self.objVar]
# see if new point is better than old point
newerIsBetter = self.checkIfBetter(currentLossVal,oldLossVal)
# if this was a recommended preconditioning point, we should not be converged.
pointFromRecommendation = self.status[traj]['reason'] == 'received recommended point'
# if improved, keep it and move forward; otherwise, reject it and recommend cutting step size
if newerIsBetter:
self.status[traj]['reason'] = 'found new opt point'
self.raiseADebug('Accepting potential opt point for improved loss value. Diff: {}, New: {}, Old: {}'.format(abs(currentLossVal-oldLossVal),currentLossVal,oldLossVal))
else:
self.status[traj]['reason'] = 'rejecting bad opt point'
self.raiseADebug('Rejecting potential opt point for worse loss value. old: "{}", new: "{}"'.format(oldLossVal,currentLossVal))
# cut the next step size to hopefully stay in the valley instead of climb up the other side
self.recommendToGain[traj] = 'cut'
## determine convergence
if pointFromRecommendation:
self.raiseAMessage('Setting convergence for Trajectory "{}" to "False" because of preconditioning.'.format(traj))
converged = False
else:
self.raiseAMessage('Checking convergence for Trajectory "{}":'.format(traj))
self.convergenceProgress[traj] = {} # tracks progress for grad norm, abs, rel tolerances
converged = False # updated for each individual criterion using "or" (pass one, pass all)
#printing utility
printString = ' {:<21}: {:<5}'
printVals = printString + ' (check: {:>+9.2e} < {:>+9.2e}, diff: {:>9.2e})'
# TODO rewrite this action as a lambda?
def printProgress(name,boolCheck,test,gold):
"""
Consolidates a commonly-used print statement to prevent errors and improve readability.
@ In, name, str, printed name of convergence check
@ In, boolCheck, bool, boolean convergence results for this check
@ In, test, float, value of check at current opt point
@ In, gold, float, convergence threshold value
@ Out, None
"""
self.raiseAMessage(printVals.format(name,str(boolCheck),test,gold,abs(test-gold)))
# "min step size" and "gradient norm" are both always valid checks, whether rejecting or accepting new point
## min step size check
try:
lastStep = self.counter['lastStepSize'][traj][0]
minStepSizeCheck = lastStep <= self.minStepSize
except KeyError:
#we reset the step size, so we don't have a value anymore
lastStep = np.nan
minStepSizeCheck = False
printProgress('Min step size',minStepSizeCheck,lastStep,self.minStepSize)
converged = converged or minStepSizeCheck
# if accepting new point, then "same coordinate" and "abs" and "rel" checks are also valid reasons to converge
if newerIsBetter:
#absolute tolerance
absLossDiff = abs(mathUtils.diffWithInfinites(currentLossVal,oldLossVal))
self.convergenceProgress[traj]['abs'] = absLossDiff
absTolCheck = absLossDiff <= self.absConvergenceTol
printProgress('Absolute Loss Diff',absTolCheck,absLossDiff,self.absConvergenceTol)
converged = converged or absTolCheck
#relative tolerance
relLossDiff = mathUtils.relativeDiff(currentLossVal,oldLossVal)
self.convergenceProgress[traj]['rel'] = relLossDiff
relTolCheck = relLossDiff <= self.relConvergenceTol
printProgress('Relative Loss Diff',relTolCheck,relLossDiff,self.relConvergenceTol)
converged = converged or relTolCheck
#same coordinate check
sameCoordinateCheck = True
for var in self.getOptVars():
# don't check constants, of course they're the same
if var in self.constants:
continue
old = oldPoint[var]
current = currentPoint[var]
# differentiate vectors and scalars for checking
if hasattr(old,'__len__'):
if any(old != current):
sameCoordinateCheck = False
break
else:
if old != current:
sameCoordinateCheck = False
break
self.raiseAMessage(printString.format('Same coordinate check',str(sameCoordinateCheck)))
converged = converged or sameCoordinateCheck
if converged:
# update number of successful convergences
self.counter['persistence'][traj] += 1
# check if we've met persistence requirement; if not, keep going
if self.counter['persistence'][traj] >= self.convergencePersistence:
self.raiseAMessage(' ... Trajectory "{}" converged {} times consecutively!'.format(traj,self.counter['persistence'][traj]))
self.convergeTraj[traj] = True
self.removeConvergedTrajectory(traj)
else:
self.raiseAMessage(' ... converged Traj "{}" {} times, required persistence is {}.'.format(traj,self.counter['persistence'][traj],self.convergencePersistence))
else:
if not conj:
self.counter['persistence'][traj] = 0
self.raiseAMessage(' ... continuing trajectory "{}".'.format(traj))
return newerIsBetter
def writeToSolutionExport(self,traj, recent, accepted, overwrite=None):
"""
Standardizes how the solution export is written to.
Uses data from "recentOptHist" and other counters to fill in values.
@ In, traj, int, the trajectory for which an entry is being written
@ In, recent, dict, the new optimal point (NORMALIZED) that needs to get written to the solution export
@ In, accepted, string, whether the most recent point was accepted or rejected as a bad move
@ In, overwrite, dict, optional, values to overwrite if requested as {key:val}
@ Out, None
"""
if overwrite is None:
overwrite = {}
# create realization to add to data object
rlz = {}
badValue = -1.0 #value to use if we don't have a value # TODO make this accessible to user?
for var in self.solutionExport.getVars():
# if this variable has indices, add them to the realization
indexes = self.solutionExport.getDimensions(var)[var]
if len(indexes):
# use the prefix to find the right realization
## NOTE there will be a problem with unsynchronized histories!
varUpdate = self.counter['solutionUpdate'][traj]
# negative values wouldn't make sense
varUpdate = max(0,varUpdate-1)
prefix = self._createEvaluationIdentifier(traj, varUpdate, 0)
_,match = self.mdlEvalHist.realization(matchDict = {'prefix':prefix})
for index in indexes:
rlz[index] = match[index]
# CASE: what variable is asked for:
# inputs, objVar, other outputs
if var in overwrite:
new = overwrite[var]
elif var in recent.keys():
new = self.denormalizeData(recent)[var]
elif var in self.constants:
new = self.constants[var]
# custom counters: varsUpdate, trajID, stepSize
elif var == 'varsUpdate':
new = self.counter['solutionUpdate'][traj]
elif var == 'trajID':
new = traj+1 # +1 is for historical reasons, when histories were indexed on 1 instead of 0
elif var == 'stepSize':
try:
new = self.counter['lastStepSize'][traj][0]
except KeyError:
new = badValue
elif var == 'accepted':
new = accepted
elif var.startswith( 'convergenceAbs'):
try:
new = self.convergenceProgress[traj].get('abs',badValue)
except KeyError:
new = badValue
elif var.startswith( 'convergenceRel'):
try:
new = self.convergenceProgress[traj].get('rel',badValue)
except KeyError:
new = badValue
else:
self.raiseAnError(IOError,'Unrecognized output request:',var)
# format for realization
rlz[var] = np.atleast_1d(new)
self.solutionExport.addRealization(rlz)
def checkResampleOption(self,traj):
"""
      Returns the resample switch, used to turn on self.resample[traj] when needed
      (i.e. it is equivalent to setting self.resample[traj] = self.resampleSwitch).
      @ In, traj, int, the trajectory being checked for resampling
@ Out, self.resampleSwitch, bool, True if resample switch is on
"""
return self.resampleSwitch
| apache-2.0 | 3,588,424,962,406,685,000 | 53.128497 | 240 | 0.646571 | false |
marcwagner/alarmclock | testset.py | 1 | 6477 | import time, datetime
import unittest
import alarm
format = "%a %d-%b-%Y %H:%M"
current_time = datetime.datetime(2015,4,4,16,4,0) #april 4 2015 16:04
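# note: 4 Apr 2015 (the current_time above) was a Saturday; the expected values in alarm_times below are computed relative to that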
default = {'name' : 'default', 'time' : datetime.time(12,0,0), 'days' : ('MO','TU','WE','TH','FR','SA','SU'),
'date' : None, 'path' : './', 'date' : None, 'active' : True}
alarms_list = ({'name' : 'current', 'time' : datetime.time(16,4,0)}, # the current time
{'name' : 'alarm1', 'time' : datetime.time(11,0,0)}, # every day at 11 am
{'name' : 'alarm2', 'time' : datetime.time(9,0,0), 'days' : ('MO','TU')},#monday and tuesday an 9 am
{'name' : 'alarm3', 'time' : datetime.time(22,0,0), 'days' : ('WE','TU','SA')}, # tuesday, wednesday, sunday at 10 pm
{'name' : 'christmas','time' : datetime.time(21,0,0), 'date' : datetime.date(2015,12,24)}, # 9pm on christmas eve
{'name' : 'past', 'time' : datetime.time(12,0,0), 'date' : datetime.date(1999,12,31)}, # noon on dec 31 1999 --> in the past
{'name' : 'path', 'time' : datetime.time(12,0,0), 'path' : '/media/music/1Kindermuziek/K3/K3-Eyo(2011)MP3 Nlt-release/'},
{'name' : 'n_active','time' : datetime.time(12,0,0), 'active' : False},
default)
alarm_times = {'current': datetime.datetime(2015,4,4,16,4,0),
'alarm1': datetime.datetime(2015,4,5,11,0,0),
'alarm2': datetime.datetime(2015,4,6,9,0,0),
'alarm3': datetime.datetime(2015,4,4,22,0,0),
'christmas': datetime.datetime(2015,12,24,21,0,0),
'past': None,
'path': datetime.datetime(2015,4,5,12,0,0),
'n_active':None,
'default': datetime.datetime(2015,4,5,12,0,0)}
root_playlist = ['engeltjes.mp3']
path_playlist = ['01 Eyo.mp3', '02 Hallo K3.mp3', '03 Willem - Alexander.mp3', '04 Smoorverliefd.mp3',
'05 K3 - Airlines.mp3', '06 Beroemd.mp3', '07 Meiden Van De Brandweer.mp3',
'08 Verstoppertje.mp3', '09 Telepathie.mp3', '10 Dubbeldekkertrein.mp3',
'11 Bel Me Ringeling.mp3', '12 Cowboys En Indianen.mp3']
class testcases_alarm(unittest.TestCase):
'''test all cases of working alarms'''
def are_all_the_vars_present(self, alarm, default, **a):
self.assertEqual(a.get('name'), alarm.name)
self.assertEqual(a.get('time'), alarm.time)
self.assertEqual(a.get('date', default['date']), alarm.date)
self.assertEqual(a.get('days', default['days']), alarm.days)
self.assertEqual(a.get('path', default['path']), alarm.path)
self.assertEqual(a.get('active', default['active']), alarm.active)
def test_create_alarm(self):
'''create a basic alarm'''
for a in alarms_list:
al = alarm.alarm(**a)
self.are_all_the_vars_present(al, default, **a)
def test_edit_alarm_correct(self):
'''update an alarm with the parameters of another alarm'''
if len(alarms_list) < 2: # need at least 2 alarms for this test
return
for i in range(len(alarms_list)-1):
a1 = alarms_list[i]
a2 = alarms_list[i+1]
al = alarm.alarm(**a1)
copy_of_default = default.copy()
self.are_all_the_vars_present(al, copy_of_default, **a1)
al.update_alarm(**a2)
copy_of_default.update(a1)
self.are_all_the_vars_present(al, copy_of_default, **a2)
def test_is_the_next_alarm_correct(self):
'''test next_alarm'''
for a in alarms_list:
myalarm = alarm.alarm(**a)
nexttime = alarm_times[myalarm.name]
self.assertEqual(myalarm.next_alarm(current_time), nexttime)
def test_add_alarm_correct_alarms(self):
'''create a set of alarms'''
alarms = alarm.alarmset()
for a in alarms_list:
alarms.add(alarm.alarm(**a))
al = alarms[-1]
self.are_all_the_vars_present(al, default, **a)
self.assertEqual(alarms.exists(a['name']), True)
def test_remove_alarm(self):
'''remove an alarm from a set'''
alarms = alarm.alarmset()
for a in alarms_list:
name = a['name']
alarms.add(alarm.alarm(**a))
self.assertEqual(alarms.exists(name), True)
alarms.remove(alarms[name])
self.assertEqual(alarms.exists(name), False)
def test_the_next_alarm_in_set(self):
'''alarmset next_alarm'''
alarms = alarm.alarmset()
for a in alarms_list:
alarms.add(alarm.alarm(**a))
self.assertEqual(alarms.next_alarm(current_time).next_alarm(current_time), current_time)
def test_generate_playlist(self):
'''based on the path, generate a list of files'''
alarm1 = alarm.alarm(**alarms_list[1])
path = alarm.alarm(**alarms_list[6])
self.assertEqual(alarm1.generate_playlist(), root_playlist)
self.assertEqual(path.generate_playlist(), path_playlist)
def test_play_a_song(self):
        '''play a song from a file'''
alarm1 = alarm.alarm(**alarms_list[1])
self.assertEqual(alarm1.playing, False)
self.assertEqual(alarm1.blocking, False)
self.assertEqual(alarm1.player_active(), False)
alarm1.play(root_playlist[0])
time.sleep(0.2)
self.assertEqual(alarm1.playing, True)
self.assertEqual(alarm1.blocking, False)
self.assertEqual(alarm1.player_active(), True)
alarm1.stop()
def test_save_and_load_alarms(self):
alarms_1 = alarm.alarmset()
alarms_2 = alarm.alarmset()
for a in alarms_list:
alarms_1.add(alarm.alarm(**a))
alarms_1.save_alarms('test_config.file')
alarms_2.load_alarms('test_config.file')
for a_1, a_2 in zip (alarms_1, alarms_2):
self.assertEqual(a_1.name, a_2.name)
self.assertEqual(a_1.time, a_2.time)
self.assertEqual(a_1.date, a_2.date)
self.assertEqual(a_1.days, a_2.days)
self.assertEqual(a_1.path, a_2.path)
self.assertEqual(a_1.active, a_2.active)
def test_player_active(self):
pass
def test_stop(self):
pass
if __name__ == '__main__':
unittest.main()
| mit | 8,184,091,684,865,688,000 | 42.469799 | 142 | 0.561371 | false |
timsavage/extopen | setup.py | 1 | 1140 | from setuptools import setup, find_packages
setup(
name = 'extopen',
version = '0.1.1',
description = "Cross platform helper for opening a file with the default external application.",
long_description = open('README.rst').read(),
url='https://github.com/timsavage/extopen',
author = 'Tim Savage',
author_email = '[email protected]',
license = 'BSD',
platforms = 'Posix; MacOS X; Windows',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
zip_safe = True,
py_modules = ['extopen']
)
| bsd-3-clause | 4,112,152,975,187,760,000 | 33.545455 | 100 | 0.607018 | false |
reinaldomaslim/Singaboat_RobotX2016 | robotx_vision/nodes/color_sequence.py | 1 | 20932 | #! /usr/bin/python
""" detect color sequence
ren ye 2016-10-21
reference:
http://stackoverflow.com/questions/14476683/identifying-color-sequence-in-opencv
algorithm:
# image preparation
1. subwindowing to light buoy by laser and camera
2. convert to hsv
3. check hue for the blob
# detection
1. wait until first detection is made
2. wait until no detection is found for 2 seconds
3. record color
4. if color is different from previous frame, add to sequence
5. if no detection, to step 2
6. if sequence is length 3, report and end
"""
#!/usr/bin/env python
""" camshift_color.py - Version 1.1 2013-12-20
Modification of the ROS OpenCV Camshift example using cv_bridge and publishing the ROI
coordinates to the /roi topic.
"""
import time
import rospy
import cv2
from cv2 import cv as cv
from robotx_vision.ros2opencv2 import ROS2OpenCV2
from std_msgs.msg import String, Float64MultiArray, MultiArrayDimension
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Image, RegionOfInterest
import numpy as np
from sklearn.cluster import KMeans
class ColorSequence(ROS2OpenCV2):
# taken from robotx_vision.find_shapes.Color_Detection
x0, y0 = 0, 0
hist_list = list()
MAX_LEN = 7 * 5
counter = 0
roi_x_offset, roi_y_offset, roi_width, roi_height = [0, 0, 0, 0]
def __init__(self, node_name, debug=False):
ROS2OpenCV2.__init__(self, node_name, debug)
self.sequence_pub = rospy.Publisher("color_sequence", Vector3, queue_size=10)
# self.odom_received = False
# rospy.Subscriber("odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
# while not self.odom_received:
# pass
# print "waiting for roi"
rospy.wait_for_message("led_sequence_roi", RegionOfInterest)
rospy.Subscriber("led_sequence_roi", RegionOfInterest, self.roi_callback, queue_size=50)
# print "roi received"
self.node_name = node_name
# The minimum saturation of the tracked color in HSV space,
# as well as the min and max value (the V in HSV) and a
# threshold on the backprojection probability image.
self.smin = rospy.get_param("~smin", 85)
self.vmin = rospy.get_param("~vmin", 50)
self.vmax = rospy.get_param("~vmax", 254)
self.threshold = rospy.get_param("~threshold", 50)
# all done in ros2opencv2.py:
# self.depth_sub, self.depth_callback, self.depth_image
# self.depth_image can be used globally
# self.depth_sub = rospy.Subscriber("input_depth_image", Image, self.depth_callback, queue_size=1)
# Create a number of windows for displaying the histogram,
# parameters controls, and backprojection image
if self.debug:
cv.NamedWindow("Histogram", cv.CV_WINDOW_NORMAL)
cv.MoveWindow("Histogram", 300, 50)
cv.NamedWindow("Parameters", 0)
cv.MoveWindow("Parameters", 700, 50)
cv.NamedWindow("Backproject", 0)
cv.MoveWindow("Backproject", 700, 325)
# cv.NamedWindow("Tracked_obj", 0)
# cv.MoveWindow("Tracked_obj", 700, 900)
# Create the slider controls for saturation, value and threshold
cv.CreateTrackbar("Saturation", "Parameters", self.smin, 255, self.set_smin)
cv.CreateTrackbar("Min Value", "Parameters", self.vmin, 255, self.set_vmin)
cv.CreateTrackbar("Max Value", "Parameters", self.vmax, 255, self.set_vmax)
cv.CreateTrackbar("Threshold", "Parameters", self.threshold, 255, self.set_threshold)
# Initialize a number of variables
self.hist = None
self.track_window = None
self.show_backproj = False
# These are the callbacks for the slider controls
def set_smin(self, pos):
self.smin = pos
def set_vmin(self, pos):
self.vmin = pos
def set_vmax(self, pos):
self.vmax = pos
def set_threshold(self, pos):
self.threshold = pos
# def color_masking(self, frame):
# hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# mask = cv2.inRange(hsv, self.lower_orange, self.upper_orange) + \
# cv2.inRange(hsv, self.lower_yellow, self.upper_yellow)
# return mask
# def depth_masking(self):
# self.depth_array = np.array(self.depth_image, dtype=np.float32)
# # self.depth_image
# depth_mask = np.zeros((self.frame_height, self.frame_width))
# for x in range(self.frame_height):
# for y in range(self.frame_width):
# try:
# # Get a depth value in meters
# z = self.depth_array[y, x]
# # Check for NaN values returned by the camera driver
# if isnan(z):
# continue
# except:
# # It seems to work best if we convert exceptions to big value
# z = 255
# if z < self.depth_threshold:
# depth_mask[y, x] = 255 # white
# else:
# depth_mask[y, x] = 0
# return depth_mask
def find_max_contour(self, mask):
# find contours
contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for multiple contours, find the maximum
area=list()
approx=list()
for i, cnt in enumerate(contours):
approx.append(cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True))
area.append(cv2.contourArea(cnt))
# overwrite selection box by automatic color matching
return cv2.boundingRect(approx[np.argmax(area)])
def find_contours(self, mask):
# find contours
mask = self.morphological(mask)
contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for multiple contours, find the maximum
area=list()
approx=list()
for i, cnt in enumerate(contours):
approx.append(cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True))
area.append(cv2.contourArea(cnt))
# overwrite selection box by automatic color matching
self.area_ratio = np.sum(area) / (self.frame_width * self.frame_height)
if np.max(area) / np.sum(area) > 0.95:
# print "one blob"
self.number_blob = 1
else:
# print "more than one blobs"
self.number_blob = 2
if len(area) > 1: # more than one blob, find the ratio of the 1st and 2nd largest
area_rev_sorted = np.sort(area)[::-1]
self.area_ratio = area_rev_sorted[0] / area_rev_sorted[1]
else: # only one blob found
self.area_ratio = 0
# print self.area_ratio
def morphological(self, mask):
""" tune the mask """
# morphological openning (remove small objects from the foreground)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# # morphological closing (fill small objects from the foreground)
kernel = np.ones((10, 10), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
return mask
# The main processing function computes the histogram and backprojection
def process_image(self, cv_image):
try:
# First blur the image
frame = cv2.blur(cv_image, (5, 5))
# Convert from RGB to HSV space
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Create a mask using the current saturation and value parameters
mask = cv2.inRange(hsv, np.array((0., self.smin, self.vmin)), np.array((180., 255., self.vmax)))
# not select any region, do automatic color rectangle
if self.selection is None:
# obtain the color mask
# edge_roi = self.edge_masking()
# print "edge mask", edge_mask
# create bounding box from the maximum mask
self.selection = [self.roi_x_offset, self.roi_y_offset, self.roi_width, self.roi_height] # in x y w h
# print "selection", self.selection
self.detect_box = self.selection
self.track_box = None
# If the user is making a selection with the mouse,
# calculate a new histogram to track
if self.selection is not None:
x0, y0, w, h = self.selection
x1 = x0 + w
y1 = y0 + h
self.track_window = (x0, y0, x1, y1)
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
self.hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX)
self.hist = self.hist.reshape(-1)
self.hist_prob = np.argmax(self.hist)
# print self.hist_prob
self.show_hist()
if self.detect_box is not None:
self.selection = None
# If we have a histogram, track it with CamShift
# if self.hist is not None:
# # Compute the backprojection from the histogram
# backproject = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
# # Mask the backprojection with the mask created earlier
# backproject &= mask
# # Threshold the backprojection
# ret, backproject = cv2.threshold(backproject, self.threshold, 255, cv.CV_THRESH_TOZERO)
# # self.find_contours(backproject)
# # Detect blobs.
# # keypoints = self.blob_detector.detect(backproject)
# # print keypoints
# x, y, w, h = self.track_window
# if self.track_window is None or w <= 0 or h <=0:
# self.track_window = 0, 0, self.frame_width - 1, self.frame_height - 1
# # Set the criteria for the CamShift algorithm
# term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
# # Run the CamShift algorithm
# self.track_box, self.track_window = cv2.CamShift(backproject, self.track_window, term_crit)
# x0, y0, x1, y1 = self.track_window
# # print self.track_window
# # Display the resulting backprojection
# cv2.imshow("Backproject", backproject)
except:
pass
return cv_image
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
# print np.argmax(self.hist)
self.hist_prob = np.argmax(self.hist)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
if self.debug:
cv2.imshow('Histogram', img)
def hue_histogram_as_image(self, hist):
""" Returns a nice representation of a hue histogram """
histimg_hsv = cv.CreateImage((320, 200), 8, 3)
mybins = cv.CloneMatND(hist.bins)
cv.Log(mybins, mybins)
(_, hi, _, _) = cv.MinMaxLoc(mybins)
cv.ConvertScale(mybins, mybins, 255. / hi)
w,h = cv.GetSize(histimg_hsv)
hdims = cv.GetDims(mybins)[0]
for x in range(w):
xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image
val = int(mybins[int(hdims * x / w)] * h / 255)
cv2.rectangle(histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
cv2.rectangle(histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)
histimg = cv2.cvtColor(histimg_hsv, cv.CV_HSV2BGR)
return histimg
def odom_callback(self, msg):
""" call back to subscribe, get odometry data:
pose and orientation of the current boat,
suffix 0 is for origin """
self.x0 = msg.pose.pose.position.x
self.y0 = msg.pose.pose.position.y
self.odom_received = True
def image_callback(self, data):
# Store the image header in a global variable
self.image_header = data.header
# Time this loop to get cycles per second
start = time.time()
# Convert the ROS image to OpenCV format using a cv_bridge helper function
frame = self.convert_image(data)
# Some webcams invert the image
if self.flip_image:
frame = cv2.flip(frame, 0)
# Store the frame width and height in a pair of global variables
if self.frame_width is None:
self.frame_size = (frame.shape[1], frame.shape[0])
self.frame_width, self.frame_height = self.frame_size
# Create the marker image we will use for display purposes
if self.marker_image is None:
self.marker_image = np.zeros_like(frame)
# Copy the current frame to the global image in case we need it elsewhere
self.frame = frame.copy()
# Reset the marker image if we're not displaying the history
if not self.keep_marker_history:
self.marker_image = np.zeros_like(self.marker_image)
# Process the image to detect and track objects or features
processed_image = self.process_image(frame)
        # If the result is a greyscale image, convert to 3-channel for display purposes
#if processed_image.channels == 1:
#cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
#else:
# Make a global copy
self.processed_image = processed_image.copy()
# Display the user-selection rectangle or point
self.display_selection()
# Night mode: only display the markers
if self.night_mode:
self.processed_image = np.zeros_like(self.processed_image)
# Merge the processed image and the marker image
self.display_image = cv2.bitwise_or(self.processed_image, self.marker_image)
# If we have a track box, then display it. The track box can be either a regular
# cvRect (x,y,w,h) or a rotated Rect (center, size, angle).
if self.show_boxes:
if self.track_box is not None and self.is_rect_nonzero(self.track_box):
if len(self.track_box) == 4:
x,y,w,h = self.track_box
size = (w, h)
center = (x + w / 2, y + h / 2)
angle = 0
self.track_box = (center, size, angle)
else:
(center, size, angle) = self.track_box
# For face tracking, an upright rectangle looks best
if self.face_tracking:
pt1 = (int(center[0] - size[0] / 2), int(center[1] - size[1] / 2))
pt2 = (int(center[0] + size[0] / 2), int(center[1] + size[1] / 2))
cv2.rectangle(self.display_image, pt1, pt2, cv.RGB(50, 255, 50), self.feature_size, 8, 0)
else:
# Otherwise, display a rotated rectangle
vertices = np.int0(cv2.cv.BoxPoints(self.track_box))
cv2.drawContours(self.display_image, [vertices], 0, cv.RGB(50, 255, 50), self.feature_size)
# If we don't yet have a track box, display the detect box if present
elif self.detect_box is not None and self.is_rect_nonzero(self.detect_box):
(pt1_x, pt1_y, w, h) = self.detect_box
if self.show_boxes:
cv2.rectangle(self.display_image, (pt1_x, pt1_y), (pt1_x + w, pt1_y + h), cv.RGB(50, 255, 50), self.feature_size, 8, 0)
# Publish the ROI
self.publish_roi()
self.publish_sequence()
# Compute the time for this loop and estimate CPS as a running average
end = time.time()
duration = end - start
fps = int(1.0 / duration)
self.cps_values.append(fps)
if len(self.cps_values) > self.cps_n_values:
self.cps_values.pop(0)
self.cps = int(sum(self.cps_values) / len(self.cps_values))
# Display CPS and image resolution if asked to
if self.show_text:
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
""" Print cycles per second (CPS) and resolution (RES) at top of the image """
if self.frame_size[0] >= 640:
vstart = 25
voffset = int(50 + self.frame_size[1] / 120.)
elif self.frame_size[0] == 320:
vstart = 15
voffset = int(35 + self.frame_size[1] / 120.)
else:
vstart = 10
voffset = int(20 + self.frame_size[1] / 120.)
cv2.putText(self.display_image, "CPS: " + str(self.cps), (10, vstart), font_face, font_scale, cv.RGB(255, 255, 0))
cv2.putText(self.display_image, "RES: " + str(self.frame_size[0]) + "X" + str(self.frame_size[1]), (10, voffset), font_face, font_scale, cv.RGB(255, 255, 0))
if self.debug:
# Update the image display
cv2.imshow(self.node_name, self.display_image)
# Process any keyboard commands
self.keystroke = cv2.waitKey(5)
if self.keystroke is not None and self.keystroke != -1:
try:
cc = chr(self.keystroke & 255).lower()
if cc == 'n':
self.night_mode = not self.night_mode
elif cc == 'f':
self.show_features = not self.show_features
elif cc == 'b':
self.show_boxes = not self.show_boxes
elif cc == 't':
self.show_text = not self.show_text
elif cc == 'q':
                    # The user has pressed the q key, so exit
rospy.signal_shutdown("User hit q key to quit.")
except:
pass
def publish_sequence(self):
# Watch out for negative offsets
# pass
# append all data to hist_list
if len(self.hist_list) > self.MAX_LEN:
self.hist_list.pop(0)
try:
self.hist_list.append([self.counter, self.hist_prob])
except:
pass
# print self.hist_list
self.counter += 1
# find distinct hist_prob
try:
kmeans = KMeans(n_clusters=3)
kmeans.fit(np.array(self.hist_list))
color_sequence = kmeans.cluster_centers_
order = np.argsort(color_sequence[:,0])[::-1]
ordered_sequence = color_sequence[order,1]
# print "ordered seq", ordered_sequence
color_seq = ["", "", ""]
c = 0
for i in ordered_sequence:
print i
                if i < 1 or i > 14:
color_seq[c] = "red"
elif 7 < i < 12:
color_seq[c] = "blue"
elif 1 < i < 4:
color_seq[c] = "yellow"
elif 3 < i < 7:
color_seq[c] = "green"
c += 1
print "color_seq", color_seq
a = Vector3()
a.x = ordered_sequence[0]
a.y = ordered_sequence[1]
a.z = ordered_sequence[2]
self.sequence_pub.publish(a)
rospy.set_param("/gui/color1", color_seq[0])
rospy.set_param("/gui/color2", color_seq[1])
rospy.set_param("/gui/color3", color_seq[2])
except:
print "sequence publish failed"
def roi_callback(self, msg):
# print msg.x_offset
self.roi_x_offset = msg.x_offset
self.roi_y_offset = msg.y_offset
self.roi_width = msg.width
self.roi_height = msg.height
# try:
# sequence = Vector3()
# sequence.data.x = self.x0
# sequence.data.y = self.y0
# sequence.data.z = self.hist_prob
# print sequence.data
# self.sequence_pub.publish(sequence)
# except:
# rospy.loginfo("Publishing sequence failed")
if __name__ == '__main__':
try:
node_name = "color_sequence"
ColorSequence(node_name, debug=True)
try:
rospy.init_node(node_name)
except:
pass
rospy.spin()
except KeyboardInterrupt:
print "Shutting down vision node."
cv.DestroyAllWindows()
| gpl-3.0 | 2,657,304,766,669,864,400 | 37.691312 | 169 | 0.559431 | false |
albertobeta/UberSimpleWebsockets | send.py | 1 | 1219 | import tornado.httpserver
import tornado.websocket
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
from random import randint #Random generator
#Config
port = 9000 #Websocket Port
timeInterval= 2000 #Milliseconds
class WSHandler(tornado.websocket.WebSocketHandler):
#check_origin fixes an error 403 with Tornado
#http://stackoverflow.com/questions/24851207/tornado-403-get-warning-when-opening-websocket
def check_origin(self, origin):
return True
def open(self):
#Send message periodic via socket upon a time interval
self.callback = PeriodicCallback(self.send_values, timeInterval)
self.callback.start()
def send_values(self):
#Generates random values to send via websocket
self.write_message(str(randint(1,10)) + ';' + str(randint(1,10)) + ';' + str(randint(1,10)) + ';' + str(randint(1,10)))
def on_message(self, message):
pass
def on_close(self):
self.callback.stop()
application = tornado.web.Application([
(r'/', WSHandler),
])
if __name__ == "__main__":
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start() | mit | -5,836,817,919,319,675,000 | 29.5 | 127 | 0.710418 | false |
vojtechtrefny/anaconda | pyanaconda/anaconda.py | 1 | 8728 | # anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <[email protected]>
# Mike Fulbright <[email protected]>
# Jakub Jelinek <[email protected]>
# Jeremy Katz <[email protected]>
# Chris Lumens <[email protected]>
# Paul Nasrat <[email protected]>
# Erik Troan <[email protected]>
# Matt Wilson <[email protected]>
#
import os
import sys
import stat
from glob import glob
from tempfile import mkstemp
import threading
from pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import iutil
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
def __init__(self):
from pyanaconda import desktop
self._bootloader = None
self.canReIPL = False
self.desktop = desktop.Desktop()
self.dir = None
self.displayMode = None
self.id = None
self._instClass = None
self._intf = None
self.isHeadless = False
self.ksdata = None
self.mediaDevice = None
self.methodstr = None
self.opts = None
self._payload = None
self.proxy = None
self.proxyUsername = None
self.proxyPassword = None
self.reIPLMessage = None
self.rescue_mount = True
self.rootParts = None
self.stage2 = None
self._storage = None
self.updateSrc = None
self.mehConfig = None
# *sigh* we still need to be able to write this out
self.xdriver = None
# Data for inhibiting the screensaver
self.dbus_session_connection = None
self.dbus_inhibit_id = None
# This is used to synchronize Gtk.main calls between the graphical
# interface and error dialogs. Whoever gets to their initialization code
# first will lock gui_initializing
self.gui_initialized = threading.Lock()
@property
def bootloader(self):
if not self._bootloader:
self._bootloader = get_bootloader()
return self._bootloader
@property
def instClass(self):
if not self._instClass:
from pyanaconda.installclass import DefaultInstall
self._instClass = DefaultInstall()
return self._instClass
def _getInterface(self):
return self._intf
def _setInterface(self, v):
# "lambda cannot contain assignment"
self._intf = v
def _delInterface(self):
del self._intf
intf = property(_getInterface, _setInterface, _delInterface)
@property
def payload(self):
# Try to find the packaging payload class. First try the install
# class. If it doesn't give us one, fall back to the default.
if not self._payload:
klass = self.instClass.getBackend()
if not klass:
from pyanaconda.flags import flags
if self.ksdata.ostreesetup.seen:
from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
klass = RPMOSTreePayload
elif flags.livecdInstall:
from pyanaconda.packaging.livepayload import LiveImagePayload
klass = LiveImagePayload
elif self.ksdata.method.method == "liveimg":
from pyanaconda.packaging.livepayload import LiveImageKSPayload
klass = LiveImageKSPayload
else:
from pyanaconda.packaging.dnfpayload import DNFPayload
klass = DNFPayload
self._payload = klass(self.ksdata)
return self._payload
@property
def protected(self):
specs = []
if os.path.exists("/run/initramfs/livedev") and \
stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
specs.append(os.readlink("/run/initramfs/livedev"))
if self.methodstr and self.methodstr.startswith("hd:"):
specs.append(self.methodstr[3:].split(":", 3)[0])
if self.stage2 and self.stage2.startswith("hd:"):
specs.append(self.stage2[3:].split(":", 3)[0])
# zRAM swap devices need to be protected
for zram_dev in glob("/dev/zram*"):
specs.append(zram_dev)
return specs
@property
def storage(self):
if not self._storage:
import blivet
self._storage = blivet.Blivet(ksdata=self.ksdata)
if self.instClass.defaultFS:
self._storage.setDefaultFSType(self.instClass.defaultFS)
return self._storage
def dumpState(self):
from meh import ExceptionInfo
from meh.dump import ReverseExceptionDump
from inspect import stack as _stack
from traceback import format_stack
# Skip the frames for dumpState and the signal handler.
stack = _stack()[2:]
stack.reverse()
exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
self.mehConfig)
# gather up info on the running threads
threads = "\nThreads\n-------\n"
for thread_id, frame in sys._current_frames().items():
threads += "\nThread %s\n" % (thread_id,)
threads += "".join(format_stack(frame))
# dump to a unique file
(fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
dump_text = exn.traceback_and_object_dump(self)
dump_text += threads
dump_text = dump_text.encode("utf-8")
iutil.eintr_retry_call(os.write, fd, dump_text)
iutil.eintr_retry_call(os.close, fd)
# append to a given file
with open("/tmp/anaconda-tb-all.log", "a+") as f:
f.write("--- traceback: %s ---\n" % filename)
f.write(dump_text + "\n")
def initInterface(self, addon_paths=None):
if self._intf:
raise RuntimeError("Second attempt to initialize the InstallInterface")
if self.displayMode == 'g':
from pyanaconda.ui.gui import GraphicalUserInterface
# Run the GUI in non-fullscreen mode, so live installs can still
# use the window manager
self._intf = GraphicalUserInterface(self.storage, self.payload,
self.instClass, gui_lock=self.gui_initialized,
fullscreen=False)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="gui")
elif self.displayMode in ['t', 'c']: # text and command line are the same
from pyanaconda.ui.tui import TextUserInterface
self._intf = TextUserInterface(self.storage, self.payload,
self.instClass)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="tui")
else:
raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
if addon_paths:
self._intf.update_paths(addon_paths)
def writeXdriver(self, root=None):
# this should go away at some point, but until it does, we
# need to keep it around.
if self.xdriver is None:
return
if root is None:
root = iutil.getSysroot()
if not os.path.isdir("%s/etc/X11" %(root,)):
os.makedirs("%s/etc/X11" %(root,), mode=0o755)
f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
f.close()
| gpl-2.0 | 8,755,333,330,063,547,000 | 35.066116 | 106 | 0.601169 | false |
swisscom/cleanerversion | versions_tests/tests/test_utils.py | 1 | 4249 | from unittest import skipUnless
from django.db import IntegrityError
from django.db import connection
from django.test import TestCase, TransactionTestCase
from versions.util.postgresql import get_uuid_like_indexes_on_table
from versions_tests.models import ChainStore, Color
@skipUnless(connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlVersionUniqueTests(TransactionTestCase):
def setUp(self):
self.red = Color.objects.create(name='red')
self.green = Color.objects.create(name='green')
self.black = Color.objects.create(name='black')
self.yellow = Color.objects.create(name='yellow')
# - only one store with the same name and subchain_id can exist in a
# single city
# - no two stores can share the same door_frame_color and door_color
store = {
'subchain_id': 1,
'city': 'Santa Barbara',
'name': 'Barbara style',
'opening_hours': '9-9 everyday',
'door_frame_color': self.red,
'door_color': self.black,
}
self.sb1 = ChainStore.objects.create(**store)
def test_version_unique(self):
# It should not be possible to create another store with the same name,
# city, and subchain_id
with self.assertRaises(IntegrityError):
sb2 = ChainStore.objects.create(
subchain_id=self.sb1.subchain_id,
city=self.sb1.city,
name=self.sb1.name,
door_frame_color=self.sb1.door_frame_color,
door_color=self.green
)
# It should not be possible to create another store with the same door
# and door_frame color
with self.assertRaises(IntegrityError):
sb3 = ChainStore.objects.create(
subchain_id=self.sb1.subchain_id,
city=self.sb1.city,
name="Bearded Bob's style",
door_frame_color=self.sb1.door_frame_color,
door_color=self.sb1.door_color
)
# It should be possible to create objects as long as they follow the
# unique constraints, though:
sb4 = ChainStore.objects.create(
subchain_id=self.sb1.subchain_id,
city=self.sb1.city,
name="Bearded Bob's style",
door_frame_color=self.sb1.door_frame_color,
door_color=self.green
)
sb5 = ChainStore.objects.create(
subchain_id=sb4.subchain_id + 1,
city=sb4.city,
name=sb4.name,
door_frame_color=sb4.door_frame_color,
door_color=self.yellow
)
# If a version is soft-deleted, it should be possible to create a new
# object with the
# value of that old version
sb4.delete()
sb6 = ChainStore.objects.create(
subchain_id=sb4.subchain_id,
city=sb4.city,
name=sb4.name,
door_frame_color=sb4.door_frame_color,
door_color=sb4.door_color
)
def test_identity_unique(self):
c = Color.objects.create(name='sky blue')
c.identity = self.green.identity
# It should not be possible to have two "current" objects with the
# same identity:
with self.assertRaises(IntegrityError):
c.save()
@skipUnless(connection.vendor == 'postgresql', "Postgresql-specific test")
class PostgresqlUuidLikeIndexesTest(TestCase):
def test_no_like_indexes_on_uuid_columns(self):
# Django creates like indexes on char columns. In Django 1.7.x and
# below, there is no support for native uuid columns, so
# CleanerVersion uses a CharField to store the uuid values. For
# postgresql, Django creates special indexes for char fields so that
# like searches (e.g. WHERE foo like '%bar') are fast.
# Those indexes are not going to be used in our case, and extra
# indexes will slow down updates and inserts. So, they should have
# been removed by the post_migrate handler in
# versions_tests.apps.VersionsTestsConfig.ready.
self.assertEqual(0, len(get_uuid_like_indexes_on_table(ChainStore)))
| apache-2.0 | 2,115,446,103,184,632,000 | 38.342593 | 79 | 0.618028 | false |
globocom/database-as-a-service | dbaas/workflow/steps/tests/test_dns_step.py | 1 | 1957 | from random import randint
from mock import patch
from workflow.steps.util.dns import ChangeTTL, ChangeTTLTo5Minutes, ChangeTTLTo3Hours
from . import TestBaseStep
class FakeDNSProvider(object):
dns_ttl = {}
@classmethod
def update_database_dns_ttl(cls, infra, seconds):
cls.dns_ttl[infra] = seconds
class DNSStepTests(TestBaseStep):
def setUp(self):
super(DNSStepTests, self).setUp()
FakeDNSProvider.dns_ttl = {}
@patch(
'dbaas_dnsapi.provider.DNSAPIProvider.update_database_dns_ttl',
new=FakeDNSProvider.update_database_dns_ttl
)
def test_change_ttl_five_minutes(self):
self.assertEqual(FakeDNSProvider.dns_ttl, {})
ChangeTTLTo5Minutes(self.instance).do()
self.assertEqual(len(FakeDNSProvider.dns_ttl), 1)
self.assertIn(self.infra, FakeDNSProvider.dns_ttl)
ttl_seconds = FakeDNSProvider.dns_ttl[self.infra]
self.assertEqual(ttl_seconds, 300)
@patch(
'dbaas_dnsapi.provider.DNSAPIProvider.update_database_dns_ttl',
new=FakeDNSProvider.update_database_dns_ttl
)
def test_change_ttl_3_hours(self):
self.assertEqual(FakeDNSProvider.dns_ttl, {})
ChangeTTLTo3Hours(self.instance).do()
self.assertEqual(len(FakeDNSProvider.dns_ttl), 1)
self.assertIn(self.infra, FakeDNSProvider.dns_ttl)
ttl_seconds = FakeDNSProvider.dns_ttl[self.infra]
self.assertEqual(ttl_seconds, 10800)
def test_minutes_to_seconds(self):
change_ttl = ChangeTTLTo5Minutes(self.instance)
change_ttl.minutes = randint(1, 10000)
self.assertEqual(change_ttl.seconds, change_ttl.minutes*60)
def test_unicode(self):
change_ttl = ChangeTTLTo5Minutes(self.instance)
change_ttl.minutes = randint(1, 10000)
self.assertEqual(
unicode(change_ttl),
'Changing DNS TLL to {} minutes...'.format(change_ttl.minutes)
)
| bsd-3-clause | -3,945,911,910,310,747,000 | 32.741379 | 85 | 0.676546 | false |
pnpnpn/gimsan-py | gimsan_result.py | 1 | 15480 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import time_benchmark
import os
import sys
import json
import re
import time
import numpy as np
import ConfigParser
import subprocess
from tornado import template
from argparse_plus import ArgumentParserPlus
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Motif import Motif
from gibbslib.batch_experiment import *
from gibbslib.simple_logging import *
from gibbslib.gimsan_base import GimsanJob
from gibbslib.gimsan_exception import *
class FinderResult():
def __init__(self, finder_meta, nullset_size, statsig_dir, outdir, gimsan_home):
self.logger = logging.getLogger(self.__class__.__name__)
self.finder_id = finder_meta['finder_id']
self.finder_id_alt = self.finder_id.replace('_', '-')
self.finder_outdir = finder_meta['finder_outdir'] #per-finder
self.width = finder_meta['width']
self.nullset_size = nullset_size
self.statsig_dir = statsig_dir
self.outdir = outdir #per-template
self.load_template_json()
self.pval_r_path = os.path.join(gimsan_home, 'misc', "conf_pval_only.R")
#results
self.pvalue_comment = None
self.kmer_lst = None
self.kmer_filename = None
self.logowidth = 5
self.weblogo_basename = None
self.weblogo_revcompl_basename = None
self.coldep_outfile = None
self.coldep_num_pairs = None
def construct_weblogo(self, weblogo_filename, weblogo_revcompl_filename):
self.weblogo_basename = os.path.basename(weblogo_filename)
self.weblogo_revcompl_basename = os.path.basename(weblogo_revcompl_filename)
motif = Motif(alphabet=IUPAC.unambiguous_dna)
for kmer in self.kmer_lst:
motif.add_instance(Seq(kmer, motif.alphabet))
logowidth_normal = self.construct_weblogo_helper(weblogo_filename, motif)
#reverse complement
motif_revcompl = motif.reverse_complement()
logowidth_revcompl = self.construct_weblogo_helper(weblogo_revcompl_filename, motif_revcompl)
self.logowidth = max(self.logowidth, logowidth_normal, logowidth_revcompl)
def construct_weblogo_helper(self, weblogo_filename, motif):
        logowidth = (20.0 / 45.0) * self.width + 2
motif.weblogo(weblogo_filename, logowidth=logowidth)
#return logowidth * 50 #width to pixel conversion
return logowidth
def load_json(self, filename):
json_dct = {}
with open(filename, 'rb') as fh:
json_dct = json.loads(fh.read())
if json_dct is None or "kmers" not in json_dct:
            raise InvalidMotifJsonFileError("File does not seem to be a valid json file for motif results: %s" % filename)
return json_dct
def get_pvalue_comment_from_rout(self):
rout_filename = self.get_rout_filename()
with open(rout_filename, 'rb') as fh:
for ln in fh:
ln = unicode(ln, 'utf-8')
if 'MLE of the p-value' in ln:
self.pvalue_comment = ln.strip()
break
if not self.pvalue_comment:
raise ParsingMotifResultError('Cannot find P-value comment: %s' % rout_filename)
def load_template_json(self):
json_filename = os.path.join(self.finder_outdir, 'motif-00000.stdout')
self.template_json = self.load_json(json_filename)
def extract_and_write_kmers(self):
self.kmer_lst = [l[1] for l in self.template_json['kmers'].values()]
self.kmer_filename = os.path.join(self.statsig_dir, '%s.kmers' % self.finder_id)
self.write_kmers_file(self.kmer_lst, self.kmer_filename)
def write_kmers_file(self, curr_kmer_lst, curr_kmer_filename):
self.logger.info('Writing kmers: %s' % curr_kmer_filename)
with open(curr_kmer_filename, 'wb') as fh:
for kmer in curr_kmer_lst:
print('>FASTA header', file=fh)
print(kmer, file=fh)
def get_rscript_text(self, template_score, null_scores_path):
params = {
'pval_r_path' : self.pval_r_path,
'null_scores_path' : null_scores_path,
'template_score' : template_score,
}
rscript_text = """
source("%(pval_r_path)s")
library(MASS)
sample<-scan("%(null_scores_path)s")
getConfPvalLat(%(template_score)s, sample, conf=0.1, mins=7, maxs=200)
""" % params
return rscript_text
def get_rout_filename(self):
return os.path.join(self.statsig_dir, '%s.r_out' % self.finder_id)
def extract_and_write_scores(self):
score_lst = [None] * (self.nullset_size + 1)
for i in range(self.nullset_size+1):
if i == 0:
json_filename = os.path.join(self.finder_outdir, 'motif-00000.stdout')
else:
json_filename = os.path.join(self.finder_outdir, 'null-%05d.stdout' % i )
json_dct = self.load_json(json_filename)
score_lst[i] = json_dct['score_ranking_runs']
#write gm_width008.scores file
nullscores_filename = os.path.join(self.statsig_dir, '%s.scores' % self.finder_id)
self.logger.info('Writing null scores: %s' % nullscores_filename)
with open(nullscores_filename, 'wb') as fh:
print('\n' . join([str(s) for s in score_lst[1:]]), file=fh)
#write R file
rscript_text = self.get_rscript_text(score_lst[0], nullscores_filename)
rscript_filename = os.path.join(self.statsig_dir, '%s.R' % self.finder_id)
self.logger.info('Writing R script: %s' % rscript_filename)
with open(rscript_filename, 'wb') as fh:
print(rscript_text, file=fh)
return rscript_filename
class GimsanResultManager(GimsanJob):
def __init__(self, name, template_file, outdir, config, conf_file, is_overwrite=False, dryrun=False, verbose=False):
super(GimsanResultManager, self).__init__(outdir, config)
self.logger = logging.getLogger(self.__class__.__name__)
self.name = name
self.template_file = template_file
self.outdir = outdir
self.conf_file = conf_file
self.verbose = verbose
self.dryrun = dryrun
self.is_overwrite = is_overwrite
self.css_outdir = os.path.join(self.outdir, 'css')
self.js_outdir = os.path.join(self.outdir, 'js')
self.statsig_dir = os.path.join(self.outdir, 'statsig')
self.r_path = os.path.expanduser(self.config.get('result', 'r_path'))
self.check_result_path()
self.get_finders()
#column-dependency
self.column_dependency_exec = os.path.join(self.gimsan_home, 'column_dependency_app/column_dependency.out')
if not os.path.isfile(self.column_dependency_exec):
raise Exception('Column-Dependency executable missing: %s' % self.column_dependency_exec)
def check_result_path(self):
if not os.path.isdir(self.outdir):
raise MissingDirError('Missing output directory: %s' % self.outdir)
if not os.path.isdir(self.statsig_dir):
os.mkdir(self.statsig_dir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.statsig_dir)
if not os.path.isdir(self.css_outdir):
os.mkdir(self.css_outdir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.css_outdir)
if not os.path.isdir(self.js_outdir):
os.mkdir(self.js_outdir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.js_outdir)
def get_all_finder_meta(self):
lst = []
for width in self.width_lst:
lst.append(self.get_gibbsmarkov_meta(width))
return lst
def generate_finder_result_list(self):
finder_meta_lst = self.get_all_finder_meta()
finder_res_lst = []
for finder_meta in finder_meta_lst:
finder_res_lst.append(FinderResult(finder_meta, self.nullset_size, self.statsig_dir, self.outdir, self.gimsan_home))
rscript_jobs = []
for finder_res in finder_res_lst:
rscript_filename = finder_res.extract_and_write_scores()
cmd = "%s -f %s &>%s" % (self.r_path, rscript_filename, finder_res.get_rout_filename())
job = {
'cmd' : cmd,
'job_id' : rscript_filename,
}
rscript_jobs.append(job)
#run R in parallel
if self.dryrun:
for job in rscript_jobs:
self.logger.info(job['cmd'])
else:
import multiprocessing
pool = multiprocessing.Pool(processes=self.num_proc)
pool.map(subprocess_exec_func, rscript_jobs)
#pvalue
for finder_res in finder_res_lst:
finder_res.get_pvalue_comment_from_rout()
#weblogo
img_dir = os.path.join(self.outdir, 'images')
if not os.path.isdir(img_dir):
os.mkdir(img_dir)
for finder_res in finder_res_lst:
finder_res.extract_and_write_kmers()
weblogo_filename = os.path.join(img_dir, '%s.png' % finder_res.finder_id)
weblogo_revcompl_filename = os.path.join(img_dir, '%s.revcompl.png' % finder_res.finder_id)
finder_res.construct_weblogo(weblogo_filename, weblogo_revcompl_filename)
#column dependency
self.compute_column_dependency(finder_res_lst)
return finder_res_lst
def compute_column_dependency(self, finder_res_lst):
coldep_dir = os.path.join(self.outdir, 'coldep')
if not os.path.isdir(coldep_dir):
os.mkdir(coldep_dir)
if self.config.has_option('column_dependency', 'randseed'):
randseed_param = '-s %d' % self.config.getint('column_dependency', 'randseed')
else:
randseed_param = ''
job_lst = []
for finder_res in finder_res_lst:
coldep_fileroot = '%s.coldep' % finder_res.finder_id
stdout_fn = coldep_fileroot + ".txt"
stderr_fn = coldep_fileroot + ".stderr"
finder_res.coldep_outfile = os.path.join('coldep', stdout_fn)
cmd = "%s -fsa %s %s 1>%s 2>%s" % (
self.column_dependency_exec,
finder_res.kmer_filename,
randseed_param,
os.path.join(coldep_dir, stdout_fn),
os.path.join(coldep_dir, stderr_fn))
job = {
'cmd' : cmd,
'job_id' : coldep_fileroot,
}
job_lst.append(job)
#run R in parallel
if self.dryrun:
for job in job_lst:
self.logger.info(job['cmd'])
else:
import multiprocessing
pool = multiprocessing.Pool(processes=self.num_proc)
pool.map(subprocess_exec_func, job_lst)
for finder_res in finder_res_lst:
full_path = os.path.join(self.outdir, finder_res.coldep_outfile)
with open(full_path, 'rb') as fh:
for ln in fh:
m = re.search(r'for statistically significant pairs \((\d+) pairs\)', ln)
if m:
finder_res.coldep_num_pairs = int(m.group(1))
break
if finder_res.coldep_num_pairs is None:
raise Exception('Unable to find statistically significant pairs from %s' % full_path)
if finder_res.coldep_num_pairs == 0:
finder_res.coldep_btn_style = 'btn-default'
else:
finder_res.coldep_btn_style = 'btn-success'
def generate_html(self):
finder_res_lst = self.generate_finder_result_list()
gm_finder0 = self.finder_lst[0]
tp = None
out_tp_file = os.path.join(self.gimsan_home, 'misc/output_template.html')
with open(out_tp_file) as fh:
tp = template.Template(fh.read())
if tp is None:
raise MissingFileError('Unable to generate HTML from template: %s' % out_tp_file)
output_html = tp.generate(
experiment_name = self.name,
config_filename = os.path.join('../meta', os.path.basename(self.conf_file)),
fsa_filename = os.path.basename(self.template_file),
nullset_size = self.nullset_size,
per_seq_model_comment = gm_finder0.get_per_seq_model(),
stop_crit_comment = gm_finder0.get_stop_crit(),
rapid_conv = gm_finder0.get_rapid_conv(),
double_strand_comment = 'yes' if gm_finder0.get_double_strand() else 'no',
markov_order = gm_finder0.markov_order,
genomic_file_comment = self.genome_filename if self.genome_filename else 'input FASTA file',
finder_res_lst = finder_res_lst,
)
output_html_file = os.path.join(self.outdir, 'output.html')
self.logger.info('Writing HTML file to: %s' % output_html_file)
with open(output_html_file, 'wb') as fh:
print(output_html, file=fh)
self.copy_html_assets()
def copy_html_assets(self):
lst = [
(os.path.join(self.gimsan_home, 'misc', 'css', "bootstrap.min.css"), self.css_outdir),
(os.path.join(self.gimsan_home, 'misc', 'js', "bootstrap.min.js"), self.js_outdir),
(os.path.join(self.gimsan_home, 'misc', 'js', "jquery-1.10.2.min.js"), self.js_outdir),
]
for l in lst:
os.system('cp -v %s %s' % (l[0], l[1]))
def subprocess_exec_func(job):
import logging
logging.info('(%s): %s' % (job['job_id'], job['cmd']))
ret_code = subprocess.call(job['cmd'], shell=True)
if __name__ == '__main__':
benchmark = time_benchmark.Benchmark()
#defaults
description = """
Generate GIMSAN result
"""
epilog = """
Examples:
%(prog)s --dir=testout -v
"""
argp = ArgumentParserPlus(description=description, epilog=epilog)
argp.add_argument('--dir', required=True, help="main output directory used with gimsan_submit.py")
argp.add_argument('--overwrite', action="store_true", help="")
argp.add_argument('--dryrun', action="store_true", help="")
argp.add_argument('-v', '--verbose', action='store_true')
args = argp.parse_args()
import logging
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
simple_stream_logging(log_level)
args.dir = os.path.expanduser(args.dir)
conf_file = BatchExperiment.get_conf_file(args.dir)
batch_exp = BatchExperiment(conf_file, args.overwrite)
for exp in batch_exp.experiments:
gr_manager = GimsanResultManager(
exp['name'],
exp['fasta_file'],
exp['outdir'],
batch_exp.config,
batch_exp.conf_file,
is_overwrite = args.overwrite,
dryrun = args.dryrun,
verbose = args.verbose)
gr_manager.generate_html()
benchmark.print_time(sys.stderr)
| mit | -2,596,120,700,495,742,000 | 37.034398 | 128 | 0.598385 | false |
PolicyStat/docx2html | docx2html/core.py | 1 | 48167 | import cgi
import logging
import os
import os.path
import re
from PIL import Image
from lxml import etree
from lxml.etree import XMLSyntaxError
from collections import namedtuple, defaultdict
from zipfile import ZipFile, BadZipfile
from docx2html.exceptions import (
ConversionFailed,
FileNotDocx,
MalformedDocx,
UnintendedTag,
SyntaxNotSupported,
)
DETECT_FONT_SIZE = False
EMUS_PER_PIXEL = 9525
NSMAP = {}
IMAGE_EXTENSIONS_TO_SKIP = ['emf', 'wmf', 'svg']
DEFAULT_LIST_NUMBERING_STYLE = 'decimal'
logger = logging.getLogger(__name__)
###
# Help functions
###
def replace_ext(file_path, new_ext):
"""
>>> replace_ext('one/two/three.four.doc', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', 'html')
'one/two/three.four.html'
"""
if not new_ext.startswith(os.extsep):
new_ext = os.extsep + new_ext
index = file_path.rfind(os.extsep)
return file_path[:index] + new_ext
def ensure_tag(tags):
# For some functions we can short-circuit and early exit if the tag is not
# the right kind.
def wrapped(f):
def wrap(*args, **kwargs):
passed_in_tag = args[0]
if passed_in_tag is None:
return None
w_namespace = get_namespace(passed_in_tag, 'w')
valid_tags = [
'%s%s' % (w_namespace, t) for t in tags
]
if passed_in_tag.tag in valid_tags:
return f(*args, **kwargs)
return None
return wrap
return wrapped
def get_namespace(el, namespace):
if namespace not in NSMAP:
NSMAP[namespace] = '{%s}' % el.nsmap[namespace]
return NSMAP[namespace]
def convert_image(target, image_size):
_, extension = os.path.splitext(os.path.basename(target))
# If the image size has a zero in it early return
if image_size and not all(image_size):
return target
# All the image types need to be converted to gif.
invalid_extensions = (
'.bmp',
'.dib',
'.tiff',
'.tif',
)
# Open the image and get the format.
try:
image = Image.open(target)
except IOError:
return target
image_format = image.format
image_file_name = target
# Make sure the size of the image and the size of the embedded image are
# the same.
if image_size is not None and image.size != image_size:
# Resize if needed
try:
image = image.resize(image_size, Image.ANTIALIAS)
except IOError:
pass
# If we have an invalid extension, change the format to gif.
if extension.lower() in invalid_extensions:
image_format = 'GIF'
image_file_name = replace_ext(target, '.gif')
# Resave the image (Post resizing) with the correct format
try:
image.save(image_file_name, image_format)
except IOError:
return target
return image_file_name
@ensure_tag(['p'])
def get_font_size(p, styles_dict):
w_namespace = get_namespace(p, 'w')
r = p.find('%sr' % w_namespace)
if r is None:
return None
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return None
size = rpr.find('%ssz' % w_namespace)
if size is None:
# Need to get the font size off the styleId
pPr = p.find('%spPr' % w_namespace)
if pPr is None:
return None
pStyle = pPr.find('%spStyle' % w_namespace)
if pStyle is None:
return None
pStyle = pStyle.get('%sval' % w_namespace)
font_size = None
style_value = styles_dict.get(pStyle, None)
if style_value is None:
return None
if 'font_size' in style_value:
font_size = styles_dict[pStyle]['font_size']
while font_size is None:
old_pStyle = pStyle
# If pStyle is not in the styles_dict then we have to break.
if pStyle not in styles_dict:
break
# If based on is not in the styles_dict for pStyle then we have to
# break.
if 'based_on' not in styles_dict[pStyle]:
break
# Try to derive what the font size is based on what the current
# style is based on.
pStyle = styles_dict[pStyle]['based_on']
if old_pStyle == pStyle:
break
# If pStyle is not in styles_dict then break.
if pStyle not in styles_dict:
break
# We have found a new font size
font_size = styles_dict[pStyle]['font_size']
return font_size
return size.get('%sval' % w_namespace)
@ensure_tag(['p'])
def is_natural_header(el, styles_dict):
w_namespace = get_namespace(el, 'w')
pPr = el.find('%spPr' % w_namespace)
if pPr is None:
return False
pStyle = pPr.find('%spStyle' % w_namespace)
if pStyle is None:
return False
style_id = pStyle.get('%sval' % w_namespace)
if (
style_id in styles_dict and
'header' in styles_dict[style_id] and
styles_dict[style_id]['header']):
return styles_dict[style_id]['header']
@ensure_tag(['p'])
def is_header(el, meta_data):
if _is_top_level_upper_roman(el, meta_data):
return 'h2'
el_is_natural_header = is_natural_header(el, meta_data.styles_dict)
if el_is_natural_header:
return el_is_natural_header
if _is_li(el):
return False
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
return False
# Check to see if this is a header because the font size is different than
# the normal font size.
# Since get_font_size is a method used before meta is created, just pass in
# styles_dict.
if DETECT_FONT_SIZE:
font_size = get_font_size(el, meta_data.styles_dict)
if font_size is not None:
if meta_data.font_sizes_dict[font_size]:
return meta_data.font_sizes_dict[font_size]
# If a paragraph is longer than eight words it is likely not supposed to be
# an h tag.
num_words = len(
etree.tostring(
el,
encoding=unicode,
method='text',
).split(' ')
)
if num_words > 8:
return False
# Check to see if the full line is bold.
whole_line_bold, whole_line_italics = whole_line_styled(el)
if whole_line_bold or whole_line_italics:
return 'h2'
return False
@ensure_tag(['p'])
def _is_top_level_upper_roman(el, meta_data):
w_namespace = get_namespace(el, 'w')
ilvl = get_ilvl(el, w_namespace)
# If this list is not in the root document (indentation of 0), then it
# cannot be a top level upper roman list.
if ilvl != 0:
return False
numId = get_numId(el, w_namespace)
list_type = meta_data.numbering_dict[numId].get(ilvl, False)
return list_type == 'upperRoman'
@ensure_tag(['p'])
def _is_li(el):
return len(el.xpath('.//w:numPr/w:ilvl', namespaces=el.nsmap)) != 0
@ensure_tag(['p'])
def is_li(el, meta_data):
"""
The only real distinction between an ``li`` tag and a ``p`` tag is that an
``li`` tag has an attribute called numPr which holds the list id and ilvl
(indentation level)
"""
if is_header(el, meta_data):
return False
return _is_li(el)
def has_text(p):
"""
It is possible for a ``p`` tag in document.xml to not have any content. If
this is the case we do not want that tag interfering with things like
lists. Detect if this tag has any content.
"""
return '' != etree.tostring(p, encoding=unicode, method='text').strip()
def is_last_li(li, meta_data, current_numId):
"""
Determine if ``li`` is the last list item for a given list
"""
if not is_li(li, meta_data):
return False
w_namespace = get_namespace(li, 'w')
next_el = li
while True:
        # If we run out of elements this must be the last list item
if next_el is None:
return True
next_el = next_el.getnext()
# Ignore elements that are not a list item
if not is_li(next_el, meta_data):
continue
new_numId = get_numId(next_el, w_namespace)
if current_numId != new_numId:
return True
# If we have gotten here then we have found another list item in the
# current list, so ``li`` is not the last li in the list.
return False
@ensure_tag(['p'])
def get_single_list_nodes_data(li, meta_data):
"""
Find consecutive li tags that have content that have the same list id.
"""
yield li
w_namespace = get_namespace(li, 'w')
current_numId = get_numId(li, w_namespace)
starting_ilvl = get_ilvl(li, w_namespace)
el = li
while True:
el = el.getnext()
if el is None:
break
# If the tag has no content ignore it.
if not has_text(el):
continue
# Stop the lists if you come across a list item that should be a
# heading.
if _is_top_level_upper_roman(el, meta_data):
break
if (
is_li(el, meta_data) and
(starting_ilvl > get_ilvl(el, w_namespace))):
break
new_numId = get_numId(el, w_namespace)
if new_numId is None or new_numId == -1:
# Not a p tag or a list item
yield el
continue
# If the list id of the next tag is different that the previous that
# means a new list being made (not nested)
if current_numId != new_numId:
# Not a subsequent list.
break
if is_last_li(el, meta_data, current_numId):
yield el
break
yield el
@ensure_tag(['p'])
def get_ilvl(li, w_namespace):
"""
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace))
@ensure_tag(['p'])
def get_numId(li, w_namespace):
"""
    The numId on an li tag maps to the numbering dictionary alongside the ilvl
to determine what the list should look like (unordered, digits, lower
alpha, etc)
"""
numIds = li.xpath('.//w:numId', namespaces=li.nsmap)
if len(numIds) == 0:
return -1
return numIds[0].get('%sval' % w_namespace)
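# Hedged usage sketch (not part of the original module): how get_ilvl and
# get_numId read the list metadata off a list-item paragraph. The XML below is
# a trimmed-down <w:p> built only for illustration; note that get_ilvl returns
# an int while get_numId returns the raw string value.
def _example_list_item_metadata():
    xml = (
        '<w:p xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:pPr><w:numPr><w:ilvl w:val="1"/><w:numId w:val="4"/></w:numPr></w:pPr>'
        '<w:r><w:t>item text</w:t></w:r>'
        '</w:p>'
    )
    p = etree.fromstring(xml)
    w_ns = get_namespace(p, 'w')
    assert get_ilvl(p, w_ns) == 1
    assert get_numId(p, w_ns) == '4'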
def create_list(list_type):
"""
    Based on the passed-in list_type, create a list object (ol/ul). In the
future this function will also deal with what the numbering of an ordered
list should look like.
"""
list_types = {
'bullet': 'ul',
}
el = etree.Element(list_types.get(list_type, 'ol'))
# These are the supported list style types and their conversion to css.
list_type_conversions = {
'decimal': DEFAULT_LIST_NUMBERING_STYLE,
'decimalZero': 'decimal-leading-zero',
'upperRoman': 'upper-roman',
'lowerRoman': 'lower-roman',
'upperLetter': 'upper-alpha',
'lowerLetter': 'lower-alpha',
'ordinal': DEFAULT_LIST_NUMBERING_STYLE,
'cardinalText': DEFAULT_LIST_NUMBERING_STYLE,
'ordinalText': DEFAULT_LIST_NUMBERING_STYLE,
}
if list_type != 'bullet':
el.set(
'data-list-type',
list_type_conversions.get(list_type, DEFAULT_LIST_NUMBERING_STYLE),
)
return el
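# Hedged usage sketch (not part of the original module): the mapping from docx
# numbering formats to the css list-style-type recorded on the generated
# element. 'upperRoman' and 'bullet' are example inputs.
def _example_create_list_usage():
    ol = create_list('upperRoman')
    assert ol.tag == 'ol'
    assert ol.get('data-list-type') == 'upper-roman'
    ul = create_list('bullet')
    assert ul.tag == 'ul'
    assert ul.get('data-list-type') is None  # bullets carry no numbering style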
@ensure_tag(['tc'])
def get_v_merge(tc):
"""
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
"""
if tc is None:
return None
v_merges = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
if len(v_merges) != 1:
return None
v_merge = v_merges[0]
return v_merge
@ensure_tag(['tc'])
def get_grid_span(tc):
"""
gridSpan is what docx uses to denote that a table cell has a colspan. This
is much more simple than rowspans in that there is a one-to-one mapping
from gridSpan to colspan.
"""
w_namespace = get_namespace(tc, 'w')
grid_spans = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
if len(grid_spans) != 1:
return 1
grid_span = grid_spans[0]
return int(grid_span.get('%sval' % w_namespace))
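# Hedged usage sketch (not part of the original module): a minimal <w:tc>
# carrying a gridSpan of 3, i.e. a docx table cell that should end up as an
# HTML cell with colspan="3".
def _example_get_grid_span_usage():
    xml = (
        '<w:tc xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:tcPr><w:gridSpan w:val="3"/></w:tcPr>'
        '<w:p/>'
        '</w:tc>'
    )
    tc = etree.fromstring(xml)
    assert get_grid_span(tc) == 3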
@ensure_tag(['tr'])
def get_td_at_index(tr, index):
"""
When calculating the rowspan for a given cell it is required to find all
table cells 'below' the initial cell with a v_merge. This function will
return the td element at the passed in index, taking into account colspans.
"""
current = 0
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
if index == current:
return td
current += get_grid_span(td)
@ensure_tag(['tbl'])
def get_rowspan_data(table):
w_namespace = get_namespace(table, 'w')
# We need to keep track of what table row we are on as well as which table
# cell we are on.
tr_index = 0
td_index = 0
# Get a list of all the table rows.
tr_rows = list(table.xpath('.//w:tr', namespaces=table.nsmap))
# Loop through each table row.
for tr in table.xpath('.//w:tr', namespaces=table.nsmap):
# Loop through each table cell.
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
# Check to see if this cell has a v_merge
v_merge = get_v_merge(td)
# If not increment the td_index and move on
if v_merge is None:
td_index += get_grid_span(td)
continue
# If it does have a v_merge we need to see if it is the ``root``
# table cell (the first in a row to have a rowspan)
# If the value is restart then this is the table cell that needs
# the rowspan.
if v_merge.get('%sval' % w_namespace) == 'restart':
row_span = 1
# Loop through each table row after the current one.
for tr_el in tr_rows[tr_index + 1:]:
# Get the table cell at the current td_index.
td_el = get_td_at_index(tr_el, td_index)
td_el_v_merge = get_v_merge(td_el)
                    # If the td_el does not have a v_merge then the rowspan is
# done.
if td_el_v_merge is None:
break
val = td_el_v_merge.get('%sval' % w_namespace)
# If the v_merge is restart then there is another cell that
# needs a rowspan, so the current cells rowspan is done.
if val == 'restart':
break
# Increment the row_span
row_span += 1
yield row_span
# Increment the indexes.
td_index += get_grid_span(td)
tr_index += 1
# Reset the td_index when we finish each table row.
td_index = 0
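# Hedged usage sketch (not part of the original module): a two-row table whose
# first column is vertically merged. The first cell restarts a vMerge and the
# cell below it continues it, so the generator yields a single rowspan of 2.
def _example_get_rowspan_data_usage():
    xml = (
        '<w:tbl xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:tr>'
        '<w:tc><w:tcPr><w:vMerge w:val="restart"/></w:tcPr><w:p/></w:tc>'
        '<w:tc><w:p/></w:tc>'
        '</w:tr>'
        '<w:tr>'
        '<w:tc><w:tcPr><w:vMerge w:val="continue"/></w:tcPr><w:p/></w:tc>'
        '<w:tc><w:p/></w:tc>'
        '</w:tr>'
        '</w:tbl>'
    )
    table = etree.fromstring(xml)
    assert list(get_rowspan_data(table)) == [2]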
@ensure_tag(['b', 'i', 'u'])
def style_is_false(style):
"""
    For bold, italics and underline, simply checking to see if the various tags
are present will not suffice. If the tag is present and set to False then
the style should not be present.
"""
if style is None:
return False
w_namespace = get_namespace(style, 'w')
return style.get('%sval' % w_namespace) != 'false'
@ensure_tag(['r'])
def is_bold(r):
"""
The function will return True if the r tag passed in is considered bold.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
bold = rpr.find('%sb' % w_namespace)
return style_is_false(bold)
@ensure_tag(['r'])
def is_italics(r):
"""
The function will return True if the r tag passed in is considered
italicized.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
italics = rpr.find('%si' % w_namespace)
return style_is_false(italics)
@ensure_tag(['r'])
def is_underlined(r):
"""
The function will return True if the r tag passed in is considered
underlined.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
underline = rpr.find('%su' % w_namespace)
return style_is_false(underline)
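# Hedged usage sketch (not part of the original module): run properties are
# only honoured when they are present and not explicitly set to "false", which
# is what style_is_false enforces for the three helpers above.
def _example_run_style_detection():
    w_ns_uri = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
    bold_run = etree.fromstring(
        '<w:r xmlns:w="%s"><w:rPr><w:b/></w:rPr><w:t>bold</w:t></w:r>' % w_ns_uri
    )
    plain_run = etree.fromstring(
        '<w:r xmlns:w="%s"><w:rPr><w:b w:val="false"/></w:rPr>'
        '<w:t>not bold</w:t></w:r>' % w_ns_uri
    )
    assert is_bold(bold_run)
    assert not is_bold(plain_run)
    assert not is_italics(bold_run)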
@ensure_tag(['p'])
def is_title(p):
"""
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
"""
w_namespace = get_namespace(p, 'w')
styles = p.xpath('.//w:pStyle', namespaces=p.nsmap)
if len(styles) == 0:
return False
style = styles[0]
return style.get('%sval' % w_namespace) == 'Title'
@ensure_tag(['r'])
def get_text_run_content_data(r):
"""
It turns out that r tags can contain both t tags and drawing tags. Since we
need both, this function will return them in the order in which they are
found.
"""
w_namespace = get_namespace(r, 'w')
valid_elements = (
'%st' % w_namespace,
'%sdrawing' % w_namespace,
'%spict' % w_namespace,
'%sbr' % w_namespace,
)
for el in r:
if el.tag in valid_elements:
yield el
@ensure_tag(['drawing', 'pict'])
def get_image_id(drawing):
r_namespace = get_namespace(drawing, 'r')
for el in drawing.iter():
# For drawing
image_id = el.get('%sembed' % r_namespace)
if image_id is not None:
return image_id
# For pict
if 'v' not in el.nsmap:
continue
v_namespace = get_namespace(drawing, 'v')
if el.tag == '%simagedata' % v_namespace:
image_id = el.get('%sid' % r_namespace)
if image_id is not None:
return image_id
@ensure_tag(['p'])
def whole_line_styled(p):
"""
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
"""
r_tags = p.xpath('.//w:r', namespaces=p.nsmap)
tags_are_bold = [
is_bold(r) or is_underlined(r) for r in r_tags
]
tags_are_italics = [
is_italics(r) for r in r_tags
]
return all(tags_are_bold), all(tags_are_italics)
MetaData = namedtuple(
'MetaData',
[
'numbering_dict',
'relationship_dict',
'styles_dict',
'font_sizes_dict',
'image_handler',
'image_sizes',
],
)
###
# Pre-processing
###
def get_numbering_info(tree):
"""
There is a separate file called numbering.xml that stores how lists should
look (unordered, digits, lower case letters, etc.). Parse that file and
return a dictionary of what each combination should be based on list Id and
level of indentation.
"""
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
num_ids = {}
result = defaultdict(dict)
# First find all the list types
for list_type in tree.findall('%snum' % w_namespace):
list_id = list_type.get('%snumId' % w_namespace)
# Each list type is assigned an abstractNumber that defines how lists
# should look.
abstract_number = list_type.find('%sabstractNumId' % w_namespace)
num_ids[abstract_number.get('%sval' % w_namespace)] = list_id
# Loop through all the abstractNumbers
for abstract_number in tree.findall('%sabstractNum' % w_namespace):
abstract_num_id = abstract_number.get('%sabstractNumId' % w_namespace)
# If we find an abstractNumber that is not being used in the document
# then ignore it.
if abstract_num_id not in num_ids:
continue
# Get the level of the abstract number.
for lvl in abstract_number.findall('%slvl' % w_namespace):
ilvl = int(lvl.get('%silvl' % w_namespace))
lvl_format = lvl.find('%snumFmt' % w_namespace)
list_style = lvl_format.get('%sval' % w_namespace)
# Based on the list type and the ilvl (indentation level) store the
# needed style.
result[num_ids[abstract_num_id]][ilvl] = list_style
return result
def get_style_dict(tree):
"""
Some things that are considered lists are actually supposed to be H tags
(h1, h2, etc.) These can be denoted by their styleId
"""
# This is a partial document and actual h1 is the document title, which
# will be displayed elsewhere.
headers = {
'heading 1': 'h2',
'heading 2': 'h3',
'heading 3': 'h4',
'heading 4': 'h5',
'heading 5': 'h6',
'heading 6': 'h6',
'heading 7': 'h6',
'heading 8': 'h6',
'heading 9': 'h6',
'heading 10': 'h6',
}
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
result = {}
for el in tree:
style_id = el.get('%sstyleId' % w_namespace)
el_result = {
'header': False,
'font_size': None,
'based_on': None,
}
# Get the header info
name = el.find('%sname' % w_namespace)
if name is None:
continue
value = name.get('%sval' % w_namespace).lower()
if value in headers:
el_result['header'] = headers[value]
# Get the size info.
rpr = el.find('%srPr' % w_namespace)
if rpr is None:
continue
size = rpr.find('%ssz' % w_namespace)
if size is None:
el_result['font_size'] = None
else:
el_result['font_size'] = size.get('%sval' % w_namespace)
# Get based on info.
based_on = el.find('%sbasedOn' % w_namespace)
if based_on is None:
el_result['based_on'] = None
else:
el_result['based_on'] = based_on.get('%sval' % w_namespace)
result[style_id] = el_result
return result
def get_image_sizes(tree):
drawings = []
result = {}
w_namespace = get_namespace(tree, 'w')
for el in tree.iter():
if el.tag == '%sdrawing' % w_namespace:
drawings.append(el)
for d in drawings:
for el in d.iter():
if 'a' not in el.nsmap:
continue
a_namespace = get_namespace(el, 'a')
if el.tag == '%sxfrm' % a_namespace:
ext = el.find('%sext' % a_namespace)
cx = int(ext.get('cx')) / EMUS_PER_PIXEL
cy = int(ext.get('cy')) / EMUS_PER_PIXEL
result[get_image_id(d)] = (cx, cy)
return result
def get_relationship_info(tree, media, image_sizes):
"""
There is a separate file holds the targets to links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
"""
if tree is None:
return {}
result = {}
# Loop through each relationship.
for el in tree.iter():
el_id = el.get('Id')
if el_id is None:
continue
# Store the target in the result dict.
target = el.get('Target')
if any(
target.lower().endswith(ext) for
ext in IMAGE_EXTENSIONS_TO_SKIP):
continue
if target in media:
image_size = image_sizes.get(el_id)
target = convert_image(media[target], image_size)
        # cgi.escape will replace things like & < > with &amp; &lt; &gt;
result[el_id] = cgi.escape(target)
return result
def get_font_sizes_dict(tree, styles_dict):
font_sizes_dict = defaultdict(int)
# Get all the fonts sizes and how often they are used in a dict.
for p in tree.xpath('//w:p', namespaces=tree.nsmap):
# If this p tag is a natural header, skip it
if is_natural_header(p, styles_dict):
continue
if _is_li(p):
continue
font_size = get_font_size(p, styles_dict)
if font_size is None:
continue
font_sizes_dict[font_size] += 1
# Find the most used font size.
most_used_font_size = -1
highest_count = -1
for size, count in font_sizes_dict.items():
if count > highest_count:
highest_count = count
most_used_font_size = size
# Consider the most used font size to be the 'default' font size. Any font
# size that is different will be considered an h tag.
result = {}
for size in font_sizes_dict:
if size is None:
continue
if int(size) > int(most_used_font_size):
            # Larger than the most common size, so treat it as an h tag.
result[size] = 'h2'
else:
result[size] = None
return result
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data
###
# HTML Building functions
###
def get_ordered_list_type(meta_data, numId, ilvl):
"""
Return the list type. If numId or ilvl not in the numbering dict then
default to returning decimal.
This function only cares about ordered lists, unordered lists get dealt
with elsewhere.
"""
# Early return if numId or ilvl are not valid
numbering_dict = meta_data.numbering_dict
if numId not in numbering_dict:
return DEFAULT_LIST_NUMBERING_STYLE
if ilvl not in numbering_dict[numId]:
return DEFAULT_LIST_NUMBERING_STYLE
return meta_data.numbering_dict[numId][ilvl]
def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of list that new li tags should be added too.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join(t for t in list_contents if t is not None)
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
new_el, visited_nodes = build_table(el, meta_data)
return etree.tostring(new_el), visited_nodes
elif el.tag == '%sp' % w_namespace:
return get_element_content(el, meta_data), [el]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag)
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
# Any list that is more indented that ilvl needs to
# be merged to the list before it.
if i not in ol_dict:
continue
            if ol_dict[i] is not current_ol:
                # Attach the more nested list to the last item of its parent
                # list, then continue merging from that parent.
                ol_dict[i][-1].append(current_ol)
                current_ol = ol_dict[i]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
new_el, el_visited_nodes = _build_non_li_content(
li_node,
meta_data,
)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Get the data needed to build the current list item
list_contents.append(get_element_content(
li_node,
meta_data,
))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if (ilvl > current_ilvl) or (numId != current_numId):
            # Create a new list for this indentation level and list id.
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId
        # If neither case above is True then we need to close all lists deeper
        # than ilvl and remove them from the ol_dict.
else:
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(
ilvl=ilvl,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl]
else:
# In some instances the ilvl is not in the ol_dict, if that is the
# case, create it here (not sure how this happens but it has
# before.) Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
if current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type)
# Create the li element.
visited_nodes.extend(list(li_node.iter()))
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(
ilvl=0,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
return root_ol, visited_nodes
@ensure_tag(['tr'])
def build_tr(tr, meta_data, row_spans):
"""
This will return a single tr element, with all tds already populated.
"""
# Create a blank tr element.
tr_el = etree.Element('tr')
w_namespace = get_namespace(tr, 'w')
visited_nodes = []
for el in tr:
if el in visited_nodes:
continue
visited_nodes.append(el)
# Find the table cells.
if el.tag == '%stc' % w_namespace:
v_merge = get_v_merge(el)
# If there is a v_merge and it is not restart then this cell can be
# ignored.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) != 'restart'):
continue
# Loop through each and build a list of all the content.
texts = []
for td_content in el:
                # Since we are doing look-aheads in this loop we need to check
# again to see if we have already visited the node.
if td_content in visited_nodes:
continue
# Check to see if it is a list or a regular paragraph.
if is_li(td_content, meta_data):
# If it is a list, create the list and update
# visited_nodes.
li_nodes = get_single_list_nodes_data(
td_content,
meta_data,
)
list_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
texts.append(etree.tostring(list_el))
elif td_content.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
td_content,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
texts.append(etree.tostring(table_el))
elif td_content.tag == '%stcPr' % w_namespace:
# Do nothing
visited_nodes.append(td_content)
continue
else:
text = get_element_content(
td_content,
meta_data,
is_td=True,
)
texts.append(text)
data = '<br />'.join(t for t in texts if t is not None)
td_el = etree.XML('<td>%s</td>' % data)
# if there is a colspan then set it here.
colspan = get_grid_span(el)
if colspan > 1:
td_el.set('colspan', '%d' % colspan)
v_merge = get_v_merge(el)
# If this td has a v_merge and it is restart then set the rowspan
# here.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) == 'restart'):
rowspan = next(row_spans)
td_el.set('rowspan', '%d' % rowspan)
tr_el.append(td_el)
return tr_el
@ensure_tag(['tbl'])
def build_table(table, meta_data):
"""
This returns a table object with all rows and cells correctly populated.
"""
# Create a blank table element.
table_el = etree.Element('table')
w_namespace = get_namespace(table, 'w')
# Get the rowspan values for cells that have a rowspan.
row_spans = get_rowspan_data(table)
for el in table:
if el.tag == '%str' % w_namespace:
# Create the tr element.
tr_el = build_tr(
el,
meta_data,
row_spans,
)
# And append it to the table.
table_el.append(tr_el)
visited_nodes = list(table.iter())
return table_el, visited_nodes
@ensure_tag(['t'])
def get_t_tag_content(
t, parent, remove_bold, remove_italics, meta_data):
"""
Generate the string data that for this particular t tag.
"""
if t is None or t.text is None:
return ''
# Need to escape the text so that we do not accidentally put in text
# that is not valid XML.
    # cgi.escape will replace things like & < > with &amp; &lt; &gt;
text = cgi.escape(t.text)
# Wrap the text with any modifiers it might have (bold, italics or
# underline)
el_is_bold = not remove_bold and (
is_bold(parent) or
is_underlined(parent)
)
el_is_italics = not remove_italics and is_italics(parent)
if el_is_bold:
text = '<strong>%s</strong>' % text
if el_is_italics:
text = '<em>%s</em>' % text
return text
def _get_image_size_from_image(target):
image = Image.open(target)
return image.size
def build_hyperlink(el, meta_data):
# If we have a hyperlink we need to get relationship_id
r_namespace = get_namespace(el, 'r')
hyperlink_id = el.get('%sid' % r_namespace)
# Once we have the hyperlink_id then we need to replace the
# hyperlink tag with its child run tags.
content = get_element_content(
el,
meta_data,
remove_bold=True,
remove_italics=True,
)
if not content:
return ''
if hyperlink_id in meta_data.relationship_dict:
href = meta_data.relationship_dict[hyperlink_id]
# Do not do any styling on hyperlinks
return '<a href="%s">%s</a>' % (href, content)
return ''
def build_image(el, meta_data):
image_id = get_image_id(el)
if image_id not in meta_data.relationship_dict:
# This image does not have an image_id
return ''
src = meta_data.image_handler(
image_id,
meta_data.relationship_dict,
)
if image_id in meta_data.image_sizes:
width, height = meta_data.image_sizes[image_id]
else:
target = meta_data.relationship_dict[image_id]
width, height = _get_image_size_from_image(target)
# Make sure the width and height are not zero
if all((width, height)):
return '<img src="%s" height="%d" width="%d" />' % (
src,
height,
width,
)
else:
return '<img src="%s" />' % src
def get_text_run_content(el, meta_data, remove_bold, remove_italics):
w_namespace = get_namespace(el, 'w')
text_output = ''
for child in get_text_run_content_data(el):
if child.tag == '%st' % w_namespace:
text_output += get_t_tag_content(
child,
el,
remove_bold,
remove_italics,
meta_data,
)
elif child.tag == '%sbr' % w_namespace:
text_output += '<br />'
elif child.tag in (
'%spict' % w_namespace,
'%sdrawing' % w_namespace,
):
text_output += build_image(child, meta_data)
else:
raise SyntaxNotSupported(
'"%s" is not a supported content-containing '
'text run child.' % child.tag
)
return text_output
@ensure_tag(['p', 'ins', 'smartTag', 'hyperlink'])
def get_element_content(
p,
meta_data,
is_td=False,
remove_italics=False,
remove_bold=False,
):
"""
P tags are made up of several runs (r tags) of text. This function takes a
p tag and constructs the text that should be part of the p tag.
image_handler should be a callable that returns the desired ``src``
attribute for a given image.
"""
# Only remove bold or italics if this tag is an h tag.
# Td elements have the same look and feel as p/h elements. Right now we are
# never putting h tags in td elements, as such if we are in a td we will
# never be stripping bold/italics since that is only done on h tags
if not is_td and is_header(p, meta_data):
# Check to see if the whole line is bold or italics.
remove_bold, remove_italics = whole_line_styled(p)
p_text = ''
w_namespace = get_namespace(p, 'w')
if len(p) == 0:
return ''
# Only these tags contain text that we care about (eg. We don't care about
# delete tags)
content_tags = (
'%sr' % w_namespace,
'%shyperlink' % w_namespace,
'%sins' % w_namespace,
'%ssmartTag' % w_namespace,
)
elements_with_content = []
for child in p:
if child is None:
break
if child.tag in content_tags:
elements_with_content.append(child)
# Gather the content from all of the children
for el in elements_with_content:
# Hyperlinks and insert tags need to be handled differently than
# r and smart tags.
if el.tag in ('%sins' % w_namespace, '%ssmartTag' % w_namespace):
p_text += get_element_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
elif el.tag == '%shyperlink' % w_namespace:
p_text += build_hyperlink(el, meta_data)
elif el.tag == '%sr' % w_namespace:
p_text += get_text_run_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
else:
raise SyntaxNotSupported(
'Content element "%s" not handled.' % el.tag
)
# This function does not return a p tag since other tag types need this as
# well (td, li).
return p_text
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el)
def get_zip_file_handler(file_path):
return ZipFile(file_path)
def read_html_file(file_path):
with open(file_path) as f:
html = f.read()
return html
def convert(file_path, image_handler=None, fall_back=None, converter=None):
"""
``file_path`` is a path to the file on the file system that you want to be
converted to html.
``image_handler`` is a function that takes an image_id and a
relationship_dict to generate the src attribute for images. (see readme
for more details)
``fall_back`` is a function that takes a ``file_path``. This function will
only be called if for whatever reason the conversion fails.
``converter`` is a function to convert a document that is not docx to docx
(examples in docx2html.converters)
Returns html extracted from ``file_path``
"""
file_base, extension = os.path.splitext(os.path.basename(file_path))
if extension == '.html' or extension == '.htm':
return read_html_file(file_path)
# Create the converted file as a file in the same dir with the
# same name only with a .docx extension
docx_path = replace_ext(file_path, '.docx')
if extension == '.docx':
        # If the file is already a docx, use it in place.
docx_path = file_path
else:
if converter is None:
raise FileNotDocx('The file passed in is not a docx.')
converter(docx_path, file_path)
if not os.path.isfile(docx_path):
if fall_back is None:
raise ConversionFailed('Conversion to docx failed.')
else:
return fall_back(file_path)
try:
# Docx files are actually just zip files.
zf = get_zip_file_handler(docx_path)
except BadZipfile:
raise MalformedDocx('This file is not a docx')
# Need to populate the xml based on word/document.xml
tree, meta_data = _get_document_data(zf, image_handler)
return create_html(tree, meta_data)
def create_html(tree, meta_data):
# Start the return value
new_html = etree.Element('html')
w_namespace = get_namespace(tree, 'w')
visited_nodes = []
_strip_tag(tree, '%ssectPr' % w_namespace)
for el in tree.iter():
# The way lists are handled could double visit certain elements; keep
# track of which elements have been visited and skip any that have been
# visited already.
if el in visited_nodes:
continue
        header_value = is_header(el, meta_data)
        if header_value:
p_text = get_element_content(el, meta_data)
if p_text == '':
continue
new_html.append(
etree.XML('<%s>%s</%s>' % (
header_value,
p_text,
header_value,
))
)
elif el.tag == '%sp' % w_namespace:
# Strip out titles.
if is_title(el):
continue
if is_li(el, meta_data):
# Parse out the needed info from the node.
li_nodes = get_single_list_nodes_data(el, meta_data)
new_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
# Handle generic p tag here.
else:
p_text = get_element_content(el, meta_data)
# If there is not text do not add an empty tag.
if p_text == '':
continue
new_el = etree.XML('<p>%s</p>' % p_text)
new_html.append(new_el)
elif el.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
el,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
new_html.append(table_el)
continue
# Keep track of visited_nodes
visited_nodes.append(el)
result = etree.tostring(
new_html,
method='html',
with_tail=True,
)
return _make_void_elements_self_close(result)
def _make_void_elements_self_close(html):
#XXX Hack not sure how to get etree to do this by default.
void_tags = [
r'br',
r'img',
]
for tag in void_tags:
regex = re.compile(r'<%s.*?>' % tag)
matches = regex.findall(html)
for match in matches:
new_tag = match.strip('<>')
new_tag = '<%s />' % new_tag
            html = html.replace(match, new_tag)
return html
| bsd-3-clause | 5,576,685,716,385,988,000 | 31.326846 | 79 | 0.567048 | false |
Guymer/PyGuymer | simplify_poly.py | 1 | 1717 | # -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/simplify_poly.py #
##############################################################################################
def simplify_poly(poly1, simp = 0.1):
"""
This function accepts either a Polygon or a MultiPolygon and creates a
Polygon or a MultiPolygon from the simplified (member) Polygon(s).
"""
# Import modules ...
import shapely
import shapely.geometry
import shapely.validation
# Create empty list ...
poly2 = []
# Check what the argument is ...
if isinstance(poly1, shapely.geometry.multipolygon.MultiPolygon):
# Loop over Polygons and add simplified copys to the list ...
for tmp1 in poly1.geoms:
poly2.append(tmp1.simplify(simp))
elif isinstance(poly1, shapely.geometry.polygon.Polygon):
# Add simplified copy to the list ...
poly2.append(poly1.simplify(simp))
else:
raise TypeError("\"poly1\" is an unexpected type")
# Convert list to MultiPolygon ...
poly2 = shapely.geometry.multipolygon.MultiPolygon(poly2)
if not poly2.is_valid:
raise Exception("\"poly2\" is not a valid [Multi]Polygon ({0:s})".format(shapely.validation.explain_validity(poly2)))
# Return answer ...
return poly2
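# Minimal usage sketch (illustrative, not part of the original module):
#   import shapely.geometry
#   triangle = shapely.geometry.Polygon([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)])
#   simplified = simplify_poly(triangle, simp = 0.1)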
| apache-2.0 | -1,302,850,525,396,926,500 | 40.878049 | 125 | 0.518346 | false |
pymir3/pymir3 | mir3/modules/supervised/linear/decomposer/beta_nmf.py | 1 | 19321 | import argparse
import numpy
import numpy.random
import mir3.data.linear_decomposition as ld
import mir3.data.metadata as md
import mir3.data.spectrogram as spectrogram
import mir3.module
# TODO: maybe split this into 2 modules to compute activation and
# basis+activation
class BetaNMF(mir3.module.Module):
def get_help(self):
return """use beta nmf algorithm to compute the activations"""
def build_arguments(self, parser):
parser.add_argument('-b','--beta', type=float, default=2., help="""beta
value to be used by the algorithm (default:
%(default)s)""")
parser.add_argument('-i','--max-iterations', type=int, default=100,
help="""maximum number of iterations""")
parser.add_argument('-d','--min-delta', type=float, default=0.,
help="""minimum difference between iterations to
consider convergence""")
parser.add_argument('-B','--basis', type=argparse.FileType('rb'),
help="""basis file to be used""")
parser.add_argument('-s','--size', nargs=3, metavar=('SIZE',
'INSTRUMENT', 'NOTE'), help="""size of the
decomposition and instrument and note names to be
used for the basis. 'INSTRUMENT' or 'NOTE' can be
set to 'None' or 'null' to ignore that parameter""")
parser.add_argument('piece', nargs='+', help="""piece spectrogram
file""")
parser.add_argument('outfile', type=argparse.FileType('wb'),
help="""linear decomposition file""")
def run(self, args):
# Loads basis if present
if args.basis is not None:
b = ld.LinearDecomposition().load(args.basis)
else:
b = None
if args.basis is not None and b.data.right != {}:
print "Basis doesn't have empty right side. Ignoring it."
# Size of the decomposition (used when finding a basis too)
if args.size is None:
args.size = [None, None, None] # Simulate 3 values
for i in range(len(args.size)):
if args.size[i] == 'None' or args.size[i] == 'null':
args.size[i] = None
# Gather input spectrograms
s_list = []
s_meta = []
for filename in args.piece:
with open(filename, 'rb') as handler:
s_list.append(spectrogram.Spectrogram().load(handler))
s_meta.append(md.FileMetadata(handler))
# Converts arguments
size = int(args.size[0]) if args.size[0] is not None else None
instrument = args.size[1] if args.size[1] is not None else ''
note = args.size[2] if args.size[2] is not None else ''
# Decompose
d = self.compute(s_list,
size,
instrument,
note,
b,
args.beta,
args.min_delta,
args.max_iterations,
False)
# Associates an activation metadata with its given spectrogram's
# metadata
for k, data, metadata in d.right():
metadata.spectrogram_input = s_meta[k[-1]]
# Checks if basis was provided
if b is not None:
# If provided, adds it as basis metadata for each activation
meta = md.FileMetadata(args.basis)
for k, data, metadata in d.right():
metadata.basis_input = meta
else:
# Otherwise, the basis was computed right now, so we set its
# metadata with the list of all spectrograms' metadata
d.metadata.left[(args.size[1], args.size[2])].spectrogram_input = \
s_meta
d.save(args.outfile)
def compute(self, spectrograms, size=None, instrument=None, note=None,
basis=None, beta=2., min_delta=0., max_iterations=100,
save_metadata=True):
"""Computes the activation matrix from a basis matrix and a spectrogram.
Uses the beta divergence to compute the activations.
If min_delta is zero, the code may run faster because no beta divergence
is actually computed. Otherwise, the code stops computing if two
iterations of the algorithm don't improve the result by more than
min_delta.
Only one of 'basis' and 'size' arguments may be set, as they specify
different things. With 'size', the user extracts both a basis and an
activation from the spectrogram, while with 'basis' only an activation
is computed.
Each activation computed has the same key as the corresponding basis
plus the spectrogram's index in the list provided.
If a basis is being created, it's name is a tuple of (instrument, note),
even if they are None.
Args:
spectrograms: list of Spectrograms to be merged and used to compute
the activations.
size: Number of basis to extract from the spectrogram. Must be None
if the 'basis' argument is defined.
instrument: Name of the instrument. This is used only if size is
set. If None, it's ignored. Default: None.
note: Name of the note. This is used only if size is set. If None,
it's ignored. Default: None.
basis: LinearDecomposition object describing the basis to be used.
Must be none if the 'size' argument is defined.
beta: value for the beta used in divergence. Default: 2.
min_delta: threshold for early stop. Default: 0.
max_iterations: maximum number of iterations to use. Default: 100.
save_metadata: flag indicating whether the metadata should be
computed. Default: True.
Returns:
LinearDecomposition object with basis and activations for the
spectrograms.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Check arguments compatibility
if size is None and basis is None:
raise ValueError("One of 'size' or 'basis' must not be None.")
if basis is not None and size is not None:
raise ValueError("Only one of 'size' or 'basis' must not be None.")
# Saves metadata
if save_metadata:
s_meta = [md.ObjectMetadata(s) for s in spectrograms]
else:
s_meta = [None for s in spectrograms]
# Marks the limits of each spectrogram
X_start = [0]
for s in spectrograms:
X_start.append(X_start[-1]+s.data.shape[1])
# Merges spectrograms
X = numpy.hstack([s.data for s in spectrograms])
# If we have a basis, we only need to compute the activations
if basis is not None:
# Merges basis but keep track where each one starts so that it can
# be used to characterize the activations
B = []
B_start = [0]
for k, data, metadata in basis.left():
B.append(data)
B_start.append(B_start[-1]+data.shape[1])
B = numpy.hstack(B)
# Saves metadata
if save_metadata:
b_meta = md.ObjectMetadata(B)
else:
b_meta = None
# Initilizes activations
A = numpy.ones((B.shape[1], X.shape[1]))
# Computes the activation
self.compute_activation(X, B, A, beta, min_delta, max_iterations)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Copy the left stuff from the basis, since they came from there
d.data.left = basis.data.left
d.metadata.left = basis.metadata.left
# Cuts the activation. For each combination of basis and
# spectrograms, we get an activation
i = 0
for k, data, metadata in basis.left():
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its
# sequence number
s_name = (j,)
# Cuts the activation
A_cut = A[B_start[i]:B_start[i+1], X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a
# key for the activation. Then stores a lot of metadata
# about what was used to compute it.
d.add(k+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
                              spectrogram=spectrograms[j].metadata,
basis_input=b_meta,
basis=metadata))
# Increase basis iterator
i += 1
else:
# Everyone gets the same matrices to work with every time, so we
# avoid consistency problems. However, we can't have the same values
# filling the matrices or the algorithm can't separate the basis and
# activations (everyone keeps getting the same value).
numpy.random.seed(0)
B = numpy.random.rand(X.shape[0], size)
A = numpy.random.rand(size, X.shape[1])
# Computes both basis and activations
self.compute_both(X, B, A, beta, min_delta, max_iterations)
# Key for the basis created
key = (instrument, note)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Adds basis
d.add(key,
left=B,
left_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta,
spectrogram=[s.metadata for s in spectrograms]))
# Adds the activations cutted to match the spectrograms
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its sequence
# number
s = spectrograms[j]
s_name = (j,)
# Cuts the activation
A_cut = A[:, X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a key
# for the activation. Then stores a lot of metadata about what
# was used to compute it.
d.add(key+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
spectrogram=s.metadata))
return d
def compute_both(self, X, B, A, beta=2., min_delta=0., max_iterations=100):
"""Computes both the basis and activation.
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
max_iterations: maximum number of iterations. Default: 100;
Raises:
ValueError: matrices have incompatible sizes.
"""
# Checks shapes match
if X.shape[0] != B.shape[0] or X.shape[1] != A.shape[1]:
raise ValueError("Incompatible matrix sizes: %r = %r * %r." %
(X.shape, B.shape, A.shape))
# Makes decomposition
self.beta_nmf(1e-6+X, # Avoids near-zero values
B,
A,
beta=beta,
update_B=True,
update_A=True,
min_delta=min_delta,
max_iterations=max_iterations)
def compute_activation(self, X, B, A, beta=2., min_delta=0.,
max_iterations=100):
"""Computes both the activation for a given basis.
Args:
X: matrix to be approximated.
B: basis to be used.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
max_iterations: maximum number of iterations. Default: 100.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Checks shapes match
if X.shape[0] != B.shape[0] or X.shape[1] != A.shape[1]:
raise ValueError("Incompatible matrix sizes: %r = %r * %r." %
(X.shape, B.shape, A.shape))
# Computes 100 activations at the same time for speed
# TODO: make this a parameter
step = 100
for i in range(0,X.shape[1],step):
self.beta_nmf(1e-6+X[:,i:i+step], # Avoids near-zero values
B,
A[:,i:i+step],
beta=beta,
update_B=False,
update_A=True,
min_delta=min_delta,
max_iterations=max_iterations)
def betadivergence(self, x, y, beta=2.0):
"""Computes the beta-divergence d(x|y).
The beta-divergence, as defined by Eguchi and Kano [1], is given by:
        (1/(beta*(beta-1))) * (x**beta + (beta-1)*y**beta - beta*x*y**(beta-1)),
if beta is not 0 or 1;
x * log(x/y) + (y-x), if beta=1
(x/y) - log(x/y) - 1, if beta=0
The special cases for the beta divergence are:
beta=0 -> Itakura-Saito divergence
beta=1 -> Kullback-Leibler divergence
beta=2 -> Euclidean distance
Args:
x: left side of the divergence
y: right side of the divergence
beta: value of beta used to compute. Default: 2.
Returns:
Divergence value.
"""
# Common values of beta with faster evaluation
if beta == 1:
return numpy.sum(x * numpy.log(x/y) + (y-x))
elif beta == 0:
return numpy.sum((x/y) - numpy.log(x/y) - 1)
elif beta == 2:
return numpy.sum((x-y)**2)/2.
# Magic formula for beta-divergence
beta = float(beta)
d = (1/(beta*(beta-1))) * \
numpy.sum((x**beta)+(beta-1)*(y**beta)-beta*x*(y**(beta-1)))
return d
def beta_nmf_step(self, X, B, A, beta=2.0, update_B=False, update_A=True):
"""Computes a step of a non-negative factorization towards X using B and
A as initial conditions.
X = B * A
The matrices A and B are updated in place, so any previous value is
destroyed. Because of convergence problems, only one update is performed
at a time, with A update having priority. If you want to update both,
call this twice.
Returns B, A and the error after the step was taken. Uses the
multiplicative approach as defined in:
Cedric Fevotte and Jerome Idier: Algorithms for nonnegative matrix
factorization with the beta-divergence (pg 13, eqs. 67 and 68)
Download paper at http://arxiv.org/pdf/1010.1763v3.pdf
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
update_B: flag indicating that the value of B should be updated.
Default: False.
update_A: flag indicating that the value of A should be updated.
Default: False.
"""
# Computes current approximation
Xtil = numpy.dot(B,A)
# Auxiliary variables for speed
Xtil2 = Xtil**(beta-2)
XtilNum = Xtil2*X
XtilDen = Xtil2*Xtil
if update_A:
A_numerator = numpy.dot(B.transpose(), XtilNum)
A_denominator = numpy.dot(B.transpose(), XtilDen)
A *= A_numerator/A_denominator
elif update_B:
B_numerator = numpy.dot(XtilNum, A.transpose())
B_denominator = numpy.dot(XtilDen, A.transpose())
B *= B_numerator/B_denominator
def beta_nmf(self, X, B, A, beta, update_B, update_A, min_delta,
max_iterations):
"""Performs non-negative matrix factorization for X=BA using a
beta-divergence.
The algorithm stops if either the number of iterations exceed a maximum
or the improvement is less the a threshold.
        If min_delta is 0, no beta divergence is computed and the algorithm may
run faster!
The values of B and A are updated in place.
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used.
            update_B: flag indicating that the value of B should be updated.
            update_A: flag indicating that the value of A should be updated.
            min_delta: minimum improvement necessary for the algorithm to
                continue.
            max_iterations: maximum number of iterations.
"""
# If tolerance is zero, we can skip beta divergence computation. This
# may increase speed a lot
min_delta_is_zero = (min_delta == 0)
# If we have a tolerance, compute initial values to check for
# convergence
if not min_delta_is_zero:
last_delta = 2*min_delta
curr_err = self.betadivergence(X, numpy.dot(B,A), beta)
n_iterations = 0
while (min_delta_is_zero or last_delta > min_delta) and \
n_iterations < max_iterations:
# Update the chosen matrices
if update_B and update_A:
self.beta_nmf_step(X, B, A, beta, False, True)
self.beta_nmf_step(X, B, A, beta, True, False)
else:
self.beta_nmf_step(X, B, A, beta, update_B, update_A)
# If tolerance isn't zero, we need to check for convergence
if not min_delta_is_zero:
new_err = self.betadivergence(X, numpy.dot(B, A), beta)
last_delta = curr_err-new_err
curr_err = new_err
n_iterations = n_iterations + 1
| mit | 3,395,723,832,904,271,400 | 39.420502 | 80 | 0.537239 | false |
slacker007/OFF-ToolKit | Modules/Correlation.py | 1 | 1103 | from Builders import wigile_query
from Builders import kml_builder
class ClassName():
#These are the options we will set
def __init__(self):
# Descriptions that are required!!!
self.name = "Registry Network info corelation"
self.description = "WIGLE Query your known BSSID"
self.language = "python"
self.extension = "py"
self.rating = "Excellent"
        # options we require user interaction for - format is {Option : [Value, Description]}
self.required_options = {"bssid" : ['00:22:55:DF:C8:01', "Set BSSID or MAC of AP"],
"user" : ['offtest', "Set Username to WIGLE"],
"pass" : ['83128312', "Set Password to WIGLE"]}
def startx(self):
wa = wigile_query.WigleAgent(self.required_options["user"][0], self.required_options["pass"][0])
final = wa.get_lat_lng(self.required_options["bssid"][0])
print final
kml = kml_builder.kml()
kml.build(final["lat"], final["lng"], final["bssid"]) #Pass SSID name of network
print "[*] Check output"
| gpl-2.0 | -6,857,196,701,473,363,000 | 41.423077 | 102 | 0.600181 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20140718C.py | 1 | 2012 | """
[7/18/2014] Challenge #171 [Hard] Intergalatic Bitstream
https://www.reddit.com/r/dailyprogrammer/comments/2b21mp/7182014_challenge_171_hard_intergalatic_bitstream/
#Description:
Keeping with our "Bit" theme this week. We will look into the future. It is 2114. We have colonized the Galaxy. To
communicate we send 140 character max messages using [A-Z0-9 ]. The technology to do this requires faster than light
pulses to beam the messages to relay stations.
Your challenge is to implement the compression for these messages. The design is very open and the solutions will vary.
Your goals:
* Compact 140 Bytes down to a stream of bits to send and then decompact the message and verify 100% data contained.
* The goal is bit reduction. 140 bytes or less at 8 bits per byte so thats 1120 bits max. If you take a message of 140
bytes and compress it to 900 bits you have 220 less bits for 20% reduction.
#Input:
A text message of 140 or less characters that can be [A-Z0-9 ]
#Output:
Read Message of x Bytes.
Compressing x*8 Bits into y Bits. (z% compression)
Sending Message.
Decompressing Message into x Bytes.
Message Matches!
* x - size of your message
* x* 8 = bits of your message
* z - the percentage of message compressed by
* y bits of your bit stream for transmission
So compress your tiny message and show some stats on it and then decompress it and verify it matches the original
message.
#Challenge Inputs:
three messages to send:
REMEMBER TO DRINK YOUR OVALTINE
GIANTS BEAT DODGERS 10 TO 9 AND PLAY TOMORROW AT 1300
SPACE THE FINAL FRONTIER THESE ARE THE VOYAGES OF THE BIT STREAM DAILY PROGRAMMER TO SEEK OUT NEW COMPRESSION
#Congrats!
We are a trending subreddit for today 7-18-2014. Welcome to first time viewers of /r/dailyprogrammers checking out our
cool subreddit. We have lots of programming challenges for you to take on in the past and many to look forward to in
the future.
"""
def main():
pass
if __name__ == "__main__":
main()
| mit | -8,248,801,363,357,385,000 | 42.73913 | 119 | 0.752982 | false |
ContinuumIO/dask | setup.py | 2 | 2210 | #!/usr/bin/env python
import sys
from os.path import exists
from setuptools import setup
import versioneer
# NOTE: These are tested in `continuous_integration/travis/test_imports.sh` If
# you modify these, make sure to change the corresponding line there.
extras_require = {
"array": ["numpy >= 1.13.0", "toolz >= 0.8.2"],
"bag": [
"cloudpickle >= 0.2.2",
"fsspec >= 0.6.0",
"toolz >= 0.8.2",
"partd >= 0.3.10",
],
"dataframe": [
"numpy >= 1.13.0",
"pandas >= 0.23.0",
"toolz >= 0.8.2",
"partd >= 0.3.10",
"fsspec >= 0.6.0",
],
"distributed": ["distributed >= 2.0"],
"diagnostics": ["bokeh >= 1.0.0"],
"delayed": ["cloudpickle >= 0.2.2", "toolz >= 0.8.2"],
}
extras_require["complete"] = sorted({v for req in extras_require.values() for v in req})
install_requires = ["pyyaml"]
packages = [
"dask",
"dask.array",
"dask.bag",
"dask.bytes",
"dask.dataframe",
"dask.dataframe.io",
"dask.dataframe.tseries",
"dask.diagnostics",
]
tests = [p + ".tests" for p in packages]
# Only include pytest-runner in setup_requires if we're invoking tests
if {"pytest", "test", "ptr"}.intersection(sys.argv):
setup_requires = ["pytest-runner"]
else:
setup_requires = []
setup(
name="dask",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Parallel PyData with Task Scheduling",
url="https://github.com/dask/dask/",
maintainer="Matthew Rocklin",
maintainer_email="[email protected]",
license="BSD",
keywords="task-scheduling parallel numpy pandas pydata",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=packages + tests,
long_description=open("README.rst").read() if exists("README.rst") else "",
python_requires=">=3.6",
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=["pytest"],
extras_require=extras_require,
include_package_data=True,
zip_safe=False,
)
| bsd-3-clause | -553,675,897,247,055,000 | 27.701299 | 88 | 0.60724 | false |
nkgilley/home-assistant | homeassistant/components/statistics/sensor.py | 1 | 12228 | """Support for statistics for sensor values."""
from collections import deque
import logging
import statistics
import voluptuous as vol
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, event
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AVERAGE_CHANGE = "average_change"
ATTR_CHANGE = "change"
ATTR_CHANGE_RATE = "change_rate"
ATTR_COUNT = "count"
ATTR_MAX_AGE = "max_age"
ATTR_MAX_VALUE = "max_value"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_MIN_AGE = "min_age"
ATTR_MIN_VALUE = "min_value"
ATTR_SAMPLING_SIZE = "sampling_size"
ATTR_STANDARD_DEVIATION = "standard_deviation"
ATTR_TOTAL = "total"
ATTR_VARIANCE = "variance"
CONF_SAMPLING_SIZE = "sampling_size"
CONF_MAX_AGE = "max_age"
CONF_PRECISION = "precision"
DEFAULT_NAME = "Stats"
DEFAULT_SIZE = 20
DEFAULT_PRECISION = 2
ICON = "mdi:calculator"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SAMPLING_SIZE, default=DEFAULT_SIZE): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_AGE): cv.time_period,
vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Statistics sensor."""
entity_id = config.get(CONF_ENTITY_ID)
name = config.get(CONF_NAME)
sampling_size = config.get(CONF_SAMPLING_SIZE)
max_age = config.get(CONF_MAX_AGE)
precision = config.get(CONF_PRECISION)
async_add_entities(
[StatisticsSensor(entity_id, name, sampling_size, max_age, precision)], True
)
return True
class StatisticsSensor(Entity):
"""Representation of a Statistics sensor."""
def __init__(self, entity_id, name, sampling_size, max_age, precision):
"""Initialize the Statistics sensor."""
self._entity_id = entity_id
self.is_binary = self._entity_id.split(".")[0] == "binary_sensor"
self._name = name
self._sampling_size = sampling_size
self._max_age = max_age
self._precision = precision
self._unit_of_measurement = None
self.states = deque(maxlen=self._sampling_size)
self.ages = deque(maxlen=self._sampling_size)
self.count = 0
self.mean = self.median = self.stdev = self.variance = None
self.total = self.min = self.max = None
self.min_age = self.max_age = None
self.change = self.average_change = self.change_rate = None
self._update_listener = None
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def async_stats_sensor_state_listener(entity, old_state, new_state):
"""Handle the sensor state changes."""
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
self._add_state_to_queue(new_state)
self.async_schedule_update_ha_state(True)
@callback
def async_stats_sensor_startup(_):
"""Add listener and get recorded state."""
_LOGGER.debug("Startup for %s", self.entity_id)
event.async_track_state_change(
self.hass, self._entity_id, async_stats_sensor_state_listener
)
if "recorder" in self.hass.config.components:
# Only use the database if it's configured
self.hass.async_create_task(self._async_initialize_from_database())
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_stats_sensor_startup
)
def _add_state_to_queue(self, new_state):
"""Add the state to the queue."""
if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
return
try:
if self.is_binary:
self.states.append(new_state.state)
else:
self.states.append(float(new_state.state))
self.ages.append(new_state.last_updated)
except ValueError:
_LOGGER.error(
"%s: parsing error, expected number and received %s",
self.entity_id,
new_state.state,
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self.mean if not self.is_binary else self.count
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement if not self.is_binary else None
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if not self.is_binary:
return {
ATTR_SAMPLING_SIZE: self._sampling_size,
ATTR_COUNT: self.count,
ATTR_MEAN: self.mean,
ATTR_MEDIAN: self.median,
ATTR_STANDARD_DEVIATION: self.stdev,
ATTR_VARIANCE: self.variance,
ATTR_TOTAL: self.total,
ATTR_MIN_VALUE: self.min,
ATTR_MAX_VALUE: self.max,
ATTR_MIN_AGE: self.min_age,
ATTR_MAX_AGE: self.max_age,
ATTR_CHANGE: self.change,
ATTR_AVERAGE_CHANGE: self.average_change,
ATTR_CHANGE_RATE: self.change_rate,
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def _purge_old(self):
"""Remove states which are older than self._max_age."""
now = dt_util.utcnow()
_LOGGER.debug(
"%s: purging records older then %s(%s)",
self.entity_id,
dt_util.as_local(now - self._max_age),
self._max_age,
)
while self.ages and (now - self.ages[0]) > self._max_age:
_LOGGER.debug(
"%s: purging record with datetime %s(%s)",
self.entity_id,
dt_util.as_local(self.ages[0]),
(now - self.ages[0]),
)
self.ages.popleft()
self.states.popleft()
def _next_to_purge_timestamp(self):
"""Find the timestamp when the next purge would occur."""
if self.ages and self._max_age:
# Take the oldest entry from the ages list and add the configured max_age.
# If executed after purging old states, the result is the next timestamp
# in the future when the oldest state will expire.
return self.ages[0] + self._max_age
return None
async def async_update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("%s: updating statistics.", self.entity_id)
if self._max_age is not None:
self._purge_old()
self.count = len(self.states)
if not self.is_binary:
try: # require only one data point
self.mean = round(statistics.mean(self.states), self._precision)
self.median = round(statistics.median(self.states), self._precision)
except statistics.StatisticsError as err:
_LOGGER.debug("%s: %s", self.entity_id, err)
self.mean = self.median = STATE_UNKNOWN
try: # require at least two data points
self.stdev = round(statistics.stdev(self.states), self._precision)
self.variance = round(statistics.variance(self.states), self._precision)
except statistics.StatisticsError as err:
_LOGGER.debug("%s: %s", self.entity_id, err)
self.stdev = self.variance = STATE_UNKNOWN
if self.states:
self.total = round(sum(self.states), self._precision)
self.min = round(min(self.states), self._precision)
self.max = round(max(self.states), self._precision)
self.min_age = self.ages[0]
self.max_age = self.ages[-1]
self.change = self.states[-1] - self.states[0]
self.average_change = self.change
self.change_rate = 0
if len(self.states) > 1:
self.average_change /= len(self.states) - 1
time_diff = (self.max_age - self.min_age).total_seconds()
if time_diff > 0:
self.change_rate = self.change / time_diff
self.change = round(self.change, self._precision)
self.average_change = round(self.average_change, self._precision)
self.change_rate = round(self.change_rate, self._precision)
else:
self.total = self.min = self.max = STATE_UNKNOWN
self.min_age = self.max_age = dt_util.utcnow()
self.change = self.average_change = STATE_UNKNOWN
self.change_rate = STATE_UNKNOWN
# If max_age is set, ensure to update again after the defined interval.
next_to_purge_timestamp = self._next_to_purge_timestamp()
if next_to_purge_timestamp:
_LOGGER.debug(
"%s: scheduling update at %s", self.entity_id, next_to_purge_timestamp
)
if self._update_listener:
self._update_listener()
self._update_listener = None
@callback
def _scheduled_update(now):
"""Timer callback for sensor update."""
_LOGGER.debug("%s: executing scheduled update", self.entity_id)
self.async_schedule_update_ha_state(True)
self._update_listener = None
self._update_listener = event.async_track_point_in_utc_time(
self.hass, _scheduled_update, next_to_purge_timestamp
)
async def _async_initialize_from_database(self):
"""Initialize the list of states from the database.
The query will get the list of states in DESCENDING order so that we
can limit the result to self._sample_size. Afterwards reverse the
list so that we get it in the right order again.
If MaxAge is provided then query will restrict to entries younger then
current datetime - MaxAge.
"""
_LOGGER.debug("%s: initializing values from the database", self.entity_id)
with session_scope(hass=self.hass) as session:
query = session.query(States).filter(
States.entity_id == self._entity_id.lower()
)
if self._max_age is not None:
records_older_then = dt_util.utcnow() - self._max_age
_LOGGER.debug(
"%s: retrieve records not older then %s",
self.entity_id,
records_older_then,
)
query = query.filter(States.last_updated >= records_older_then)
else:
_LOGGER.debug("%s: retrieving all records.", self.entity_id)
query = query.order_by(States.last_updated.desc()).limit(
self._sampling_size
)
states = execute(query, to_native=True, validate_entity_ids=False)
for state in reversed(states):
self._add_state_to_queue(state)
self.async_schedule_update_ha_state(True)
_LOGGER.debug("%s: initializing from database completed", self.entity_id)
| apache-2.0 | 5,336,948,942,436,154,000 | 35.177515 | 88 | 0.586932 | false |
rjdp/Easynginedemoplugin | ee/core/mysql.py | 1 | 4004 | """EasyEngine MySQL core classes."""
import pymysql
import configparser
from os.path import expanduser
import sys
import os
from ee.core.logging import Log
from ee.core.variables import EEVariables
class EEMysql():
"""Method for MySQL connection"""
def execute(self, statement, errormsg='', log=True):
"""Get login details from ~/.my.cnf & Execute MySQL query"""
config = configparser.RawConfigParser()
cnfpath = expanduser("~")+"/.my.cnf"
if [cnfpath] == config.read(cnfpath):
user = config.get('client', 'user')
passwd = config.get('client', 'password')
try:
host = config.get('client', 'host')
except configparser.NoOptionError as e:
host = 'localhost'
try:
port = config.get('client', 'port')
except configparser.NoOptionError as e:
port = '3306'
try:
conn = pymysql.connect(host=host, port=int(port),
user=user, passwd=passwd)
cur = conn.cursor()
except Exception as e:
if errormsg:
Log.debug(self, '{0}'
.format(e))
Log.error(self, '{0}'
.format(errormsg))
else:
Log.debug(self, '{0}'
.format(e))
Log.error(self, 'Unable to connect to database: {0}'
.format(e))
try:
if log:
Log.debug(self, "Executing MySQL statement: {0}"
.format(statement))
cur.execute(statement)
cur.close()
conn.close()
except Exception as e:
cur.close()
conn.close()
Log.debug(self, "{0}".format(e))
if not errormsg:
Log.error(self, 'Unable to execute statement')
else:
Log.error(self, '{0}'.format(errormsg))
def backupAll(self):
import subprocess
try:
Log.info(self, "Backing up database at location: "
"/var/ee-mysqlbackup")
            # Create the MySQL backup directory if it does not exist
if not os.path.exists('/var/ee-mysqlbackup'):
Log.debug(self, 'Creating directory'
'/var/ee-mysqlbackup')
os.makedirs('/var/ee-mysqlbackup')
db = subprocess.check_output(["mysql -Bse \'show databases\'"],
universal_newlines=True,
shell=True).split('\n')
for dbs in db:
if dbs == "":
continue
Log.info(self, "Backing up {0} database".format(dbs))
p1 = subprocess.Popen("mysqldump {0}"
" --max_allowed_packet=1024M"
" --single-transaction".format(dbs),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
p2 = subprocess.Popen("gzip -c > /var/ee-mysqlbackup/{0}{1}.s"
"ql.gz".format(dbs, EEVariables.ee_date),
stdin=p1.stdout,
shell=True)
# Allow p1 to receive a SIGPIPE if p2 exits
p1.stdout.close()
output = p1.stderr.read()
p1.wait()
if p1.returncode == 0:
Log.debug(self, "done")
else:
Log.error(self, output.decode("utf-8"))
except Exception as e:
Log.error(self, "Error: process exited with status %s"
% e)
| mit | 1,610,323,725,944,046,800 | 37.873786 | 79 | 0.441059 | false |
whtsky/catsup-docs-zh | catsup/generator/__init__.py | 1 | 4323 | import time
import os
import catsup.parser
from catsup.logger import logger
from catsup.generator.renderer import Renderer
from catsup.reader import get_reader
from catsup.options import g
from catsup.utils import smart_copy
from catsup.models import *
class Generator(object):
def __init__(self, config_path, local=False, base_url=None):
self.config_path = config_path
self.local = local
self.base_url = base_url
g.generator = self
self.posts = []
self.pages = []
self.non_post_files = []
self.archives = []
self.tags = []
self.caches = []
self.config = {}
self.renderer = None
self.reset()
def reset(self):
self.posts = []
self.pages = []
self.non_post_files = []
self.archives = g.archives = Archives()
self.tags = g.tags = Tags()
self.load_config()
self.load_posts()
self.load_renderer()
self.caches = {
"static_url": {},
"url_for": {}
}
def load_config(self):
self.config = g.config = catsup.parser.config(
self.config_path,
local=self.local,
base_url=self.base_url
)
def load_posts(self):
for f in os.listdir(g.source):
if f.startswith("."): # hidden file
continue
filename, ext = os.path.splitext(f)
ext = ext.lower()[1:]
reader = get_reader(ext)
if reader is not None:
logger.info('Loading file %s' % filename)
path = os.path.join(g.source, f)
post = reader(path)
if post.type == "page":
self.pages.append(post)
else:
self.posts.append(post)
else:
self.non_post_files.append(f)
self.posts.sort(
key=lambda x: x.datetime,
reverse=True
)
def load_renderer(self):
templates_path = [
g.public_templates_path,
os.path.join(g.theme.path, 'templates')
]
self.renderer = Renderer(
templates_path=templates_path,
generator=self
)
def generate_feed(self):
feed = Feed(self.posts)
feed.render(self.renderer)
def generate_pages(self):
page = Page(self.posts)
page.render_all(self.renderer)
def generate_posts(self):
for post in self.posts:
post.add_archive_and_tags()
post.render(self.renderer)
for page in self.pages:
page.render(self.renderer)
def generate_tags(self):
self.tags.render(self.renderer)
def generate_archives(self):
self.archives.render(self.renderer)
def generate_other_pages(self):
NotFound().render(self.renderer)
def copy_static_files(self):
static_path = self.config.config.static_output
smart_copy(
os.path.join(g.theme.path, 'static'),
static_path
)
smart_copy(
self.config.config.static_source,
static_path
)
for f in self.non_post_files:
smart_copy(
os.path.join(g.source, f),
os.path.join(self.config.config.output, f)
)
def generate(self):
started_loading = time.time()
self.reset()
finish_loading = time.time()
logger.info(
"Loaded config and %s posts in %.3fs" %
(len(self.posts), finish_loading - started_loading)
)
if self.posts:
self.generate_posts()
self.generate_tags()
self.generate_archives()
self.generate_feed()
self.generate_pages()
else:
logger.warning("Can't find any post.")
self.generate_other_pages()
self.copy_static_files()
self.renderer.render_sitemap()
finish_generating = time.time()
logger.info(
"Generated %s posts in %.3fs" %
(len(self.posts), finish_generating - finish_loading)
)
logger.info(
"Generating finished in %.3fs" %
(finish_generating - started_loading)
)
| mit | 2,849,377,199,365,245,000 | 27.440789 | 65 | 0.53065 | false |
skygeek/skyproc | data_server/data/model_Archives.py | 1 | 3610 | # -*- coding: utf-8 -*-
# Copyright 2012, Nabil SEFRIOUI
#
# This file is part of Skyproc.
#
# Skyproc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or any later version.
#
# Skyproc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skyproc. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models import Max
from django.conf import settings
import base
import fields
from choices import *
class LoadLog(base.ArchiveModel):
archive = True
location = fields.UUIDField()
aircraft = fields.UUIDField()
pilot = fields.UUIDField()
aircraft_reg = models.CharField(max_length=16)
pilot_name = models.CharField(max_length=64)
date = models.DateField()
number = models.SmallIntegerField()
total_slots = models.SmallIntegerField()
prepaid_slots = models.SmallIntegerField()
postpaid_slots = models.SmallIntegerField()
unpaid_slots = models.SmallIntegerField()
staff_slots = models.SmallIntegerField()
prices = models.CharField(max_length=512)
note = models.CharField(max_length=200, blank=True, null=True)
class SlotLog(base.ArchiveModel):
archive = True
load = models.ForeignKey('LoadLog')
jumper = fields.UUIDField(blank=True, null=True)
jumper_name = models.CharField(max_length=64)
is_worker = models.BooleanField(default=False)
catalog_item = models.CharField(max_length=64)
exit_order = models.SmallIntegerField()
catalog_price = models.CharField(max_length=64, blank=True, null=True)
payer = models.CharField(max_length=64, blank=True, null=True)
payment = models.CharField(max_length=64)
payment_type = models.CharField(max_length=16)
class JumpLog(base.ArchiveModel):
archive = True
location = fields.UUIDField(blank=True, null=True)
number = models.IntegerField(blank=True, null=True)
location_name = models.CharField(max_length=64, blank=True, null=True)
aircraft_type = models.CharField(max_length=32, blank=True, null=True)
date = models.DateField()
jump_type = models.CharField(max_length=32, blank=True, null=True)
altitude = models.CharField(max_length=32, blank=True, null=True)
note = models.CharField(max_length=100, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.number:
max_number = JumpLog.objects.filter(owner=self.owner).aggregate(Max('number'))['number__max']
if max_number is None:
try:
person = models.get_model(settings.DATA_APP, 'Person').objects.get_by_natural_key(self.owner)
past_jumps = person.past_jumps
except: past_jumps = 0
self.number = past_jumps+1
else: self.number = max_number+1
super(JumpLog, self).save(*args, **kwargs)
class AccountOperationLog(base.ArchiveModel):
archive = True
location = fields.UUIDField()
date = models.DateField()
type = models.CharField(max_length=1, choices=ACCOUNT_OPERATIONS)
amount = models.CharField(max_length=64)
currency = models.CharField(max_length=5)
note = models.CharField(max_length=100)
| agpl-3.0 | -8,264,354,205,025,495,000 | 38.67033 | 113 | 0.69446 | false |
orting/emphysema-estimation | Experiments/02-ScalabilityOfClusteringAlgorithm/Scripts/RunScalability-1.py | 1 | 2142 | #!/usr/bin/python3
'''Run Scalability-1.
See README.md for details.
'''
import sys, subprocess, os.path
from Util import intersperse
def main():
skip = {
'Measure' : False,
}
basedir = ''
dirs = {
'Instances' : os.path.join(basedir, 'Data', 'Instances'),
'Statistics' : os.path.join(basedir, 'Data', 'Statistics', 'Scalability-1'),
'Bin' : '../../Build',
}
files = {
'Instances' : [
os.path.join(dirs['Instances'], 'instances500.csv'),
os.path.join(dirs['Instances'], 'instances1000.csv'),
os.path.join(dirs['Instances'], 'instances2500.csv'),
os.path.join(dirs['Instances'], 'instances5000.csv'),
os.path.join(dirs['Instances'], 'instances7500.csv'),
os.path.join(dirs['Instances'], 'instances10000.csv')
],
}
progs = {
'Scalability' : os.path.join(dirs['Bin'],'Experiments/02-ScalabilityOfClusteringAlgorithm/Scalability'),
}
params = {
'clusters' : [4, 8, 16, 32, 64],
'histograms' : 7*8, # 7 scales * 8 features
'burnin' : 10,
'iterations' : 100,
'branching' : 1,
}
if skip['Measure']:
print( 'Skipping: Measure' )
else:
print( 'Measuring' )
for instanceMatrix in files['Instances']:
args = [
progs['Scalability'],
'--input', instanceMatrix,
'--nHistograms', "%d" % params['histograms'],
'--output', os.path.join(dirs['Statistics'], 'stats_' + os.path.basename(instanceMatrix)),
'--burnin', "%d" % params['burnin'],
'--iterations', "%d" % params['iterations'],
'--branching', "%d" % params['branching']
] + list(intersperse('--clusters', ("%d" % k for k in params['clusters'])))
print(' '.join(args))
if subprocess.call( args ) != 0:
print( 'Error measuring', instanceMatrix )
return 1
return 0
if __name__ == '__main__':
sys.exit( main() )
| gpl-3.0 | -2,892,246,233,755,231,000 | 30.970149 | 112 | 0.505602 | false |
hideoussquid/aureus-12-bitcore | contrib/linearize/linearize-hashes.py | 1 | 3034 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class AureusRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = AureusRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
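# Illustrative config example (added for clarity, not part of the original
# script). The file passed on the command line holds key=value lines; lines
# starting with '#' are skipped by the parser above. Placeholder values:
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#
# Any key left out falls back to the defaults set in __main__; rpcuser and
# rpcpassword are required.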
| mit | 1,344,201,614,436,289,500 | 25.849558 | 90 | 0.663481 | false |
osgee/redigit | redigit/settings.py | 1 | 2956 | """
Django settings for redigit project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SETTINGS_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7p8)$=frp+7336ak^oo1verce)=ywu(&of@qvrvylw4%!kpeak'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'redigit.urls'
WSGI_APPLICATION = 'redigit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'redigit/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_DIRS = (
os.path.join(SETTINGS_PATH, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | apache-2.0 | -2,283,062,519,946,228,700 | 24.491379 | 71 | 0.688769 | false |
tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/contrib/twmn.py | 1 | 1236 | # pylint: disable=C0111,R0903
"""Toggle twmn notifications.
Requires the following executable:
* systemctl
contributed by `Pseudonick47 <https://github.com/Pseudonick47>`_ - many thanks!
"""
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
class Module(core.module.Module):
@core.decorators.every(minutes=60)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(""))
self.__paused = False
# Make sure that twmn is currently not paused
util.cli.execute("killall -SIGUSR2 twmnd", ignore_errors=True)
core.input.register(self, button=core.input.LEFT_MOUSE, cmd=self.toggle_status)
def toggle_status(self, event):
self.__paused = not self.__paused
try:
if self.__paused:
util.cli.execute("systemctl --user start twmnd")
else:
util.cli.execute("systemctl --user stop twmnd")
except:
self.__paused = not self.__paused # toggling failed
def state(self, widget):
if self.__paused:
return ["muted"]
return ["unmuted"]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -1,048,578,473,358,335,700 | 25.869565 | 87 | 0.632686 | false |
xArm-Developer/xArm-Python-SDK | example/wrapper/xarm7/2004-move_joint.py | 1 | 2851 | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
"""
Description: Move Joint
"""
import os
import sys
import time
import math
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
#######################################################
"""
Just for test example
"""
if len(sys.argv) >= 2:
ip = sys.argv[1]
else:
try:
from configparser import ConfigParser
parser = ConfigParser()
parser.read('../robot.conf')
ip = parser.get('xArm', 'ip')
except:
ip = input('Please input the xArm ip address:')
if not ip:
print('input error, exit')
sys.exit(1)
########################################################
arm = XArmAPI(ip, is_radian=True)
arm.motion_enable(enable=True)
arm.set_mode(0)
arm.set_state(state=0)
arm.reset(wait=True)
speed = 50
arm.set_servo_angle(servo_id=1, angle=90, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=-60, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=-30, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=1, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.reset(wait=True)
speed = math.radians(50)
arm.set_servo_angle(servo_id=1, angle=math.radians(90), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=math.radians(-60), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=math.radians(-30), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=1, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.reset(wait=True)
arm.disconnect()
| bsd-3-clause | -8,415,697,118,851,235,000 | 33.768293 | 83 | 0.677306 | false |
laenderoliveira/exerclivropy | exercicios_resolvidos/capitulo 05/exercicio-05-28.py | 1 | 1772 | ##############################################################################
# Part of the book "Introdução à Programação com Python"
# Author: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2014
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# File: exercicios_resolvidos\capitulo 05\exercicio-05-28.py
##############################################################################
# Exercise 5.27
# Alternative solution, using integers only
n = int(input("Enter the number to check: "))
# Since n is an integer, we find its number of digits by locating the
# first power of 10 that is greater than n.
# Example: 341 - the first larger power of 10 is 1000 = 10 ** 3, so q = 3.
# The strict "less than" comparison also handles numbers with a single
# digit; the adjustment is done in the formulas below.
q = 0
while 10 ** q < n:
    q = q + 1
i = q
f = 0
nf = ni = n # Here we copy n into ni and nf
pi = pf = 0 # and set pi = pf (to handle the special cases)
while i > f:
    pi = int(ni / (10 ** (i-1))) # Leftmost (most significant) digit
    pf = nf % 10 # Rightmost (least significant) digit
    if pi != pf: # If they differ, stop
        break
    f = f + 1 # Advance to the next digit, moving leftwards
    i = i - 1 # Advance to the next digit, moving rightwards
    ni = ni - (pi * (10 ** i )) # Adjust ni so the digit just compared is removed
    nf = int(nf / 10) # Adjust nf to drop its last digit
if pi == pf:
    print("%d is a palindrome" % n)
else:
    print("%d is not a palindrome" % n)
| mit | -280,417,563,851,955,170 | 38.25 | 83 | 0.607991 | false |
Tayamarn/socorro | socorro/unittest/processor/test_processor_2015.py | 1 | 4727 | import ujson
from configman import ConfigurationManager
from configman.dotdict import DotDict
from mock import Mock, patch
from socorro.lib.transform_rules import TransformRuleSystem
from socorro.processor.processor_2015 import (
Processor2015,
rule_sets_from_string
)
from socorro.processor.general_transform_rules import (
CPUInfoRule,
OSInfoRule
)
from socorro.unittest.testbase import TestCase
rule_set_01 = [
[
'ruleset01',
'tag0.tag1',
'socorro.lib.transform_rules.TransformRuleSystem',
'apply_all_rules',
'socorro.processor.general_transform_rules.CPUInfoRule, '
'socorro.processor.general_transform_rules.OSInfoRule '
]
]
rule_set_01_str = ujson.dumps(rule_set_01)
class TestProcessor2015(TestCase):
def test_rule_sets_from_string_1(self):
rule_set_config = rule_sets_from_string(rule_set_01_str)
rc = rule_set_config.get_required_config()
assert 'ruleset01' in rc
assert 'tag0.tag1' == rc.ruleset01.tag.default
expected = 'socorro.lib.transform_rules.TransformRuleSystem'
assert rc.ruleset01.rule_system_class.default == expected
assert 'apply_all_rules' == rc.ruleset01.action.default
expected = (
'socorro.processor.general_transform_rules.CPUInfoRule, '
'socorro.processor.general_transform_rules.OSInfoRule '
)
assert rc.ruleset01.rules_list.default == expected
def test_Processor2015_init(self):
cm = ConfigurationManager(
definition_source=Processor2015.get_required_config(),
values_source_list=[{'rule_sets': rule_set_01_str}],
)
config = cm.get_config()
config.logger = Mock()
p = Processor2015(config)
assert isinstance(p.rule_system, DotDict)
assert len(p.rule_system) == 1
assert 'ruleset01' in p.rule_system
assert isinstance(p.rule_system.ruleset01, TransformRuleSystem)
trs = p.rule_system.ruleset01
assert trs.act == trs.apply_all_rules
assert len(trs.rules) == 2
assert isinstance(trs.rules[0], CPUInfoRule)
assert isinstance(trs.rules[1], OSInfoRule)
def test_process_crash_no_rules(self):
cm = ConfigurationManager(
definition_source=Processor2015.get_required_config(),
values_source_list=[{'rule_sets': '[]'}],
)
config = cm.get_config()
config.logger = Mock()
config.processor_name = 'dwight'
p = Processor2015(config)
raw_crash = DotDict()
raw_dumps = {}
with patch('socorro.processor.processor_2015.utc_now') as faked_utcnow:
faked_utcnow.return_value = '2015-01-01T00:00:00'
processed_crash = p.process_crash(
raw_crash,
raw_dumps,
DotDict()
)
assert processed_crash.success
assert processed_crash.started_datetime == '2015-01-01T00:00:00'
assert processed_crash.startedDateTime == '2015-01-01T00:00:00'
assert processed_crash.completed_datetime == '2015-01-01T00:00:00'
assert processed_crash.completeddatetime == '2015-01-01T00:00:00'
assert processed_crash.processor_notes == 'dwight; Processor2015'
def test_process_crash_existing_processed_crash(self):
cm = ConfigurationManager(
definition_source=Processor2015.get_required_config(),
values_source_list=[{'rule_sets': '[]'}],
)
config = cm.get_config()
config.logger = Mock()
config.processor_name = 'dwight'
p = Processor2015(config)
raw_crash = DotDict()
raw_dumps = {}
processed_crash = DotDict()
processed_crash.processor_notes = "we've been here before; yep"
processed_crash.started_datetime = '2014-01-01T00:00:00'
with patch('socorro.processor.processor_2015.utc_now') as faked_utcnow:
faked_utcnow.return_value = '2015-01-01T00:00:00'
processed_crash = p.process_crash(
raw_crash,
raw_dumps,
processed_crash
)
assert processed_crash.success
assert processed_crash.started_datetime == '2015-01-01T00:00:00'
assert processed_crash.startedDateTime == '2015-01-01T00:00:00'
assert processed_crash.completed_datetime == '2015-01-01T00:00:00'
assert processed_crash.completeddatetime == '2015-01-01T00:00:00'
expected = (
"dwight; Processor2015; earlier processing: 2014-01-01T00:00:00; we've been here "
"before; yep"
)
assert processed_crash.processor_notes == expected
| mpl-2.0 | 6,571,304,992,638,448,000 | 36.515873 | 94 | 0.632536 | false |
ToFuProject/tofu | tofu/tests/tests01_geom/test_03_core_data/WEST_Ves_VesOut_Notes.py | 1 | 1355 | #!/usr/bin/env python
import os
import argparse
import numpy as np
_save = True
_here = os.path.abspath(os.path.dirname(__file__))
_Exp, _Cls, _name = os.path.split(__file__)[1].split('_')[:3]
assert not any([any([ss in s for ss in ['Notes','.']])
for s in [_Exp, _Cls, _name]])
def get_notes():
# Notes from creoView (-X,Z,Y)
notes = {'C': np.r_[2.465, 0.],
'r_in': 3.162/2., # r_out for later use (thick)
'r_out': 3.292/2.}
return notes
def make_Poly(save=_save, path=_here):
notes = get_notes()
C = notes['C']
nP = 100
theta = np.linspace(0.,2*np.pi, nP, endpoint=False)
P = np.array([C[0]+notes['r_out']*np.cos(theta),
C[1]+notes['r_out']*np.sin(theta)])
if save:
cstr = '%s_%s_%s'%(_Exp,_Cls,_name)
pathfilext = os.path.join(path, cstr+'_V0.txt')
np.savetxt(pathfilext, P)
return P, notes
if __name__=='__main__':
# Parse input arguments
msg = 'Launch creation of polygons txt from bash'
parser = argparse.ArgumentParser(description = msg)
parser.add_argument('-save', type=bool, help='save ?', default=_save)
parser.add_argument('-path', type=str, help='saving path ?', default=_here)
args = parser.parse_args()
# Call wrapper function
make_Poly(save=args.save, path=args.path)
| mit | -8,711,176,521,942,796,000 | 25.057692 | 79 | 0.57048 | false |
miooim/project_hours | src/project_hours/driver/mongo_driver.py | 1 | 2290 | import datetime
import pprint
__author__ = 'michaell'
import pymongo
from tornado.options import options
class ProjectHoursMongoDriver(object):
"""
Project hours mongo driver implementation
"""
@staticmethod
def get_month_data(month, year, user):
"""
Get results from database
:param month: month
:type month: int
:param year: year
:type year: int
:param user: user name
:type user: str
:return: result dictionary
:rtype: dict
"""
query = {
"$query": {
'month': int(month),
'year': int(year),
'user': user
}
}
# print(options.mongodb, options.mongod_name, options.mongod_name)
# pprint.pprint(query)
collection = pymongo.MongoClient(host=options.mongodb)[options.mongod_name][options.mongod_name]
return collection.find_one(query)
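    # Illustrative call (not part of the original driver; the month/year/user
    # values are made-up examples):
    #
    #   doc = ProjectHoursMongoDriver.get_month_data(3, 2014, 'michaell')
    #
    # returns the stored document for March 2014 (a dict with 'month', 'year',
    # 'user', 'days', ... keys) or None when nothing has been saved yet.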
@staticmethod
def save(month, year, user, data):
"""
Saves data to mongod
:param month: month
:type month: int
:param year: year
:type year: int
:param user: user name
:type user: str
:param data: data to save
:type data: dict
:return: true is success
:rtype: bool
"""
for item in data:
if 'no_work' in item:
if item['no_work'] is True:
item['projects'] = []
item['total'] = 0
result = ProjectHoursMongoDriver.get_month_data(month, year, user)
if result:
to_save = {
'_id': result['_id'],
'month': int(month),
'year': int(year),
'user': user,
"days": data,
"timestamp": datetime.datetime.now(),
}
else:
to_save = {
'month': int(month),
'year': int(year),
'user': user,
"days": data,
"timestamp": datetime.datetime.now(),
}
collection = pymongo.MongoClient(host=options.mongodb)[options.mongod_name][options.mongod_name]
return collection.save(to_save=to_save, safe=True)
| mit | -589,547,220,272,757,100 | 27.271605 | 104 | 0.501747 | false |
asd43/Structural-Variation | popgen/getFeatureTable.py | 1 | 2289 | #!/usr/bin/env python3
# Copyright (c) 2017 Genome Research Ltd.
# Author: Alistair Dunham
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License , or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful , but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not , see <http :// www.gnu.org/licenses/>.
## Script to convert bedtools cluster output of merged breakdancer calls into a feature table
## which has columns for each sample indicating the presence of each deletion
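## Illustrative example (made-up values, not from any real dataset): a clustered
## input line is expected to look like
##   chr1    100    200    sample1.del_3    7
## i.e. chrom, start, end, an ID whose sample part is recovered by getSample(),
## and the cluster number in the fifth column. The output is a tab-separated
## table with one row per cluster ("del_1", "del_2", ...) and one 0/1 column
## per sample.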
import argparse
import fileinput
import re
## Read arguments
parser = argparse.ArgumentParser(description="Transform bedtools cluster output for deletion calls into a feature table of 'genotypes'.")
parser.add_argument('tenx',metavar='T',type=str,help="Bed file containing clustered deletion calls")
parser.add_argument('--bd','-b',action='store_true',help="Expect BreakDancer formatted IDs. Otherwise expect 10X formatted IDs.")
args = parser.parse_args()
## Determine function to use for setting sample ID depending on given source format
if args.bd:
def getSample(x):
"""Extract sample from BreakDancer formatted ID tags"""
return(re.split("[_.]",x)[-2])
else:
def getSample(x):
"""Extract sample from 10X formatted ID tags"""
return(x.split('.')[0])
## Extract each deletion call and its cluster number
dels = []
samples = set()
with fileinput.input(args.tenx) as bed:
for li in bed:
t = li.strip().split()
s = getSample(t[3])
n = int(t[4])
samples.add(s)
if len(dels) < n:
dels.append(set([s]))
else:
dels[n - 1].add(s)
## Print feature table
samples = sorted(list(samples))
print("Deletion",*samples,sep='\t')
for n,delSamples in enumerate(dels):
## generate feature string
feats = [(1 if i in delSamples else 0) for i in samples]
print('_'.join(["del",str(n + 1)]),*feats,sep='\t')
| gpl-3.0 | -661,282,732,030,508,800 | 37.15 | 137 | 0.70817 | false |
2-B/etherpad-lite | bin/parsejson.py | 1 | 1119 | import json
import re
# Regular expression for comments
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(filename):
""" Parse a JSON file
First remove comments and then use the json module package
Comments look like :
// ...
or
/*
...
*/
"""
with open(filename) as f:
content = ''.join(f.readlines())
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
#print content
# Return json file
return json.loads(content)
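# Illustrative example (not part of the original script): given a settings file
# containing
#
#     {
#       // listening port
#       "port": 9001, /* default */
#       "log": "/var/log/etherpad.log"
#     }
#
# the loop above strips both comment styles, so json.loads() receives plain
# JSON and parse_json() returns {"port": 9001, "log": "/var/log/etherpad.log"}.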
if __name__ == '__main__':
f = '../settings.json'
data = parse_json(f)
print "LOG="+data['log']
print "ERROR_HANDLING="+str(data['errorHandling'])
print "EMAIL_ADDRESS="+data['emailAddress']
print "TIME_BETWEEN_EMAILS="+str(data['timeBetweenEmails'])
print "NODEJS="+data['nodejs']
| apache-2.0 | -6,526,917,865,432,967,000 | 25.023256 | 69 | 0.540661 | false |
bitwiseman/js-beautify | python/cssbeautifier/css/options.py | 1 | 2360 | #
# The MIT License (MIT)
# Copyright (c) 2007-2018 Einar Lielmanis, Liam Newman, and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jsbeautifier.core.options import Options as BaseOptions
class BeautifierOptions(BaseOptions):
def __init__(self, options=None):
BaseOptions.__init__(self, options, "css")
self.selector_separator_newline = self._get_boolean(
"selector_separator_newline", True
)
self.newline_between_rules = self._get_boolean("newline_between_rules", True)
brace_style_split = self._get_selection_list(
"brace_style",
["collapse", "expand", "end-expand", "none", "preserve-inline"],
)
self.brace_style = "collapse"
for bs in brace_style_split:
if bs != "expand":
# default to collapse, as only collapse|expand is implemented for now
self.brace_style = "collapse"
else:
self.brace_style = bs
# deprecated
space_around_selector_separator = self._get_boolean(
"space_around_selector_separator"
)
# Continue to accept deprecated option
self.space_around_combinator = (
self._get_boolean("space_around_combinator")
or space_around_selector_separator
)
| mit | 4,209,632,697,914,879,500 | 39 | 85 | 0.680508 | false |
openstack/python-heatclient | heatclient/osc/v1/software_deployment.py | 1 | 13006 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Orchestration v1 Software Deployment action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib import utils
from oslo_serialization import jsonutils
from heatclient._i18n import _
from heatclient.common import deployment_utils
from heatclient.common import format_utils
from heatclient.common import utils as heat_utils
from heatclient import exc as heat_exc
class CreateDeployment(format_utils.YamlFormat):
"""Create a software deployment."""
log = logging.getLogger(__name__ + '.CreateDeployment')
def get_parser(self, prog_name):
parser = super(CreateDeployment, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<deployment-name>',
help=_('Name of the derived config associated with this '
'deployment. This is used to apply a sort order to the '
'list of configurations currently deployed to the server.')
)
parser.add_argument(
'--input-value',
metavar='<key=value>',
action='append',
help=_('Input value to set on the deployment. This can be '
'specified multiple times.')
)
parser.add_argument(
'--action',
metavar='<action>',
default='UPDATE',
help=_('Name of an action for this deployment. This can be a '
'custom action, or one of CREATE, UPDATE, DELETE, SUSPEND, '
'RESUME. Default is UPDATE')
)
parser.add_argument(
'--config',
metavar='<config>',
help=_('ID of the configuration to deploy')
)
parser.add_argument(
'--signal-transport',
metavar='<signal-transport>',
default='TEMP_URL_SIGNAL',
help=_('How the server should signal to heat with the deployment '
'output values. TEMP_URL_SIGNAL will create a Swift '
'TempURL to be signaled via HTTP PUT. ZAQAR_SIGNAL will '
'create a dedicated zaqar queue to be signaled using the '
'provided keystone credentials.NO_SIGNAL will result in '
'the resource going to the COMPLETE state without waiting '
'for any signal')
)
parser.add_argument(
'--container',
metavar='<container>',
help=_('Optional name of container to store TEMP_URL_SIGNAL '
'objects in. If not specified a container will be created '
'with a name derived from the DEPLOY_NAME')
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
type=int,
default=60,
help=_('Deployment timeout in minutes')
)
parser.add_argument(
'--server',
metavar='<server>',
required=True,
help=_('ID of the server being deployed to')
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
client = self.app.client_manager.orchestration
config = {}
if parsed_args.config:
try:
config = client.software_configs.get(parsed_args.config)
except heat_exc.HTTPNotFound:
msg = (_('Software configuration not found: %s') %
parsed_args.config)
raise exc.CommandError(msg)
derived_params = deployment_utils.build_derived_config_params(
parsed_args.action,
config,
parsed_args.name,
heat_utils.format_parameters(parsed_args.input_value, False),
parsed_args.server,
parsed_args.signal_transport,
signal_id=deployment_utils.build_signal_id(client, parsed_args)
)
derived_config = client.software_configs.create(**derived_params)
sd = client.software_deployments.create(
config_id=derived_config.id,
server_id=parsed_args.server,
action=parsed_args.action,
status='IN_PROGRESS'
)
return zip(*sorted(sd.to_dict().items()))
class DeleteDeployment(command.Command):
"""Delete software deployment(s) and correlative config(s)."""
log = logging.getLogger(__name__ + '.DeleteDeployment')
def get_parser(self, prog_name):
parser = super(DeleteDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
nargs='+',
help=_('ID of the deployment(s) to delete.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
hc = self.app.client_manager.orchestration
failure_count = 0
for deploy_id in parsed_args.deployment:
try:
sd = hc.software_deployments.get(deployment_id=deploy_id)
hc.software_deployments.delete(
deployment_id=deploy_id)
except Exception as e:
if isinstance(e, heat_exc.HTTPNotFound):
print(_('Deployment with ID %s not found') % deploy_id)
else:
print(_('Deployment with ID %s failed to delete')
% deploy_id)
failure_count += 1
continue
# just try best to delete the corresponding config
try:
config_id = getattr(sd, 'config_id')
hc.software_configs.delete(config_id=config_id)
except Exception:
print(_('Failed to delete the correlative config'
' %(config_id)s of deployment %(deploy_id)s') %
{'config_id': config_id, 'deploy_id': deploy_id})
if failure_count:
raise exc.CommandError(_('Unable to delete %(count)s of the '
'%(total)s deployments.') %
{'count': failure_count,
'total': len(parsed_args.deployment)})
class ListDeployment(command.Lister):
"""List software deployments."""
log = logging.getLogger(__name__ + '.ListDeployment')
def get_parser(self, prog_name):
parser = super(ListDeployment, self).get_parser(prog_name)
parser.add_argument(
'--server',
metavar='<server>',
help=_('ID of the server to fetch deployments for')
)
parser.add_argument(
'--long',
action='store_true',
help=_('List more fields in output')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _list_deployment(heat_client, args=parsed_args)
def _list_deployment(heat_client, args=None):
kwargs = {'server_id': args.server} if args.server else {}
columns = ['id', 'config_id', 'server_id', 'action', 'status']
if args.long:
columns.append('creation_time')
columns.append('status_reason')
deployments = heat_client.software_deployments.list(**kwargs)
return (
columns,
(utils.get_item_properties(s, columns) for s in deployments)
)
class ShowDeployment(command.ShowOne):
"""Show SoftwareDeployment Details."""
log = logging.getLogger(__name__ + ".ShowSoftwareDeployment")
def get_parser(self, prog_name):
parser = super(ShowDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
help=_('ID of the deployment')
)
parser.add_argument(
'--long',
action='store_true',
help=_('Show more fields in output')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
try:
data = heat_client.software_deployments.get(
deployment_id=parsed_args.deployment)
except heat_exc.HTTPNotFound:
raise exc.CommandError(
_('Software Deployment not found: %s')
% parsed_args.deployment)
else:
columns = [
'id',
'server_id',
'config_id',
'creation_time',
'updated_time',
'status',
'status_reason',
'input_values',
'action',
]
if parsed_args.long:
columns.append('output_values')
return columns, utils.get_item_properties(data, columns)
class ShowMetadataDeployment(command.Command):
"""Get deployment configuration metadata for the specified server."""
log = logging.getLogger(__name__ + '.ShowMetadataDeployment')
def get_parser(self, prog_name):
parser = super(ShowMetadataDeployment, self).get_parser(prog_name)
parser.add_argument(
'server',
metavar='<server>',
help=_('ID of the server to fetch deployments for')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
md = heat_client.software_deployments.metadata(
server_id=parsed_args.server)
print(jsonutils.dumps(md, indent=2))
class ShowOutputDeployment(command.Command):
"""Show a specific deployment output."""
log = logging.getLogger(__name__ + '.ShowOutputDeployment')
def get_parser(self, prog_name):
parser = super(ShowOutputDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
help=_('ID of deployment to show the output for')
)
parser.add_argument(
'output',
metavar='<output-name>',
nargs='?',
default=None,
help=_('Name of an output to display')
)
parser.add_argument(
'--all',
default=False,
action='store_true',
help=_('Display all deployment outputs')
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='Show full deployment logs in output',
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
if (not parsed_args.all and parsed_args.output is None or
parsed_args.all and parsed_args.output is not None):
raise exc.CommandError(
_('Error: either %(output)s or %(all)s argument is needed.')
% {'output': '<output-name>', 'all': '--all'})
try:
sd = heat_client.software_deployments.get(
deployment_id=parsed_args.deployment)
except heat_exc.HTTPNotFound:
raise exc.CommandError(_('Deployment not found: %s')
% parsed_args.deployment)
outputs = sd.output_values
if outputs:
if parsed_args.all:
print('output_values:\n')
for k in outputs:
format_utils.print_software_deployment_output(
data=outputs, name=k, long=parsed_args.long)
else:
if parsed_args.output not in outputs:
msg = (_('Output %(output)s does not exist in deployment'
' %(deployment)s')
% {'output': parsed_args.output,
'deployment': parsed_args.deployment})
raise exc.CommandError(msg)
else:
print('output_value:\n')
format_utils.print_software_deployment_output(
data=outputs, name=parsed_args.output)
| apache-2.0 | 8,401,926,805,075,033,000 | 35.533708 | 79 | 0.557204 | false |
staneyffer/my_blog | config.py | 1 | 1685 | # -*- coding: UTF-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_FOREIGN_KEY_CHECKS = False
SQLALCHEMY_RECORD_QUERIES = True
FLASKY_SLOW_DB_QUERY_TIME=0.5
FLASKY_ADMIN = '[email protected]'
FLASKY_POSTS_PER_PAGE = 6
UPLOADED_FILES_DEST = os.getcwd()+'/app/static/images/'
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL')
class TestingConfig(Config):
TESTING = True
    WTF_CSRF_ENABLED = False  # disable CSRF protection in the test configuration
#SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
# 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
    # tests use sqlite
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.WARNING)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
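# Typical usage sketch (added for illustration; the application factory itself
# is not part of this file):
#
#   app.config.from_object(config['development'])
#   config['development'].init_app(app)
#
# with config['default'] used when no configuration name is supplied.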
| mit | -8,295,825,654,970,580,000 | 30.264151 | 86 | 0.671092 | false |
masc3d/btrfs-sxbackup | btrfs_sxbackup/shell.py | 1 | 2492 | # Copyright (c) 2014 Marco Schindler
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
import subprocess
import logging
_logger = logging.getLogger(__name__)
def build_subprocess_args(cmd, url=None):
"""
Create subprocess arguments for shell command/args to be executed
Internally Wraps command into ssh call if url host name is not None
:param cmd: Shell command string or argument list
:param url: url of remote host
:return: Subprocess arguments
"""
# in case cmd is a regular value, convert to list
cmd = cmd if isinstance(cmd, list) else [cmd]
# wrap into bash or ssh command respectively
# depending if command is executed locally (host==None) or remotely
url_string = None
ssh_args = ['ssh', '-o', 'ServerAliveInterval=5', '-o', 'ServerAliveCountMax=3']
if url is not None and url.hostname is not None:
url_string = url.hostname
if url.username is not None:
url_string = '%s@%s' % (url.username, url.hostname)
if url.port is not None:
ssh_args += ['-p', '%s' % url.port]
ssh_args += ['%s' % url_string]
subprocess_args = ['bash', '-c'] + cmd if url_string is None else \
ssh_args + cmd
_logger.debug(subprocess_args)
return subprocess_args
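# Illustrative example (not part of the original module; host/port are made-up
# values): the same command is wrapped differently for local and remote use.
#
#   build_subprocess_args('btrfs subvolume show /')
#   # -> ['bash', '-c', 'btrfs subvolume show /']
#
#   build_subprocess_args('btrfs subvolume show /',
#                         urllib.parse.urlsplit('ssh://root@backup-host:2222'))
#   # -> ['ssh', '-o', 'ServerAliveInterval=5', '-o', 'ServerAliveCountMax=3',
#   #     '-p', '2222', 'root@backup-host', 'btrfs subvolume show /']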
def exec_check_output(cmd, url=None) -> bytes:
"""
Wrapper for subprocess.check_output
:param cmd: Command text
:param url: URL
:return: output
"""
return subprocess.check_output(build_subprocess_args(cmd, url), stderr=subprocess.STDOUT)
def exec_call(cmd, url=None) -> int:
"""
Wrapper for subprocess.call
:param cmd: Command text
:param url: URL
:return:
"""
return subprocess.call(build_subprocess_args(cmd, url), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def exists(command, url=None):
"""
Check if shell command exists
:param command: Command to verify
:param url: url of remote host
:return: True if location exists, otherwise False
"""
type_prc = subprocess.Popen(build_subprocess_args(['type ' + command], url),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
return type_prc.wait() == 0
| gpl-2.0 | 3,123,260,312,765,524,000 | 30.15 | 107 | 0.642055 | false |
Petr-By/qtpyvis | dltb/tool/adversarial.py | 1 | 1626 | """Tools for creating and defending against adversarial examples.
"""
# third party imports
import numpy as np
# Toolbox imports
from ..datasource import Datasource
from .tool import Tool
from .classifier import Classifier, ClassIdentifier
class Attacker(Tool):
# pylint: disable=too-many-ancestors
"""An attacker can create adversarial attacks ("adversarial examples")
for a given victim. Currently we assume that the victim is a
classifier.
"""
def attack(self, victim: Classifier, example: np.ndarray,
correct: ClassIdentifier, target: ClassIdentifier = None,
**kwargs) -> np.ndarray:
"""
Perform an attack against a victim.
Arguments
---------
victim: Classifier
The victim (classifier) to attack.
example: np.ndarray
The example that should be altered.
correct: ClassIdentifier
Correct class label for the example.
target: ClassIdentifier
Target label for the attack. If none is given,
an untargeted attack is done.
Result
------
adversarial_example: np.ndarray
The adversarial example created by the attack.
"""
class Defender(Tool):
# pylint: disable=too-many-ancestors
"""A :py:class:`Defender` aims at making a victim more robust
against adversarial attacks.
"""
def defend(self, victim: Classifier, attacker: Attacker = None,
datasource: Datasource = None, **kwargs) -> None:
"""Defend the victim against adversarial attacks.
"""
| mit | -8,583,027,495,194,409,000 | 27.526316 | 74 | 0.633456 | false |
Litetokens/liteblockd | armory_utxsvr.py | 1 | 3736 | #! /usr/bin/env python2
"""
server for creating unsigned armory offline transactions
"""
import sys
import logging
import argparse
import json
import time
import threading
import flask
from flask import request
import jsonrpc
from jsonrpc import dispatcher
sys.path.append("/usr/lib/armory/")
from armoryengine.ALL import *
ARMORY_UTXSVR_PORT_MAINNET = 6590
ARMORY_UTXSVR_PORT_TESTNET = 6591
app = flask.Flask(__name__)
@dispatcher.add_method
def serialize_unsigned_tx(unsigned_tx_hex, public_key_hex):
print("REQUEST(serialize_unsigned_tx) -- unsigned_tx_hex: '%s', public_key_hex: '%s'" % (
unsigned_tx_hex, public_key_hex))
try:
unsigned_tx_bin = hex_to_binary(unsigned_tx_hex)
pytx = PyTx().unserialize(unsigned_tx_bin)
utx = UnsignedTransaction(pytx=pytx, pubKeyMap=hex_to_binary(public_key_hex))
unsigned_tx_ascii = utx.serializeAscii()
except Exception, e:
raise Exception("Could not serialize transaction: %s" % e)
return unsigned_tx_ascii
@dispatcher.add_method
def convert_signed_tx_to_raw_hex(signed_tx_ascii):
"""Converts a signed tx from armory's offline format to a raw hex tx that litecoind can broadcast/use"""
print("REQUEST(convert_signed_tx_to_raw_hex) -- signed_tx_ascii:\n'%s'\n" % (signed_tx_ascii,))
try:
utx = UnsignedTransaction()
utx.unserializeAscii(signed_tx_ascii)
except Exception, e:
raise Exception("Could not decode transaction: %s" % e)
#see if the tx is signed
if not utx.evaluateSigningStatus().canBroadcast:
raise Exception("Passed transaction is not signed")
try:
pytx = utx.getSignedPyTx()
raw_tx_bin = pytx.serialize()
raw_tx_hex = binary_to_hex(raw_tx_bin)
except Exception, e:
raise Exception("Could not serialize transaction: %s" % e)
return raw_tx_hex
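# Illustrative request (not part of the original server; the hex strings are
# shortened placeholders): clients POST a JSON-RPC body to '/', e.g.
#
#   {"jsonrpc": "2.0", "id": 0, "method": "serialize_unsigned_tx",
#    "params": {"unsigned_tx_hex": "0100...", "public_key_hex": "02ab..."}}
#
# and receive the armory ASCII-serialized transaction in the "result" field.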
@app.route('/', methods=["POST",])
@app.route('/api/', methods=["POST",])
def handle_post():
request_json = flask.request.get_data().decode('utf-8')
rpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
rpc_response_json = json.dumps(rpc_response.data).encode()
response = flask.Response(rpc_response_json, 200, mimetype='application/json')
return response
class ArmoryBlockchainUpdaterThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
#loop to check for new blocks
print("**** Polling for blockchain updates ...")
while(True):
prevTop = TheBDM.getTopBlockHeight()
TheBDM.readBlkFileUpdate()
newTop = TheBDM.getTopBlockHeight()
if newTop > prevTop:
print 'New blocks: %d (top: %d)' % (newTop-prevTop, newTop)
time.sleep(1.0) # check every 1 second
if __name__ == '__main__':
print("**** Starting up ...")
parser = argparse.ArgumentParser(description='Armory offline transaction generator daemon')
parser.add_argument('--testnet', action='store_true', help='Run for testnet')
args = parser.parse_args()
ltcdir = "/home/xlt/.litecoin%s/" % ('-testnet/testnet3' if args.testnet else '')
print("**** Initializing armory ...")
#require armory to be installed, adding the configured armory path to PYTHONPATH
TheBDM.ltcdir = ltcdir
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
blockchainUpdaterThread = ArmoryBlockchainUpdaterThread()
blockchainUpdaterThread.start()
print("**** Initializing Flask (HTTP) server ...")
app.run(host="127.0.0.1", port=ARMORY_UTXSVR_PORT_MAINNET if not args.testnet else ARMORY_UTXSVR_PORT_TESTNET, threaded=True)
print("**** Ready to serve ...")
| mit | 2,905,679,054,115,964,000 | 34.923077 | 129 | 0.6697 | false |
cvandeplas/plaso | plaso/parsers/firefox_cache_test.py | 1 | 6803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Firefox cache files parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import firefox_cache as firefox_cache_formatter
from plaso.lib import errors
from plaso.lib import timelib_test
from plaso.parsers import firefox_cache
from plaso.parsers import test_lib
__author__ = 'Petter Bjelland ([email protected])'
class FirefoxCacheTest(test_lib.ParserTestCase):
"""A unit test for the FirefoxCacheParser."""
def setUp(self):
self._parser = firefox_cache.FirefoxCacheParser()
def VerifyMajorMinor(self, events):
"""Verify that valid Firefox cahce version is extracted."""
for event_object in events:
self.assertEquals(event_object.major, 1)
self.assertEquals(event_object.minor, 19)
def testParseCache_InvalidFile(self):
"""Verify that parser do not accept small, invalid files."""
test_file = self._GetTestFilePath(['firefox_cache', 'invalid_file'])
with self.assertRaises(errors.UnableToParseFile):
_ = self._ParseFile(self._parser, test_file)
def testParseCache_001(self):
"""Test Firefox 28 cache file _CACHE_001_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_001_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(574, len(event_objects))
self.assertEquals(event_objects[1].url,
'HTTP:http://start.ubuntu.com/12.04/sprite.png')
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:13:35'))
self.VerifyMajorMinor(event_objects)
expected_msg = (
u'Fetched 2 time(s) '
u'[HTTP/1.0 200 OK] GET '
u'"HTTP:http://start.ubuntu.com/12.04/sprite.png"')
expected_msg_short = (
u'[HTTP/1.0 200 OK] GET '
u'"HTTP:http://start.ubuntu.com/12.04/sprite.png"')
self._TestGetMessageStrings(event_objects[1],
expected_msg, expected_msg_short)
def testParseCache_002(self):
"""Test Firefox 28 cache file _CACHE_002_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_002_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(58, len(event_objects))
self.assertEquals(event_objects[2].url,
('HTTP:http://www.google-analytics.com/__utm.gif?utmwv=5.5.0&utms='
'1&utmn=1106893631&utmhn=www.dagbladet.no&utmcs=windows-1252&ut'
'msr=1920x1080&utmvp=1430x669&utmsc=24-bit&utmul=en-us&utmje=0&'
'utmfl=-&utmdt=Dagbladet.no%20-%20forsiden&utmhid=460894302&utm'
'r=-&utmp=%2F&utmht=1398089458997&utmac=UA-3072159-1&utmcc=__ut'
'ma%3D68537988.718312608.1398089459.1398089459.1398089459.1%3B%'
'2B__utmz%3D68537988.1398089459.1.1.utmcsr%3D(direct)%7Cutmccn'
'%3D(direct)%7Cutmcmd%3D(none)%3B&aip=1&utmu=qBQ~'))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:10:58'))
self.VerifyMajorMinor(event_objects)
def testParseCache_003(self):
"""Test Firefox 28 cache file _CACHE_003_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_003_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(4, len(event_objects))
self.assertEquals(event_objects[3].url,
'HTTP:https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js')
self.assertEquals(event_objects[3].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:11:07'))
self.VerifyMajorMinor(event_objects)
def testParseAlternativeFilename(self):
"""Test Firefox 28 cache 003 file with alternative filename."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'E8D65m01'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(4, len(event_objects))
def testParseLegacyCache_001(self):
"""Test Firefox 3 cache file _CACHE_001_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_001_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(25, len(event_objects))
self.assertEquals(event_objects[0].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:15:03'))
expected_msg = (
u'Fetched 1 time(s) '
u'[HTTP/1.1 200 OK] GET '
u'"HTTP:http://start.mozilla.org/en-US/"')
expected_msg_short = (
u'[HTTP/1.1 200 OK] GET '
u'"HTTP:http://start.mozilla.org/en-US/"')
self._TestGetMessageStrings(event_objects[0],
expected_msg, expected_msg_short)
def testParseLegacyCache_002(self):
"""Test Firefox 3 cache file _CACHE_002_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_002_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(3, len(event_objects))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:25:55'))
def testParseLegacyCache_003(self):
"""Test Firefox 3 cache file _CACHE_003_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_003_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(2, len(event_objects))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:15:07'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,736,644,465,021,609,000 | 35.772973 | 80 | 0.694694 | false |
Pikecillo/genna | external/PyXML-0.8.4/xml/xpath/ParsedAbbreviatedRelativeLocationPath.py | 1 | 2141 | ########################################################################
#
# File Name: ParsedAbbreviatedRelativeLocationPath.py
#
#
"""
A parsed token that represents an abbreviated relative location path.
WWW: http://4suite.org/XPATH e-mail: [email protected]
Copyright (c) 2000-2001 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.org/COPYRIGHT for license and copyright information
"""
from xml.xpath import ParsedNodeTest
from xml.xpath import ParsedPredicateList
from xml.xpath import ParsedAxisSpecifier
from xml.xpath import ParsedStep
import Set
class ParsedAbbreviatedRelativeLocationPath:
def __init__(self,left,right):
"""
left can be a step or a relative location path
right is only a step
"""
self._left = left
self._right = right
pnt = ParsedNodeTest.ParsedNodeTest('node','')
ppl = ParsedPredicateList.ParsedPredicateList([])
pas = ParsedAxisSpecifier.ParsedAxisSpecifier('descendant-or-self')
self._middle = ParsedStep.ParsedStep(pas, pnt, ppl)
def evaluate(self, context):
res = []
rt = self._left.select(context)
l = len(rt)
origState = context.copyNodePosSize()
for ctr in range(l):
context.setNodePosSize((rt[ctr],ctr+1,l))
subRt = self._middle.select(context)
res = Set.Union(res,subRt)
rt = res
res = []
l = len(rt)
for ctr in range(l):
context.setNodePosSize((rt[ctr],ctr+1,l))
subRt = self._right.select(context)
res = Set.Union(res,subRt)
context.setNodePosSize(origState)
return res
select = evaluate
def pprint(self, indent=''):
print indent + str(self)
self._left.pprint(indent + ' ')
self._middle.pprint(indent + ' ')
self._right.pprint(indent + ' ')
def __str__(self):
return '<AbbreviatedRelativeLocationPath at %x: %s>' % (
id(self),
repr(self),
)
def __repr__(self):
return repr(self._left) + '//' + repr(self._right)
| gpl-2.0 | 2,580,934,514,517,384,700 | 29.15493 | 75 | 0.586175 | false |
ntucllab/libact | libact/query_strategies/multiclass/tests/test_hierarchical_sampling.py | 1 | 2717 | """ HierarchicalSampling test
"""
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from sklearn import datasets
from sklearn.utils import shuffle
from libact.base.dataset import Dataset
from libact.models import SVM
from libact.query_strategies import UncertaintySampling
from libact.query_strategies.multiclass import HierarchicalSampling as HS
from ...tests.utils import run_qs
class HierarchicalSamplingTestCase(unittest.TestCase):
def setUp(self):
iris = datasets.load_iris()
X, y = shuffle(iris.data, iris.target, random_state=1126)
self.X = X.tolist()
self.y = y.tolist()
self.classes = list(set(self.y))
def test_hs_random_selecting(self):
ds = Dataset(self.X, self.y[:10] + [None] * (len(self.y) - 10))
qs = HS(ds, self.classes, active_selecting=False, random_state=1126)
qseq = run_qs(ds, qs, self.y, len(self.y)-10)
assert_array_equal(
np.concatenate([qseq[:10], qseq[-10:]]),
np.array([48, 143, 13, 142, 88, 130, 29, 87, 36, 28,
58, 137, 49, 105, 76, 71, 63, 47, 64, 55])
)
def test_hs_active_selecting(self):
ds = Dataset(self.X, self.y[:10] + [None] * (len(self.y) - 10))
qs = HS(ds, self.classes, active_selecting=True, random_state=1126)
qseq = run_qs(ds, qs, self.y, len(self.y)-10)
assert_array_equal(
np.concatenate([qseq[:10], qseq[-10:]]),
np.array([48, 143, 13, 64, 101, 108, 51, 87, 36, 28,
43, 118, 47, 25, 81, 82, 95, 40, 67, 120])
)
def test_hs_subsampling(self):
ds = Dataset(self.X, self.y[:10] + [None] * (len(self.y) - 10))
sub_qs = UncertaintySampling(ds,
model=SVM(gamma='auto', decision_function_shape='ovr'))
qs = HS(ds, self.classes, subsample_qs=sub_qs, random_state=1126)
qseq = run_qs(ds, qs, self.y, len(self.y)-10)
assert_array_equal(
np.concatenate([qseq[:10], qseq[-10:]]),
np.array([120, 50, 33, 28, 78, 133, 52, 124, 102, 109,
81, 108, 12, 10, 89, 114, 92, 126, 48, 25])
)
def test_hs_report_all_label(self):
ds = Dataset(self.X, self.y)
qs = HS(ds, self.classes, random_state=1126)
y_report = qs.report_all_label()
assert_array_equal(y_report, self.y)
def test_hs_report_entry_label(self):
ds = Dataset(self.X, self.y)
qs = HS(ds, self.classes, random_state=1126)
y_report = []
for i in range(len(self.y)):
y_report.append(qs.report_entry_label(i))
assert_array_equal(y_report, self.y)
| bsd-2-clause | 3,427,073,491,759,392,300 | 37.814286 | 76 | 0.580788 | false |
b1r3k/recruitment-challanges | data-hacking/src/task1/classifier/channel.py | 1 | 1678 | '''
* Author: Lukasz Jachym
* Date: 9/14/13
* Time: 5:40 PM
*
* This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
* To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
'''
from collections import namedtuple
from branding import is_branded
DIRECT = 1
PPC = 2
SEO = 3
Channel = namedtuple('Channel', ['type', 'keywords', 'branded'])
def get_ppc_keywords(ppc_str):
"""
:param ppc_str:
:return: keywords
"""
try:
keywords = ppc_str.split('_')[2]
    except IndexError:
raise ValueError
return keywords
def get_seo_keywords(seo_query):
"""
    :param seo_query:
:return: keywords
"""
try:
keywords = seo_query.split(' :: ')[2]
    except IndexError:
raise ValueError
return keywords
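
# Illustrative only -- the exact source-string layout below is an assumption
# inferred from the split() calls above (hypothetical values):
#   get_ppc_keywords('ppc_google_red shoes')        -> 'red shoes'
#   get_seo_keywords('seo :: google :: red shoes')  -> 'red shoes'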
def parse_source(source_str):
if (source_str[0:3] == 'ppc'):
channel_type = PPC
keywords = get_ppc_keywords(source_str)
channel_keywords = keywords
channel_branded = is_branded(keywords)
channel = Channel(channel_type, channel_keywords, channel_branded)
else:
if source_str[0:3] == 'seo':
channel_type = SEO
keywords = get_seo_keywords(source_str)
channel_keywords = keywords
channel_branded = is_branded(keywords)
channel = Channel(channel_type, channel_keywords, channel_branded)
else:
channel_type = DIRECT
channel_keywords = None
channel_branded = False
channel = Channel(channel_type, channel_keywords, channel_branded)
return channel | mit | -6,849,401,520,349,252,000 | 23.691176 | 110 | 0.61621 | false |
leriomaggio/pycon_site | p3/templatetags/p3.py | 1 | 21562 | # -*- coding: UTF-8 -*-
from __future__ import absolute_import
import mimetypes
import os
import os.path
import re
import random
import sys
import urllib
from collections import defaultdict
from datetime import datetime
from itertools import groupby
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from conference import dataaccess as cdataaccess
from conference import models as ConferenceModels
from conference.settings import STUFF_DIR, STUFF_URL
from assopy import models as amodels
from p3 import dataaccess
from p3 import forms as p3forms
from p3 import models
from fancy_tag import fancy_tag
mimetypes.init()
register = template.Library()
@register.inclusion_tag('p3/box_pycon_italia.html')
def box_pycon_italia():
return {}
@register.inclusion_tag('p3/box_newsletter.html', takes_context=True)
def box_newsletter(context):
return context
@register.inclusion_tag('p3/box_cal.html', takes_context = True)
def box_cal(context, limit=None):
deadlines = ConferenceModels.Deadline.objects.valid_news()
if limit:
deadlines = deadlines[:int(limit)]
return {
'deadlines': [ (d, d.content(context['LANGUAGE_CODE'])) for d in deadlines ]
}
@register.inclusion_tag('p3/render_cal.html', takes_context=True)
def render_cal(context):
return context
@register.inclusion_tag('p3/box_download.html', takes_context = True)
def box_download(context, fname, label=None):
if '..' in fname:
raise template.TemplateSyntaxError("file path cannot contains ..")
if fname.startswith('/'):
raise template.TemplateSyntaxError("file path cannot starts with /")
if label is None:
label = os.path.basename(fname)
try:
fpath = os.path.join(settings.STUFF_DIR, fname)
stat = os.stat(fpath)
except (AttributeError, OSError), e:
fsize = ftype = None
else:
fsize = stat.st_size
ftype = mimetypes.guess_type(fpath)[0]
return {
'url': context['STUFF_URL'] + fname,
'label': label,
'fsize': fsize,
'ftype': ftype,
}
@register.inclusion_tag('p3/box_didyouknow.html', takes_context = True)
def box_didyouknow(context):
try:
d = ConferenceModels.DidYouKnow.objects.filter(visible = True).order_by('?')[0]
except IndexError:
d = None
return {
'd': d,
'LANGUAGE_CODE': context.get('LANGUAGE_CODE'),
}
@register.inclusion_tag('p3/box_googlemaps.html', takes_context = True)
def box_googlemaps(context, what='', zoom=13):
what = ','.join([ "'%s'" % w for w in what.split(',') ])
return {
'rand': random.randint(0, sys.maxint - 1),
'what': what,
'zoom': zoom
}
@register.inclusion_tag('p3/box_talks_conference.html', takes_context = True)
def box_talks_conference(context, talks):
"""
    Show past talks grouped by conference.
"""
conf = defaultdict(list)
for t in talks:
conf[t.conference].append(t)
talks = []
for c in reversed(sorted(conf.keys())):
talks.append((c, conf[c]))
return { 'talks': talks }
@register.inclusion_tag('p3/box_latest_tweets.html', takes_context=True)
def box_latest_tweets(context):
ctx = Context(context)
ctx.update({
'screen_name': settings.P3_TWITTER_USER,
})
return ctx
@register.filter
def render_time(tweet, args=None):
time = tweet["timestamp"]
    time = datetime.fromtimestamp(time)
return time.strftime("%d-%m-%y @ %H:%M")
@register.filter
def check_map(page):
"""
    Check whether the given page requires a map.
"""
if page:
return '{% render_map' in page.expose_content()
return False
@register.inclusion_tag('p3/render_map.html', takes_context=True)
def render_map(context):
return {}
@register.inclusion_tag('p3/fragments/render_ticket.html', takes_context=True)
def render_ticket(context, ticket):
from p3 import forms
user = context['request'].user
if ticket.fare.ticket_type == 'conference':
try:
inst = ticket.p3_conference
except:
inst = None
form = forms.FormTicket(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
single_day=ticket.fare.code[2] == 'D',
)
if inst and inst.assigned_to:
blocked = inst.assigned_to != user.email
else:
blocked = False
elif ticket.fare.code in ('SIM01',):
try:
inst = ticket.p3_conference_sim
except:
inst = None
form = forms.FormTicketSIM(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
)
blocked = False
elif ticket.fare.code.startswith('H'):
# TicketRoom instances must exist, they're created by a listener
inst = ticket.p3_conference_room
form = forms.FormTicketRoom(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
)
blocked = False
else:
form = forms.FormTicketPartner(instance=ticket, prefix='t%d' % (ticket.id,))
blocked = False
ctx = Context(context)
ctx.update({
'ticket': ticket,
'form': form,
'user': user,
'blocked': blocked,
})
return ctx
@register.assignment_tag(takes_context=True)
def fares_available(context, fare_type, sort=None):
"""
    Return the list of fares currently active for the specified type.
"""
assert fare_type in ('all', 'conference', 'goodies', 'partner', 'hotel-room', 'hotel-room-sharing', 'other')
if not settings.P3_FARES_ENABLED(context['user']):
return []
fares_list = filter(lambda f: f['valid'], cdataaccess.fares(settings.CONFERENCE_CONFERENCE))
if fare_type == 'conference':
fares = [ f for f in fares_list if f['code'][0] == 'T' and f['ticket_type'] == 'conference' ]
elif fare_type == 'hotel-room-sharing':
fares = [ f for f in fares_list if f['code'].startswith('HB') ]
elif fare_type == 'hotel-room':
fares = [ f for f in fares_list if f['code'].startswith('HR') ]
elif fare_type == 'other':
fares = [ f for f in fares_list if f['ticket_type'] in ('other', 'event') and f['code'][0] != 'H' ]
elif fare_type == 'partner':
fares = [ f for f in fares_list if f['ticket_type'] in 'partner' ]
elif fare_type == 'all':
fares = fares_list
if sort == "price":
fares.sort(key=lambda x: x['price'])
return fares
@fancy_tag(register, takes_context=True)
def render_cart_rows(context, fare_type, form):
assert fare_type in ('conference', 'goodies', 'partner', 'hotel-room', 'hotel-room-sharing', 'other')
ctx = Context(context)
request = ctx['request']
try:
company = request.user.assopy_user.account_type == 'c'
except AttributeError:
# anonymous user or without an assopy profile (impossible!)
company = False
ctx.update({
'form': form,
'company': company,
})
fares_list = filter(lambda f: f['valid'], cdataaccess.fares(settings.CONFERENCE_CONFERENCE))
if fare_type == 'conference':
tpl = 'p3/fragments/render_cart_conference_ticket_row.html'
# rendering "conference" tickets is a bit complex; each row in
        # the cart corresponds to multiple "fare" (student, private, company)
#
# The prices must be sorted on time + ticket type + owner
# early
# full [Student, Private, Company]
# lite (standard) [Student, Private, Company]
# daily [Student, Private, Company]
# regular (late)
# ...
# on desk
# ...
#
# The correct time ordering is guaranteed implicitly by
# excluding expired fares (it's not permitted to have overlaps
# of validity periods).
fares = dict((f['code'][2:], f) for f in fares_list if f['code'][0] == 'T')
rows = []
for t in ('S', 'L', 'D'):
# To simplify the template fares are packed in triplets:
# student, private, company.
#
            # Each row is a tuple with three elements:
# 1. Fare
# 2. FormField
# 3. Boolean flag telling if the price can be applied to the user
row = []
for k in ('S', 'P', 'C'):
try:
f = fares[t+k]
except KeyError:
row.append((None, None, None))
else:
# The price is valid if the time test is passed and if the
# account type is compatible
valid = not (company ^ (f['code'][-1] == 'C'))
row.append((f, form.__getitem__(f['code']), valid))
rows.append(row)
ctx['rows'] = rows
elif fare_type == 'hotel-room-sharing':
tpl = 'p3/fragments/render_cart_hotel_ticket_row.html'
ctx['field'] = form['bed_reservations']
ctx['field'].field.widget._errors = ctx['field'].errors
elif fare_type == 'hotel-room':
tpl = 'p3/fragments/render_cart_hotel_ticket_row.html'
ctx['field'] = form['room_reservations']
ctx['field'].field.widget._errors = ctx['field'].errors
elif fare_type == 'other':
tpl = 'p3/fragments/render_cart_og_ticket_row.html'
fares = defaultdict(dict)
order = ('p', 'c')
columns = set()
for f in fares_list:
if f['ticket_type'] in ('other', 'event') and f['code'][0] != 'H':
columns.add(f['recipient_type'])
fares[f['name']][f['recipient_type']] = f
ctx['fares'] = fares.values()
ctx['recipient_types'] = sorted(columns, key=lambda v: order.index(v))
elif fare_type == 'partner':
tpl = 'p3/fragments/render_cart_partner_ticket_row.html'
ctx['fares'] = [ f for f in fares_list if f['ticket_type'] in 'partner' ]
return render_to_string(tpl, ctx)
@register.inclusion_tag('p3/box_image_gallery.html', takes_context=True)
def box_image_gallery(context):
images = []
for f in os.listdir(STUFF_DIR):
images.append('%s%s' % (STUFF_URL, f))
context.update({
'images': images,
})
return context
@fancy_tag(register, takes_context=True)
def render_partner_program(context, conference=None):
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
from conference import dataaccess
from conference.templatetags.conference import fare_blob
fares = [ x for x in dataaccess.fares(conference) if x['ticket_type'] == 'partner' and x['valid'] ]
fares.sort(key=lambda x: (slugify(x['name']), fare_blob(x, 'date')))
ctx = Context(context)
ctx.update({
'fares': [ (k, list(v)) for k, v in groupby(fares, key=lambda x: slugify(x['name'])) ],
})
return render_to_string('p3/fragments/render_partner_program.html', ctx)
@fancy_tag(register, takes_context=True)
def event_partner_program(context, event):
fare_id = re.search(r'f(\d+)', event.track)
if fare_id is None:
return ''
from conference.templatetags.conference import _request_cache
c = _request_cache(context['request'], 'fares')
if not c:
for f in ConferenceModels.Fare.objects.all():
c[str(f.id)] = f
fare = c[fare_id.group(1)]
return mark_safe('<a href="/partner-program/#%s">%s</a>' % (slugify(fare.name), event.custom,))
@register.filter
def schedule_to_be_splitted(s):
tracks = ConferenceModels.Track.objects.by_schedule(s)
s = []
for t in tracks:
if t.track.startswith('partner') or t.track.startswith('sprint'):
s.append(t)
return len(tracks) != len(s)
@register.filter
def tickets_url(user):
"""
    Return the most direct URL for sending the user to their ticket page.
"""
if user.assopy_user.token:
u = reverse('p3-user', kwargs={'token': user.assopy_user.token})
else:
u = reverse('p3-tickets')
return settings.DEFAULT_URL_PREFIX + u
@register.filter
def ticket_user(ticket):
try:
p3c = ticket.p3_conference
except models.TicketConference.DoesNotExist:
p3c = None
if p3c and p3c.assigned_to:
from assopy.models import User
return User.objects.get(user__email=p3c.assigned_to)
else:
return ticket.orderitem.order.user
@register.filter
def com_com_registration(user):
url = 'https://hotspot.com-com.it/signup/?'
name = user.name()
try:
fn, ln = name.split(' ', 1)
except ValueError:
fn = name
ln = ''
params = {
'autofill': 'yes',
'firstname': fn,
'lastname': ln,
'email': user.user.email,
}
if user.country:
params['nationality'] = user.country.pk
if user.phone and user.phone.startswith('+39'):
params['ita_mobile'] = user.phone
params['username'] = name.lower().replace(' ', '').replace('.', '')[:12]
for k, v in params.items():
params[k] = v.encode('utf-8')
return url + urllib.urlencode(params)
@register.inclusion_tag('p3/box_next_events.html', takes_context=True)
def box_next_events(context):
from conference.templatetags import conference as ctags
t = datetime.now()
try:
sch = ConferenceModels.Schedule.objects.get(date=t.date())
except ConferenceModels.Schedule.DoesNotExist:
current = next = {}
else:
current = ctags.current_events(context, t)
next = ctags.next_events(context, t)
tracks = dict(
(x, None)
for x in ConferenceModels.Track.objects.by_schedule(sch)
if x.outdoor == False
)
for track in tracks:
c = current.get(track)
if c:
if hasattr(c, 'evt'):
c = c.evt.ref
else:
c = c.ref
n = next.get(track)
if n:
n_time = n.time
if hasattr(n, 'evt'):
n = n.evt.ref
else:
n = n.ref
else:
n_time = None
tracks[track] = {
'current': c,
'next': (n, n_time),
}
events = sorted(tracks.items(), key=lambda x: x[0].order)
ctx = Context(context)
ctx.update({
'events': events,
})
return ctx
@fancy_tag(register)
def p3_profile_data(uid):
return dataaccess.profile_data(uid)
@fancy_tag(register)
def p3_profiles_data(uids):
return dataaccess.profiles_data(uids)
@fancy_tag(register)
def p3_talk_data(tid):
return dataaccess.talk_data(tid)
@fancy_tag(register, takes_context=True)
def get_form(context, name, bound="auto", bound_field=None):
if '.' in name:
from conference.utils import dotted_import
fc = dotted_import(name)
else:
fc = getattr(p3forms, name)
request = context['request']
if bound:
if bound == 'auto':
bound = request.method
if bound == 'GET':
data = request.GET
elif bound == 'POST':
data = request.POST
else:
from django.db.models import Model
if isinstance(bound, Model):
data = {}
for name in fc.base_fields:
data[name] = getattr(bound, name)
else:
data = bound
if bound_field and bound_field not in data:
data = None
else:
data = None
form = fc(data=data)
if data:
form.is_valid()
return form
@fancy_tag(register)
def pending_email_change(user):
try:
t = amodels.Token.objects.get(ctype='e', user=user)
except amodels.Token.DoesNotExist:
return None
return t.payload
@fancy_tag(register)
def admin_ticketroom_overall_status():
status = models.TicketRoom.objects.overall_status()
labels = dict(models.HOTELROOM_ROOM_TYPE)
days = sorted(status.keys())
rooms = {}
for day in days:
dst = status[day]
for room_type, dst in status[day].items():
try:
r = rooms[room_type]
except KeyError:
r = rooms[room_type] = {
'type': room_type,
'label': labels.get(room_type, room_type),
'days': [],
}
r['days'].append(dst)
return {
'days': days,
'rooms': rooms.values(),
}
@fancy_tag(register)
def warmup_conference_cache(conference=None):
"""
"""
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
qs = ConferenceModels.TalkSpeaker.objects\
.filter(talk__conference=conference)\
.values_list('talk', 'speaker')
talks = set()
speakers = set()
for row in qs:
talks.add(row[0])
speakers.add(row[1])
return {
'speakers': dict([ (x['id'], x) for x in dataaccess.profiles_data(speakers) ]),
'talks': dict([ (x['id'], x) for x in cdataaccess.talks_data(talks) ]),
}
@register.filter
def frozen_reason(ticket):
if not ticket.frozen:
return ''
if amodels.RefundOrderItem.objects.filter(orderitem=ticket.orderitem).exists():
return 'refund pending'
else:
return ''
@fancy_tag(register, takes_context=True)
def all_user_tickets(context, uid=None, conference=None, status="complete", fare_type="conference"):
if uid is None:
uid = context['request'].user.id
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
tickets = dataaccess.all_user_tickets(uid, conference)
if status == 'complete':
tickets = filter(lambda x: x[3], tickets)
elif status == 'incomplete':
tickets = filter(lambda x: not x[3], tickets)
if fare_type != "all":
tickets = filter(lambda x: x[1] == fare_type, tickets)
return tickets
@fancy_tag(register)
def p3_tags():
return dataaccess.tags()
@fancy_tag(register)
def p3_tags_for_talks():
conference = settings.CONFERENCE_CONFERENCE
return dataaccess.tags_for_conference_talks(conference=conference)
@fancy_tag(register, takes_context=True)
def render_profile_box(context, profile, conference=None, user_message="auto"):
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
if isinstance(profile, int):
profile = dataaccess.profile_data(profile)
ctx = Context(context)
ctx.update({
'profile': profile,
'conference': conference,
'user_message': user_message if user_message in ('auto', 'always', 'none') else 'auto',
})
return render_to_string('p3/fragments/render_profile_box.html', ctx)
@register.inclusion_tag('p3/fragments/archive.html', takes_context=True)
def render_archive(context, conference):
ctx = Context(context)
def match(e, exclude_tags=set(('partner0', 'partner1', 'sprint1', 'sprint2', 'sprint3'))):
if e['tags'] & exclude_tags:
return False
if not e['talk']:
return False
return True
events = { x['id']:x for x in filter(match, cdataaccess.events(conf=conference)) }
talks = {}
for e in events.values():
t = e['talk']
if t['id'] in talks:
continue
t['dates'] = sorted([ (events[x]['time'], events[x]['talk']['video_url']) for x in t['events_id'] ])
talks[t['id']] = t
ctx.update({
'conference': conference,
'talks': sorted(talks.values(), key=lambda x: x['title']),
})
return ctx
@register.filter
def timetable_remove_first(timetable, tag):
if not tag:
return timetable
start = None
for time, events in timetable.iterOnTimes():
stop = False
for e in events:
if tag not in e['tags']:
stop = True
break
start = time.time()
if stop:
break
return timetable.slice(start=start)
@register.assignment_tag
def p3_voting_data(conference):
from conference.templatetags.conference import voting_data
from conference.utils import voting_results
groups = defaultdict(list)
results = voting_results()
if results is not None:
talk_ids = [ x[0] for x in results ]
sub_community = dict(
models.P3Talk.objects\
.filter(talk__conference=conference)\
.values_list('talk', 'sub_community'))
for tid, type, language in results:
community = sub_community.get(tid, '')
groups[(type, community)].append(tid)
results = voting_data(conference)
results['groups'] = dict(groups)
return results
@fancy_tag(register, takes_context=True)
def get_latest_conf_deadline(context, limit=None, not_expired=True):
try:
conf = ConferenceModels.Conference.objects.latest('code')
return [conf.name, conf.code, conf.conference_start, conf.conference_end, datetime.today().date()]
except IndexError:
return []
| bsd-2-clause | -4,565,564,612,632,686,600 | 31.471386 | 112 | 0.596215 | false |
Semprini/cbe-retail | retail/store/models.py | 1 | 2047 | from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from cbe.location.models import Location, GeographicArea
from cbe.party.models import Organisation, PartyRole
from cbe.physical_object.models import Structure
class Store(PartyRole):
enterprise_id = models.IntegerField(unique=True)
code = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
identifiers = GenericRelation('human_resources.Identification', object_id_field="party_role_object_id", content_type_field='party_role_content_type', related_query_name='store')
store_type = models.CharField( max_length=100, null=True, blank=True, choices=(('mitre 10','mitre 10'),('mega','mega'),('hammer','hammer'),('trade','trade')) )
store_class = models.CharField( max_length=100, null=True, blank=True, choices=(('mega-1','mega-1'),('mega-2','mega-2'),('mega-3','mega-3'),('mega-r','mega-r'),('mitre10-small','mitre10-small'),('mitre10-medium','mitre10-medium'),('mitre10-large','mitre10-large'),('trade','trade')) )
opening_date = models.DateField(blank=True, null=True)
location = models.ForeignKey(Location, on_delete=models.CASCADE, blank=True, null=True)
trade_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_trade_areas', blank=True, null=True)
retail_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_retail_areas', blank=True, null=True)
national_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_national_areas', blank=True, null=True)
buildings = models.ManyToManyField(Structure, related_name='store')
class Meta:
ordering = ['id']
def __str__(self):
return "%s" %(self.name )
def save(self, *args, **kwargs):
if self.name == "":
self.name = "Store"
super(Store, self).save(*args, **kwargs) | apache-2.0 | -8,812,676,983,199,888,000 | 52.894737 | 288 | 0.682462 | false |
ytec/instaforex-web | app/open/migrations/0001_initial.py | 1 | 1404 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='form',
fields=[
('cmsplugin_ptr', models.OneToOneField(primary_key=True, serialize=False, auto_created=True, related_name='open_form', parent_link=True, to='cms.CMSPlugin')),
('name', models.CharField(max_length=25, default='Demo')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='OpenAccountAnonymous',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.CreateModel(
name='OpenAccountDemo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.CreateModel(
name='OpenAccountReal',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
]
| gpl-3.0 | 2,533,682,071,263,246,300 | 31.651163 | 174 | 0.533476 | false |
AlexaProjects/Alexa2 | ALEXA-IDE/core/ninja_tests/gui/test_status_bar.py | 1 | 1869 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
# Import this before Qt to set the correct API
import ninja_ide # lint:ok
from PyQt4.QtGui import QApplication
from ninja_ide.gui import status_bar
from ninja_ide.gui.main_panel import main_container
from ninja_tests import BaseTest
from ninja_tests import gui
class StatusBarTestCase(BaseTest):
def setUp(self):
super(StatusBarTestCase, self).setUp()
self.app = QApplication(sys.argv)
self.parent = gui.FakeParent()
self.patch(main_container.editor, 'Editor', gui.FakeEditor)
self.main = main_container.MainContainer(None)
self.main._parent = self.parent
self.status = status_bar.StatusBar()
def test_show(self):
editor = self.main.add_editor()
editor.setPlainText('ninja test')
editor.selectAll()
data = []
def fake_find_matches(*arg):
data.append(arg)
self.patch(self.status._searchWidget, 'find_matches',
fake_find_matches)
self.status.show()
expected = [(editor, True)]
self.assertEqual(data, expected)
| gpl-3.0 | 4,837,241,543,870,073,000 | 31.224138 | 70 | 0.689674 | false |
josefschneider/switchboard | switchboard/__main__.py | 1 | 1547 | #!/usr/bin/env python
'''The main entry point. Invoke as `switchboard' or `python -m switchboard'.
'''
import argparse
import sys
from switchboard.config import SwitchboardConfig
from switchboard.ws_ctrl_server import WSCtrlServer
from switchboard.engine import SwitchboardEngine
from switchboard.app_manager import AppManager
from switchboard.cli import SwitchboardCli
from switchboard.log import init_logging
def main():
try:
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', '--config', help='specify .json config file')
args = arg_parser.parse_args()
swb_config = SwitchboardConfig()
if args.config:
swb_config.load_config(args.config)
init_logging(swb_config)
ws_ctrl_server = WSCtrlServer(swb_config)
swb = SwitchboardEngine(swb_config, ws_ctrl_server)
with AppManager(swb_config, swb) as app_manager:
cli = SwitchboardCli(swb, swb_config, app_manager)
ws_ctrl_server.init_config()
if args.config:
swb.init_clients()
# Only once the clients have been setup can we initialise the app manager
app_manager.init_config()
# And the modules go right at the end once we know all the devices
swb.init_modules()
ws_ctrl_server.set_dependencies(swb, app_manager)
swb.start()
sys.exit(cli.run())
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| mit | 4,278,887,027,851,164,000 | 27.648148 | 89 | 0.638655 | false |
slideclick/toys | Simple/oMachine2.3.1-1.py | 1 | 3469 | # -* - coding: UTF-8 -* -
## Virtual Machine 2.3.1
## 小步语义 -- 表达式
## python 3.4
class Number(object):
""" 数值符号类
"""
def __init__(self, value):
self.value = value
def reducible(self):
return False
def to_s(self):
return str(self.value)
class Boolean(object):
""" 布尔值符号类型
"""
def __init__(self, value):
self.value = value
def reducible(self):
return False
def to_s(self):
return str(self.value)
class Add(object):
""" 加法符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return Add(self.left.reduce(environment), self.right)
elif self.right.reducible():
return Add(self.left, self.right.reduce(environment))
else:
return Number(self.left.value + self.right.value)
def to_s(self):
return self.left.to_s() + ' + ' + self.right.to_s()
class Multiply(object):
""" 乘法符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return Multiply(self.left.reduce(environment), self.right)
elif self.right.reducible():
return Multiply(self.left, self.right.reduce(environment))
else:
return Number(self.left.value * self.right.value)
def to_s(self):
return self.left.to_s() + ' * ' + self.right.to_s()
class LessThan(object):
""" 小于符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return LessThan(self.left.reduce(environment), self.right)
elif self.right.reducible():
return LessThan(self.left, self.right.reduce(environment))
else:
return Boolean(self.left.value < self.right.value)
def to_s(self):
return self.left.to_s() + ' < ' + self.right.to_s()
class Variable(object):
""" 变量符号类
"""
def __init__(self, name):
self.name = name
def reducible(self):
return True
def reduce(self, environment):
return environment[self.name]
def to_s(self):
return str(self.name)
class Machine(object):
""" 虚拟机
"""
def __init__(self, expression, environment):
self.expression = expression
self.environment = environment
def step(self):
self.expression = self.expression.reduce(self.environment)
def run(self):
while self.expression.reducible():
print(self.expression.to_s())
self.step()
print(self.expression.value)
## test
## Run expressions in the virtual machine
##1 * 2 + 3 * 4 = 14
Machine(Add(Multiply(Number(1), Number(2)),
Multiply(Number(3), Number(4))),
{}
).run()
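# Expected output of the reduction above -- a sketch of the small-step trace:
#   1 * 2 + 3 * 4
#   2 + 3 * 4
#   2 + 12
#   14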
print('')
##5 < 2 + 2
Machine(
LessThan(Number(5), Add(Number(2), Number(2))),
{}
).run()
print('')
##x = 3; y = 4; x + y = 7
Machine(
Add(Variable('x'), Variable('y')),
{'x':Number(3), 'y':Number(4)}
).run()
| gpl-2.0 | -6,269,499,450,322,178,000 | 20.433121 | 70 | 0.55156 | false |
jszopi/repESP | repESP/respin_format.py | 1 | 19161 | """Parsing and writing ``resp`` program instruction file format ("respin")"""
from dataclasses import dataclass, asdict
from fortranformat import FortranRecordWriter as FW
from itertools import zip_longest
import io
import math
import re
import sys
from typing import Dict, List, Optional, TextIO, Tuple, Type, TypeVar, Union
from repESP.exceptions import InputFormatError
from repESP.equivalence import Equivalence
from repESP.types import Atom, Molecule
from repESP._util import get_line, zip_exact
IvaryT = TypeVar('IvaryT', bound='Respin.Ivary')
@dataclass
class Respin:
"""Dataclass describing the ``resp`` program instructions
Note that the functionality is currently limited to a single molecule and
a single structure.
Parameters
----------
title : str
The title of the calculation to be performed.
cntrl : Cntrl
Dataclass representing the "cntrl" section of the input.
subtitle : str
Subtitle describing the considered molecular structure.
charge : int
The total charge of the molecule.
molecule : Molecule[Atom]
        The molecule whose charges are being fitted. Only atom identities are required.
ivary : Ivary
The "ivary" values for fitting the considered structure. These determine
how the charge on each atom is allowed to vary during the fitting.
Attributes
----------
title
See initialization parameter
cntrl
See initialization parameter
subtitle
See initialization parameter
charge
See initialization parameter
molecule
See initialization parameter
ivary
See initialization parameter
"""
_ValueType = TypeVar("_ValueType", int, float, str)
@staticmethod
def _check_value(
attr_name: str,
attr_value: _ValueType,
allowed_values: List[_ValueType]
) -> None:
if attr_value not in allowed_values:
raise ValueError(
f"Invalid value for `{attr_name}`: {attr_value}."
)
@dataclass
class Cntrl:
"""Dataclass describing the "cntrl" section of a "respin" file
See ``resp`` program documentation for more details.
Parameters
----------
inopt : int, optional
If equal to 1, ``resp`` will cycle through different "qwt" values
from the file specified with the ``-w`` option. Defaults to 0.
ioutopt : int, optional
If equal to 1, ``resp`` will write restart info of new ESP field
to the file specified with the ``-s`` option. Defaults to 0.
iqopt : int, optional
Controls setting initial charges. If equal to 1 (default), all
initial charges will be set to zero. If equal to 2, initial charges
are read from the file specified with the ``-q`` option. If equal
to 3, charges are read as with the previous option and will
additionally be averaged according to "ivary" values (normally not
used).
ihfree : int, optional
If equal to 0, the charge magnitude restraint is applied to all
charges. If equal to 1 (default), the restraint does not apply to
hydrogen atoms.
irstrnt : int, optional
Controls the type of charge magnitude restraint. If equal to 0,
harmonic restraints are used (old-style). If equal to 1 (default),
hyperbolic restraints are used. If equal to 2, no charge fitting
is carried out and only analysis of input charges is performed.
qwt : float, optional
The weight of the charge magnitude restraint to be used during
the fitting. Defaults to 0.0 (no charge magnitude restraint).
.. warning::
The default used here is different from the default used by ``resp``.
                The ``resp`` documentation specifies that it uses the
Amber force field values by default. However, it is not clear how
it can determine the fitting stage. Thus, to remove the ambiguity,
this dataclass assumes a weight of zero by default.
.. note::
Amber force fields use values of 0.0005 and 0.001 for
stages 1 and 2, respectively. The Glycam force field is derived with
one stage fitting with a value of 0.01.
Attributes
----------
inopt
See initialization parameter
ioutopt
See initialization parameter
iqopt
See initialization parameter
ihfree
See initialization parameter
irstrnt
See initialization parameter
qwt
See initialization parameter
"""
inopt: int = 0
ioutopt: int = 0
iqopt: int = 1
ihfree: int = 1
irstrnt: int = 1
qwt: float = 0
@property
def nmol(self) -> int:
"""Number of structures in a multiple structure fit.
With the current implementation this will always be equal to 1.
"""
return 1
def __post_init__(self) -> None:
Respin._check_value("inopt", self.inopt, [0, 1])
Respin._check_value("ioutopt", self.ioutopt, [0, 1])
Respin._check_value("iqopt", self.iqopt, [1, 2, 3])
Respin._check_value("ihfree", self.ihfree, [0, 1])
Respin._check_value("irstrnt", self.irstrnt, [0, 1, 2])
if self.qwt < 0:
raise ValueError(f"Invalid value for `qwt`: {self.qwt}.")
@dataclass
class Ivary:
"""Dataclass representing per-atom fitting instructions for ``resp``
The fitting instructions are represented as a list of values stored in
the `values` attribute. The length of this list must be the same as the
number of atoms in the molecule it describes. Consecutive values refer
to consecutive atoms of the molecule.
The values determine how the charge on the atom can vary during the
fitting and the allowed values are:
* -1, meaning that the atom's charge is "frozen" at the initial value
* 0, meaning that this atom will be varied freely
* Larger than zero, representing the 1-based index of the atom in the
molecule to which this atom is to be equivalenced.
Example
-------
Consider fitting RESP charges in a molecule of methylamine:
>>> methylamine = Molecule([Atom(atomic_number) for atomic_number in [6, 1, 1, 1, 7, 1, 1]])
Fitting RESP charges consists of two stages. The ivary section for the
second stage of the fitting for the methylamine molecule should be as
follows:
>>> ivary = Respin.Ivary([0, 0, 2, 2, -1, -1, -1])
The carbon atom is free to vary during the fitting. The first of the methyl
hydrogens is equivalenced to the remaining two but they haven't been
specified yet, so it also has a value of 0. These two hydrogen atoms
are equivalenced to the first one, and thus are assigned its one-based
index in the molecule, i.e. 2 (meaning "equivalenced to the second atom
of the molecule"). The nitrogen atom and the two hydrogen atoms attached
to it are frozen during the second stage of the fitting and are thus
assigned values of -1.
Parameters
----------
values : List[int]
The per-atom instructions for the ``resp`` program.
Attributes
----------
values
See initialization parameter
"""
values: List[int]
def __post_init__(self) -> None:
for i, elem in enumerate(self.values):
if elem < -1 or elem > len(self.values):
raise ValueError(
f"Value number {i} passed as `ivary` with value {elem}, "
f"which is either lower than 0 or outside the list length."
)
def describe(self, molecule: Optional[Molecule[Atom]]=None) -> str:
"""Verbosely report the "ivary" actions
Example
-------
>>> print(ivary.describe(methylamine))
Atom (C) number 1
Atom (H) number 2
Atom (H) number 3, equivalenced to atom 2
Atom (H) number 4, equivalenced to atom 2
Atom (N) number 5, frozen
Atom (H) number 6, frozen
Atom (H) number 7, frozen
Parameters
----------
molecule : Optional[Molecule[Atom]], optional
The molecule to which the ivary information refers. This
argument is optional and defaults to None. If it is provided,
atom identities will be included in the output.
Raises
------
ValueError
Raised when the number of atoms in the molecule does not match
the length of the list of values in this object.
Returns
-------
str
A verbose description of the "ivary" instructions.
"""
if molecule is not None and len(molecule.atoms) != len(self.values):
raise ValueError(
f"The number of atoms ({len(molecule.atoms)} is not the same "
f"as the number of ivary values ({len(self.values)}."
)
zipped = zip_longest(self.values, molecule.atoms if molecule is not None else [])
f = io.StringIO()
for i, (ivary, atom) in enumerate(zipped):
atomic_number = atom.symbol if molecule is not None else None
id_str = f" ({atomic_number})" if atomic_number is not None else ""
if ivary < 0:
# TODO: This could also report at what value if charges are provided
ivary_str = ", frozen"
elif ivary > 0:
ivary_str = f", equivalenced to atom {ivary}"
else:
ivary_str = ""
print(f"Atom{id_str} number {i+1}{ivary_str}", file=f)
return f.getvalue()
@classmethod
def from_equivalence(cls: Type[IvaryT], equivalence: Equivalence) -> IvaryT:
"""Alternative initialization from equivalence information
            .. note::
The resulting ivary instructions will correspond to fitting
with equivalent atoms assigned identical charges. This may not
be the type of fitting that you want to perform.
Parameters
----------
equivalence : Equivalence
Information about chemical equivalence of atoms in a molecule.
"""
return cls([0 if val is None else val + 1 for val in equivalence.values])
title: str
cntrl: Cntrl
subtitle: str
charge: int
molecule: Molecule[Atom]
ivary: Ivary
@property
def wtmol(self) -> float:
"""Relative weight of the structure in a multistructure fitting.
A value of 1.0 is always returned in the current implementation.
"""
return 1.0
@property
def iuniq(self) -> int:
"""The number of atoms in the fitted structure"""
return len(self.molecule.atoms)
def __post_init__(self) -> None:
if len(self.molecule.atoms) != len(self.ivary.values):
raise ValueError(
f"Number of atoms ({len(self.molecule.atoms)}) does not match number "
f"of ivary values ({len(self.ivary.values)})."
)
def _get_equivalence_from_ivary(ivary: Respin.Ivary) -> Equivalence:
"""Get atom equivalence information from an `Respin.Ivary` object
This function is private as users probably mean to use the
`get_equivalence` function instead.
`Ivary` objects are specific to ``resp`` program input and thus may not
provide information about atom equivalence. The "respin" file may have been
generated to perform any custom fitting with ``resp``. Only use this
function when you're certain that the "respin" file contains all the
equivalence information that you need.
"""
return Equivalence([
None if ivary_value == 0 else ivary_value - 1
for ivary_value in ivary.values
])
def get_equivalence_from_two_stage_resp_ivary(ivary1: Respin.Ivary, ivary2: Respin.Ivary) -> Equivalence:
"""Get atom equivalence from input files for two 2-stage RESP
Derive atom equivalence based on the data in two "respin" files
(represented by the `Respin` objects) created for the purpose of two-stage
RESP fitting with the ``resp`` program. The files can be generated with the
``respgen`` program with the following commands::
respgen -i methane.ac -o methane.respin1 -f resp1
respgen -i methane.ac -o methane.respin2 -f resp2
.. warning::
The correctness of this function relies on:
1. Antechamber and ``respgen`` correctly recognizing the symmetry
relations between atoms. Fast-exchanging atoms may not be identified.
2. The author's understanding of how ``respgen`` generates the "respin"
files for two-stage RESP fitting.
Thus it is advised to always check that the result of this function
agrees with the domain knowledge about the studied molecule.
"""
# The equivalence logic is explained somewhat inconsistently in the RESP
# papers but I've additionally re-engineered the ``resp`` program's logic
# to be sure that reading both the ``respin`` files will give the desired
# behaviour. In fact, it's pretty simple. In the first stage atoms of the
# methyl and methylene groups are free, while all the others are
# equivalenced. In the second stage the former are equivalenced, while all
# the others are frozen.
return _get_equivalence_from_ivary(Respin.Ivary([
max(ivary1_value, ivary2_value)
for ivary1_value, ivary2_value in zip_exact(ivary1.values, ivary2.values)
]))
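
# A rough sketch for the methylamine molecule used as an example above. The
# ivary lists are hypothetical -- what ``respgen`` would actually emit for this
# molecule has not been verified here:
#
#   ivary1 = Respin.Ivary([0, 0, 0, 0, 0, 0, 6])     # stage 1: methyl H's free
#   ivary2 = Respin.Ivary([0, 0, 2, 2, -1, -1, -1])  # stage 2: amine frozen
#   get_equivalence_from_two_stage_resp_ivary(ivary1, ivary2)
#   # -> Equivalence with values [None, None, 1, 1, None, None, 5]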
def _parse_cntrl(f: TextIO) -> Respin.Cntrl:
line_re = re.compile(" (\w+) =\s+([0-9.]+)")
kwargs: Dict[str, Union[int, float]] = {}
for line in f:
if line.rstrip('\n') == " &end":
break
if line.rstrip('\n') == "":
continue
line_match = line_re.match(line)
if line_match is None:
raise InputFormatError(
f"Failed parsing cntrl section of respin file:\n{line}"
)
key = line_match.group(1)
value = line_match.group(2)
kwargs[key] = float(value) if key == "qwt" else int(value)
# nmol is not a parameter of Cntrl.__init__ and must be equal to 1.
nmol = kwargs.pop("nmol", None)
if nmol is not None and nmol != 1:
raise InputFormatError("Parsing multiple structures is not supported")
return Respin.Cntrl(**kwargs) # type: ignore # (not sure why not recognized)
def parse_respin(f: TextIO) -> Respin:
"""Parse a file in the "respin" format (input format of ``resp``)
Note that only files describing a single structure fit are currently supported.
Parameters
----------
f : TextIO
File object opened in read mode containing the "respin" file.
Raises
------
InputFormatError
Raised when the file does not follow the expected format.
Returns
-------
Respin
Object representing the fitting instructions for the ``resp`` program.
"""
title = get_line(f)
for line in f:
if line == " &cntrl\n":
break
cntrl = _parse_cntrl(f)
wtmol = get_line(f).strip()
if not math.isclose(float(wtmol), 1.0, rel_tol=0, abs_tol=1e-6):
raise InputFormatError(
f"Encountered value of `wtmol` different from 1.0 ({wtmol}) but "
f"parsing is supported only for single-structure respin files."
)
subtitle = get_line(f)
charge_and_iuniq = get_line(f)
if len(charge_and_iuniq.split()) != 2:
raise InputFormatError(
f"Expected two ints for the line specifying charge and iuniq, found:\n{charge_and_iuniq}"
)
charge = int(charge_and_iuniq.split()[0])
iuniq = int(charge_and_iuniq.split()[1])
atoms: List[Atom] = []
ivary = Respin.Ivary([])
for line in f:
if line.rstrip('\n') == "":
break
if len(line.split()) != 2:
raise InputFormatError(
f"Expected two ints for the line specifying atom and ivary, found:\n{line}"
)
atoms.append(Atom(int(line.split()[0])))
ivary_value = int(line.split()[1])
# `respgen` uses a value of -99 but internally we use -1 as per resp spec.
ivary.values.append(ivary_value if ivary_value != -99 else -1)
if len(atoms) != iuniq:
raise InputFormatError(
f"The value of `iuniq` ({iuniq}) is different from the number of"
f"atoms in the described molecule ({len(atoms)})."
)
return Respin(
title,
cntrl,
subtitle,
charge,
Molecule(atoms),
ivary
)
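
# Minimal usage sketch (the file name is hypothetical):
#
#   with open("methane.respin1") as f:
#       respin = parse_respin(f)
#   print(respin.ivary.describe(respin.molecule))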
def _write_cntrl(f: TextIO, cntrl: Respin.Cntrl, skip_defaults: bool) -> None:
default_cntrl: Dict[str, Union[int, float]] = asdict(Respin.Cntrl())
default_cntrl["nmol"] = 1
dict_: Dict[str, Union[int, float]] = asdict(cntrl)
dict_["nmol"] = cntrl.nmol
print(" &cntrl\n", file=f)
for key, value in dict_.items():
if key == "qwt":
print(" {} = {:.5f},".format(key, value), file=f)
else:
if not skip_defaults or value != default_cntrl[key]:
print(" {} = {},".format(key, value), file=f)
print("\n &end", file=f)
def write_respin(f: TextIO, respin: Respin, skip_cntrl_defaults: bool=True) -> None:
"""Write a "respin" file described by the given input data
Parameters
----------
f : TextIO
The file object to which the instructions are to be saved. The file
must be opened for writing.
respin : Respin
The dataclass representing all the instructions needed by the ``resp``
program.
skip_cntrl_defaults : bool, optional
When this option is set to True (default), fitting options in the "cntrl"
section with default values will not be written to the file.
"""
print(respin.title, file=f)
print(file=f)
_write_cntrl(f, respin.cntrl, skip_cntrl_defaults)
print(FW("F7.1").write([respin.wtmol]), file=f)
print(respin.subtitle, file=f)
print(FW("2I5").write([respin.charge, respin.iuniq]), file=f)
for atom, ivary in zip(respin.molecule.atoms, respin.ivary.values):
print(FW("2I5").write([atom.atomic_number, ivary]), file=f)
# According to the spec, a blank line is only for multi-structures but
# `resp` fails without it.
print(file=f)
| gpl-3.0 | 4,448,487,963,937,923,600 | 35.358634 | 105 | 0.604405 | false |
senuido/stash-scanner | ui/ConfigEditor.py | 1 | 40208 | import copy
import functools
import os
import pprint
import threading
from enum import Enum
from tkinter import Toplevel, StringVar, BooleanVar, messagebox, IntVar
from tkinter.constants import *
import tkinter.font as tkfont
from tkinter.ttk import Notebook, Frame, Label, Button, Style, Combobox, Entry, Checkbutton, Scale, LabelFrame
from lib.CurrencyManager import cm
from lib.FilterManager import fm
from lib.ItemFilter import Filter
from lib.Utility import logexception, AppException, config, AppConfiguration, ConfidenceLevel
from ui.MyTreeview import EditableTreeview
from ui.ScrollingFrame import AutoScrollbar
from ui.TooltipEntry import TooltipEntry
from ui.cmb_autocomplete import Combobox_Autocomplete
from ui.entry_placeholder import PlaceholderEntry
from ui.ttk_spinbox import Spinbox
READONLY = 'readonly'
ColEntry = EditableTreeview.ColEntry
TAB_FPRICES = 'fprices'
TAB_CURRENCY = 'currency'
INVALID_PRICE = 'Price format is: <amount> <currency>\nExamples: 100 chaos, 20 ex, 5 divine'
INVALID_PRICE_OVERRIDE = 'Price format is: <amount> <currency>\nExamples: 100 chaos, 20 ex, 5 divine\n' \
'Prices can also be relative to their base.\n' \
'Examples: / 3, * 0.5, +2.5 ex, -20 chaos'
# class ConfigEditor(Toplevel):
class ConfigEditor(Notebook):
def __init__(self, master, app_main, **kwargs):
super().__init__(master, **kwargs)
# style = Style()
# if we do this we also need to hide the #0 column because it adds indention for possible children
# style.configure("Treeview.Heading", padding=(10, 0))
# self.protocol('WM_DELETE_WINDOW', self.onClose)
# self.nb_tabs = Notebook(self)
# self.create_iprice_tab()
self.prices_editor = PricesEditor(self)
self.currency_editor = CurrencyEditor(self)
self.settings_editor = SettingsEditor(self, app_main)
# self.add(self.frm_iprices_tab, text='Item Prices', sticky='nsew')
self.add(self.settings_editor, text='General', sticky='nsew')
self.add(self.prices_editor, text='Prices', sticky='nsew')
self.add(self.currency_editor, text='Currency', sticky='nsew')
self.bind('<<NotebookTabChanged>>', self.onTabChange)
self.settings_editor_id, self.prices_tab_id, self.currency_tab_id = self.tabs()
def loadCurrency(self, force=False):
self.currency_editor.loadCurrency(force_reload=force)
def loadPrices(self, force=False):
self.prices_editor.loadPrices(force_reload=force)
def loadSettings(self):
self.settings_editor.loadSettings()
def onTabChange(self, event=None):
current_tab_id = self.select()
if current_tab_id == self.currency_tab_id:
self.loadCurrency(force=True)
elif current_tab_id == self.prices_tab_id:
self.loadPrices(force=True)
elif current_tab_id == self.settings_editor_id:
self.loadSettings()
# def onClose(self):
# self.destroy()
leagueOptions = [
'Harbinger',
'Hardcore Harbinger',
'Standard',
'Hardcore',
# 'Beta Standard',
# 'Beta Hardcore'
]
scanModeOptions = ['Latest', 'Continue']
class SettingsEditor(Frame):
def __init__(self, master, app_main, **kwargs):
super().__init__(master, **kwargs)
self.app = app_main
self.create_settings_ui()
def create_settings_ui(self):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.frm_settings = Frame(self)
# self.frm_settings.rowconfigure(2, weight=1)
# self.frm_settings.columnconfigure(0, weight=1)
is_valid_req_delay = self.register(functools.partial(_is_number, min=0))
is_valid_duration = self.register(functools.partial(_is_number, min=0, max=20))
is_valid_history_retention = self.register(functools.partial(_is_number, min=0, max=100))
is_valid_max_conns = self.register(functools.partial(_is_number, min=1, max=20, integer=True))
is_valid_num_workers = self.register(functools.partial(_is_number, min=0, max=os.cpu_count() or 8, integer=True))
self.frm_settings.grid(padx=10, pady=10, sticky='nsew')
frm_basic = LabelFrame(self.frm_settings, text='Basic')
frm_basic.grid(padx=5, pady=5, sticky='nsew', row=0, column=0, ipadx=5)
lbl = Label(frm_basic, text='League:')
lbl.grid(row=0, column=0, padx=5, pady=3, sticky='w')
self.cmb_league = Combobox(frm_basic, state=READONLY, values=leagueOptions)
self.cmb_league.grid(row=0, column=1, pady=3, sticky='nsew')
# lbl = Label(frm_basic, text='Minimum request delay(s):')
# lbl.grid(row=1, column=0, padx=5, pady=3, sticky='w')
# self.entry_req_delay = Entry(frm_basic, validate='all', validatecommand=(is_valid_req_delay, '%P'))
# self.entry_req_delay.grid(row=1, column=1, pady=3, sticky='nsew')
# lbl = Label(frm_basic, text='Scan mode:')
# lbl.grid(row=2, column=0, padx=5, pady=3, sticky='w')
# self.cmb_scan_mode = Combobox(frm_basic, state=READONLY, values=scanModeOptions)
# self.cmb_scan_mode.grid(row=2, column=1, pady=3, sticky='nsew')
lbl = Label(frm_basic, text='Notification duration(s):')
lbl.grid(row=3, column=0, padx=5, pady=3, sticky='w')
self.entry_notification_duration = Entry(frm_basic, validate='all',
validatecommand=(is_valid_duration, '%P'))
self.entry_notification_duration.grid(row=3, column=1, pady=3, sticky='nsew')
frm = LabelFrame(self.frm_settings, text='Advanced')
frm.grid(pady=5, sticky='nsew', row=0, column=1, ipadx=5)
lbl = Label(frm, text='Scan mode:')
lbl.grid(row=0, column=0, padx=5, pady=3, sticky='w')
self.cmb_scan_mode = Combobox(frm, state=READONLY, values=scanModeOptions)
self.cmb_scan_mode.grid(row=0, column=1, pady=3, sticky='nsew')
lbl = Label(frm, text='Min. request delay:')
lbl.grid(row=1, column=0, padx=5, pady=3, sticky='w')
self.entry_req_delay = Entry(frm, validate='all', validatecommand=(is_valid_req_delay, '%P'))
self.entry_req_delay.grid(row=1, column=1, pady=3, sticky='nsew')
lbl = Label(frm, text='(seconds)')
lbl.grid(row=1, column=2, padx=(5, 0), pady=3, sticky='w')
lbl = Label(frm, text='Max connections:')
lbl.grid(row=2, column=0, padx=5, pady=3, sticky='w')
self.entry_max_conns = Entry(frm, validate='all', validatecommand=(is_valid_max_conns, '%P'))
self.entry_max_conns.grid(row=2, column=1, pady=3, sticky='nsew')
lbl = Label(frm, text='Parsers #:')
lbl.grid(row=3, column=0, padx=5, pady=3, sticky='w')
self.entry_num_workers = Entry(frm, validate='all', validatecommand=(is_valid_num_workers, '%P'))
self.entry_num_workers.grid(row=3, column=1, pady=3, sticky='nsew')
lbl = Label(frm, text='(0 = Auto)')
lbl.grid(row=3, column=2, padx=(5, 0), pady=3, sticky='w')
lbl = Label(frm, text='History retention:')
lbl.grid(row=4, column=0, padx=5, pady=3, sticky='w')
self.entry_history_retention = Entry(frm, validate='all', validatecommand=(is_valid_history_retention, '%P'))
self.entry_history_retention.grid(row=4, column=1, pady=3, sticky='nsew')
lbl = Label(frm, text='(days)')
lbl.grid(row=4, column=2, padx=(5, 0), pady=3, sticky='w')
frm = Frame(frm_basic)
frm.grid(row=4, column=0)
self.var_notify = BooleanVar()
self.var_notify.trace_variable('w', lambda a, b, c: self._on_notify_option_change())
self.cb_notifications = Checkbutton(frm, text='Growl notifications', variable=self.var_notify)
self.cb_notifications.grid(row=0, column=0, padx=5, pady=3, sticky='w')
self.var_notify_copy = BooleanVar()
self.cb_notify_copy = Checkbutton(frm, text='Copy message', variable=self.var_notify_copy)
self.cb_notify_copy.grid(row=1, column=0, padx=5, pady=3, sticky='w')
self.var_notify_play_sound = BooleanVar()
self.cb_notify_play_sound = Checkbutton(frm, text='Play sound', variable=self.var_notify_play_sound)
self.cb_notify_play_sound.grid(row=2, column=0, padx=5, pady=3, sticky='w')
frm_btns = Frame(self.frm_settings)
frm_btns.grid(row=2, columnspan=3, pady=(20, 5), sticky='w')
self.btn_apply = Button(frm_btns, text='Apply', command=self.applyChanges)
self.btn_apply.grid(row=0, column=0, padx=5)
self.btn_reload = Button(frm_btns, text='Reload', command=self.loadSettings)
self.btn_reload.grid(row=0, column=1)
def _on_notify_option_change(self):
state = NORMAL if self.var_notify.get() else DISABLED
self.cb_notify_copy.config(state=state)
self.cb_notify_play_sound.config(state=state)
def applyChanges(self):
cfg = AppConfiguration()
cfg.league = self.cmb_league.get() or leagueOptions[0]
cfg.notify = self.var_notify.get()
cfg.notify_copy_msg = self.var_notify_copy.get()
cfg.notify_play_sound = self.var_notify_play_sound.get()
cfg.notification_duration = float(self.entry_notification_duration.get() or 4)
cfg.request_delay = float(self.entry_req_delay.get() or 0.7)
cfg.scan_mode = self.cmb_scan_mode.get() or scanModeOptions[0]
cfg.history_retention = int(self.entry_history_retention.get() or 1)
cfg.max_conns = int(self.entry_max_conns.get() or 8)
cfg.num_workers = int(self.entry_num_workers.get() or 0)
cfg.smooth_delay = config.smooth_delay
self.app.update_configuration(cfg)
def loadSettings(self):
self.cmb_league.set(config.league)
self.cmb_scan_mode.set(config.scan_mode)
self.entry_notification_duration.delete(0, END)
self.entry_notification_duration.insert(0, config.notification_duration)
self.var_notify.set(config.notify)
self.var_notify_copy.set(config.notify_copy_msg)
self.var_notify_play_sound.set(config.notify_play_sound)
self.entry_req_delay.delete(0, END)
self.entry_req_delay.insert(0, config.request_delay)
self.entry_history_retention.delete(0, END)
self.entry_history_retention.insert(0, config.history_retention)
self.entry_max_conns.delete(0, END)
self.entry_max_conns.insert(0, config.max_conns)
self.entry_num_workers.delete(0, END)
self.entry_num_workers.insert(0, config.num_workers)
class CurrencyColumn(Enum):
Currency = 'Currency'
Rate = 'Rate'
Override = 'Override'
EffectiveRate = 'Effective Rate'
Filler = ''
currencyColumns = [col.name for col in CurrencyColumn]
class PricesColumn(Enum):
Name = 'Name'
ID = 'ID'
ItemPrice = 'Item value'
Override = 'Override'
    FilterPrice = 'Effective item value'  # 'Filter Price (c)'
FilterOverride = 'Filter Override'
EffectiveFilterPrice = 'Effective Filter Price (c)'
FilterStateOverride = 'Filter State Override'
Filler = ''
pricesColumns = [col.name for col in PricesColumn]
class FilterStateOption(Enum):
Enable = True
Disable = False
filterStateOptions = [''] + [option.name for option in FilterStateOption]
class PricesEditor(Frame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.create_prices_ui()
self.initial_values = {}
self.table_modified = False
def create_prices_ui(self):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.frm_prices = Frame(self)
self.frm_prices.rowconfigure(2, weight=1)
self.frm_prices.columnconfigure(0, weight=1)
self.frm_prices.grid(padx=10, pady=10, sticky='nsew')
# Button Frame
frm_btns = Frame(self.frm_prices, relief=SOLID, borderwidth=2)
frm_btns.grid(row=0, column=0, pady=(0, 5), sticky='nsew')
frm_btns.columnconfigure(10, weight=1)
# self.entry_currency = \
# Combobox_Autocomplete(frm, list_of_items=['one of many currencies'], startswith_match=False)
self.search_var = StringVar()
self.entry_search = PlaceholderEntry(frm_btns, 'Search..',
style='Default.TEntry', textvariable=self.search_var)
self.search_var.trace_variable('w', lambda a, b, c: self.tree.search(self.entry_search.get_value()))
self.entry_search.bind('<Return>',
lambda event: self.tree.search(self.entry_search.get_value(), find_next=True))
self.btn_apply = Button(frm_btns, text='Apply', command=self.applyChanges)
self.btn_reload = Button(frm_btns, text='Reload', command=lambda: self.loadPrices(force_reload=True))
self.entry_search.grid(row=2, column=0, pady=5, padx=5)
self.btn_apply.grid(row=2, column=2, pady=5)
# frm.columnconfigure(3, weight=1)
self.btn_reload.grid(row=2, column=3, sticky='e', pady=5)
        self.var_advanced = BooleanVar(value=False)
self.var_advanced.trace_variable('w', lambda a, b, c: self._on_view_option_change())
self.cb_advanced = Checkbutton(frm_btns, text='Advanced', variable=self.var_advanced)
self.cb_advanced.grid(row=2, column=10, sticky='e', padx=10)
frm_border = Frame(self.frm_prices, relief=SOLID, borderwidth=2)
frm_border.grid(row=2, column=0, sticky='nsew')
frm_border.rowconfigure(2, weight=1)
frm_border.columnconfigure(0, weight=1)
# Tree Frame
self.frm_tree = Frame(frm_border)
self.frm_tree.grid(row=2, column=0, sticky='nsew', padx=5, pady=(0, 0))
self.frm_tree.rowconfigure(0, weight=1)
self.frm_tree.columnconfigure(0, weight=1)
self.tree = EditableTreeview(self.frm_tree, on_cell_update=self.onCellUpdate)
scrly = AutoScrollbar(self.frm_tree, command=self.tree.yview)
scrlx = AutoScrollbar(self.frm_tree, command=self.tree.xview, orient=HORIZONTAL)
self.tree.config(yscrollcommand=scrly.set, xscrollcommand=scrlx.set)
self.tree.grid(row=0, column=0, sticky='nsew')
scrly.grid(row=0, column=1, sticky='nsew')
scrlx.grid(row=1, column=0, sticky='nsew')
# Button Frame
frm = Frame(frm_border) #, relief=SOLID, borderwidth=1)
# frm = Frame(self.frm_prices)
frm.grid(row=0, column=0, sticky='nsew')
# self.entry_currency = \
# Combobox_Autocomplete(frm, list_of_items=['one of many currencies'], startswith_match=False)
lbl = Label(frm, text='Item value threshold:')
lbl.grid(row=0, column=0, padx=5, pady=5, sticky='w')
self.var_threshold = StringVar()
self.entry_threshold = TooltipEntry(frm, textvariable=self.var_threshold)
self.entry_threshold.bind('<FocusOut>', lambda event: self._validate_threshold_entry())
self.entry_threshold.grid(row=0, column=1, padx=5, pady=5)
self.var_threshold.trace('w', lambda a, b, c: self.on_entry_change(self.entry_threshold))
lbl = Label(frm, text='Budget:')
lbl.grid(row=0, column=2, padx=5, pady=5)
self.var_budget = StringVar()
self.entry_budget = TooltipEntry(frm, textvariable=self.var_budget)
self.entry_budget.bind('<FocusOut>', lambda event: self._validate_budget_entry())
self.entry_budget.grid(row=0, column=3, padx=5, pady=5)
self.var_budget.trace('w', lambda a, b, c: self.on_entry_change(self.entry_budget))
lbl = Label(frm, text='Minimum price:')
lbl.grid(row=0, column=4, padx=5, pady=5)
self.var_min_price = StringVar()
self.entry_min_price = TooltipEntry(frm, textvariable=self.var_min_price)
self.entry_min_price.bind('<FocusOut>', lambda event: self._validate_min_price_entry())
self.entry_min_price.grid(row=0, column=5, padx=5, pady=5)
self.var_min_price.trace('w', lambda a, b, c: self.on_entry_change(self.entry_min_price))
lbl = Label(frm, text='Default filter override:')
lbl.grid(row=0, column=6, padx=5, pady=5)
self.lbl_fprice_override = lbl
self.var_fprice_override = StringVar()
self.entry_fprice_override = TooltipEntry(frm, textvariable=self.var_fprice_override)
self.entry_fprice_override.bind('<FocusOut>', lambda event: self._validate_fprice_override_entry())
self.entry_fprice_override.grid(row=0, column=7, padx=5, pady=5)
self.var_fprice_override.trace('w', lambda a, b, c: self.on_entry_change(self.entry_fprice_override))
# Advanced
lbl = Label(frm, text='Default item value override:')
lbl.grid(row=1, column=0, padx=5, pady=(2, 5), sticky='w')
self.lbl_price_override = lbl
self.var_price_override = StringVar()
self.entry_price_override = TooltipEntry(frm, textvariable=self.var_price_override)
self.entry_price_override.bind('<FocusOut>', lambda event: self._validate_price_override_entry())
self.entry_price_override.grid(row=1, column=1, padx=5, pady=(2, 5))
self.var_price_override.trace('w', lambda a, b, c: self.on_entry_change(self.entry_price_override))
# Confidence Level
lbl = Label(frm, text="Confidence level:")
lbl.grid(row=1, column=2, padx=5, pady=(2, 5), sticky='w')
self.lbl_confidence_lvl = lbl
self.var_confidence_lvl = IntVar()
self.entry_confidence_lvl = ConfidenceScale(frm, variable=self.var_confidence_lvl)
self.entry_confidence_lvl.grid(row=1, column=3, padx=5, pady=(2, 5))
self.var_confidence_lvl.trace('w', lambda a, b, c: self.on_entry_change(self.entry_confidence_lvl))
        self.var_5l_filters = BooleanVar(value=False)
self.cb_5l_filters = VarCheckbutton(frm, text='Enable 5L filters', variable=self.var_5l_filters)
self.cb_5l_filters.var = self.var_5l_filters
self.cb_5l_filters.grid(row=1, column=4, padx=5, pady=(2, 5), columnspan=1)
self.var_5l_filters.trace_variable('w', lambda a, b, c: self.on_entry_change(self.cb_5l_filters))
# Tree Config
tree = self.tree
def init_tree_column(col):
col_name = pricesColumns[0] if col == '#0' else col
tree.heading(col, text=PricesColumn[col_name].value, anchor=W, command=lambda col=col: tree.sort_col(col))
tree.column(col, width=140, stretch=False)
# self.tree['columns'] = ('ID', 'Item Price', 'Override', 'Filter Price', 'Filter Override', 'Effective Filter Price', 'Filter State Override', '')
self.tree['columns'] = pricesColumns[1:]
self.tree.register_column(PricesColumn.Override.name,
ColEntry(TooltipEntry(self.tree), func_validate=_validate_price_override))
self.tree.register_column(PricesColumn.FilterOverride.name,
ColEntry(TooltipEntry(self.tree), func_validate=_validate_price_override))
self.tree.register_column(PricesColumn.FilterStateOverride.name,
ColEntry(Combobox(self.tree, values=filterStateOptions, state=READONLY),
accept_events=('<<ComboboxSelected>>', '<Return>')))
for col in (('#0', ) + tree['columns']):
init_tree_column(col)
tree.heading('#0', anchor=CENTER)
tree.column('#0', width=200, stretch=False)
tree.column(PricesColumn.Filler.name, stretch=True)
tree.heading(PricesColumn.ItemPrice.name,
command=lambda col=PricesColumn.ItemPrice.name: tree.sort_col(col, key=self._price_key))
tree.heading(PricesColumn.Override.name,
command=lambda col=PricesColumn.Override.name: tree.sort_col(col, key=self._price_key))
tree.heading(PricesColumn.FilterOverride.name,
command=lambda col=PricesColumn.FilterOverride.name: tree.sort_col(col, key=self._price_key))
tree.heading(PricesColumn.FilterPrice.name,
command=lambda col=PricesColumn.FilterPrice.name: tree.sort_col(col, key=self._rate_key, default=0))
tree.heading(PricesColumn.EffectiveFilterPrice.name,
command=lambda col=PricesColumn.EffectiveFilterPrice.name: tree.sort_col(col, key=self._rate_key, default=0))
self.bvar_modified = BooleanVar()
self.bvar_modified.trace('w', lambda a, b, c: self._updateApplyState())
self.bvar_modified.set(False)
self.var_advanced.set(False)
def _rate_key(self, key):
if key == 'N/A':
return 0
return float(key)
def _price_key(self, key):
if key == '':
return None # this means it will be ignored while sorting
try:
return cm.compilePrice(key, base_price=0)
except Exception:
return 0
def on_entry_change(self, entry):
val = entry.get()
if self.initial_values[entry] != val:
self.bvar_modified.set(True)
# def on_price_entry_focusout(self, widget):
# valid = _validate_price(widget, accept_empty=False)
# if valid and not self.bvar_modified.get() and self.initial_values[widget] != widget.get():
# self.bvar_modified.set(True)
# return valid
#
# def on_override_entry_focusout(self, widget):
# valid = _validate_price_override(widget, accept_empty=False)
# if valid and not self.bvar_modified.get() and self.initial_values[widget] != widget.get():
# self.bvar_modified.set(True)
# return valid
def _validate_threshold_entry(self):
return _validate_price(self.entry_threshold, accept_empty=False)
def _validate_budget_entry(self):
return _validate_price(self.entry_budget, accept_empty=True)
def _validate_min_price_entry(self):
return _validate_price(self.entry_min_price, accept_empty=True)
def _validate_price_override_entry(self):
return _validate_price_override(self.entry_price_override, accept_empty=False)
def _validate_fprice_override_entry(self):
return _validate_price_override(self.entry_fprice_override, accept_empty=False)
def _update_modified(self):
modified = any(entry.get() != self.initial_values[entry] for entry in self.initial_values) or self.table_modified
self.bvar_modified.set(modified)
def _updateApplyState(self):
if self.bvar_modified.get():
self.btn_apply.config(state=NORMAL)
else:
self.btn_apply.config(state=DISABLED)
def _validateForm(self):
if not self._validate_threshold_entry():
return False
if not self._validate_budget_entry():
return False
if not self._validate_min_price_entry():
return False
if not self._validate_price_override_entry():
return False
if not self._validate_fprice_override_entry():
return False
return True
def applyChanges(self, event=None):
if not self.bvar_modified.get() or not fm.initialized:
return
if not self._validateForm():
return
price_threshold = self.entry_threshold.get()
default_price_override = self.entry_price_override.get()
default_fprice_override = self.entry_fprice_override.get()
budget = self.entry_budget.get()
min_price = self.entry_min_price.get()
confidence_lvl = self.entry_confidence_lvl.get() or fm.DEFAULT_CONFIDENCE_LEVEL
enable_5l_filters = self.var_5l_filters.get()
price_overrides = {}
filter_price_overrides = {}
filter_state_overrides = {}
for iid in self.tree.get_children():
id = self.tree.set(iid, PricesColumn.ID.name)
iprice = self.tree.set(iid, PricesColumn.Override.name)
if iprice:
price_overrides[id] = iprice
fprice = self.tree.set(iid, PricesColumn.FilterOverride.name)
if fprice:
filter_price_overrides[id] = fprice
fstate = self.tree.set(iid, PricesColumn.FilterStateOverride.name)
try:
filter_state_overrides[id] = FilterStateOption[fstate].value
except KeyError:
pass
ids = set([self.tree.set(iid, PricesColumn.ID.name) for iid in self.tree.get_children()])
# preserve unhandled ids configuration
for key in (set(fm.price_overrides) - ids):
price_overrides[key] = fm.price_overrides[key]
for key in (set(fm.filter_price_overrides) - ids):
filter_price_overrides[key] = fm.filter_price_overrides[key]
for key in (set(fm.filter_state_overrides) - ids):
filter_state_overrides[key] = fm.filter_state_overrides[key]
try:
fm.updateConfig(default_price_override, default_fprice_override, price_threshold, budget, min_price,
price_overrides, filter_price_overrides, filter_state_overrides, int(confidence_lvl), enable_5l_filters)
except AppException as e:
messagebox.showerror('Validation error',
'Failed to update configuration:\n{}'.format(e), parent=self.winfo_toplevel())
except Exception as e:
logexception()
messagebox.showerror('Update error',
'Failed to apply changes, unexpected error:\n{}'.format(e), parent=self.winfo_toplevel())
else:
# SHOULD always work since config is valid, main console will report any failures
            # background thread because schema validation takes a bit of time
threading.Thread(target=fm.compileFilters).start()
self._initFormState()
def loadPrices(self, force_reload=False):
if not cm.initialized or not fm.initialized:
return
if not force_reload:
self._update_modified() # in case of reverted changes
            if self.bvar_modified.get(): # don't interrupt user changes
return
tree = self.tree
tree.clear()
table = {}
for fltr in fm.autoFilters:
# effective_rate = cm.crates.get(curr, '')
# if effective_rate != '':
# effective_rate = round(effective_rate, 3)
fid = fltr.id
fstate_override = fm.filter_state_overrides.get(fid, '')
try:
fstate_override = FilterStateOption(fstate_override).name
except ValueError:
fstate_override = ''
table[fid] = (fltr.title, fid, fm.item_prices[fid], fm.price_overrides.get(fid, ''),
_to_display_rate(fm.compiled_item_prices.get(fid, 'N/A')), fm.filter_price_overrides.get(fid, ''),
_to_display_rate(fm.compiled_filter_prices.get(fid, 'N/A')), fstate_override)
for fid in table:
tree.insert('', END, '', text=table[fid][0], values=table[fid][1:])
# tree.sort_by('#0', descending=True)
tree.sort_col('#0', reverse=False)
self._initFormState()
# def onItemPriceUpdate(self, iid, col, old, new):
# print('IPrice update: iid {}, col {}'.format(iid, col))
def onCellUpdate(self, iid, col, old, new):
if old != new:
self.table_modified = True
self.bvar_modified.set(True)
# self._update_modified()
def _initFormState(self):
self.table_modified = False
self.initial_values[self.entry_threshold] = fm.price_threshold
self.initial_values[self.entry_budget] = fm.budget
self.initial_values[self.entry_min_price] = fm.default_min_price
self.initial_values[self.entry_price_override] = fm.default_price_override
self.initial_values[self.entry_fprice_override] = fm.default_fprice_override
self.initial_values[self.entry_confidence_lvl] = fm.confidence_level
self.initial_values[self.cb_5l_filters] = fm.enable_5l_filters
self.var_threshold.set(fm.price_threshold)
self.var_budget.set(fm.budget)
self.var_min_price.set(fm.default_min_price)
self.var_price_override.set(fm.default_price_override)
self.var_fprice_override.set(fm.default_fprice_override)
self.var_confidence_lvl.set(fm.confidence_level)
self.var_5l_filters.set(fm.enable_5l_filters)
self.bvar_modified.set(False)
def _on_view_option_change(self):
advanced_widgets = [self.entry_price_override, self.lbl_price_override,
self.lbl_confidence_lvl, self.entry_confidence_lvl, self.cb_5l_filters]
if not self.var_advanced.get():
for w in advanced_widgets:
w.grid_remove()
self.tree.config(displaycolumn=[PricesColumn.FilterPrice.name, PricesColumn.FilterOverride.name,
PricesColumn.EffectiveFilterPrice.name, PricesColumn.Filler.name])
else:
for w in advanced_widgets:
w.grid()
self.tree.config(displaycolumn='#all')
self.tree.on_entry_close()
class CurrencyEditor(Frame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.bvar_modified = BooleanVar()
self.create_currency_ui()
def create_currency_ui(self):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.frm_currency = Frame(self)
self.frm_currency.rowconfigure(2, weight=1)
self.frm_currency.columnconfigure(0, weight=1)
self.frm_currency.grid(padx=10, pady=10, sticky='nsew')
# Tree Frame
self.frm_tree = Frame(self.frm_currency)
self.frm_tree.grid(row=2, sticky='nsew')
self.frm_tree.rowconfigure(0, weight=1)
self.frm_tree.columnconfigure(0, weight=1)
self.tree = EditableTreeview(self.frm_tree, on_cell_update=self.onCellUpdate)
scrly = AutoScrollbar(self.frm_tree, command=self.tree.yview)
scrlx = AutoScrollbar(self.frm_tree, command=self.tree.xview, orient=HORIZONTAL)
self.tree.config(yscrollcommand=scrly.set, xscrollcommand=scrlx.set)
self.tree.grid(row=0, column=0, sticky='nsew')
scrly.grid(row=0, column=1, sticky='nsew')
scrlx.grid(row=1, column=0, sticky='nsew')
self.tree.insert('', 0, text='Exalted Orb', values=('90', '85'))
frm = Frame(self.frm_currency, relief=SOLID, borderwidth=2)
frm.columnconfigure(10, weight=1)
frm.grid(row=1, column=0, pady=(0, 5), sticky='nsew')
# self.entry_currency = \
# Combobox_Autocomplete(frm, list_of_items=['one of many currencies'], startswith_match=False)
self.search_var = StringVar()
self.entry_search = PlaceholderEntry(frm, 'Search..',
style='Default.TEntry', textvariable=self.search_var)
self.search_var.trace_variable('w', lambda a, b, c: self.tree.search(self.entry_search.get_value()))
self.entry_search.bind('<Return>', lambda event: self.tree.search(self.entry_search.get_value(), find_next=True))
# self.btn_currency_search = Button(frm, text='Search', command=lambda event: self.tree_currency.search(self.entry_currency_search.get_value(), find_next=True))
self.btn_apply = Button(frm, text='Apply', command=self.applyChanges)
self.btn_reload = Button(frm, text='Reload', command=lambda: self.loadCurrency(force_reload=True))
self.entry_search.grid(row=2, column=0, pady=5, padx=5)
# self.btn_currency_search.grid(row=2, column=1, pady=5)
self.btn_apply.grid(row=2, column=2, pady=5)
# frm.columnconfigure(3, weight=1)
self.btn_reload.grid(row=2, column=3, sticky='e', pady=5)
# Confidence Level
lbl = Label(frm, text="Confidence level:")
lbl.grid(row=2, column=10, padx=5, sticky='nse', pady=(3, 5))
self.lbl_confidence_lvl = lbl
self.var_confidence_lvl = IntVar()
self.entry_confidence_lvl = ConfidenceScale(frm, variable=self.var_confidence_lvl)
self.entry_confidence_lvl.grid(row=2, column=11, padx=5, pady=5)
self.var_confidence_lvl.trace('w', lambda a, b, c: self.on_entry_change(self.entry_confidence_lvl))
# Tree Config
tree = self.tree
tree['columns'] = currencyColumns[1:]
tree.register_column('Override', ColEntry(TooltipEntry(tree), func_validate=_validate_price_override))
def init_tree_column(col):
col_name = currencyColumns[0] if col == '#0' else col
tree.heading(col, text=CurrencyColumn[col_name].value, anchor=W, command=lambda col=col: tree.sort_col(col))
tree.column(col, width=140, stretch=False)
for col in ('#0', ) + tree['columns']:
init_tree_column(col)
tree.heading('#0', anchor=CENTER)
tree.column('#0', width=250, stretch=False)
tree.column(CurrencyColumn.Filler.name, stretch=True)
tree.heading(CurrencyColumn.Rate.name,
command=lambda col=CurrencyColumn.Rate.name: tree.sort_col(col, key=float, default=0))
tree.heading(CurrencyColumn.EffectiveRate.name,
command=lambda col=CurrencyColumn.EffectiveRate.name: tree.sort_col(col, key=float, default=0))
tree.heading(CurrencyColumn.Override.name,
command=lambda col=CurrencyColumn.Override.name: tree.sort_col(col, key=self._price_key))
self.bvar_modified.trace('w', lambda a, b, c: self._updateApplyState())
def _price_key(self, key):
if key == '':
return None # this means it will be ignored while sorting
try:
return cm.compilePrice(key, base_price=0)
except Exception:
return 0
def _updateApplyState(self):
if self.bvar_modified.get():
self.btn_apply.config(state=NORMAL)
else:
self.btn_apply.config(state=DISABLED)
def loadCurrency(self, force_reload=False):
if not cm.initialized:
return
if not force_reload and self.bvar_modified.get():
return
self.var_confidence_lvl.set(cm.confidence_level)
tree = self.tree
tree.clear()
table = {}
for curr in cm.shorts:
effective_rate = cm.crates.get(curr, '0')
table[curr] = (_to_display_rate(cm.rates.get(curr, '')), cm.overrides.get(curr, ''), _to_display_rate(effective_rate))
for curr in table:
tree.insert('', END, '', text=curr, values=table[curr])
tree.sort_col(CurrencyColumn.EffectiveRate.name, key=float, default=0)
self.bvar_modified.set(False)
def applyChanges(self, event=None):
if not self.bvar_modified.get() or not cm.initialized:
return
overrides = {}
for iid in self.tree.get_children():
#TODO: hide #0 col and move names to a value column
currency_name_col = '#0' # CurrencyColumn.Currency.name
# id = self.tree.set(iid, currency_name_col)
id = self.tree.item(iid, 'text')
override = self.tree.set(iid, CurrencyColumn.Override.name)
if override:
overrides[id] = override
# ids = set([self.tree.set(iid, currency_name_col) for iid in self.tree.get_children()])
ids = set([self.tree.item(iid, 'text') for iid in self.tree.get_children()])
# preserve unhandled ids configuration
for key in (set(cm.overrides) - ids):
overrides[key] = cm.overrides[key]
cm.confidence_level = self.entry_confidence_lvl.get()
try:
cm.compile(overrides=overrides)
if fm.initialized:
threading.Thread(target=fm.compileFilters).start()
self.bvar_modified.set(False)
except AppException as e:
messagebox.showerror('Update error', e, parent=self.winfo_toplevel())
except Exception as e:
logexception()
messagebox.showerror('Update error',
'Failed to apply changes, unexpected error:\n{}'.format(e),
parent=self.winfo_toplevel())
def onCellUpdate(self, iid, col, old, new):
if not self.bvar_modified.get() and old != new:
self.bvar_modified.set(True)
def on_entry_change(self, entry):
self.bvar_modified.set(True)
class VarCheckbutton(Checkbutton):
def __init__(self, master, **kw):
super().__init__(master, **kw)
self.var = kw.get('variable', None)
def configure(self, cnf=None, **kw):
super().configure(cnf, **kw)
if 'variable' in kw:
self.var = kw['variable']
def get(self):
if self.var:
return self.var.get()
return None
class ConfidenceScale(Frame):
def __init__(self, master, **kw):
super().__init__(master)
# self.grid_propagate(0)
# self.columnconfigure(0, weight=1)
# self.rowconfigure(0, weight=1)
self.var = kw.get('variable', IntVar())
kw['variable'] = self.var
kw['from_'] = ConfidenceLevel.Low.value
kw['to'] = ConfidenceLevel.VeryHigh.value
# kw['command'] = self.scale_change
kw['orient'] = HORIZONTAL
self.lbl_scale = Label(self)
self.scale = Scale(self, **kw)
self.scale_font = tkfont.nametofont(Style().lookup('TLabel', 'font')).copy()
self.scale_font.config(weight=tkfont.BOLD, size=9)
self.lbl_scale.config(font=self.scale_font, width=3, anchor=CENTER)
self.var.trace_variable('w', lambda a, b, c: self.scale_change())
self.scale.grid(row=0, column=0, sticky='ns')
self.lbl_scale.grid(row=0, column=1, sticky='ns', padx=(3, 0))
def scale_change(self):
rval = self.get()
if rval >= ConfidenceLevel.High:
fg = '#4CAF50'
elif rval >= ConfidenceLevel.Medium:
fg = '#FF9800'
else:
fg = '#FF5722'
self.lbl_scale.config(foreground=fg, text=str(rval))
def get(self):
return round(float(self.var.get()))
def _validate_price(widget, accept_empty=True):
val = widget.get()
valid, reason = _is_price_valid(val, accept_empty)
if not valid:
widget.showTooltip(reason)
widget.focus()
else:
widget.hideTooltip()
return valid
def _is_price_valid(val, accept_empty=True):
if accept_empty and val.strip() == '':
return True, ''
if not cm.initialized:
return True, ''
if not cm.isPriceValid(val):
return False, INVALID_PRICE
return True, ''
def _validate_price_override(widget, accept_empty=True):
val = widget.get()
valid, reason = _is_price_override_valid(val, accept_empty)
if not valid:
widget.showTooltip(reason)
widget.focus()
else:
widget.hideTooltip()
return valid
def _is_price_override_valid(val, accept_empty=True):
if accept_empty and val.strip() == '':
return True, ''
if not cm.isOverridePriceValid(val):
return False, INVALID_PRICE_OVERRIDE
return True, ''
def _to_display_rate(val):
if val == 'N/A' or val == '':
return val
if int(val) == float(val):
return int(val)
return round(val, 2)
def _is_number(text, min=None, max=None, accept_empty=True, integer=False):
try:
# text = text.strip()
if text == '':
return accept_empty
if text.find(' ') != -1:
return False
if integer:
num = int(text)
else:
num = float(text)
if min is not None and num < min:
return False
if max is not None and num > max:
return False
return True
except ValueError:
return False | gpl-3.0 | -7,908,959,473,610,707,000 | 41.684713 | 168 | 0.623756 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/aio/operations/_route_tables_operations.py | 1 | 23236 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2016_12_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_12_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
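

# --- Illustrative usage sketch (not part of the generated operations class) --
# The class docstring above says RouteTablesOperations is not instantiated
# directly; it is reached through a client instance. The coroutine below
# sketches that pattern with the async NetworkManagementClient for this API
# version. Subscription id, resource group, table name and location are
# placeholder values, and DefaultAzureCredential is only one credential option.
async def _route_tables_usage_sketch() -> None:
    # Local imports keep this optional sketch from adding import-time deps.
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2016_12_01.aio import NetworkManagementClient
    from azure.mgmt.network.v2016_12_01.models import RouteTable

    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # begin_create_or_update returns an AsyncLROPoller; result() waits
            # for the long-running operation to complete.
            poller = await client.route_tables.begin_create_or_update(
                "my-resource-group",
                "my-route-table",
                RouteTable(location="eastus"),
            )
            token = poller.continuation_token()  # may be saved to resume later
            created = await poller.result()
            print(created.name, "continuation token length:", len(token))

            # list() returns an AsyncItemPaged consumed with "async for".
            async for table in client.route_tables.list("my-resource-group"):
                print(table.name)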
| mit | -8,438,199,354,132,655,000 | 47.712788 | 191 | 0.638879 | false |
Azure/azure-sdk-for-python | sdk/cosmos/azure-cosmos/test/test_diagnostics.py | 1 | 1357 | import unittest
import pytest
import azure.cosmos.diagnostics as m
_common = {
'x-ms-activity-id',
'x-ms-session-token',
'x-ms-item-count',
'x-ms-request-quota',
'x-ms-resource-usage',
'x-ms-retry-after-ms',
}
_headers = dict(zip(_common, _common))
_headers['other'] = 'other'
class BaseUnitTests(unittest.TestCase):
def test_init(self):
rh = m.RecordDiagnostics()
assert rh.headers == {}
def test_headers(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
assert rh.headers == _headers
assert rh.headers is not _headers
def test_headers_case(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
rh_headers = rh.headers
for key in rh.headers.keys():
assert key.upper() in rh_headers
assert key.lower() in rh_headers
def test_common_attrs(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
for name in _common:
assert rh.headers[name] == name
attr = name.replace('x-ms-', '').replace('-', '_')
assert getattr(rh, attr) == name
def test_other_attrs(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
assert rh.headers['other'] == 'other'
with pytest.raises(AttributeError):
rh.other
| mit | 9,059,282,863,094,365,000 | 25.607843 | 62 | 0.572587 | false |
Storj/pyp2p | tests/test_sock.py | 1 | 15565 | """
* Test whether multiple recvs on the same connection (non-blocking) will
eventually have the connection closed (use another net instance.)
* Test whether multiple sends on the same connection (non-blocking) will
eventually lead to the connection being closed (use a net instance with
no recvs! and loop over the cons)
(Not implemented for now since these will greatly slow the build.)
"""
import hashlib
import os
import tempfile
from threading import Thread
from unittest import TestCase
from pyp2p.net import rendezvous_servers
from pyp2p.rendezvous_client import RendezvousClient
from pyp2p.sock import *
if sys.version_info >= (3, 0, 0):
from urllib.parse import urlparse
import socketserver as SocketServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
else:
from urlparse import urlparse
import SocketServer
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class ThreadingSimpleServer(
SocketServer.ThreadingMixIn,
HTTPServer
):
pass
def md5sum(fname):
my_hash = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
my_hash.update(chunk)
return my_hash.hexdigest()
class SockDownload:
def __init__(self, url, expected_hash, file_size, blocking=0,
encoding="ascii"):
"""
Download a file from a HTTP URL and compare it to an MD5 hash.
Uses the sock.py module for testing.
:param url: URL to download
:param expected_hash: MD5 hash of file (md5sum file from term)
:param file_size: size in bytes of the file to download
:param blocking: use blocking or non-blocking sockets
:return:
"""
url = urlparse(url)
location = url.netloc.split(":")
if len(location) == 1:
port = 80
host, = location
else:
host, port = location
con = Sock(host, port, blocking=blocking, debug=1)
req = self.build_request(host, url.path)
con.send(req, send_all=1)
buf = u""
eof = u"\r\n\r\n"
while buf != eof and con.connected:
ch = con.recv(1)
if len(ch):
buf += ch
eq = 0
for i in range(0, len(buf)):
if buf[i] != eof[eq]:
eq = 0
else:
eq += 1
# Reset buf.
if eq == len(eof):
break
fp, path = tempfile.mkstemp()
os.close(fp)
remaining = file_size
with open(path, "ab") as fp:
future = time.time() + 30 # Slow connections are slow.
while con.connected and remaining:
data = con.recv(remaining, encoding=encoding)
print(type(data))
if len(data):
remaining -= len(data)
fp.write(data)
time.sleep(0.0002)
# Fail safe:
if time.time() >= future:
break
found_hash = md5sum(path)
os.remove(path)
if expected_hash is not None:
assert(found_hash == expected_hash)
def build_request(self, host, resource):
req = "GET %s HTTP/1.1\r\n" % resource
req += "Host: %s\r\n\r\n" % host
return req
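

# Usage sketch for the helper above (it mirrors test_http_download further
# down): SockDownload performs the whole fetch-and-verify in its constructor,
# so "using" it is just instantiating it. The URL, MD5 hash and size are the
# public Internode test file already referenced in this module; treat them as
# placeholders if that mirror ever changes.
def _sock_download_sketch(blocking=0):
    SockDownload(
        "http://mirror.internode.on.net/pub/test/1meg.test",
        expected_hash="e6527b4d5db05226f40f9f2e7750abfb",
        file_size=1000000,
        blocking=blocking,
    )
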
class SockUpload:
def __init__(self, upload_size, blocking=0):
host = u"185.86.149.128"
port = 80
resource = u"/upload_test.php"
content = self.build_content(upload_size)
con = Sock(host, port, blocking=blocking, debug=1)
req = self.build_request(host, resource, content)
con.send(req, send_all=1, timeout=6)
# Now do the actual upload.
remaining = upload_size
chunk_size = 4096
while con.connected and remaining:
sent = upload_size - remaining
msg = content[sent:sent + chunk_size]
sent = con.send(msg)
if sent:
remaining -= sent
# Get response.
con.set_blocking(1)
ret = con.recv(1024)
# Check response.
expected_hash = hashlib.sha256(content).hexdigest()
assert(expected_hash in ret)
def build_request(self, host, resource, content):
req = "POST %s HTTP/1.1\r\n" % resource
req += "Host: %s\r\n" % host
req += "User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) "
req += "Gecko/20100101 Firefox/42.0\r\n"
req += "Accept: text/html,"
req += "application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
req += "Accept-Language: en-US,en;q=0.5\r\n"
req += "Accept-Encoding: gzip, deflate\r\n"
req += "Connection: keep-alive\r\n"
req += "Content-Type: application/x-www-form-urlencoded\r\n"
req += "Content-Length: %d\r\n\r\n" % (len(content) + 5)
req += "test=" # Hence the extra + 5.
return req
def build_content(self, upload_size):
content = b"8" * upload_size
return content
def simple_server():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 9000))
s.listen(0)
(clientsocket, address) = s.accept()
time.sleep(2)
s.close()
class TestSock(TestCase):
def test_http_upload_post(self):
SockUpload(1000 * 100)
def test_http_download(self):
SockDownload(
"http://mirror.internode.on.net/pub/test/1meg.test",
"e6527b4d5db05226f40f9f2e7750abfb",
1000000
)
def test_blocking_mode(self):
x = Sock()
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock(blocking=1)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock("www.example.com", 80, timeout=10)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock("www.example.com", 80, blocking=1, timeout=10)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
def test_blocking_timeout(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
t = time.time()
s.recv_line(timeout=1)
if time.time() - t >= 4:
print("Manual timeout failed.")
assert 0
s.close()
def test_non_blocking_timeout(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
assert(s.recv_line() == u"")
assert(s.recv(1) == u"")
s.close()
def test_encoding(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
s.send_line("SOURCE TCP 50")
ret = s.recv(1, encoding="ascii")
if sys.version_info >= (3, 0, 0):
assert(type(ret) == bytes)
else:
assert(type(ret) == str)
assert(ret == b"R")
ret = s.recv_line()
assert(u"EMOTE" in ret)
s.send_line("SOURCE TCP 50")
ret = s.recv(1, encoding="unicode")
if sys.version_info >= (3, 0, 0):
assert(type(ret) == str)
else:
assert(type(ret) == unicode)
s.close()
def test_0000001_sock(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
assert s.connected
s.send_line("SOURCE TCP 323")
assert s.connected
line = s.recv_line()
assert ("REMOTE" in line)
s = Sock("www.example.com", 80, blocking=0, timeout=10)
data = "GET / HTTP/1.1\r\n"
data += "Connection: close\r\n"
data += "Host: www.example.com\r\n\r\n"
s.send(data, send_all=1)
replies = ""
while s.connected:
for reply in s:
# Output should be unicode.
if sys.version_info >= (3, 0, 0):
assert (type(reply) == str)
else:
assert (type(reply) == unicode)
replies += reply
print(reply)
assert (s.connected != 1)
assert (replies != "")
s.close()
s.reconnect()
s.close()
s = Sock("www.example.com", 80, blocking=1, timeout=10)
s.send_line("GET / HTTP/1.1")
s.send_line("Host: www.example.com\r\n")
line = s.recv_line()
print(line)
print(type(line))
print(s.buf)
print(type(s.buf))
assert (line, "HTTP/1.1 200 OK")
if sys.version_info >= (3, 0, 0):
assert (type(line) == str)
else:
assert (type(line) == unicode)
s.close()
s = Sock()
s.buf = b"\r\nx\r\n"
x = s.parse_buf()
assert (x[0] == "x")
s.buf = b"\r\n"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\n\r\n"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\r\n\r\n"
x = s.parse_buf()
assert (x[0] == "\r")
s.buf = b"\r\n\r\n\r\nx"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\n\r\nx\r\nsdfsdfsdf\r\n"
x = s.parse_buf()
assert (x[0] == "x" and x[1] == "sdfsdfsdf")
s.buf = b"sdfsdfsdf\r\n"
s.parse_buf()
s.buf += b"abc\r\n"
x = s.parse_buf()
assert (x[0] == "abc")
s.buf += b"\r\ns\r\n"
x = s.parse_buf()
assert (x[0] == "s")
s.buf = b"reply 1\r\nreply 2\r\n"
s.replies = []
s.update()
        assert (s.pop_reply() == "reply 1")
        assert (s.replies[0] == "reply 2")
def test_keep_alive(self):
old_system = platform.system
for os in ["Darwin", "Windows", "Linux"]:
def system_wrapper():
return os
platform.system = system_wrapper
sock = Sock()
# Sock option error - not supported on this OS.
try:
sock.set_keep_alive(sock.s)
except socket.error as e:
valid_errors = (10042, 22)
if e.errno not in valid_errors:
raise e
except AttributeError:
pass
sock.close()
platform.system = old_system
assert 1
def test_non_default_iface(self):
sock = Sock(interface="eth12")
try:
sock.connect("www.example.com", 80, timeout=10)
except (TypeError, socket.error) as e:
pass
sock.close()
assert 1
def test_ssl(self):
s = Sock(
"www.example.com",
443,
blocking=0,
timeout=10,
use_ssl=1
)
data = "GET / HTTP/1.1\r\n"
data += "Connection: close\r\n"
data += "Host: www.example.com\r\n\r\n"
s.send(data, send_all=1)
replies = ""
while s.connected:
for reply in s:
# Output should be unicode.
if sys.version_info >= (3, 0, 0):
assert (type(reply) == str)
else:
assert (type(reply) == unicode)
replies += reply
print(reply)
assert (s.connected != 1)
assert (replies != "")
def test_ssl_blocking_error(self):
# Blocking.
s = Sock(
"www.example.com",
443,
blocking=1,
timeout=2,
use_ssl=1,
debug=1
)
s.get_chunks()
s.close()
# Non-blocking.
s = Sock(
"www.example.com",
443,
blocking=0,
timeout=2,
use_ssl=1,
debug=1
)
s.get_chunks()
s.close()
def test_decoding_error(self):
SockDownload(
"http://mirror.internode.on.net/pub/test/1meg.test",
expected_hash=None,
file_size=1000,
blocking=0,
encoding="unicode"
)
def test_broken_send_con(self):
# Can't monkey patch socket on Linux.
if platform.system != "Windows":
return
port = 10121
server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
sock = Sock("127.0.0.1", port, debug=1, timeout=6)
server.server_close()
print(sock.send(b"test"))
sock.close()
server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
def close_server():
time.sleep(1)
server.server_close()
sock = Sock("127.0.0.1", port, debug=1, timeout=6)
Thread(target=close_server).start()
for i in range(0, 5):
print(sock.send(b"test"))
time.sleep(0.5)
        sock.close()
# Simulate send timeout!
sock = Sock(debug=1, blocking=1)
def raise_timeout():
time.sleep(1)
original_send = sock.s.send
def fake_send(data):
raise socket.timeout("timed out")
sock.s.send = fake_send
time.sleep(1)
sock.s.send = original_send
Thread(target=raise_timeout).start()
sock.connect("www.example.com", 80)
# You want to fill up the entire networking buffer
# so that it times out without the needed recv.
buf_size = sock.s.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) + 1
buf_size *= 2
sock.chunk_size = buf_size
total = 0
for i in range(0, 4):
x = sock.send(b"x" * buf_size)
total += x
if x < buf_size:
break
time.sleep(2.2)
sock.close()
# Test broken connection.
sock = Sock(debug=1, blocking=1)
def raise_timeout():
time.sleep(1)
original_send = sock.s.send
def fake_send(data):
return 0
sock.s.send = fake_send
time.sleep(1)
Thread(target=raise_timeout).start()
sock.connect("www.example.com", 80)
# You want to fill up the entire networking buffer
# so that it times out without the needed recv.
x = 1
timeout = time.time() + 10
while x and time.time() < timeout:
x = sock.send(b"x")
time.sleep(2.2)
sock.close()
def test_magic(self):
sock = Sock()
sock.replies = ["a", "b", "c"]
assert(len(sock) == 3)
assert(sock[0] == "a")
del sock[0]
assert(sock[0] == "b")
sock[0] = "x"
assert(sock[0] == "x")
y = list(reversed(sock))
assert(y == ["x", "c"])
| mit | -2,747,348,296,401,501,000 | 27.403285 | 78 | 0.511083 | false |
julython/julython.org | july/people/migrations/0010_auto__add_userbadge.py | 1 | 9221 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserBadge'
db.create_table(u'people_userbadge', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['july.User'])),
('badges', self.gf('jsonfield.fields.JSONField')(null=True, blank=True)),
))
db.send_create_signal(u'people', ['UserBadge'])
def backwards(self, orm):
# Deleting model 'UserBadge'
db.delete_table(u'people_userbadge')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'july.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'location_members'", 'null': 'True', 'to': u"orm['people.Location']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'picture_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_members'", 'null': 'True', 'to': u"orm['people.Team']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'people.commit': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Commit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'files': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']", 'null': 'True', 'blank': 'True'})
},
u'people.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'people.location': {
'Meta': {'object_name': 'Location'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parent_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'repo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.team': {
'Meta': {'object_name': 'Team'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.userbadge': {
'Meta': {'object_name': 'UserBadge'},
'badges': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']"})
}
}
complete_apps = ['people'] | mit | 1,895,763,320,148,108,300 | 71.614173 | 195 | 0.544193 | false |
astromme/classify-handwritten-characters | utils/gnt.py | 1 | 1587 | #!/usr/bin/env python3
import os
import sys
import numpy as np
from .tagcode import tagcode_to_unicode
def samples_from_gnt(f):
header_size = 10
# read samples from f until no bytes remaining
while True:
header = np.fromfile(f, dtype='uint8', count=header_size)
if not header.size: break
sample_size = header[0] + (header[1]<<8) + (header[2]<<16) + (header[3]<<24)
tagcode = header[5] + (header[4]<<8)
width = header[6] + (header[7]<<8)
height = header[8] + (header[9]<<8)
assert header_size + width*height == sample_size
bitmap = np.fromfile(f, dtype='uint8', count=width*height).reshape((height, width))
yield bitmap, tagcode
def read_gnt_in_directory(gnt_dirpath):
for file_name in os.listdir(gnt_dirpath):
if file_name.endswith('.gnt'):
file_path = os.path.join(gnt_dirpath, file_name)
with open(file_path, 'rb') as f:
for bitmap, tagcode in samples_from_gnt(f):
yield bitmap, tagcode
def main():
import png
if len(sys.argv) != 3:
print("usage: {} gntfile outputdir".format(sys.argv[0]))
_, gntfile, outputdir = sys.argv
try:
os.makedirs(outputdir)
except FileExistsError:
pass
    with open(gntfile, 'rb') as f:
for i, (bitmap, tagcode) in enumerate(samples_from_gnt(f)):
character = tagcode_to_unicode(tagcode)
png.from_array(bitmap, 'L').save(os.path.join(outputdir, '{} {}.png'.format(character, i)))
if __name__ == "__main__":
main()
| mit | -8,479,756,461,216,267,000 | 28.388889 | 103 | 0.592313 | false |
ppolewicz/logfury | setup.py | 1 | 3132 | from codecs import open
import os.path
from setuptools import setup, find_packages
import sys
################################################################### yapf: disable
NAME = 'logfury'
VERSION = '0.1.2'
AUTHOR = 'Pawel Polewicz'
AUTHOR_EMAIL = '[email protected]'
DESCRIPTION = 'Toolkit for responsible, low-boilerplate logging of library method calls'
LICENSE = 'BSD'
KEYWORDS = ['logging', 'tracing']
URL = 'https://github.com/ppolewicz/logfury'
DOWNLOAD_URL_TEMPLATE = URL + '/tarball/%s'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Logging',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
]
################################################################### yapf: enable
if __name__ == '__main__':
here = os.path.abspath(os.path.dirname(__file__))
def read_file_contents(filename):
with open(os.path.join(here, filename), 'rb', encoding='utf-8') as f:
return f.read()
long_description = read_file_contents('README.rst')
requirements_install = read_file_contents('requirements.txt').splitlines()
    if sys.version_info[:2] == (2, 6):
requirements_install.append('ordereddict')
requirements_test = read_file_contents('requirements-test.txt').splitlines()
setup(
name = NAME,
version = VERSION,
url = URL,
download_url = DOWNLOAD_URL_TEMPLATE % (VERSION,),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = AUTHOR,
maintainer_email = AUTHOR_EMAIL,
packages = find_packages(where='src'),
license = LICENSE,
description = DESCRIPTION,
long_description = long_description,
keywords = KEYWORDS,
package_dir = {'': 'src'},
zip_safe = False,
classifiers = CLASSIFIERS,
install_requires = requirements_install,
tests_require = requirements_test,
package_data = {
NAME: [
'requirements.txt',
'requirements-test.txt',
]
},
# to install: pip install -e .[dev,test]
extras_require = {
'test': requirements_test,
},
) # yapf: disable
| bsd-3-clause | -5,933,197,304,800,659,000 | 31.625 | 90 | 0.554917 | false |
crawfordsm/pyspectrograph | PySpectrograph/WavelengthSolution/WavelengthSolution.py | 1 | 3169 | """Wavelength Solution is a task describing the functional form for transforming
pixel position to wavelength. The inputs for this task are the given pixel position
and the corresponding wavelength. The user selects an input functional form and
order for that form. The task then calculates the coefficients for that form.
Possible options for the wavelength solution include polynomial, legendre, spline.
HISTORY
20090915 SMC Initially Written by SM Crawford
LIMITATIONS
20090915 SMC Need to add legendre, spline functions
"""
import numpy as np
from .LineSolution import LineSolution
from .ModelSolution import ModelSolution
class WavelengthSolution:
"""Wavelength Solution is a task describing the functional form for transforming
pixel position to wavelength.
"""
func_options = ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev', 'model']
def __init__(self, x, w, function='poly', order=3, niter=5, thresh=3,
sgraph=None, cfit='both', xlen=3162, yval=0):
self.sgraph = sgraph
self.function = function
self.order = order
self.niter = niter
self.thresh = thresh
self.cfit = cfit
self.xlen = xlen
self.yval = yval
self.set_array(x, w)
self.set_func()
def set_array(self, x, w):
self.x_arr = x
self.w_arr = w
def set_thresh(self, thresh):
self.thresh = thresh
def set_niter(self, niter):
self.niter = niter
def set_func(self):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func = LineSolution(self.x_arr, self.w_arr, function=self.function,
order=self.order, niter=self.niter, thresh=self.thresh)
if self.function == 'model':
self.func = ModelSolution(self.x_arr, self.w_arr, sgraph=self.sgraph,
xlen=self.xlen, yval=self.yval, order=self.order)
def fit(self):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func.interfit()
self.coef = self.func.coef
if self.function in ['model']:
self.func.fit(cfit=self.cfit)
self.coef = np.array([c() for c in self.func.coef])
# self.set_coef(coef)
def set_coef(self, coef):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func.coef = coef
self.coef = self.func.coef
if self.function in ['model']:
for i in range(len(self.func.coef)):
self.func.coef[i].set(coef[i])
self.coef = np.array([c() for c in self.func.coef])
def value(self, x):
return self.func.value(x)
def invvalue(self, w):
"""Given a wavelength, return the pixel position
"""
        # NOTE: inversion is not implemented yet; the input is returned unchanged.
        return w
def sigma(self, x, y):
"""Return the RMS of the fit """
return (((y - self.value(x)) ** 2).mean()) ** 0.5
def chisq(self, x, y, err):
"""Return the chi^2 of the fit"""
return (((y - self.value(x)) / err) ** 2).sum()
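if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the module): fit a
    # cubic polynomial dispersion solution to made-up pixel/wavelength pairs
    # and evaluate it. Real data would come from identified arc lines.
    pix = np.array([10.0, 120.0, 260.0, 390.0, 510.0, 640.0, 770.0, 900.0])
    wave = 4000.0 + 1.5 * pix + 1.0e-4 * pix ** 2
    ws = WavelengthSolution(pix, wave, function='poly', order=3)
    ws.fit()
    print('wavelength at pixel 300:', ws.value(300.0))
    print('RMS of fit:', ws.sigma(pix, wave))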
| bsd-3-clause | -8,209,459,238,024,676,000 | 32.712766 | 92 | 0.60082 | false |
Guokr1991/ProstateSensitivityAnalysis | convert_histology_txt_json.py | 1 | 2295 | def main():
hist_txt_to_json()
def hist_txt_to_json():
j = open('HistologyLesions.json', 'w')
j.write('{\n')
index = True
benign = False
with open('HistologyLesions.txt', 'r') as t:
tfile = t.readlines()
num_lesions = len(tfile)
global td # define globally for ece_extent_writer method
for nl, td in enumerate(tfile):
td = td[:-1]
if 'pca' in td and index:
j.write('\t"pca": [\n')
j.write('\t\t{\n\t\t\t"region": "%s",\n' % td.split(',')[1][1:])
j.write('\t\t\t"volume_cc": %.1f,\n' % float(td.split(',')[2]))
j.write('\t\t\t"Gleason": %i,\n' % float(td.split(',')[3]))
j.write('\t\t\t"Staging": "%s",\n' % td.split(',')[4][1:4])
j.write('\t\t\t"ECE_extent": "%s",\n' % ece_extent_writer())
j.write('\t\t\t"index": true\n\t\t}')
index = False
if (nl+1) == num_lesions:
j.write(']\n')
elif 'pca' in td and not index:
j.write(',\n')
j.write('\t\t{\n\t\t\t"region": "%s",\n' % td.split(',')[1][1:])
j.write('\t\t\t"volume_cc": %.1f,\n' % float(td.split(',')[2]))
j.write('\t\t\t"Gleason": %i,\n' % float(td.split(',')[3]))
j.write('\t\t\t"Staging": "%s",\n' % td.split(',')[4][1:4])
j.write('\t\t\t"ECE_extent": "%s",\n' % ece_extent_writer())
j.write('\t\t\t"index": false\n\t\t}')
if (nl+1) == num_lesions:
j.write(']\n')
elif ('atrophy' in td) or ('bph' in td):
if not benign:
j.write('],\n')
else:
j.write(',\n')
num_regions = len(td.split(',')[1:])
j.write('\t"%s": {\n\t\t"regions": [' % td.split(',')[0])
for n, r in enumerate(td.split(',')[1:]):
if n < (num_regions-1):
j.write('"%s", ' % r[1:])
else:
j.write('"%s"]\n\t\t}' % r[1:])
benign = True
j.write('\n}')
def ece_extent_writer():
if td.split(',')[4][-1] == 'E':
return "Established"
elif td.split(',')[4][-1] == 'F':
return "Focal"
else:
return "None"
if __name__ == '__main__':
main()
| apache-2.0 | -3,372,639,854,298,931,000 | 33.253731 | 76 | 0.423965 | false |
harlowja/speedlimit | speedlimit/__init__.py | 1 | 3653 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import time
from monotonic import monotonic as _now
class SpeedLimit(object):
"""Speed/limiting iterator wrapper object.
A wrapper object that uses the `token bucket`_ algorithm to limit the
rate at which values comes out of an iterable. This can be used to limit
the consumption speed of iteration of some other iterator (or iterable).
.. _token bucket: http://en.wikipedia.org/wiki/Token_bucket
"""
def __init__(self,
# How many items to yield from the provided
# wrapped iterator (per second).
items_per_second,
# Used to simulate a thread with its own 'tic rate'. Making
# this smaller affects the accuracy of the 'tic' calculation,
# which affects the accuracy of consumption (and delays).
refresh_rate_seconds=0.01,
# How *full* the initial bucket is.
initial_bucket_size=1,
# Made a keyword argument, so one could replace this
# with a eventlet.sleep or other idling function...
sleep_func=time.sleep):
self._refresh_rate_seconds = refresh_rate_seconds
self._bucket = (items_per_second *
refresh_rate_seconds * initial_bucket_size)
self._items_per_tic = items_per_second * refresh_rate_seconds
self._next_fill = _now() + refresh_rate_seconds
self._sleep = sleep_func
def _check_fill(self):
# Fill the bucket based on elapsed time.
#
# This simulates a background thread...
now = _now()
if now > self._next_fill:
d = now - self._next_fill
tics = int(math.ceil(d / self._refresh_rate_seconds))
self._bucket += tics * self._items_per_tic
self._next_fill += tics * self._refresh_rate_seconds
def speed_limit_iter(self, itr, chunk_size_cb=None):
"""Return an iterator/generator which limits after each iteration.
:param itr: an iterator to wrap
:param chunk_size_cb: a function that can calculate the
size of each chunk (if none provided this
defaults to 1)
"""
for chunk in itr:
if chunk_size_cb is None:
sz = 1
else:
sz = chunk_size_cb(chunk)
self._check_fill()
if sz > self._bucket:
now = _now()
tics = int((sz - self._bucket) / self._items_per_tic)
tm_diff = self._next_fill - now
secs = tics * self._refresh_rate_seconds
if tm_diff > 0:
secs += tm_diff
self._sleep(secs)
self._check_fill()
self._bucket -= sz
yield chunk
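if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the public API):
    # cap consumption of a dummy chunk generator at roughly two items/second.
    def _produce_chunks(count=6):
        for _ in range(count):
            yield b'x' * 1024
    limiter = SpeedLimit(items_per_second=2)
    start = _now()
    consumed = sum(1 for _ in limiter.speed_limit_iter(_produce_chunks()))
    print('consumed %d chunks in %.1fs' % (consumed, _now() - start))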
| apache-2.0 | 4,321,641,280,246,281,700 | 40.044944 | 78 | 0.588284 | false |
stopthatcow/zazu | zazu/scm_host.py | 1 | 1275 | # -*- coding: utf-8 -*-
"""Source code management (SCM) host related classes."""
__author__ = 'Nicholas Wiles'
__copyright__ = 'Copyright 2018'
class ScmHost(object):
"""Parent of all ScmHost objects."""
class ScmHostError(Exception):
"""Parent of all ScmHost errors."""
class ScmHostRepo(object):
"""Parent of all SCM repos."""
@property
def name(self):
"""Get the name of the repo."""
raise NotImplementedError('Must implement name')
@property
def id(self):
"""Get the id of the repo."""
raise NotImplementedError('Must implement id')
@property
def description(self):
"""Get the description of the repo."""
raise NotImplementedError('Must implement description')
@property
def browse_url(self):
"""Get the url to open to display the repo."""
raise NotImplementedError('Must implement browse_url')
@property
def ssh_url(self):
"""Get the ssh url to clone the repo."""
raise NotImplementedError('Must implement ssh_url')
def __str__(self):
"""Return the id as the string representation."""
return self.id
def __repr__(self):
"""Return the id as the string representation."""
return self.id
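# Illustrative sketch (not part of zazu): a minimal concrete ScmHostRepo
# showing which properties an SCM host adapter must supply. The owner, name
# and URLs below are made up.
class ExampleScmHostRepo(ScmHostRepo):
    """In-memory ScmHostRepo used purely as an interface example."""
    def __init__(self, owner, name):
        """Store the made-up owner and repo name."""
        self._owner = owner
        self._name = name
    @property
    def name(self):
        """Get the name of the repo."""
        return self._name
    @property
    def id(self):
        """Get the id of the repo."""
        return '{}/{}'.format(self._owner, self._name)
    @property
    def description(self):
        """Get the description of the repo."""
        return 'Example repository used for interface documentation.'
    @property
    def browse_url(self):
        """Get the url to open to display the repo."""
        return 'https://example.com/{}'.format(self.id)
    @property
    def ssh_url(self):
        """Get the ssh url to clone the repo."""
        return 'git@example.com:{}.git'.format(self.id)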
| mit | -3,430,967,397,524,650,000 | 25.020408 | 63 | 0.616471 | false |
randy3k/AutoWrap | tests/test_longline.py | 1 | 2204 | import sublime
from unittesting import DeferrableTestCase
Lorem = ("""Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod"""
"""tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,"""
"""quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo"""
"""consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse"""
"""cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non"""
"""proident, sunt in culpa qui officia deserunt mollit anim id est \nlaborum.""")
class TestLongline(DeferrableTestCase):
def setUp(self):
# make sure we have a window to work with
s = sublime.load_settings("Preferences.sublime-settings")
s.set("close_windows_when_empty", False)
self.view = sublime.active_window().new_file()
self.view.settings().set("auto_wrap", True)
self.view.settings().set("auto_wrap_width", 80)
def tearDown(self):
if self.view:
self.view.set_scratch(True)
self.view.window().focus_view(self.view)
self.view.window().run_command("close_file")
def setText(self, string):
self.view.run_command("insert", {"characters": string})
def getRow(self, row):
return self.view.substr(self.view.line(self.view.text_point(row, 0)))
def test_long_line_end(self):
self.setText(Lorem)
self.view.sel().clear()
self.view.sel().add(sublime.Region(433, 433))
for c in "apple is orange":
self.setText(c)
yield 10
self.assertEqual(self.getRow(5),
"culpa qui officia deserunt mollit anim id est apple is orange")
self.assertEqual(self.getRow(6), "laborum.")
def test_long_line_middle(self):
self.setText(Lorem)
self.view.sel().clear()
self.view.sel().add(sublime.Region(200, 200))
for c in "apple is orange":
self.setText(c)
yield 10
self.assertEqual(
self.getRow(1),
"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,quis"
)
| mit | -5,633,155,568,879,142,000 | 34.548387 | 92 | 0.621597 | false |
dubourg/openturns | python/test/t_FisherSnedecor_std.py | 1 | 4992 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
from cmath import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# Instanciate one distribution object
distribution = FisherSnedecor(5.5, 10.5)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(
oneSample[0]), " last=", repr(oneSample[size - 1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))
size = 100
for i in range(2):
msg = ''
if FittingTest.Kolmogorov(distribution.getSample(size), distribution).getBinaryQualityMeasure():
msg = "accepted"
else:
msg = "rejected"
print(
"Kolmogorov test for the generator, sample size=", size, " is", msg)
size *= 10
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", repr(point))
# Show PDF and CDF of point
eps = 1e-5
# derivative of PDF with regards its arguments
DDF = distribution.computeDDF(point)
# print "ddf =" , repr(DDF)
# by the finite difference technique
# print "ddf (FD)=" ,repr(NumericalPoint(1, (distribution.computePDF(
# point + NumericalPoint(1, eps) ) - distribution.computePDF( point +
# NumericalPoint(1, -eps) )) / (2.0 * eps)))
# PDF value
LPDF = distribution.computeLogPDF(point)
print("log pdf=%.6f" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6f" % PDF)
# by the finite difference technique from CDF
print("pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
# derivative of the PDF with regards the parameters of the distribution
CDF = distribution.computeCDF(point)
print("cdf=%.6f" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf=%.6f" % CCDF)
CF = distribution.computeCharacteristicFunction(point[0])
print("characteristic function=(%.6f+%.6fj)" % (CF.real, CF.imag))
## PDFgr = distribution.computePDFGradient( point )
# print "pdf gradient =" , repr(PDFgr)
# by the finite difference technique
## PDFgrFD = NumericalPoint(2)
## PDFgrFD[0] = (FisherSnedecor(distribution.getLambda() + eps, distribution.getGamma()).computePDF(point) - FisherSnedecor(distribution.getLambda() - eps, distribution.getGamma()).computePDF(point)) / (2.0 * eps)
## PDFgrFD[1] = (FisherSnedecor(distribution.getLambda(), distribution.getGamma() + eps).computePDF(point) - FisherSnedecor(distribution.getLambda(), distribution.getGamma() - eps).computePDF(point)) / (2.0 * eps)
# print "pdf gradient (FD)=" , repr(PDFgrFD)
# derivative of the PDF with regards the parameters of the distribution
## CDFgr = distribution.computeCDFGradient( point )
# print "cdf gradient =" , repr(CDFgr)
## CDFgrFD = NumericalPoint(2)
## CDFgrFD[0] = (FisherSnedecor(distribution.getLambda() + eps, distribution.getGamma()).computeCDF(point) - FisherSnedecor(distribution.getLambda() - eps, distribution.getGamma()).computeCDF(point)) / (2.0 * eps)
## CDFgrFD[1] = (FisherSnedecor(distribution.getLambda(), distribution.getGamma() + eps).computeCDF(point) - FisherSnedecor(distribution.getLambda(), distribution.getGamma() - eps).computeCDF(point)) / (2.0 * eps)
# print "cdf gradient (FD)=", repr(CDFgrFD)
# quantile
quantile = distribution.computeQuantile(0.95)
print("quantile=", repr(quantile))
print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile))
mean = distribution.getMean()
print("mean=", repr(mean))
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", repr(standardDeviation))
skewness = distribution.getSkewness()
print("skewness=", repr(skewness))
kurtosis = distribution.getKurtosis()
print("kurtosis=", repr(kurtosis))
covariance = distribution.getCovariance()
print("covariance=", repr(covariance))
parameters = distribution.getParametersCollection()
print("parameters=", repr(parameters))
for i in range(6):
print("standard moment n=", i, " value=",
distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
except:
import sys
print("t_FisherSnedecor_std.py", sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 | -8,776,602,948,175,847,000 | 42.408696 | 217 | 0.672276 | false |
Fuchai/Philosophy-Machine | knowledgeframework/playground2.py | 1 | 3640 | from knowledgeframework.kf2 import *
kf=KF()
pred1 = Predicate(id="male")
pred2 = Predicate(id="single")
pred4 = Predicate(id="bachelor")
pred5 = Predicate(id="thief")
pred6 = Predicate(id="steals")
pred7 = Predicate(id="female")
pred8 = Predicate(id="human")
kf.add_node(pred1, name="male")
kf.add_node(pred2, name="man")
kf.add_node(pred4, name="bachelor")
pred3 = Analytical(kf, lambda x, y: x and y, (pred1, pred2), id="AND")
kf.add_node(pred3)
kf.add_node(pred6, name="thief")
kf.add_node(pred5, name="steals")
kf.add_edge(pred5, pred6)
kf.add_node(pred7)
kf.add_node(pred8)
kf.make_dimension(pred8) # pred8 human is a dimension
kf.add_edge_to_instance(pred1) # known man
kf.add_edge_to_instance(pred2) # known male
print(kf.consistency_search_through_dimensions(kf.get_instance()))
pred9=Predicate(id="dummy")
kf.add_node(pred9)
kf.add_edge_to_instance(pred7)
print(kf.consistency_search_through_dimensions(kf.get_instance()))
print(kf.static_proof("bachelor"))
print(kf.dynamic_eval("bachelor"))
# pred1=Predicate(id="1")
# pred2=Predicate(id="2")
# pred4=Predicate(id="4")
# pred5=Predicate(id="5")
# pred6=Predicate(id="6")
# pred7=Predicate(id="7")
#
#
#
# knowledgeframework.add_node(pred1,name="male")
# knowledgeframework.add_node(pred2,name="man")
# pred3=Analytical(knowledgeframework,lambda x,y: x and y,(pred1,pred2),id="3")
#
# knowledgeframework.add_node(pred3) # bachelor analytical
# knowledgeframework.add_node(pred4,name="bachelor")
# knowledgeframework.add_edge(pred3,pred4)
#
# knowledgeframework.add_node(pred6,name="thief")
# knowledgeframework.add_node(pred5,name="steals")
# knowledgeframework.add_edge(pred5,pred6)
# knowledgeframework.add_node(pred7)
#
# knowledgeframework.add_edge_to_instance(pred1) # known man
# knowledgeframework.add_edge_to_instance(pred2) # known male
#
# # if pred3.eval()==True:
# # knowledgeframework.add_edge_to_instance(pred3)
#
# print(knowledgeframework.static_proof(pred4))
# print(knowledgeframework.hybrid_eval_I(pred4))
# True!
# knowledgeframework=KF()
# pred1=Predicate()
# pred2=Predicate()
#
# knowledgeframework.add_node(pred1,name="red")
# knowledgeframework.add_node(pred2,name="apple")
# knowledgeframework.add_edge(pred2,pred1)
#
# print(1,knowledgeframework.nodes())
# print(knowledgeframework.edges())
# print(2,knowledgeframework.find_name("apple"))
# print(3,knowledgeframework.find_name("red"))
# print(4,knowledgeframework.predecessors_set(knowledgeframework.find_name("apple")))
# #print(knowledgeframework.predecessors(knowledgeframework[knowledgeframework.find_name("apple")[0]]))
#
#
# # Noteworthy:"apple"'s successor is the "name" labels, and the predecessors return all the names. This is an interesting
# # operation, since it basically returns everything that has some directly overlapped properties.
# print(knowledgeframework.predecessors_set(knowledgeframework.successors_set(["apple"])))
#
# dummy1=knowledgeframework.nodes()[3]
# dummy2=knowledgeframework.nodes()[4]
#
# # Example of a proof that an apple is red
# print(5,knowledgeframework.get_node_from_hash("instance") in knowledgeframework.nodes())
# knowledgeframework.add_edge(knowledgeframework.get_node_from_hash("instance"),knowledgeframework.find_name("apple")[0])
# print(networkx.shortest_path(knowledgeframework,knowledgeframework.get_instance(),knowledgeframework.find_name("red")[0]))
#
# # Fast query
# knowledgeframework.static_proof(knowledgeframework.find_name_unqiue("red"))
#
#
# def hello(*args):
# for i in args:
# print(i)
#
# print (type(args))
#
#
# def ya(i, *args):
# print("starting")
# hello(*args)
#
#
#
# ya(2, 3, 4, 5) | apache-2.0 | 8,127,122,852,842,790,000 | 29.341667 | 124 | 0.736538 | false |
tensorflow/privacy | tensorflow_privacy/privacy/estimators/multi_label_head.py | 1 | 6457 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiclass head for Estimator that allow integration with TF Privacy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras.utils import losses_utils # pylint: disable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.head import base_head
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
class DPMultiLabelHead(tf.estimator.MultiLabelHead):
"""Creates a TF Privacy-enabled version of MultiLabelHead."""
def __init__(self,
n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
classes_for_class_based_metrics=None,
name=None):
if loss_reduction == tf.keras.losses.Reduction.NONE:
loss_reduction = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
super(DPMultiLabelHead, self).__init__(
n_classes=n_classes,
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=loss_fn,
classes_for_class_based_metrics=classes_for_class_based_metrics,
name=name)
def loss(self,
labels,
logits,
features=None,
mode=None,
regularization_losses=None):
"""Returns regularized training loss. See `base_head.Head` for details."""
del mode # Unused for this head.
with tf.compat.v1.name_scope(
'losses', values=(logits, labels, regularization_losses, features)):
logits = base_head.check_logits_final_dim(logits, self.logits_dimension)
labels = self._processed_labels(logits, labels)
unweighted_loss, weights = self._unweighted_loss_and_weights(
logits, labels, features)
vector_training_loss = losses_utils.compute_weighted_loss(
unweighted_loss,
sample_weight=weights,
reduction=tf.keras.losses.Reduction.NONE)
regularization_loss = tf.math.add_n(
regularization_losses) if regularization_losses is not None else None
vector_regularized_training_loss = (
tf.add(vector_training_loss, regularization_loss)
if regularization_loss is not None else vector_training_loss)
return vector_regularized_training_loss
def _create_tpu_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
trainable_variables=None,
train_op_fn=None,
update_ops=None,
regularization_losses=None):
"""See superclass for description."""
with tf.compat.v1.name_scope(self._name, 'head'):
# Predict.
pred_keys = prediction_keys.PredictionKeys
predictions = self.predictions(logits)
if mode == ModeKeys.PREDICT:
probabilities = predictions[pred_keys.PROBABILITIES]
classifier_output = base_head.classification_output(
scores=probabilities,
n_classes=self._n_classes,
label_vocabulary=self._label_vocabulary)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
base_head.DEFAULT_SERVING_KEY:
classifier_output,
base_head.CLASSIFY_SERVING_KEY:
classifier_output,
base_head.PREDICT_SERVING_KEY:
export_output.PredictOutput(predictions)
})
regularized_training_loss = self.loss(
logits=logits,
labels=labels,
features=features,
mode=mode,
regularization_losses=regularization_losses)
scalar_loss = tf.reduce_mean(regularized_training_loss)
# Eval.
if mode == ModeKeys.EVAL:
eval_metrics = self.metrics(regularization_losses=regularization_losses)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.EVAL,
predictions=predictions,
loss=scalar_loss,
eval_metrics=base_head.create_eval_metrics_tuple(
self.update_metrics, {
'eval_metrics': eval_metrics,
'features': features,
'logits': logits,
'labels': labels,
'regularization_losses': regularization_losses
}))
# Train.
train_op = base_head.create_estimator_spec_train_op(
head_name=self._name,
optimizer=optimizer,
train_op_fn=train_op_fn,
update_ops=update_ops,
trainable_variables=trainable_variables,
regularized_training_loss=regularized_training_loss,
loss_reduction=self._loss_reduction)
# Create summary.
base_head.create_estimator_spec_summary(
regularized_training_loss=scalar_loss,
regularization_losses=regularization_losses,
summary_key_fn=self._summary_key)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.TRAIN,
predictions=predictions,
loss=scalar_loss,
train_op=train_op)
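if __name__ == '__main__':
  # Minimal usage sketch (illustrative only, not part of the library): the DP
  # head is constructed like the stock MultiLabelHead; pairing it with a DP
  # optimizer inside an estimator model_fn is assumed and not shown here.
  head = DPMultiLabelHead(n_classes=5)
  print('logits dimension:', head.logits_dimension)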
| apache-2.0 | 1,296,019,722,160,518,000 | 41.480263 | 100 | 0.628775 | false |
DentonJC/virtual_screening | moloi/descriptors/morgan_descriptor.py | 1 | 1427 | #!/usr/bin/env python
"""
https://github.com/kudkudak
"""
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
def smiles_to_morgan(smi, hashed=True, radius=2, n_bits=300):
mol = Chem.MolFromSmiles(smi)
if hashed:
try:
vect = AllChem.GetHashedMorganFingerprint(mol=mol,
radius=radius,
nBits=n_bits)
vect = vect.GetNonzeroElements()
vect_keys = list(vect.keys())
vect_values = list(vect.values())
# Not sure how to transform it better
vect_dense = np.zeros(shape=(n_bits,))
vect_dense[vect_keys] = vect_values
return vect_dense
        except Exception:
            print("Failed computing morgan fingerprint for %s" % smi)
return np.zeros(shape=(n_bits,))
else:
try:
mol = Chem.MolFromSmiles(smi)
vect = AllChem.GetMorganFingerprintAsBitVect(mol=mol,
radius=radius,
nBits=n_bits)
return np.array(vect)
        except Exception:
            print("Failed computing morgan fingerprint for %s" % smi)
return np.zeros(shape=(n_bits,))
if __name__ == "__main__":
features = smiles_to_morgan("CC")
print(features)
| gpl-3.0 | 3,558,702,890,475,788,000 | 31.431818 | 71 | 0.501752 | false |
maym2104/ift6266-h17-project | lib/updates.py | 1 | 9422 | """
Copyright (c) 2017 - Philip Paquette
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Modified from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/updates.py
# MIT License
import theano
import theano.tensor as T
from .utils import floatX
from .layers import l2norm
# ------------------------
# Regularization
# ------------------------
def clip_norm(grad, clip, norm):
if clip > 0:
grad = T.switch(T.ge(norm, clip), grad * clip / norm, grad)
return grad
def clip_norms(grads, clip):
norm = T.sqrt(sum([T.sum(grad ** 2) for grad in grads]))
return [clip_norm(grad, clip, norm) for grad in grads]
# Base regularizer
class Regularizer(object):
def __init__(self, l1=0., l2=0., maxnorm=0., l2norm=False, frobnorm=False):
self.__dict__.update(locals())
def max_norm(self, param, maxnorm):
if maxnorm > 0:
norms = T.sqrt(T.sum(T.sqr(param), axis=0))
desired = T.clip(norms, 0, maxnorm)
param = param * (desired / (1e-7 + norms))
return param
def l2_norm(self, param):
return param / l2norm(param, axis=0)
def frob_norm(self, param, nrows):
return (param / T.sqrt(T.sum(T.sqr(param)))) * T.sqrt(nrows)
def gradient_regularize(self, param, grad):
grad += param * self.l2
grad += T.sgn(param) * self.l1
return grad
def weight_regularize(self, param):
param = self.max_norm(param, self.maxnorm)
if self.l2norm:
param = self.l2_norm(param)
if self.frobnorm > 0:
param = self.frob_norm(param, self.frobnorm)
return param
# ------------------------
# Updates
# ------------------------
class Update(object):
def __init__(self, regularizer=Regularizer(), clipnorm=0.):
self.__dict__.update(locals())
def __call__(self, params, grads):
raise NotImplementedError
# Stochastic Gradient Descent
class SGD(Update):
def __init__(self, lr=0.01, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
updated_param = param - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# SGD with momentum
class Momentum(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updates.append((m, v))
updated_param = param + v
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# SGD with Nesterov Accelerated Gradient
class Nesterov(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updated_param = param + self.momentum * v - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((m, v))
updates.append((param, updated_param))
return updates
# RMS Prop
class RMSprop(Update):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
updated_param = param - self.lr * (grad / T.sqrt(acc_new + self.epsilon))
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# Adam
class Adam(Update):
def __init__(self, lr=0.001, b1=0.9, b2=0.999, e=1e-8, l=1 - 1e-8, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
t = theano.shared(floatX(1.))
b1_t = self.b1 * self.l ** (t - 1)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = theano.shared(param.get_value() * 0.)
m_t = b1_t * m + (1 - b1_t) * grad
v_t = self.b2 * v + (1 - self.b2) * grad ** 2
m_c = m_t / (1 - self.b1 ** t)
v_c = v_t / (1 - self.b2 ** t)
p_t = param - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
p_t = self.regularizer.weight_regularize(p_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((param, p_t))
updates.append((t, t + 1.))
return updates
# AdaGrad
class Adagrad(Update):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_t = acc + grad ** 2
updates.append((acc, acc_t))
p_t = param - (self.lr / T.sqrt(acc_t + self.epsilon)) * grad
p_t = self.regularizer.weight_regularize(p_t)
updates.append((param, p_t))
return updates
# AdeDelta
class Adadelta(Update):
def __init__(self, lr=0.5, rho=0.95, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_delta = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
update = grad * T.sqrt(acc_delta + self.epsilon) / T.sqrt(acc_new + self.epsilon)
updated_param = param - self.lr * update
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
acc_delta_new = self.rho * acc_delta + (1 - self.rho) * update ** 2
updates.append((acc_delta, acc_delta_new))
return updates
# No updates
class NoUpdate(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
for param in params:
updates.append((param, param))
return updates
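# Minimal usage sketch (illustrative only): build the update list for a tiny
# least-squares cost and compile a training step. `W`, `x` and `y` are made-up
# variables; any Update subclass above can replace Adam, and the Regularizer
# and clipnorm settings are arbitrary.
if __name__ == '__main__':
    import numpy as np
    W = theano.shared(floatX(np.zeros(3)), name='W')
    x = T.vector('x')
    y = T.scalar('y')
    cost = T.sqr(T.dot(x, W) - y)
    updater = Adam(lr=0.01, regularizer=Regularizer(l2=1e-4), clipnorm=10.0)
    train_step = theano.function([x, y], cost, updates=updater([W], cost))
    print('loss:', train_step(floatX(np.ones(3)), floatX(1.0)))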
| mit | 2,983,714,831,106,733,600 | 36.537849 | 93 | 0.585863 | false |
Stunkymonkey/passworts | calc.py | 1 | 2206 | #!/usr/bin/env python3
import os.path
from collections import defaultdict
import pickle
from optparse import OptionParser
import sys
n = 3
def analyse(counts, text, n):
"""analyse text with n chars markov state, update the counts"""
text = '^' * n + text + '$' * n
for i in range(len(text) - n):
st = i, text[i:i + n]
next = text[i + n]
counts[st][next] += 1
return counts
def compute_prob(counts):
"""compute ranges in [0 .. 1) of the given words"""
for c1 in counts:
total = float(sum(counts[c1][c2] for c2 in counts[c1]))
base = 0.0
for c2 in counts[c1]:
prob = counts[c1][c2] / total
base = base + prob
counts[c1][c2] = base
return counts
def text_import(dict_path, source):
"""reads a file to analyse"""
try:
with open(dict_path + source, "r", encoding="ISO-8859-1") as f:
text = set(f.read().split())
except FileNotFoundError as e:
raise SystemExit("Could not open text file: " + str(e))
return text
def dd():
return defaultdict(int)
def calculate(source):
print("reading...")
dict_path = os.path.join(os.path.abspath(".") + r"/dict/")
text = text_import(dict_path, source)
source = source.split(".")[0]
print("analysing text...")
counts = defaultdict(dd)
for word in text:
counts = analyse(counts, word, n)
print("calculating...")
counts = compute_prob(counts)
# print(type(counts))
# print(counts)
# save to file
print("write...")
with open((dict_path + source + '.pickle'), 'wb') as handle:
pickle.dump(counts, handle)
print("checking file...")
with open((dict_path + source + '.pickle'), 'rb') as handle:
written = pickle.load(handle)
if written == counts:
print("Calucation was sucessfull")
else:
print("Something went wrong")
sys.exit(1)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f", "--file", type="string", dest="filename",
help="Name of the input file")
(options, args) = parser.parse_args()
calculate(options.filename)
| mit | 6,256,238,498,084,905,000 | 23.511111 | 71 | 0.577063 | false |
wolfsonliu/crispr | pycas/script/pycasanalysis.py | 1 | 3253 | #! /bin/env python3
__all__ = ['pycaspattern']
# ------------------
# Libraries
# ------------------
import argparse
import os
import sys
sys.path.append('/gpfs/user/liuzh/Code/crispr')
import pandas as pd
from pycas.analysis import Screening
from pycas.utils.decorator import helpstring
from pycas.utils.decorator import AppendHelp
# ------------------
# Functions
# ------------------
def pycasanalysis(filename,
ctrl_label,
exp_label,
method,
hasbarcode,
out_dir):
file_type = 'csv'
if filename.split('.')[-1] == 'txt':
sep = '\t'
file_type = 'txt'
elif filename.split('.')[-1] == 'csv':
sep = ','
file_type = 'csv'
else:
raise ValueError('Input data file should be txt or csv')
if file_type == 'csv':
data = pd.read_csv(filename, header=0)
else:
data = pd.read_table(filename, header=0, sep='\t')
for x in ['gene', 'guide', 'barcode']:
if x not in data.columns:
raise ValueError('Input data file should contain column named as: ' + x)
if len(ctrl_label) != len(exp_label):
raise ValueError('Input control labels and treatment labels should be of the same length.')
if out_dir != '' and not os.path.exists(out_dir):
os.mkdir(out_dir)
if method not in ['sunbird', 'mw']:
raise ValueError('The test method should be in: sunbird mw.')
analysis = Screening(data, ctrl_label, exp_label, hasbarcode=hasbarcode)
if method == 'sunbird':
analysis.sunbird(10)
analysis.test['sunbird'].to_csv(
os.path.join(out_dir, 'pycas_analysis_sunbird.csv')
)
else:
pass
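# Example invocation of the analysis routine (illustrative; the file name and
# label names below are made up):
#
#   pycasanalysis(filename='screen.csv',
#                 ctrl_label=['ctrl_1', 'ctrl_2'],
#                 exp_label=['treat_1', 'treat_2'],
#                 method='sunbird',
#                 hasbarcode=False,
#                 out_dir='results')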
# ------------------
# Main
# ------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='pycasanalysis',
description='Analysis the library screening result.'
)
parser.add_argument(
'-i', '--input', required=True,
help='Input data file path, with columns: gene, guide <, barcode>, [labels]. (column names should be in the csv file).'
)
parser.add_argument(
'-c', '--control-label', nargs='+',
help='Control experiment labels, separeted by space.'
)
parser.add_argument(
'-t', '--treat-label', nargs='+',
help='Treatment experiment labels, separeted by space.'
)
parser.add_argument(
'-m', '--method', default='sunbird',
help='Method to be used in the analysis: sunbird, mw.'
)
parser.add_argument(
'--has-barcode', action='store_true',
help='Input data should be tested consider barcodes.'
)
parser.add_argument(
'--out-dir', default='',
help='Result output directory,default is current work directory.'
)
args = parser.parse_args()
def analysis(**args):
pycasanalysis(
filename=args['input'],
ctrl_label=args['control_label'],
exp_label=args['treat_label'],
method=args['method'],
hasbarcode=args['has_barcode'],
out_dir=args['out_dir']
)
analysis(**vars(args))
# ------------------
# EOF
# ------------------
| gpl-3.0 | 329,395,760,262,836,000 | 29.383178 | 127 | 0.550907 | false |
alberthdev/cihelper | cihelper/download.py | 1 | 2101 | #!/usr/bin/env python3
import os
import requests
import time
from collections import namedtuple
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
class TimeoutRequestsSession(requests.Session):
def __init__(self, *args, **kwargs):
self.__default_timeout = None
if 'timeout' in kwargs:
self.__default_timeout = kwargs.pop('timeout')
super().__init__(*args, **kwargs)
def request(self, *args, **kwargs):
if self.__default_timeout:
kwargs.setdefault('timeout', self.__default_timeout)
return super(TimeoutRequestsSession, self).request(*args, **kwargs)
SessionSettings = namedtuple("SessionSettings",
["total_retries", "timeout", "backoff_factor", "status_forcelist"])
cached_sessions = {}
def get_session(total_retries=5, timeout=60, backoff_factor=1, status_forcelist=None):
if not status_forcelist:
status_forcelist = (500, 502, 503, 504)
settings = SessionSettings(total_retries=total_retries, timeout=timeout,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
if settings in cached_sessions:
return cached_sessions[settings]
session = TimeoutRequestsSession(timeout=timeout)
retries = Retry(total=total_retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("httsp://", HTTPAdapter(max_retries=retries))
cached_sessions[settings] = session
return session
def check_url(session, url):
response = session.get(url)
return response.status_code < 400
def download_file(session, url, dest=None, chunk_size=8192):
dl_attempts = 0
dest = dest or os.path.basename(url)
with session.get(url, stream=True) as response:
response.raise_for_status()
with open(dest, 'wb') as fh:
            for chunk in response.iter_content(chunk_size=chunk_size):
fh.write(chunk)
return dest
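if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): fetch one file through the
    # shared retrying session. The URL below is a placeholder.
    sess = get_session(total_retries=3, timeout=30)
    url = "https://example.com/data.bin"
    if check_url(sess, url):
        print("saved to", download_file(sess, url))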
| mit | -3,698,014,187,821,694,000 | 30.358209 | 96 | 0.656354 | false |
kennt/fixtest | simple/logon_controller.py | 1 | 4782 | """ Simple client/server controller for testing.
Copyright (c) 2014 Kenn Takara
See LICENSE for details
"""
import logging
from fixtest.base.asserts import *
from fixtest.base.controller import TestCaseController
from fixtest.fix.constants import FIX
from fixtest.fix.messages import logon_message, logout_message
from fixtest.fix.transport import FIXTransportFactory
class LogonController(TestCaseController):
""" The base class for FIX-based TestCaseControllers.
This creates a client and a server that will
communicate with each other. So they will use
the same link config.
"""
def __init__(self, **kwargs):
super(LogonController, self).__init__(**kwargs)
self.testcase_id = 'Simple-1'
self.description = 'Test of the command-line tool'
config = kwargs['config']
self.server_config = config.get_role('test-server')
self.server_config.update({'name': 'server-9940'})
self.server_link_config = config.get_link('client', 'test-server')
self.server_link_config.update({
'sender_compid': self.server_link_config['test-server'],
'target_compid': self.server_link_config['client'],
})
self.client_config = config.get_role('client')
self.client_config.update({'name': 'client-9940'})
self.client_link_config = config.get_link('client', 'test-server')
self.client_link_config.update({
'sender_compid': self.client_link_config['client'],
'target_compid': self.client_link_config['test-server'],
})
self._servers = dict()
self._clients = dict()
factory = FIXTransportFactory('server-9940',
self.server_config,
self.server_link_config)
factory.filter_heartbeat = False
server = {
'name': 'server-9940',
'port': self.server_link_config['port'],
'factory': factory,
}
self._servers[server['name']] = server
# In the client case we do not need to provide a
# factory, Just need a transport.
client = {
'name': 'client-9940',
'host': self.client_link_config['host'],
'port': self.client_link_config['port'],
'node': factory.create_transport('client-9940',
self.client_config,
self.client_link_config),
}
self._clients[client['name']] = client
self._logger = logging.getLogger(__name__)
def clients(self):
""" The clients that need to be started """
return self._clients
def servers(self):
""" The servers that need to be started """
return self._servers
def setup(self):
""" For this case, wait until our servers are all
connected before continuing with the test.
"""
# at this point the servers should be waiting
# so startup the clients
self.wait_for_client_connections(10)
self.wait_for_server_connections(10)
def teardown(self):
pass
def run(self):
""" This test is a demonstration of logon and
heartbeat/TestRequest processing. Usually
the logon process should be done from setup().
"""
client = self._clients['client-9940']['node']
client.protocol.heartbeat = 5
# We only have a single server connection
server = self._servers['server-9940']['factory'].servers[0]
server.protocol.heartbeat = 5
# client -> server
client.send_message(logon_message(client))
# server <- client
message = server.wait_for_message(title='waiting for logon')
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGON)])
# server -> client
server.send_message(logon_message(server))
server.start_heartbeat(True)
# client <- server
message = client.wait_for_message(title='waiting for logon ack')
client.start_heartbeat(True)
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGON)])
# Logout
client.send_message(logout_message(client))
message = server.wait_for_message(title='waiting for logout')
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGOUT)])
server.send_message(logout_message(server))
server.start_heartbeat(False)
message = client.wait_for_message('waiting for logout ack')
client.start_heartbeat(False)
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGOUT)])
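# Editor's note (not part of the original example): the fixtest runner drives
# this controller through the TestCaseController lifecycle -- it starts the
# transports returned by servers() and clients(), then calls setup(), run()
# and teardown() in that order (inferred from the method docstrings above).
# The exact command-line invocation is version dependent; something along the
# lines of "fixtest -c <config file> simple/logon_controller.py" is assumed
# here and may differ for your installation.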
| mit | -4,686,859,582,700,257,000 | 32.914894 | 74 | 0.592221 | false |
gwob/Maarifa | twpm/manage.py | 1 | 5250 | from csv import DictReader
from datetime import datetime
from pprint import pprint
from flask.ext.script import Manager
from taarifa_api import add_document, delete_documents, get_schema
from taarifa_waterpoints import app
from taarifa_waterpoints.schemas import facility_schema, service_schema
manager = Manager(app)
def check(response, success=201, print_status=True):
data, _, _, status = response
if status == success:
if print_status:
print " Succeeded"
return True
print "Failed with status", status
pprint(data)
return False
@manager.option("resource", help="Resource to show the schema for")
def show_schema(resource):
"""Show the schema for a given resource."""
pprint(get_schema(resource))
@manager.command
def list_routes():
"""List all routes defined for the application."""
import urllib
for rule in sorted(app.url_map.iter_rules(), key=lambda r: r.endpoint):
methods = ','.join(rule.methods)
print urllib.unquote("{:40s} {:40s} {}".format(rule.endpoint, methods,
rule))
@manager.command
def create_facility():
"""Create facility for waterpoints."""
check(add_document('facilities', facility_schema))
@manager.command
def create_service():
"""Create service for waterpoints."""
check(add_document('services', service_schema))
@manager.command
def delete_facilities():
"""Delete all facilities."""
check(delete_documents('facilities'), 200)
@manager.command
def delete_services():
"""Delete all services."""
check(delete_documents('services'), 200)
@manager.command
def delete_requests():
"""Delete all requests."""
check(delete_documents('requests'), 200)
@manager.option("filename", help="CSV file to upload (required)")
@manager.option("--skip", type=int, default=0, help="Skip a number of records")
@manager.option("--limit", type=int, help="Only upload a number of records")
def upload_waterpoints(filename, skip=0, limit=None):
"""Upload waterpoints from a CSV file."""
# Use sys.stdout.write so waterpoints can be printed nicely and succinctly
import sys
date_converter = lambda s: datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
bool_converter = lambda s: s == "T"
status_map = {
"non functional": "not functional",
"functional needs repair": "needs repair"
}
status_converter = lambda s: status_map.get(s.lower(), s.lower())
convert = {
'latitude': float,
'longitude': float,
'gid': int,
'objectid': int,
'valid_from': date_converter,
'valid_to': date_converter,
'amount_tsh': float,
'breakdown_year': int,
'date_recorded': date_converter,
'gps_height': float,
'x_wgs84': float,
'y_wgs84': float,
'num_privcon': int,
'pop_served': int,
'public_meeting': bool_converter,
'construction_year': int,
'status_group': status_converter,
'region_code': int,
'district_code': int,
'ward_code': int
}
def print_flush(msg):
sys.stdout.write(msg)
sys.stdout.flush()
facility_code = "wpf001"
print_every = 1000
print_flush("Adding waterpoints. Please be patient.")
with open(filename, 'rU') as f:
reader = DictReader(f)
for i in range(skip):
reader.next()
for i, d in enumerate(reader):
actual_index = i + skip + 2
do_print = actual_index % print_every == 0
try:
d = dict((k, convert.get(k, str)(v)) for k, v in d.items() if v)
coords = [d.pop('longitude'), d.pop('latitude')]
d['location'] = {'type': 'Point', 'coordinates': coords}
d['facility_code'] = facility_code
if not check(add_document('waterpoints', d), 201, False):
raise Exception()
if do_print:
print_flush(".")
except Exception as e:
print "Error adding waterpoint", e
pprint(d)
exit()
if limit and i >= limit:
break
# Create a 2dsphere index on the location field for geospatial queries
app.data.driver.db['resources'].ensure_index([('location', '2dsphere')])
print "Waterpoints uploaded!"
@manager.command
def ensure_indexes():
"""Make sure all important database indexes are created."""
print "Ensuring resources:location 2dsphere index is created ..."
app.data.driver.db['resources'].ensure_index([('location', '2dsphere')])
print "Done!"
@manager.option("status", help="Status (functional or non functional)")
@manager.option("wp", help="Waterpoint id")
def create_request(wp, status):
"""Create an example request reporting a broken waterpoint"""
r = {"service_code": "wps001",
"attribute": {"waterpoint_id": wp,
"status": status}}
check(add_document("requests", r))
@manager.command
def delete_waterpoints():
"""Delete all existing waterpoints."""
print delete_documents('waterpoints')
if __name__ == "__main__":
manager.run()
| apache-2.0 | 7,342,189,801,768,494,000 | 29.172414 | 80 | 0.60781 | false |
kevinah95/bmc-sequence-alignment | algorithms/needleman_wunsch/plot_nw.py | 1 | 3189 | import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import datetime
from Bio.Seq import Seq
if __name__ == '__main__':
from needleman_wunsch import needleman_wunsch
else:
from .needleman_wunsch import needleman_wunsch
#-------------------------------
def plot_nw(seq_alpha_col,seq_beta_row,p_penalty):
if not seq_alpha_col or not seq_beta_row:
print("Alguna de las secuencias está vacía.")
return
plt.rcParams["figure.figsize"] = 20, 20
param = {"grid.linewidth": 1.6,
"grid.color": "lightgray",
"axes.linewidth": 1.6,
"axes.edgecolor": "lightgray",
"font.size": 8}
plt.rcParams.update(param)
# Data
headh = seq_alpha_col
headv = seq_beta_row
score_matrix, pt_mat, arrows = needleman_wunsch(seq_alpha_col,seq_beta_row,p_penalty,score_only=False)
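    # Editor's note, inferred from how the results are used below (not from the
    # needleman_wunsch API itself): score_matrix is the filled scoring grid,
    # pt_mat is a matrix of dicts whose 'left'/'diagonal'/'up' entries mark the
    # possible traceback moves, and each row of arrows holds the start (x, y)
    # and end (x, y) of one segment of the optimal alignment path.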
# Plot
fig, ax = plt.subplots()
ax.set_xlim(-1.5, score_matrix.shape[1] - .5)
ax.set_ylim(-1.5, score_matrix.shape[0] - .5)
ax.invert_yaxis()
for i in range(score_matrix.shape[0]):
for j in range(score_matrix.shape[1]):
ax.text(j, i, score_matrix[i, j], ha="center", va="center")
for i, l in enumerate(headh):
ax.text(i + 1, -1, l, ha="center", va="center", fontweight="semibold")
for i, l in enumerate(headv):
ax.text(-1, i + 1, l, ha="center", va="center", fontweight="semibold")
ax.xaxis.set_minor_locator(ticker.FixedLocator(
np.arange(-1.5, score_matrix.shape[1] - .5, 1)))
    ax.yaxis.set_minor_locator(ticker.FixedLocator(
        np.arange(-1.5, score_matrix.shape[0] - .5, 1)))
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left="off", right="off", labelbottom='off', labelleft='off')
#-----------ax.set_aspect('auto')
ax.grid(True, which='minor')
arrowprops = dict(facecolor='blue', alpha=0.5, lw=0,
shrink=0.2, width=2, headwidth=7, headlength=7)
# all path
for i in range(1,pt_mat.shape[0]):
for j in range(1,pt_mat.shape[1]):
if(pt_mat[i][j]['left'] != ''):
ax.annotate("", xy=(j-1,i),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['diagonal'] != ''):
ax.annotate("", xy=(j-1,i-1),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['up'] != ''):
ax.annotate("", xy=(j,i-1),
xytext=(j,i), arrowprops=arrowprops)
# optimal path
arrowprops.update(facecolor='crimson')
for i in range(arrows.shape[0]):
ax.annotate("", xy=arrows[i, 2:], # origin
xytext=arrows[i, :2], arrowprops=arrowprops)
#------------
plt.gca().set_aspect('auto')
time = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
plt.savefig("output/needleman_wunsch/output-nw_"+time+".pdf", dpi=600)
#plt.show()
if __name__ == '__main__':
alpha = Seq("ACTCA")
beta = Seq("TTCAT")
penalty = {'MATCH': 1, 'MISMATCH': -1, 'GAP': -2}
plot_nw(alpha,beta,penalty) | mit | -9,171,932,329,394,185,000 | 34.032967 | 106 | 0.548792 | false |
cchampet/TuttleOFX | doc/scripts/plaintext2html.py | 1 | 3911 | #!/usr/bin/env python
from __future__ import with_statement
import re
import cgi
colorcodes = {'bold':{True:'\033[1m',False:'\033[22m'},
'cyan':{True:'\033[1;36m',False:'\033[0;0m'},
#'#8E4429':{True:'\033[1;33m',False:'\033[0;0m'},
'#8E4429':{True:'\033[0;33m',False:'\033[0;0m'},
#'#8E4429':{True:'\033[33m',False:'\033[0;0m'},
'#0000B0':{True:'\033[1;34m',False:'\033[0;0m'},
'#B63A11':{True:'\033[1;31m',False:'\033[0;0m'},
'magenta':{True:'\033[1;35m',False:'\033[0;0m'},
#'green':{True:'\033[1;32m',False:'\033[0;0m'},
'green':{True:'\033[0;32m',False:'\033[0;0m'},
#'green':{True:'\033[32m',False:'\033[0;0m'},
'underline':{True:'\033[1;4m',False:'\033[0;0m'}}
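# The keys above are the HTML color names/values emitted in the <span> tags;
# each maps to the pair of ANSI SGR escape sequences (start, end) that the
# captured console output is expected to contain.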
def recolor(color, text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes[color][True], colorcodes[color][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'''<span style="color: %s">\1</span>''' % color, text)
def resinglecolor(color, text, intxt):
regexp = "(?:\033\[1;32m%s)(.*?)" % intxt
return re.sub(regexp, r'<span style="color: green">%s\1</span>'% intxt, text)
def removestdcolor(text):
regexp = "(?:\033\[0;0m)(.*?)"
return re.sub(regexp, r'', text)
def bold(text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes['bold'][True], colorcodes['bold'][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'<span style="font-weight:bold">\1</span>', text)
def underline(text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes['underline'][True], colorcodes['underline'][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'<span style="text-decoration: underline">\1</span>', text)
def removebells(text):
return text.replace('\07', '')
def removebackspaces(text):
backspace_or_eol = r'(.\010)|(\033\[K)'
n = 1
while n > 0:
text, n = re.subn(backspace_or_eol, '', text, 1)
return text
template = '''\
<html>
<head>
</head>
<body>
%s
</body>
</html>
'''
re_string = re.compile(r'(?P<htmlchars>[<&>])|(?P<space>^[ \t]+)|(?P<lineend>\r\n|\r|\n)|(?P<protocal>(^|\s|\[)((http|ftp)://.*?))(\s|$|\])', re.S|re.M|re.I)
def plaintext2html(text, tabstop=4):
def do_sub(m):
c = m.groupdict()
if c['htmlchars']:
return cgi.escape(c['htmlchars'])
if c['lineend']:
return '<br>'
elif c['space']:
t = m.group().replace('\t', ' '*tabstop)
t = t.replace(' ', ' ')
return t
elif c['space'] == '\t':
return ' '*tabstop;
else:
url = m.group('protocal')
#print url
if url.startswith('['):
prefix = '['
suffix = ']'
url = url[1:]
else:
prefix = ''
suffix = ''
last = m.groups()[-1]
if last in ['\n', '\r', '\r\n']:
last = '<br>'
return '%s<a href=%s>%s</a>%s' % (prefix, url, url, suffix)
result = re.sub(re_string, do_sub, text)
result = result.replace(' ', ' ')
result = result.replace('\t', ' '*tabstop)
result = recolor('cyan', result)
result = recolor('#8E4429', result)
result = recolor('#0000B0', result)
result = recolor('#B63A11', result)
result = recolor('magenta', result)
result = recolor('green', result)
result = resinglecolor('green', result, 'Source')
result = resinglecolor('green', result, 'Output')
result = bold(result)
result = underline(result)
result = removebells(result)
result = removebackspaces(result)
result = removestdcolor(result)
return template % result
if __name__ == '__main__':
import sys
with open(sys.argv[-1]) as f:
text = f.read()
print plaintext2html(text)
| gpl-3.0 | -902,266,013,893,889,500 | 31.591667 | 157 | 0.520327 | false |
mantarayforensics/mantaray | Tools/Python/extract_ntfs_artifacts_mr.py | 1 | 15861 | #!/usr/bin/env python3
#This program extracts NTFS artifacts ($MFT, $Logfile, $USRJRNL) (Overt, Deleted
#Shadow Volumes)
#Use to extract files when using Triforce ANJP NTFS Journal Parser | Triforce (David Cohen)
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2013 [email protected] #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
#
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
#
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see http://www.gnu.org/licenses/. #
#########################COPYRIGHT INFORMATION############################
from easygui import *
from get_case_number import *
from get_output_location import *
from select_file_to_process import *
from parted import *
from mount import *
from mount_ewf import *
from done import *
from unix2dos import *
from remove_dupes_module_noask import *
from mmls import *
from Windows_Time_Converter_module import *
from check_for_folder import *
import os
from os.path import join
import re
import io
import sys
import string
import subprocess
import datetime
import shutil
import struct
### GET BLOCK SIZE ##############################################################################################
def get_block_size_mmls(Image_Path, outfile):
block_size = subprocess.check_output(['mmls -i raw ' + Image_Path + " | grep Units | awk '{print $4}' | sed s/-byte//"], shell=True, universal_newlines=True)
block_size = block_size.strip()
print("The block size is: " + str(block_size))
outfile.write("The block size is: " + str(block_size) + "\n\n")
return block_size
def get_block_size_parted(outfile, temp_time):
block_size_command = "sudo cat /tmp/timeline_partition_info_" + temp_time +".txt | grep -a " + "'"+"Sector size"+"'" + " | awk {'print $4'} | sed s_B/.*__"
outfile.write("The block_size command is: " + block_size_command + "\n")
block_size = subprocess.check_output([block_size_command], shell=True, universal_newlines=True)
block_size = block_size.strip()
print("The block size is: " + str(block_size))
outfile.write("The block size is: " + str(block_size) + "\n\n")
return block_size
### END GET BLOCK SIZE ##########################################################################################
### PROCESS FLS OUTPUT ###### ############################################################
def process_fls_output(value, key, Image_Path, block_size, folder_path, item, file_type, outfile, temp_file):
#divide offset by block size so it is in correct format for fls
key_bytes = int(key)//int(block_size)
#open FLS output file
fls_output_file = open("/tmp/fls_output_ntfs_" + temp_file + ".txt", 'r')
for line in fls_output_file:
#print current line for debugging purposes
#print(line)
newList=[]
#strip carriage returns
line = line.strip()
line_split = line.split('/')
#print(line_split)
for i in line_split:
newList.append(i.split('\t')[0])
#print (newList)
#assign items in newList to variables
inode_number_temp = newList[1]
#strip alpha chars from inode & leading space
inode_number = re.sub('[a-z]','',inode_number_temp)
inode_number = re.sub('^ +','', inode_number)
#get file_name
file_name = newList[-1]
if(item == "NO"):
if(re.search('OrphanFiles', line)):
#copy files out using icat
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_" + file_type +"_DELETED" + "'"
else:
#get user profile name
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_OVERT_" + file_type + "'"
else: #these are the shadow volume files
if(re.search('OrphanFiles', line)):
#copy files out using icat
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_DELETED_" + file_type + "_" + item + "'"
else:
#get user profile name
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_" + file_type + "_" + item + "'"
print("File Name: " + file_name.ljust(10) + "\t" "Inode number: " + inode_number.ljust(10))
outfile.write("The icat command is: " + icat_command + "\n")
#run icat command
subprocess.call([icat_command], shell=True)
#close file
fls_output_file.close()
##########################################################################################
### PROCESS OVERT / DELETED HIVES ##############################################################################
def process_overt_deleted_files(value, key, Image_Path, outfile, folder_path, block_size, item, temp_time):
#divide offset by block size so it is in correct format for fls
key_bytes = int(key)//int(block_size)
#run fls to get information for MFT files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$MFT$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time + ".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $MFT files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "MFT", outfile, temp_time)
#run fls to get information for Logfiles files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$LogFile$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time +".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $LogFiles files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "LogFile", outfile, temp_time)
	#run fls to get information for $UsnJrnl files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$UsnJrnl.\$J$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time + ".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $UsrJrnl files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "UsnJrnl", outfile, temp_time)
### END PROCESS OVERT / DELETED HIVES ##############################################################################
### CHECK FOR SHADOW VOLUMES ################################################
def check_for_shadow_volumes(Image_Path, key, block_size, outfile, folder_path, temp_time):
#set shadow volume variables
has_shadow_volumes = "NULL"
vssvolume_mnt = "NULL"
#divide offset by block size so it is in correct format for vshadowinfo
key_bytes = int(key)//int(block_size)
key_bytes_disk_offset = int(key) * int(block_size)
image_no_quotes = Image_Path.replace("'","")
print("\nChecking: " + Image_Path + " for shadow volumes")
f = open('/tmp/dump_' + temp_time + '.txt', 'w+t')
try:
vshadow_info_command = "vshadowinfo -v -o " + str(key) + " " + Image_Path# + " > /tmp/dump.txt"
#print("The vshadow_command is: " + vshadow_info_command)
outfile.write("The vshadow_command is: " + vshadow_info_command)
subprocess.call([vshadow_info_command], shell=True, stdout = f, stderr=subprocess.STDOUT)
#vshadow_output = subprocess.check_output([vshadow_info_command], shell=True, stderr=subprocess.STDOUT)
#f.close()
f =open('/tmp/dump_' + temp_time + '.txt', 'rt')
#print("try succedded")
for line in f:
line = line.strip()
print(line)
if (re.search("No Volume Shadow Snapshots found", line)):
has_shadow_volumes = "NO"
if(has_shadow_volumes != "NO"):
print("Partition at offset: " + str(key_bytes) + " has shadow volumes.")
outfile.write("Partition at offset: " + str(key_bytes) + " has shadow volumes.")
#check for existence of folder
vssvolume_mnt = check_for_folder("/mnt/vssvolume", outfile)
#mount shadow volumes for partition
mount_shadow_command = "sudo vshadowmount -o " + str(key) + " " + Image_Path + " " + vssvolume_mnt
print("The mount_shadow_command is: " + mount_shadow_command)
subprocess.call(["sudo vshadowmount -o " + str(key) + " " + Image_Path + " " + vssvolume_mnt], shell=True, stderr=subprocess.STDOUT)
#pass vssvolume mount point to mount_shadow_volume for mounting
mount_shadow_volumes(vssvolume_mnt, outfile, folder_path)
elif(has_shadow_volumes == "NO"):
print("Partition at offset: " + str(key) + " has no shadow volumes")
f.close()
except:
print("The vshadow_info command for partition: " + str(key) + " failed")
return vssvolume_mnt
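#Editor's note: check_for_shadow_volumes() returns the string "NULL" (not None)
#when vshadowinfo fails or no shadow volumes are found; the caller below tests
#for that sentinel before unmounting.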
#############################################################################
#### MOUNT INDIVIDUAL SHADOW VOLUMES ########################################
def mount_shadow_volumes(vssvolume_mnt, outfile, folder_path):
print("Inside mount_shadow_volumes sub")
print("Vssvolume_mnt: " + vssvolume_mnt)
#check for existence of folder
vss_mount = check_for_folder("/mnt/vss_mount", outfile)
vss_volumes = os.listdir(vssvolume_mnt)
print(vss_volumes)
for item in vss_volumes:
print("About to process Shadow Volume: " + item)
#call parted function
partition_info_dict, temp_time = parted(outfile, vssvolume_mnt + "/"+item)
block_size = get_block_size_parted(outfile, temp_time)
for key,value in partition_info_dict.items():
print("About to process registry hives from: " + item)
process_overt_deleted_files(value, key, vssvolume_mnt+"/"+item, outfile, folder_path, block_size, item, temp_time)
#############################################################################
### MAIN PROGRAM ########################################################################################################################
def extract_ntfs_artifacts_mr(item_to_process, case_number, root_folder_path, evidence):
print("The item to process is: " + item_to_process)
print("The case_name is: " + case_number)
print("The output folder is: " + root_folder_path)
print("The evidence to process is: " + evidence)
#get datetime
now = datetime.datetime.now()
#set Mount Point
mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S")
#create output folder path
folder_path = root_folder_path + "/" + "NTFS_Artifacts"
check_for_folder(folder_path, "NONE")
#open a log file for output
log_file = folder_path + "/NTFS_Artifacts_logfile.txt"
outfile = open(log_file, 'wt+')
Image_Path = '"' + evidence + '"'
	#set item variable to tell functions whether data is from shadow volumes
	item = "NO"
	#initialize shadow volume mount point so the unmount logic below works even
	#if no NTFS/FAT32 partitions are found
	vssvolume_mnt = "NULL"
#check if Image file is in Encase format
if re.search(".E01", Image_Path):
#set mount point
mount_point = "/mnt/"+case_number+"_unallocated"
Image_Path = mount_ewf(Image_Path, outfile, mount_point)
#call mmls function
partition_info_dict, temp_time = mmls(outfile, Image_Path)
partition_info_dict_temp = partition_info_dict
#get filesize of mmls_output.txt
file_size = os.path.getsize("/tmp/mmls_output_" + temp_time + ".txt")
#if filesize of mmls output is 0 then run parted
if(file_size == 0):
print("mmls output was empty, running parted\n")
outfile.write("mmls output was empty, running parted\n")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
block_size = get_block_size_parted(outfile, temp_time)
else:
#get block_size since mmls was successful
block_size = get_block_size_mmls(Image_Path, outfile)
#read through the mmls output and look for GUID Partition Tables (used on MACS)
mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
for line in mmls_output_file:
if re.search("GUID Partition Table", line):
print("We found a GUID partition table, need to use parted")
outfile.write("We found a GUID partition table, need to use parted\n")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
for key,value in partition_info_dict.items():
#process overt registy hives
if(value =="ntfs") or (value=="fat32"):
if not os.path.exists(folder_path + "/Partition_" + str(key)):
os.makedirs(folder_path + "/Partition_" + str(key))
#print("Just created output folder: " + folder_path + "/Partition_" + str(key))
outfile.write("Just created output folder: " + folder_path + "/Partition_" + str(key) + "\n\n")
else:
print("Output folder: " + folder_path +"/Partition_" + str(key) + " already exists")
outfile.write("Output folder: " + folder_path +"/Partition_" + str(key) + " already exists\n\n")
process_overt_deleted_files(value, key, Image_Path, outfile, folder_path, block_size, item, temp_time)
vssvolume_mnt = check_for_shadow_volumes(Image_Path, key, block_size, outfile, folder_path, temp_time)
else:
print("This partition is not formatted NTFS or FAT32")
outfile.write("This partition is not formatted NTFS or FAT32\n\n")
#run fdupes against output path to eliminate dupes
remove_dupes_module_noask(folder_path, outfile, str(key))
#chdir to output foler
os.chdir(folder_path)
#unmount shadow volumes
if(vssvolume_mnt != "NULL"):
print("Unmounting: " + vssvolume_mnt)
outfile.write("Unmounting: " + vssvolume_mnt + "\n")
subprocess.call(['sudo umount -f ' + vssvolume_mnt], shell=True)
os.rmdir(vssvolume_mnt)
#unmount and remount points
if re.search(".E01", Image_Path):
if(os.path.exists(mount_point+"_ewf")):
subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
os.rmdir(mount_point+"_ewf")
#remove empty directories
for root, dirs, files in os.walk(folder_path, topdown = False):
for directory in dirs:
dir_path = os.path.join(root, directory)
if not os.listdir(dir_path):
outfile.write("Removing empty folder: " + dir_path + "\n")
os.rmdir(dir_path)
#close outfiles
outfile.close()
#delete temp files
os.remove('/tmp/fls_output_ntfs_' + temp_time + '.txt')
#run text files through unix2dos
for root, dirs, files in os.walk(folder_path):
for filenames in files:
#get file extension
fileName, fileExtension = os.path.splitext(filenames)
if(fileExtension.lower() == ".txt"):
full_path = os.path.join(root,filenames)
quoted_full_path = "'" +full_path+"'"
print("Running Unix2dos against file: " + filenames)
unix2dos_command = "sudo unix2dos " + quoted_full_path
subprocess.call([unix2dos_command], shell=True)
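#Example call (editor's illustration -- the case number and paths are hypothetical):
#    extract_ntfs_artifacts_mr("ntfs_artifacts", "2014-001",
#                              "/cases/2014-001", "/mnt/evidence/laptop.E01")
#The first argument is only echoed to the console; the case number, output
#folder and evidence image (raw/dd or EWF .E01) drive the extraction.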
| gpl-3.0 | 2,019,264,180,146,138,400 | 39.256345 | 256 | 0.617868 | false |
vlegoff/tsunami | src/secondaires/auberge/editeurs/aubedit/__init__.py | 1 | 3826 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur d'auberge 'aubedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Au quel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
from secondaires.auberge.editeurs.aubedit.edt_chambres import EdtChambres
class EdtAubedit(Presentation):
"""Classe définissant l'éditeur d'auberge."""
nom = "aubedit"
def __init__(self, personnage, auberge):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, auberge)
if personnage and auberge:
self.construire(auberge)
def __getnewargs__(self):
return (None, None)
def construire(self, auberge):
"""Construction de l'éditeur"""
        # Title
titre = self.ajouter_choix("titre", "t", Uniligne, auberge, "titre")
titre.parent = self
titre.prompt = "Titre de l'auberge : "
titre.apercu = "{objet.titre}"
titre.aide_courte = \
"Entrez le |ent|titre|ff| de l'auberge ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nTitre actuel : " \
"|bc|{objet.titre}|ff|"
        # Innkeeper key
cle = self.ajouter_choix("clé de l'aubergiste", "a", Uniligne,
auberge, "cle_aubergiste")
cle.parent = self
cle.prompt = "Clé du prototype de l'aubergiste : "
cle.apercu = "{objet.cle_aubergiste}"
cle.aide_courte = \
"Entrez la |ent|clé de l'aubergiste|ff| ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nClé actuelle : " \
"|bc|{objet.cle_aubergiste}|ff|"
        # Rooms
chambres = self.ajouter_choix("chambres", "c", EdtChambres, auberge)
chambres.parent = self
chambres.apercu = "\n{objet.aff_chambres}"
| bsd-3-clause | -3,694,249,358,320,712,000 | 40.326087 | 79 | 0.694108 | false |