repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
mfkaptan/biyos-app | biyos/biyos.py | 1 | 13470 | import cookielib
import urllib
import urllib2
import sys
import biyosui
import base64 as b64
import re
from docx import Document
from docx.shared import Inches
from openpyxl import load_workbook
from PyQt4 import QtGui
from bs4 import BeautifulSoup
from math import ceil
# Tenant (kiraci) rows: No (account number), Blok (block), Daire (flat number)
kiraci = [[7710, "A", 6]]
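# Unit rates used when splitting the heating bill: DOGALGAZ_BIRIM is the natural-gas rate and
# SICAKSU_BIRIM the hot-water rate, per metered unit (currency presumably TL).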
DOGALGAZ_BIRIM = 12.5
SICAKSU_BIRIM = 6.25
class BiyosApp(QtGui.QMainWindow, biyosui.Ui_MainWindow):
def __init__(self, parent=None):
super(BiyosApp, self).__init__(parent)
self.setupUi(self)
self.dogalgaz_birim_in.setValue(DOGALGAZ_BIRIM)
self.su_birim_in.setValue(SICAKSU_BIRIM)
self.kalori_hesap_button.clicked.connect(self.kalori_hesap)
self.sayac_veri_button.clicked.connect(self.sayac_verileri)
self.apartman_aidat_button.clicked.connect(self.apartman_aidat)
self.tum_borclar_button.clicked.connect(self.tum_borclar)
self.tek_borc_button.clicked.connect(self.tek_borc)
self.login()
def login(self):
with open('login/log.in', 'r') as f:
self.email = b64.decodestring(f.readline().strip())
self.password = b64.decodestring(f.readline().strip())
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(
urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler(debuglevel=0),
urllib2.HTTPSHandler(debuglevel=0),
urllib2.HTTPCookieProcessor(self.cj)
)
self.opener.addheaders = [('User-agent', ('Mozilla/4.0 (compatible; MSIE 6.0; '
'Windows NT 5.2; .NET CLR 1.1.4322)'))]
# need this twice - once to set cookies, once to log in...
self._login()
self._login()
self.giris_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.giris_button.setText(self.email + ' adresi ile giris yapildi!')
def _login(self):
"""
Handle login. This should populate our cookie jar.
"""
login_data = urllib.urlencode({
'email': self.email,
'password': self.password,
})
response = self.opener.open("https://app.biyos.net/login.php", login_data)
def sayac_verileri(self):
self.dogalgaz_birim = float(self.dogalgaz_birim_in.value())
self.su_birim = float(self.su_birim_in.value())
su = self.get_page('https://app.biyos.net/385/yonetim/sayaclar/sicaksu')
self.su_toplam = self.get_sayac_toplam(su)
self.su_toplam_disp.setText(str(self.su_toplam))
kalori = self.get_page('https://app.biyos.net/385/yonetim/sayaclar/kalorimetre')
self.kalori_toplam = self.get_sayac_toplam(kalori)
self.kalori_toplam_disp.setText(str(self.kalori_toplam))
self.kalori_ortalama = self.kalori_toplam / 48.0
self.kalori_ortalama_disp.setText(str("%.2f" % self.kalori_ortalama))
self.sayac_veri_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.sayac_veri_button.setText('Veriler gosteriliyor')
def kalori_hesap(self):
self.sayac_verileri()
self.dogalgaz_birim = float(self.dogalgaz_birim_in.value())
self.su_birim = float(self.su_birim_in.value())
fatura = float(self.fatura_in.value())
if fatura == 0:
self.kalori_hesap_button.setStyleSheet('QPushButton {background-color: #FF0000; color: black;}')
self.kalori_hesap_button.setText('Fatura girip tekrar deneyin!')
return
su_fark = (self.dogalgaz_birim - self.su_birim) * self.su_toplam
son_fiyat = fatura - su_fark
self.son_fiyat_disp.setText(str("%.2f" % son_fiyat))
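        # Ortak gider (common expense): 3/480 of the adjusted bill, i.e. a 30% share split evenly across the 48 flats.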
ortak_gider = (son_fiyat * 3.) / 480.
aidat = 200. - ortak_gider
self.ortak_gider_disp.setText(str("%.2f" % ortak_gider))
self.aidat_disp.setText(str("%.2f" % aidat))
self.kalori_hesap_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.kalori_hesap_button.setText('Hesaplandi!')
def _get_rows(self, html, attr=None):
if attr is None:
attr = "table"
table = html.find('table', attrs={'class': attr})
body = table.find('tbody')
rows = body.find_all('tr')
return rows
def get_sayac_toplam(self, html):
rows = self._get_rows(html)
total = 0
for r in rows:
cols = r.find_all('td')
total += int(cols[-1].text)
return total
def get_page(self, url):
try:
resp = self.opener.open(url)
return BeautifulSoup(resp.read(), "lxml")
except Exception as e:
raise e
return
def apartman_aidat(self):
self.sayac_verileri()
dogalgaz_link = self.paylasim_link_in.value()
if dogalgaz_link != 0:
url = 'https://app.biyos.net/385/raporlar/paylasimlar/' + str(dogalgaz_link)
else:
url = None
su_rows = []
kalori_rows = []
title = ""
try:
su = self.get_page('https://app.biyos.net/385/yonetim/sayaclar/sicaksu')
su_rows = self._get_rows(su)
if url is None:
title = "2016 - "
else:
kalori = self.get_page(url)
section = kalori.body.find('section', attrs={'class': 'rapor'})
title = section.find('h4', attrs={'class': 'pull-left'}).get_text()
yil = title.split('-')[0].strip()
ay = title.split('-')[1].strip().split(' ')[0].strip()
title = yil + ' - ' + ay
kalori_rows = self._get_rows(kalori)
except Exception as e:
print e
self.apartman_aidat_button.setStyleSheet('QPushButton {background-color: #FF0000; color: white;}')
self.apartman_aidat_button.setText('Yazdirma basarisiz, linki kontrol edin!')
return
try:
self.wb = load_workbook('aidat/template/aidat.xlsx')
ws = self.wb.active
ws.title = title
ws['C1'] = ws['C29'] = title
self._set_xlsx(ws, su_rows, kalori_rows)
self.wb.save(filename='aidat/' + title + ' ISIMLI Aidat.xlsx')
self._remove_names(ws)
self.wb.save(filename='aidat/' + title + ' ISIMSIZ Aidat.xlsx')
except Exception as e:
print e
self.apartman_aidat_button.setStyleSheet('QPushButton {background-color: #FF0000; color: white;}')
self.apartman_aidat_button.setText('Yazdirma basarisiz!')
else:
self.apartman_aidat_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.apartman_aidat_button.setText(title + ' Yazdirildi!')
def _remove_names(self, ws):
for i in range(4, 28):
ws.cell(row=i, column=2).value = 'NO LU Daire'
ws.cell(row=i+28, column=2).value = 'NO LU Daire'
def _set_xlsx(self, ws, su, kalori):
for i in range(48):
r = i + 4
if i >= 24:
r += 4
col = su[i].find_all('td')
ws.cell(row=r, column=2).value = col[2].text
ws.cell(row=r, column=3).value = int(col[5].text)
ws.cell(row=r, column=4).value = su_tl = self.dogalgaz_birim * int(col[5].text)
if len(kalori) == 0:
ws.cell(row=r, column=5).value = 0
ws.cell(row=r, column=6).value = d70 = 0
ws.cell(row=r, column=7).value = d30 = 0
else:
col = kalori[i].find_all('td')
ws.cell(row=r, column=5).value = float(col[6].text.replace(',', '.'))
ws.cell(row=r, column=6).value = d70 = float(col[8].text.replace(',', '.'))
ws.cell(row=r, column=7).value = d30 = float(col[7].text.replace(',', '.'))
aidat = 200. - d30
ws.cell(row=r, column=8).value = aidat
total = su_tl + d70 + d30 + aidat
ws.cell(row=r, column=9).value = ceil(total)
def _single_account(self, no, blok, daire):
html = self.get_page('https://app.biyos.net/385/hesaplar/' + str(no))
hesap = html.body.find('span', attrs={'style': 'font-size:22px;'}).get_text()
head = self.document.add_heading(hesap, level=1)
head.style.paragraph_format.keep_together = True
head.style.paragraph_format.keep_with_next = True
head = self.document.add_heading(blok + " Blok / No: " + str(daire), level=2)
head.style.paragraph_format.keep_together = True
head.style.paragraph_format.keep_with_next = True
try:
data = html.body.find('div', attrs={'class': 'table-responsive'})
geciken = html.body.find('div', attrs={'class': 'detail-payment-item text-danger big-title'})
bakiye = html.body.find('div', attrs={'class': 'detail-payment-item text-warning big-title'})
self.create_table(data, geciken, bakiye)
except AttributeError:
return
def create_table(self, data, geciken, bakiye):
if bakiye:
rows = self._get_rows(data, attr='table table-detail')
tbl = self.document.add_table(rows=0, cols=3)
tbl.autofit = False
tbl.style.paragraph_format.keep_together = True
tbl.style.paragraph_format.keep_with_next = True
tbl.style.paragraph_format.widow_control = True
row_cells = tbl.add_row().cells
row_cells[0].text = "Son Odeme Tarihi"
row_cells[1].text = "Aciklama"
row_cells[2].text = "Tutar"
for r in rows:
row_cells = tbl.add_row().cells
cols = r.find_all('td')
i = 0
for c in cols:
if c.text:
row_cells[i].text = c.text
i += 1
non_decimal = re.compile(r'[^\d.,]+')
row_cells = tbl.add_row().cells
row_cells[1].text = "Toplam Borc"
row_cells[2].text = non_decimal.sub('', bakiye.get_text())
tbl.columns[0].width = Inches(1.5)
tbl.columns[1].width = Inches(50)
tbl.columns[2].width = Inches(0.5)
else:
self.document.add_heading("Odenmemis borcunuz bulunmamaktadir.", level=3)
self.document.add_heading("Gosterdiginiz hassasiyet icin tesekkur ederiz.", level=4)
def tek_borc(self):
blok = None
d = 0
if self.a_blok_in.isChecked():
d = 0
blok = "A"
elif self.b_blok_in.isChecked():
d = 24
blok = "B"
else:
self.tek_borc_button.setStyleSheet('QPushButton {background-color: #FF0000; color: white;}')
self.tek_borc_button.setText('Blok seciniz!')
return
daire = int(self.daire_no_in.value())
hesap = daire + 6148 + d
yazdir = [[hesap, blok, daire]]
for k in kiraci:
if k[1] == blok and k[2] == daire:
yazdir.append(k)
try:
self.document = Document()
for d in yazdir:
self._single_account(*d)
self.document.save('aidat/' + d[1] + '-' + str(d[2]) + ' borc.docx')
except Exception as e:
print e
self.tek_borc_button.setStyleSheet('QPushButton {background-color: #FF0000; color: white;}')
self.tek_borc_button.setText('Yazdirma basarisiz!')
else:
self.tek_borc_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.tek_borc_button.setText('Basarili!\nBaska Yazdir')
def tum_borclar(self):
self.tum_borclar_button.setText('Yazdiriliyor, lutfen bekleyin...')
try:
self.document = Document()
bar = "".join(['_'] * 78)
daire = 1
blok = "A"
for i in range(6149, 6197):
print blok, daire
p = self.document.add_paragraph()
p.add_run(bar).bold = True
p.style.paragraph_format.keep_together = True
p.style.paragraph_format.keep_with_next = True
self._single_account(i, blok, daire)
daire += 1
if daire == 25:
daire = 1
blok = "B"
for k in kiraci:
p = self.document.add_paragraph()
p.style.paragraph_format.keep_together = True
p.style.paragraph_format.keep_with_next = True
p.add_run(bar).bold = True
self._single_account(*k)
self.document.save('aidat/Tum borclar.docx')
except Exception as e:
print e
self.tum_borclar_button.setStyleSheet('QPushButton {background-color: #FF0000; color: white;}')
self.tum_borclar_button.setText('Yazdirma basarisiz!')
else:
self.tum_borclar_button.setStyleSheet('QPushButton {background-color: #00FF00; color: black;}')
self.tum_borclar_button.setText('Yazdirma basarili!')
def main():
app = QtGui.QApplication(sys.argv)
biyos = BiyosApp()
biyos.show()
app.exec_()
if __name__ == '__main__':
main()
| mit | 9,147,751,380,979,631,000 | 36.313019 | 110 | 0.558129 | false |
thiagopena/PySIGNFe | pysignfe/nfe/manual_300/__init__.py | 1 | 1984 | # -*- coding: utf-8 -*-
ESQUEMA_ATUAL = u'pl_005f'
#
# SOAP envelopes
#
from .soap_100 import SOAPEnvio as SOAPEnvio_110
from .soap_100 import SOAPRetorno as SOAPRetorno_110
#
# NF-e issuance
#
from .nfe_110 import NFe as NFe_110
from .nfe_110 import NFRef as NFRef_110
from .nfe_110 import Det as Det_110
from .nfe_110 import DI as DI_110
from .nfe_110 import Adi as Adi_110
from .nfe_110 import Med as Med_110
from .nfe_110 import Arma as Arma_110
from .nfe_110 import Reboque as Reboque_110
from .nfe_110 import Vol as Vol_110
from .nfe_110 import Lacres as Lacres_110
from .nfe_110 import Dup as Dup_110
from .nfe_110 import ObsCont as ObsCont_110
from .nfe_110 import ObsFisco as ObsFisco_110
from .nfe_110 import ProcRef as ProcRef_110
#
# NF-e batch submission
#
from .envinfe_110 import EnviNFe as EnviNFe_110
from .envinfe_110 import RetEnviNFe as RetEnviNFe_110
#
# NF-e batch receipt query
#
from .consrecinfe_110 import ConsReciNFe as ConsReciNFe_110
from .consrecinfe_110 import RetConsReciNFe as RetConsReciNFe_110
from .consrecinfe_110 import ProtNFe as ProtNFe_110
from .consrecinfe_110 import ProcNFe as ProcNFe_110
#
# NF-e cancellation
#
from .cancnfe_107 import CancNFe as CancNFe_107
from .cancnfe_107 import RetCancNFe as RetCancNFe_107
from .cancnfe_107 import ProcCancNFe as ProcCancNFe_107
#
# NF-e invalidation (inutilização)
#
from .inutnfe_107 import InutNFe as InutNFe_107
from .inutnfe_107 import RetInutNFe as RetInutNFe_107
from .inutnfe_107 import ProcInutNFe as ProcInutNFe_107
#
# NF-e status query
#
from .conssitnfe_107 import ConsSitNFe as ConsSitNFe_107
from .conssitnfe_107 import RetConsSitNFe as RetConsSitNFe_107
#
# Service status query
#
from .consstatserv_107 import ConsStatServ as ConsStatServ_107
from .consstatserv_107 import RetConsStatServ as RetConsStatServ_107
#
# Registration (cadastro) query
#
from .conscad_101 import ConsCad as ConsCad_101
from .conscad_101 import RetConsCad as RetConsCad_101
| lgpl-2.1 | -4,792,829,922,985,035,000 | 25.346667 | 68 | 0.779858 | false |
GoogleCloudPlatform/PerfKitBenchmarker | tests/providers/aws/aws_vpc_endpoint_test.py | 1 | 4111 | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.providers.aws.aws_vpc_endpoint."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.providers.aws import aws_vpc_endpoint
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
SERVICE_NAME = 's3'
REGION = 'us-west-1'
FULL_SERVICE_NAME = 'com.amazonaws.{}.s3'.format(REGION)
VPC_ID = 'vpc-1234'
ENDPOINT_ID = 'vpce-1234'
ROUTE_TABLE_ID = 'rtb-1234'
CREATE_RES = {'VpcEndpoint': {'VpcEndpointId': ENDPOINT_ID}}
DELETE_RES = {'Unsuccessful': []}
QUERY_ENDPOINTS_CMD = [
'describe-vpc-endpoints', '--filters',
'Name=service-name,Values={}'.format(FULL_SERVICE_NAME),
'Name=vpc-id,Values={}'.format(VPC_ID), '--query',
'VpcEndpoints[].VpcEndpointId'
]
DESCRIBE_ROUTES_CMD = [
'describe-route-tables', '--filters',
'Name=vpc-id,Values={}'.format(VPC_ID), '--query',
'RouteTables[].RouteTableId'
]
CREATE_ENDPOINT_CMD = [
'create-vpc-endpoint', '--vpc-endpoint-type', 'Gateway', '--vpc-id', VPC_ID,
'--service-name', FULL_SERVICE_NAME, '--route-table-ids', ROUTE_TABLE_ID
]
DELETE_ENDPOINT_CMD = [
'delete-vpc-endpoints', '--vpc-endpoint-ids', ENDPOINT_ID
]
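# Expected aws-cli argument lists; the tests below assert that _RunCommand is called with exactly these.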
class AwsVpcS3EndpointTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(AwsVpcS3EndpointTest, self).setUp()
self.mock_vpc = mock.Mock()
self.mock_vpc.region = REGION
self.mock_run_cmd = self.enter_context(
mock.patch.object(aws_vpc_endpoint.AwsVpcS3Endpoint, '_RunCommand'))
def _InitEndpoint(self, vpc_id):
self.mock_vpc.id = vpc_id
return aws_vpc_endpoint.CreateEndpointService(SERVICE_NAME, self.mock_vpc)
def testEndPointIdNoVpc(self):
# initialize with no VPC means no immediate lookups done
endpoint = self._InitEndpoint(None)
self.assertIsNone(endpoint.id)
endpoint._RunCommand.assert_not_called()
def testEndPointIdHasVpc(self):
# initialize with a VPC does an immediate call to find existing endpoints
endpoint = self._InitEndpoint(VPC_ID)
self.assertIsNone(endpoint.id, 'Endpoint id always None on initialization')
self.mock_run_cmd.reset_mock()
self.mock_run_cmd.side_effect = [[ENDPOINT_ID]]
self.assertEqual(ENDPOINT_ID, endpoint.endpoint_id)
endpoint._RunCommand.assert_called_with(QUERY_ENDPOINTS_CMD)
def testCreate(self):
# shows that a call to .Create() will get the routing table info followed
# by the create-vpc-endpoint call
endpoint = self._InitEndpoint(VPC_ID)
self.mock_run_cmd.reset_mock()
self.mock_run_cmd.side_effect = [
[], # query for endpoint id
[ROUTE_TABLE_ID], # query for route tables
CREATE_RES, # _Create()
[ENDPOINT_ID], # _Exists()
]
endpoint.Create()
calls = endpoint._RunCommand.call_args_list
self.assertEqual(mock.call(QUERY_ENDPOINTS_CMD), calls[0])
self.assertEqual(mock.call(DESCRIBE_ROUTES_CMD), calls[1])
self.assertEqual(mock.call(CREATE_ENDPOINT_CMD), calls[2])
self.assertEqual(mock.call(QUERY_ENDPOINTS_CMD), calls[3])
self.assertEqual(ENDPOINT_ID, endpoint.id)
def testDelete(self):
endpoint = self._InitEndpoint(VPC_ID)
self.mock_run_cmd.reset_mock()
endpoint.id = ENDPOINT_ID
self.mock_run_cmd.side_effect = [DELETE_RES, []]
endpoint.Delete()
calls = endpoint._RunCommand.call_args_list
self.assertEqual(mock.call(DELETE_ENDPOINT_CMD), calls[0])
self.assertEqual(mock.call(QUERY_ENDPOINTS_CMD), calls[1])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,071,237,302,103,893,000 | 35.705357 | 80 | 0.704208 | false |
mvaled/sentry | src/sentry/integrations/gitlab/issues.py | 2 | 6466 | from __future__ import absolute_import
import re
from django.core.urlresolvers import reverse
from sentry.integrations.exceptions import ApiError, IntegrationError, ApiUnauthorized
from sentry.integrations.issues import IssueBasicMixin
from sentry.utils.http import absolute_uri
ISSUE_EXTERNAL_KEY_FORMAT = re.compile(r".+:(.+)#(.+)")
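# External issue keys look like '<domain>:<project path>#<issue iid>'; the regex extracts the project path and issue iid.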
class GitlabIssueBasic(IssueBasicMixin):
def make_external_key(self, data):
return u"{}:{}".format(self.model.metadata["domain_name"], data["key"])
def get_issue_url(self, key):
match = ISSUE_EXTERNAL_KEY_FORMAT.match(key)
project, issue_id = match.group(1), match.group(2)
return u"{}/{}/issues/{}".format(self.model.metadata["base_url"], project, issue_id)
def get_persisted_default_config_fields(self):
return ["project"]
def get_projects_and_default(self, group, **kwargs):
params = kwargs.get("params", {})
defaults = self.get_project_defaults(group.project_id)
kwargs["repo"] = params.get("project", defaults.get("project"))
# In GitLab Repositories are called Projects
default_project, project_choices = self.get_repository_choices(group, **kwargs)
return default_project, project_choices
def create_default_repo_choice(self, default_repo):
client = self.get_client()
try:
# default_repo should be the project_id
project = client.get_project(default_repo)
except (ApiError, ApiUnauthorized):
return ("", "")
return (project["id"], project["name_with_namespace"])
def get_create_issue_config(self, group, **kwargs):
default_project, project_choices = self.get_projects_and_default(group, **kwargs)
kwargs["link_referrer"] = "gitlab_integration"
fields = super(GitlabIssueBasic, self).get_create_issue_config(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
"sentry-extensions-gitlab-search", args=[org.slug, self.model.id]
)
return [
{
"name": "project",
"label": "GitLab Project",
"type": "select",
"url": autocomplete_url,
"choices": project_choices,
"defaultValue": default_project,
"required": True,
}
] + fields
def create_issue(self, data, **kwargs):
client = self.get_client()
project_id = data.get("project")
if not project_id:
raise IntegrationError("project kwarg must be provided")
try:
issue = client.create_issue(
project=project_id,
data={"title": data["title"], "description": data["description"]},
)
project = client.get_project(project_id)
except ApiError as e:
raise IntegrationError(self.message_from_error(e))
project_and_issue_iid = "%s#%s" % (project["path_with_namespace"], issue["iid"])
return {
"key": project_and_issue_iid,
"title": issue["title"],
"description": issue["description"],
"url": issue["web_url"],
"project": project_id,
"metadata": {"display_name": project_and_issue_iid},
}
def after_link_issue(self, external_issue, **kwargs):
data = kwargs["data"]
project_id, issue_id = data.get("externalIssue", "").split("#")
if not (project_id and issue_id):
raise IntegrationError("Project and Issue id must be provided")
client = self.get_client()
comment = data.get("comment")
if not comment:
return
try:
client.create_issue_comment(
project_id=project_id, issue_id=issue_id, data={"body": comment}
)
except ApiError as e:
raise IntegrationError(self.message_from_error(e))
def get_link_issue_config(self, group, **kwargs):
default_project, project_choices = self.get_projects_and_default(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
"sentry-extensions-gitlab-search", args=[org.slug, self.model.id]
)
return [
{
"name": "project",
"label": "GitLab Project",
"type": "select",
"default": default_project,
"choices": project_choices,
"url": autocomplete_url,
"updatesForm": True,
"required": True,
},
{
"name": "externalIssue",
"label": "Issue",
"default": "",
"type": "select",
"url": autocomplete_url,
"required": True,
},
{
"name": "comment",
"label": "Comment",
"default": u"Sentry issue: [{issue_id}]({url})".format(
url=absolute_uri(
group.get_absolute_url(params={"referrer": "gitlab_integration"})
),
issue_id=group.qualified_short_id,
),
"type": "textarea",
"required": False,
"help": ("Leave blank if you don't want to " "add a comment to the GitLab issue."),
},
]
def get_issue(self, issue_id, **kwargs):
project_id, issue_num = issue_id.split("#")
client = self.get_client()
if not project_id:
raise IntegrationError("project must be provided")
if not issue_num:
raise IntegrationError("issue must be provided")
try:
issue = client.get_issue(project_id, issue_num)
project = client.get_project(project_id)
except ApiError as e:
raise IntegrationError(self.message_from_error(e))
project_and_issue_iid = "%s#%s" % (project["path_with_namespace"], issue["iid"])
return {
"key": project_and_issue_iid,
"title": issue["title"],
"description": issue["description"],
"url": issue["web_url"],
"project": project_id,
"metadata": {"display_name": project_and_issue_iid},
}
def get_issue_display_name(self, external_issue):
return external_issue.metadata["display_name"]
| bsd-3-clause | 5,595,595,440,710,333,000 | 35.122905 | 99 | 0.548871 | false |
Alshain-Oy/Cloudsnake-Application-Server | clients/htpasswdFS.py | 1 | 5036 | #!/usr/bin/env python
# Cloudsnake Application server
# Licensed under Apache License, see license.txt
# Author: Markus Gronholm <[email protected]> Alshain Oy
import fuse
import time, sys
import stat, os, errno
import libCloudSnakeClient as SnakeClient
fuse.fuse_python_api = (0, 2)
class ObjectStat(fuse.Stat):
def __init__( self ):
self.st_mode = stat.S_IFDIR | 0755
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 2
self.st_uid = 0
self.st_gid = 0
self.st_size = 4096
self.st_atime = int( time.time() )
self.st_mtime = int( time.time() )
self.st_ctime = int( time.time() )
class testFS( fuse.Fuse ):
def __init__(self, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
print 'Init complete.'
self.files = []
#self.files.append( 'htpasswd_id' )
self.client = None
def attach_cloudsnake( self, client ):
self.client = client
self.snake = SnakeClient.CloudSnakeMapper( self.client )
self.files = self.snake.apache_get_groups()
print "self.files:", self.files
self.content = {}
def getattr(self, path):
"""
- st_mode (protection bits)
- st_ino (inode number)
- st_dev (device)
- st_nlink (number of hard links)
- st_uid (user ID of owner)
- st_gid (group ID of owner)
- st_size (size of file, in bytes)
- st_atime (time of most recent access)
- st_mtime (time of most recent content modification)
- st_ctime (platform dependent; time of most recent metadata change on Unix,
or the time of creation on Windows).
"""
print '*** getattr', path
#depth = getDepth(path) # depth of path, zero-based from root
#pathparts = getParts(path) # the actual parts of the path
#return -errno.ENOSYS
self.files = self.snake.apache_get_groups()
print "self.files:", self.files
st = ObjectStat()
parts = path.split( '/' )
if len( parts ) > 1:
fn = parts[ 1 ]
else:
fn = ''
if fn == '':
print "returing stats"
st.st_nlink += len( self.files )
return st
elif fn not in self.files:
print "No such file! (%s)"%fn
return -errno.ENOENT
else:
print "Returning stats.."
st.st_mode = stat.S_IFREG | 0755
self.content[ fn ] = self.snake.apache_get_content( fn )
st.st_size = len( self.content[ fn ] )
return st
# def getdir(self, path):
# """
# return: [[('file1', 0), ('file2', 0), ... ]]
# """
# self.files = self.snake.apache_get_groups()
#
# print '*** getdir', path
# #return -errno.ENOSYS
# return [[ (x, 0) for x in self.files ]]
def readdir(self, path, offset):
print "*** readdir"
dirents = [ '.', '..' ]
self.files = self.snake.apache_get_groups()
print "self.files:", self.files
if path == '/':
dirents.extend( self.files )
for r in dirents:
yield fuse.Direntry( str( r ))
def chmod( self, path, mode ):
print '*** chmod', path, oct(mode)
#return -errno.ENOSYS
return 0
def chown( self, path, uid, gid ):
print '*** chown', path, uid, gid
#return -errno.ENOSYS
return 0
def fsync( self, path, isFsyncFile ):
print '*** fsync', path, isFsyncFile
return -errno.ENOSYS
def link( self, targetPath, linkPath ):
print '*** link', targetPath, linkPath
return -errno.ENOSYS
def mkdir( self, path, mode ):
print '*** mkdir', path, oct(mode)
return -errno.ENOSYS
def mknod( self, path, mode, dev ):
print '*** mknod', path, oct(mode), dev
return -errno.ENOSYS
def open( self, path, flags ):
print '*** open', path, flags
#return -errno.ENOSYS
return 0
def read( self, path, length, offset ):
print '*** read', path, length, offset
#return -errno.ENOSYS
parts = path.split( '/' )
fn = parts[ 1 ]
self.content[ fn ] = self.snake.apache_get_content( fn )
#return self.content[ fn ][ offset : offset + length ]
out = self.content[ fn ][ offset : offset + length ]
print "out:", out
return str( out )
def readlink( self, path ):
print '*** readlink', path
return -errno.ENOSYS
def release( self, path, flags ):
print '*** release', path, flags
#return -errno.ENOSYS
return 0
def rename( self, oldPath, newPath ):
print '*** rename', oldPath, newPath
return -errno.ENOSYS
def rmdir( self, path ):
print '*** rmdir', path
return -errno.ENOSYS
def statfs( self ):
print '*** statfs'
return -errno.ENOSYS
def symlink( self, targetPath, linkPath ):
print '*** symlink', targetPath, linkPath
return -errno.ENOSYS
def truncate( self, path, size ):
print '*** truncate', path, size
return -errno.ENOSYS
def unlink( self, path ):
print '*** unlink', path
return -errno.ENOSYS
def utime( self, path, times ):
print '*** utime', path, times
return -errno.ENOSYS
def write( self, path, buf, offset ):
print '*** write', path, buf, offset
return -errno.ENOSYS
if __name__ == '__main__':
client = SnakeClient.CloudSnakeClient( 'http://localhost:8500', 'main' )
fs = testFS()
fs.attach_cloudsnake( client )
fs.flags = 0
	fs.multithreaded = 0
fs.parse()
fs.main()
#print fs.main.__doc__
| apache-2.0 | 8,844,387,426,208,073,000 | 20.991266 | 78 | 0.626291 | false |
ebu/ebu-tt-live-toolkit | ebu_tt_live/node/producer.py | 1 | 3589 | import logging
from .base import AbstractProducerNode
from datetime import timedelta
from ebu_tt_live.bindings import div_type, br_type, p_type, style_type, styling, layout, region_type, span_type
from ebu_tt_live.bindings._ebuttdt import LimitedClockTimingType
from ebu_tt_live.documents.ebutt3 import EBUTT3Document
from ebu_tt_live.errors import EndOfData
from ebu_tt_live.strings import END_OF_DATA, DOC_PRODUCED
document_logger = logging.getLogger('document_logger')
class SimpleProducer(AbstractProducerNode):
_document_sequence = None
_input_blocks = None
_reference_clock = None
_provides = EBUTT3Document
def __init__(self, node_id, producer_carriage, document_sequence, input_blocks):
super(SimpleProducer, self).__init__(node_id=node_id, producer_carriage=producer_carriage)
self._document_sequence = document_sequence
self._input_blocks = input_blocks
self._reference_clock = document_sequence.reference_clock
@property
def reference_clock(self):
return self._reference_clock
@property
def document_sequence(self):
return self._document_sequence
@staticmethod
def _interleave_line_breaks(items, style=None):
end_list = []
for item in items:
end_list.append(
span_type(
item,
style=style,
_strict_keywords=False
)
)
end_list.append(br_type())
# We don't require the last linebreak so remove it.
end_list.pop()
return end_list
def _create_fragment(self, lines, style=None):
return div_type(
p_type(
*self._interleave_line_breaks(lines, style=style),
id='ID{:03d}'.format(1),
_strict_keywords=False
),
region='bottomRegion'
)
def process_document(self, document=None, **kwargs):
activation_time = self._reference_clock.get_time() + timedelta(seconds=1)
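        # The generated document is scheduled to become active one second from the current
        # reference-clock time; its begin/dur are set accordingly below.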
if self._input_blocks:
try:
lines = self._input_blocks.next()
except StopIteration:
raise EndOfData(END_OF_DATA)
else:
lines = [LimitedClockTimingType(activation_time)]
document = self._document_sequence.new_document()
# Add default style
document.binding.head.styling = styling(
style_type(
id='defaultStyle1',
backgroundColor="rgb(0, 0, 0)",
color="rgb(255, 255, 255)",
linePadding="0.5c",
fontFamily="sansSerif"
)
)
document.binding.head.layout = layout(
region_type(
id='bottomRegion',
origin='14.375% 60%',
extent='71.25% 24%',
displayAlign='after',
writingMode='lrtb',
overflow="visible"
)
)
document.add_div(
self._create_fragment(
lines,
'defaultStyle1'
),
)
document.set_dur(LimitedClockTimingType(timedelta(seconds=1)))
document.set_begin(LimitedClockTimingType(activation_time))
document.validate()
document_logger.info(
DOC_PRODUCED.format(
sequence_identifier=document.sequence_identifier,
sequence_number=document.sequence_number
)
)
self.producer_carriage.emit_data(document, **kwargs)
| bsd-3-clause | -5,551,885,586,373,914,000 | 30.761062 | 111 | 0.575091 | false |
googleinterns/where-is-my-watch | GpsDataAnalyzer/visualizer/visualizer.py | 1 | 6027 | """
Usage: visualizer.py
Visualize the classified data as histogram and line graph with min/max/mean/std/availability information
"""
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import numpy as np
import pandas as pd
import os
from datetime import datetime
from datetime import timedelta
from datetime import timezone
"""
Deviation distribution zone
"""
HIGH_CONFIDENCE_THRESHOLD = 5
LOW_CONFIDENCE_THRESHOLD = 10
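# Absolute deviations up to HIGH_CONFIDENCE_THRESHOLD score confidence 3, up to
# LOW_CONFIDENCE_THRESHOLD score 2, and anything larger scores 1 (see classify_deviation).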
class Visualizer:
"""
Classify the deviation of distance and visualize the deviations of distance/speed/altitude
"""
def __init__(self):
current_directory = os.path.dirname(__file__)
current_time = datetime.strftime(datetime.now(), "%Y-%m-%dT%H%M%S")
self.output_file_folder = os.path.join(current_directory, current_time)
os.mkdir(self.output_file_folder)
def get_min_deviation(self, data):
"""
Get the min value of deviation
Args:
data: the deviation data
"""
return min(deviation for deviation in data)
def get_max_deviation(self, data):
"""
Get the max value of deviation
Args:
data: the deviation data
"""
return max(deviation for deviation in data)
def classify_deviation(self, deviation_dataframe):
"""
Classify the deviation of distance according to its absolute value, and mark the data confidence (1, 2, 3).
Higher score means higher confidence and accuracy.
Args:
deviation_dataframe: a dataframe containing time and deviation of distance/speed/altitude
Returns:
A dataframe after distance deviation classified with confidence
"""
deviation_list = deviation_dataframe["Deviations"]
confidence = []
for deviation in deviation_list:
abs_deviation = abs(deviation)
if abs_deviation <= HIGH_CONFIDENCE_THRESHOLD:
confidence.append(3)
elif abs_deviation <= LOW_CONFIDENCE_THRESHOLD:
confidence.append(2)
else:
confidence.append(1)
deviation_dataframe["Confidence"] = confidence
return deviation_dataframe
def draw_hist_graph(self, data, x_label, y_label, title, availability):
"""
Draw the histogram graph and save it as a png file
Args:
data: data on y axis
x_label: label for x axis
y_label: label for y axis
title: title for the graph
            availability: percentage of captured data points
"""
# Plot the data
fig = plt.figure(figsize=(20,10))
hist_label = "Availability: {}%".format(availability)
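        # Horizontal histogram with bin edges 0.5-3.5 so each bar counts one confidence score (1, 2 or 3).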
plt.hist(data, align='mid', bins=[0.5,1.5,2.5,3.5], rwidth=0.8, label=hist_label, orientation="horizontal", color='cornflowerblue')
# Set the title and labels
plt.legend(loc="upper left")
plt.xlabel(x_label, fontsize=10)
plt.ylabel(y_label, fontsize=10)
plt.title(title, fontsize=12)
plt.yticks(range(0,5))
# Save the graph as a png picture
my_file = "{}_Deviation_Confidence_{}.png".format(title, datetime.strftime(datetime.now(), "%Y-%m-%dT%H%M%S"))
fig.savefig(os.path.join(self.output_file_folder, my_file))
def draw_line_graph(self, x_data, x_label, y_data, y_label, title):
"""
Draw the line graph and save it as a png file
Args:
x_data: data on x axis
x_label: label for x axis
y_data: data on y axis
y_label: label for y axis
title: title for the graph
"""
        # Get the absolute mean of deviation, standard deviation, min and max deviation
abs_mean_deviation = round(np.mean(y_data),3)
std_deviation = round(np.std(y_data),3)
min_deviation = round(self.get_min_deviation(y_data), 3)
max_deviation = round(self.get_max_deviation(y_data), 3)
# Get the time duration
time_duration = x_data[len(x_data)-1] - x_data[0]
# Set the line_label and x_label
line_label = "Mean: {}\nSTD: {}\nMin: {}\nMax: {}".format(abs_mean_deviation, std_deviation, min_deviation, max_deviation)
x_label += str(time_duration)
# Plot the data
fig = plt.figure(figsize=(20,10))
ax = plt.subplot()
ax.plot(x_data, y_data, color='cornflowerblue', label= line_label)
# Format the time on x axis '%H:%M:%S'
ax.xaxis.set_major_formatter(dates.DateFormatter('%H:%M:%S'))
# Set the title and labels
plt.legend(loc="upper left")
plt.title(title +" Deviation", fontsize = 12)
plt.xlabel(x_label, fontsize = 10)
plt.ylabel(y_label, fontsize = 10)
# Save the graph as a png picture
my_file = "{}_Deviation_{}.png".format(title, datetime.strftime(datetime.now(), "%Y-%m-%dT%H%M%S"))
fig.savefig(os.path.join(self.output_file_folder, my_file))
def draw_lines_graph(self, x_data, x_label, y1_data, y2_data, y_label, title, label_1, label_2):
# Get the time duration
time_duration = x_data[len(x_data)-1] - x_data[0]
x_label += str(time_duration)
# Plot the data
fig = plt.figure(figsize=(20,10))
ax = plt.subplot()
ax.plot(x_data, y1_data, color='cornflowerblue', label=label_1)
ax.plot(x_data, y2_data, color='forestgreen', label=label_2)
# Format the time on x axis '%H:%M:%S'
ax.xaxis.set_major_formatter(dates.DateFormatter('%H:%M:%S'))
# Set the title and labels
plt.legend(loc="upper left")
plt.title(title, fontsize = 12)
plt.xlabel(x_label, fontsize = 10)
plt.ylabel(y_label, fontsize = 10)
# Save the graph as a png picture
my_file = "{}_Deviation_{}.png".format(title, datetime.strftime(datetime.now(), "%Y-%m-%dT%H%M%S"))
fig.savefig(os.path.join(self.output_file_folder, my_file))
| apache-2.0 | 8,937,705,627,102,015,000 | 34.040698 | 139 | 0.60959 | false |
mskala/birdie | birdieapp/gui/statusicon.py | 1 | 2075 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Ivo Nunes/Vasco Nunes
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from birdieapp.signalobject import SignalObject
class StatusIcon(SignalObject):
def __init__(self):
super(StatusIcon, self).init_signals()
self.statusicon = Gtk.StatusIcon()
self.statusicon.set_from_icon_name("birdie")
self.statusicon.connect("popup-menu", self.right_click_event)
self.statusicon.connect("activate", self.trayicon_activate)
def right_click_event(self, icon, button, tm):
menu = Gtk.Menu()
new_tweet = Gtk.MenuItem()
new_tweet.set_label(_("New Tweet"))
new_tweet.connect("activate", self.on_new_tweet)
menu.append(new_tweet)
quit_item = Gtk.MenuItem()
quit_item.set_label(_("Quit"))
quit_item.connect("activate", self.on_exit)
menu.append(quit_item)
menu.show_all()
menu.popup(None, None,
lambda w, x: self.statusicon.position_menu(
menu, self.statusicon),
self.statusicon, 3, tm)
def trayicon_activate (self, widget, data = None):
"""Toggle status icon"""
self.emit_signal("toggle-window-visibility")
def on_new_tweet(self, widget):
self.emit_signal_with_arg("new-tweet-compose", None)
def on_exit(self, widget):
self.emit_signal_with_args("on-exit", (None, None, None))
| gpl-3.0 | 2,877,588,231,206,584,300 | 32.467742 | 70 | 0.659759 | false |
Didacti/elixir | tests/test_dict.py | 1 | 5070 | """
test the deep-set functionality
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from elixir import *
def setup():
metadata.bind = 'sqlite://'
global Table1, Table2, Table3
class Table1(Entity):
t1id = Field(Integer, primary_key=True)
name = Field(String(30))
tbl2s = OneToMany('Table2')
tbl3 = OneToOne('Table3')
class Table2(Entity):
t2id = Field(Integer, primary_key=True)
name = Field(String(30))
tbl1 = ManyToOne(Table1)
class Table3(Entity):
t3id = Field(Integer, primary_key=True)
name = Field(String(30))
tbl1 = ManyToOne(Table1)
setup_all()
def teardown():
cleanup_all()
class TestDeepSet(object):
def setup(self):
create_all()
def teardown(self):
session.close()
drop_all()
def test_set_attr(self):
t1 = Table1()
t1.from_dict(dict(name='test1'))
assert t1.name == 'test1'
def test_nonset_attr(self):
t1 = Table1(name='test2')
t1.from_dict({})
assert t1.name == 'test2'
def test_set_rel(self):
t1 = Table1()
t1.from_dict(dict(tbl3={'name': 'bob'}))
assert t1.tbl3.name == 'bob'
def test_remove_rel(self):
t1 = Table1()
t1.tbl3 = Table3()
t1.from_dict(dict(tbl3=None))
assert t1.tbl3 is None
def test_update_rel(self):
t1 = Table1()
t1.tbl3 = Table3(name='fred')
t1.from_dict(dict(tbl3={'name': 'bob'}))
assert t1.tbl3.name == 'bob'
def test_extend_list(self):
t1 = Table1()
t1.from_dict(dict(tbl2s=[{'name': 'test3'}]))
assert len(t1.tbl2s) == 1
assert t1.tbl2s[0].name == 'test3'
def test_truncate_list(self):
t1 = Table1()
t2 = Table2()
t1.tbl2s.append(t2)
session.commit()
t1.from_dict(dict(tbl2s=[]))
assert len(t1.tbl2s) == 0
def test_update_list_item(self):
t1 = Table1()
t2 = Table2()
t1.tbl2s.append(t2)
session.commit()
t1.from_dict(dict(tbl2s=[{'t2id': t2.t2id, 'name': 'test4'}]))
assert len(t1.tbl2s) == 1
assert t1.tbl2s[0].name == 'test4'
def test_invalid_update(self):
t1 = Table1()
t2 = Table2()
t1.tbl2s.append(t2)
session.commit()
try:
t1.from_dict(dict(tbl2s=[{'t2id': t2.t2id+1}]))
assert False
except:
pass
def test_to(self):
t1 = Table1(t1id=50, name='test1')
assert t1.to_dict() == {'t1id': 50, 'name': 'test1'}
def test_to_deep_m2o(self):
t1 = Table1(t1id=1, name='test1')
t2 = Table2(t2id=1, name='test2', tbl1=t1)
session.flush()
assert t2.to_dict(deep={'tbl1': {}}) == \
{'t2id': 1, 'name': 'test2', 'tbl1_t1id': 1,
'tbl1': {'name': 'test1'}}
def test_to_deep_m2o_none(self):
t2 = Table2(t2id=1, name='test2')
session.flush()
assert t2.to_dict(deep={'tbl1': {}}) == \
{'t2id': 1, 'name': 'test2', 'tbl1_t1id': None, 'tbl1': None}
def test_to_deep_o2m_empty(self):
t1 = Table1(t1id=51, name='test2')
assert t1.to_dict(deep={'tbl2s': {}}) == \
{'t1id': 51, 'name': 'test2', 'tbl2s': []}
def test_to_deep_o2m(self):
t1 = Table1(t1id=52, name='test3')
t2 = Table2(t2id=50, name='test4')
t1.tbl2s.append(t2)
session.commit()
assert t1.to_dict(deep={'tbl2s':{}}) == \
{'t1id': 52,
'name': 'test3',
'tbl2s': [{'t2id': 50, 'name': 'test4'}]}
def test_to_deep_o2o(self):
t1 = Table1(t1id=53, name='test2')
t1.tbl3 = Table3(t3id=50, name='wobble')
session.commit()
assert t1.to_dict(deep={'tbl3': {}}) == \
{'t1id': 53,
'name': 'test2',
'tbl3': {'t3id': 50, 'name': 'wobble'}}
def test_to_deep_nested(self):
t3 = Table3(t3id=1, name='test3')
t1 = Table1(t1id=1, name='test1', tbl3=t3)
t2 = Table2(t2id=1, name='test2', tbl1=t1)
session.flush()
assert t2.to_dict(deep={'tbl1': {'tbl3': {}}}) == \
{'t2id': 1,
'name': 'test2',
'tbl1_t1id': 1,
'tbl1': {'name': 'test1',
'tbl3': {'t3id': 1,
'name': 'test3'}}}
class TestSetOnAliasedColumn(object):
def setup(self):
metadata.bind = 'sqlite://'
session.close()
def teardown(self):
cleanup_all(True)
def test_set_on_aliased_column(self):
class A(Entity):
name = Field(String(60), colname='strName')
setup_all(True)
a = A()
a.set(name='Aye')
assert a.name == 'Aye'
session.commit()
session.close()
| mit | -3,058,275,147,593,896,400 | 26.857143 | 76 | 0.506312 | false |
Jarvie8176/wows-noob-warning | API/makeshift/config.py | 1 | 1407 | player_search = 'http://worldofwarships.com/en/community/accounts/search/?search=%s'
player_search_key_search_page = 'js-search-results'
player_search_key_player_page = 'og:url'
player_search_key_error_page = 'Error'
player_search_id_pattern_search_page = '(?<=accounts/).*(?=-)'
player_search_id_pattern_player_page = '(?<=accounts/).*(?=-)'
player_stat_page = 'http://worldofwarships.com/en/community/accounts/tab/pvp/overview/%s/'
player_stat_tab_class = 'account-main-stats-mobile'
player_stat_key_battle_fought = 'battle_fought'
player_stat_key_win_rate = 'win_rate'
player_stat_key_average_exp = 'avg_exp'
player_stat_key_average_dmg = 'avg_dmg'
player_stat_key_kd_ratio = 'kd_ratio'
player_stat_battlefought = {
'key' : '_battles',
'value' : '_number'}
player_stat_winrate = {
'key' : '_winrate',
'value' : '_number'}
player_stat_avgexp = {
'key' : '_rating',
'value' : '_number'}
player_stat_avgdmg = {
'key' : '_kd',
'value' : '_number'}
player_stat_kdratio = {
'key' : '_damage',
'value' : '_number'}
player_stat_regex_pattern = '(?<=>).*(?=<)'
| mit | 8,899,890,509,648,553,000 | 40.382353 | 90 | 0.50462 | false |
cameronbwhite/PyOLP | PyOLP/api_objects.py | 1 | 12957 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2013, Cameron White
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
from . import api_exceptions
from .requester import Requester
import datetime
class _NotSetType:
def __repr__(self):
return "NotSet"
value = None
NotSet = _NotSetType()
class _ValuedAttribute:
def __init__(self, value):
self.value = value
class _BadAttribute:
def __init__(self, value, expectedType, exception=None):
self.__value = value
self.__expectedType = expectedType
self.__exception = exception
@property
def value(self):
raise api_exceptions.BadAttributeException(self.__value, self.__expectedType)
class ApiObject(object):
def __init__(self, requester, headers, attributes):
self._requester = requester
self._initAttributes() # virtual
self._storeAndUseAttributes(headers, attributes)
def _storeAndUseAttributes(self, headers, attributes):
# Make sure headers are assigned before calling _useAttributes
# (Some derived classes will use headers in _useAttributes)
self._headers = headers
self._rawData = attributes
self._useAttributes(attributes) # virtual
@staticmethod
def __makeSimpleAttribute(value, type):
if value is None or isinstance(value, type):
return _ValuedAttribute(value)
else:
return _BadAttribute(value, type)
@staticmethod
def _makeStringAttribute(value):
return ApiObject.__makeSimpleAttribute(value, (str, unicode))
@staticmethod
def _makeIntAttribute(value):
return ApiObject.__makeSimpleAttribute(value, (int, long))
@staticmethod
def _makeBoolAttribute(value):
return ApiObject.__makeSimpleAttribute(value, bool)
@staticmethod
def _makeFloatAttribute(value):
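        # Values may arrive as strings; try to coerce to float and let the type check below flag anything non-numeric.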
try:
value = float(value)
except ValueError:
pass
return ApiObject.__makeSimpleAttribute(value, float)
@staticmethod
def _makeDatetimeAttribute(value):
try:
d = datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
d = datetime.datetime.strptime(value, "%Y-%m-%d")
return ApiObject.__makeSimpleAttribute(d, datetime.datetime)
@property
def raw_data(self):
"""
:type: dict
"""
return self._rawData
@property
def raw_headers(self):
"""
:type: dict
"""
return self._headers
def update(self):
status, responseHeaders, output = self._requester.requestJson(
self._resource_uri.value # virtual
)
headers, data = self._requester._Requester__check(status, responseHeaders, output)
self._storeAndUseAttributes(headers, data)
class Price(ApiObject):
@property
def amount(self):
"""
:type: float
"""
return self._amount.value
@property
def created_at(self):
"""
:type: datetime
"""
return self._created_at.value
@property
def effective_date(self):
"""
:type: datetime
"""
return self._effective_date.value
@property
def id(self):
"""
:type: string
"""
return self._id.value
@property
def modified_at(self):
"""
:type: datetime
"""
return self._modified_at.value
@property
def product(self):
"""
:type: related
"""
return self._product.value
@property
def resource_uri(self):
"""
:type: string
"""
return self._resource_uri.value
def get_product(self):
headers, data = self._requester.requestJsonAndCheck(
self.product
)
return Product(self._requester, headers, data)
def _initAttributes(self):
self._amount = NotSet
self._created_at = NotSet
self._effective_date = NotSet
self._id = NotSet
self._modified_at = NotSet
self._product = NotSet
self._resource_uri = NotSet
def _useAttributes(self, attributes):
if "amount" in attributes:
self._amount = self._makeFloatAttribute(attributes["amount"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "effective_date" in attributes:
self._effective_date = self._makeDatetimeAttribute(attributes["effective_date"])
if "id" in attributes:
self._id = self._makeStringAttribute(attributes["id"])
if "modified_at" in attributes:
self._modified_at = self._makeDatetimeAttribute(attributes["modified_at"])
if "product" in attributes:
self._product = self._makeStringAttribute(attributes["product"])
if "resource_uri" in attributes:
self._resource_uri = self._makeStringAttribute(attributes["resource_uri"])
class Product(ApiObject):
@property
def age(self):
"""
:type: float
"""
return self._age.value
@property
def bottles_per_case(self):
"""
:type: int
"""
return self._bottles_per_case.value
@property
def code(self):
"""
:type: string
"""
return self._code.value
@property
def created_at(self):
"""
:type: datetime
"""
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
return self._description.value
@property
def id(self):
"""
:type: string
"""
return self._id.value
@property
def modified_at(self):
"""
:type: datetime
"""
return self._modified_at.value
@property
def on_sale(self):
"""
:type: bool
"""
return self._on_sale.value
@property
def proof(self):
"""
:type: float
"""
return self._proof.value
@property
def resource_uri(self):
"""
:type: string
"""
return self._resource_uri.value
@property
def size(self):
"""
:type: string
"""
return self._size.value
@property
def slug(self):
"""
:type: string
"""
return self._slug.value
@property
def status(self):
"""
:type: string
"""
return self._status.value
@property
def title(self):
"""
:type: string
"""
return self._title.value
def get_price(self):
headers, data = self._requester.requestJsonAndCheck(
'/api/v1/price/' + str(self.id) + '/'
)
return Price(self._requester, headers, data)
def _initAttributes(self):
self._age = NotSet
self._bottles_per_case = NotSet
self._code = NotSet
self._created_at = NotSet
self._description = NotSet
self._id = NotSet
self._modified_at = NotSet
self._on_sale = NotSet
self._proof = NotSet
self._resource_uri = NotSet
self._size = NotSet
self._slug = NotSet
self._status = NotSet
self._title = NotSet
def _useAttributes(self, attributes):
if "age" in attributes:
self._age = self._makeFloatAttribute(attributes["age"])
if "bottles_per_case" in attributes:
self._bottles_per_case = self._makeIntAttribute(attributes["bottles_per_case"])
if "code" in attributes:
self._code = self._makeStringAttribute(attributes["code"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes:
self._description = self._makeStringAttribute(attributes["description"])
if "id" in attributes:
self._id = self._makeStringAttribute(attributes["id"])
if "modified_at" in attributes:
self._modified_at = self._makeDatetimeAttribute(attributes["modified_at"])
if "on_sale" in attributes:
self._on_sale = self._makeBoolAttribute(attributes["on_sale"])
if "proof" in attributes:
self._proof = self._makeFloatAttribute(attributes["proof"])
if "resource_uri" in attributes:
self._resource_uri = self._makeStringAttribute(attributes["resource_uri"])
if "size" in attributes:
self._size = self._makeStringAttribute(attributes["size"])
if "slug" in attributes:
self._slug = self._makeStringAttribute(attributes["slug"])
if "status" in attributes:
self._status = self._makeStringAttribute(attributes["status"])
if "title" in attributes:
self._title = self._makeStringAttribute(attributes["title"])
class Store(ApiObject):
@property
def address(self):
"""
:type: string
"""
return self._address.value
@property
def address_raw(self):
"""
:type: string
"""
return self._address_raw.value
@property
def county(self):
"""
:type: string
"""
return self._county.value
@property
def hours_raw(self):
"""
:type: string
"""
return self._hours_raw.value
@property
def id(self):
"""
:type: string
"""
return self._id.value
@property
def key(self):
"""
:type: int
"""
return self._key.value
@property
def latitude(self):
"""
:type: float
"""
return self._latitude.value
@property
def longitude(self):
"""
:type: float
"""
return self._longitude.value
@property
def name(self):
"""
:type: string
"""
return self._name.value
@property
def phone(self):
"""
:type: string
"""
return self._phone.value
@property
def resource_uri(self):
"""
:type: string
"""
return self._resource_uri.value
def _initAttributes(self):
self._address = NotSet
self._address_raw = NotSet
self._county = NotSet
self._hours_raw = NotSet
self._id = NotSet
self._key = NotSet
self._latitude = NotSet
self._longitude = NotSet
self._name = NotSet
self._phone = NotSet
self._resource_uri = NotSet
def _useAttributes(self, attributes):
if "address" in attributes:
self._address = self._makeStringAttribute(attributes["address"])
if "address_raw" in attributes:
self._address_raw = self._makeStringAttribute(attributes["address_raw"])
if "county" in attributes:
self._county = self._makeStringAttribute(attributes["county"])
if "hours_raw" in attributes:
self._hours_raw = self._makeStringAttribute(attributes["hours_raw"])
if "id" in attributes:
self._id = self._makeStringAttribute(attributes["id"])
if "key" in attributes:
self._key = self._makeIntAttribute(attributes["key"])
if "latitude" in attributes:
self._latitude = self._makeFloatAttribute(attributes["latitude"])
if "longitude" in attributes:
self._longitude = self._makeFloatAttribute(attributes["longitude"])
if "name" in attributes:
self._name = self._makeStringAttribute(attributes["name"])
if "phone" in attributes:
self._phone = self._makeStringAttribute(attributes["phone"])
if "resource_uri" in attributes:
self._resource_uri = self._makeStringAttribute(attributes["resource_uri"])
| gpl-3.0 | -8,386,608,160,705,220,000 | 26.864516 | 92 | 0.561704 | false |
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/meta/lib/oeqa/selftest/archiver.py | 1 | 1776 | from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import bitbake, get_bb_vars
from oeqa.utils.decorators import testcase
import glob
import os
import shutil
class Archiver(oeSelfTest):
@testcase(1345)
def test_archiver_allows_to_filter_on_recipe_name(self):
"""
Summary: The archiver should offer the possibility to filter on the recipe. (#6929)
Expected: 1. Included recipe (busybox) should be included
2. Excluded recipe (zlib) should be excluded
Product: oe-core
Author: Daniel Istrate <[email protected]>
AutomatedBy: Daniel Istrate <[email protected]>
"""
include_recipe = 'busybox'
exclude_recipe = 'zlib'
features = 'INHERIT += "archiver"\n'
features += 'ARCHIVER_MODE[src] = "original"\n'
features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % include_recipe
features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % exclude_recipe
self.write_config(features)
bitbake('-c clean %s %s' % (include_recipe, exclude_recipe))
bitbake("%s %s" % (include_recipe, exclude_recipe))
bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS'])
src_path = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
# Check that include_recipe was included
included_present = len(glob.glob(src_path + '/%s-*' % include_recipe))
self.assertTrue(included_present, 'Recipe %s was not included.' % include_recipe)
# Check that exclude_recipe was excluded
excluded_present = len(glob.glob(src_path + '/%s-*' % exclude_recipe))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % exclude_recipe)
| gpl-2.0 | -3,082,833,724,617,833,500 | 40.302326 | 95 | 0.63964 | false |
phil-el/phetools | hocr/hocr.py | 1 | 9939 | # -*- coding: utf-8 -*-
#
# @file hocr.py
#
# @remark Copyright 2014 Philippe Elie
# @remark Read the file COPYING
#
# @author Philippe Elie
import sys
import os
from common import utils
import hashlib
from ocr import pdf_to_djvu
from ocr import ocr_djvu
from ocr import djvu_text_to_hocr
from ocr import ocr
from common import db
import re
def lang_to_site(lang):
sites = {
'nb' : 'no',
}
return sites.get(lang, lang)
tmp_dir = os.path.expanduser('~/tmp/hocr/')
def get_tmp_dir(lang):
if type(lang) == type(u''):
lang = lang.encode('utf-8')
return tmp_dir + lang + '/'
def bookname_md5(book_name):
h = hashlib.md5()
h.update(book_name)
return h.hexdigest()
def cache_path(book_name, lang):
base_dir = os.path.expanduser('~/cache/hocr/') + '%s/%s/%s/'
h = bookname_md5(book_name + lang_to_site(lang))
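    # Shard the cache over two levels of the digest (aa/bb/rest) to avoid one huge flat directory.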
return base_dir % (h[0:2], h[2:4], h[4:])
def read_sha1(path):
fd = open(path + 'sha1.sum')
sha1 = fd.read()
fd.close()
return sha1
def check_sha1(path, sha1):
if os.path.exists(path + 'sha1.sum'):
old_sha1 = read_sha1(path)
if old_sha1 == sha1:
return True
return False
def check_and_upload(url, filename, sha1):
if not os.path.exists(filename) or utils.sha1(filename) != sha1:
if not utils.copy_file_from_url(url, filename, sha1):
return False
return True
def db_sha1(domain, family, bookname):
conn = db.create_conn(domain = domain, family = family)
cursor = db.use_db(conn, domain, family)
q = 'SELECT img_sha1 FROM image WHERE img_name = %s'
cursor.execute(q, [bookname])
data = cursor.fetchall()
cursor.close()
conn.close()
return data[0][0] if len(data) else None
def get_sha1(lang, bookname):
if type(bookname) == type(u''):
bookname = bookname.encode('utf-8')
url = None
md5 = bookname_md5(bookname)
commons = False
sha1 = db_sha1(lang, 'wikisource', bookname)
if not sha1:
sha1 = db_sha1('commons', 'wiki', bookname)
commons = True
if sha1:
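        # MediaWiki stores img_sha1 in base 36; convert it to the usual 40-character hex form.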
sha1 = "%040x" % int(sha1, 36)
# FIXME: don't hardcode this.
url = 'https://upload.wikimedia.org/wikisource/%s/' % lang
if commons:
url = 'https://upload.wikimedia.org/wikipedia/commons/'
url += md5[0] + '/' + md5[0:2] + '/' + bookname
return sha1, url
# Check whether the cached hOCR data for a book is up to date.
#
# return:
#  -1 if the File: page doesn't exist
#  -2 if an exception occurred during the file copy
#   0 if the data exist and are up to date
#   1 if the File: page exists but the data are outdated or missing
# When 1 is returned, the file has been uploaded if it wasn't already present.
def is_uptodate(lang, book):
path = cache_path(book, lang)
url = None
sha1, url = get_sha1(lang, book)
if not sha1:
return -1
if check_sha1(path, sha1):
return 0
if not os.path.exists(path):
os.makedirs(path)
    # This is racy: if two hocr processes try to create the same directory, it may
    # not exist when tested yet be created by the other process before makedirs()
    # is called, so protect the call with a try/except.
temp_dir = get_tmp_dir(lang)
if not os.path.exists(temp_dir):
try:
os.makedirs(temp_dir)
except OSError, e:
import errno
if e.errno != errno.EEXIST:
raise
if not check_and_upload(url, temp_dir + book, sha1):
return -2
return 1
def write_sha1(out_dir, in_file):
sha1 = utils.sha1(in_file)
utils.write_sha1(sha1, out_dir + "sha1.sum")
def fast_hocr(book, lang):
print "fast_hocr"
path = cache_path(book, lang)
print "out_dir:", path
options = djvu_text_to_hocr.default_options()
options.compress = 'bzip2'
options.out_dir = path
options.silent = True
in_file = get_tmp_dir(lang) + book
if djvu_text_to_hocr.parse(options, in_file) == 0:
return True
return False
def slow_hocr(lang, book, in_file):
print "slow_hocr"
path = cache_path(book, lang)
print "out_dir:", path
options = ocr_djvu.default_options()
options.silent = True
options.compress = 'bzip2'
options.config = 'hocr'
options.num_thread = 1
options.lang = ocr.tesseract_languages.get(lang, 'eng')
options.out_dir = path
print "Using tesseract lang:", options.lang
ret = ocr_djvu.ocr_djvu(options, in_file)
# FIXME: should go in ocr_djvu.cleanup() but better if cleanup() can
# be triggered by some sort of ocr_djvu module unload
try:
os.rmdir(options.temp_tiff_dir)
except:
print >> sys.stderr, "unable to remove directory:", options.temp_tiff_dir
return ret
# is_uptodate() must be called first to ensure the file is uploaded.
def hocr(options):
path = cache_path(options.book, options.lang)
if os.path.exists(path + 'sha1.sum'):
os.remove(path + 'sha1.sum')
in_file = get_tmp_dir(options.lang) + options.book
done = False
if in_file.endswith('.pdf'):
        # Slow hocr is skipped for ws.ru and a .pdf would require slow_hocr,
        # so don't try to convert the pdf to djvu for ru.ws.
if options.lang != 'ru':
djvuname = pdf_to_djvu.pdf_to_djvu(in_file)
else:
djvuname = None
else:
djvuname = in_file
if options.lang != 'bn' and djvu_text_to_hocr.has_word_bbox(in_file):
done = fast_hocr(options.book, options.lang)
# djvuname == None if pdf_to_djvu() fail to convert the file
if not done and djvuname and options.lang != 'ru':
done = slow_hocr(options.lang, options.book, djvuname)
# never fail for ws.ru, see above.
if done or options.lang == 'ru':
write_sha1(path, in_file)
if djvuname:
os.remove(djvuname)
if djvuname != in_file:
os.remove(in_file)
return done
def update_db(lang, bookname):
import hocr_request
db_hocr = hocr_request.DbHocr()
with db.connection(db_hocr):
path = cache_path(bookname, lang)
if os.path.exists(path + 'sha1.sum'):
sha1 = read_sha1(path)
db_hocr.add_update_row(bookname, lang, sha1)
else:
print >> sys.stderr, "Can't locate sha1.sum", path
def ret_val(error, text):
if error:
print >> sys.stderr, "Error: %d, %s" % (error, text)
return { 'error' : error, 'text' : text }
def get_hocr(lang, title):
    # FIXME: delete all 'no' OCR data and redo it with the 'nb' language code.
if lang == 'nb':
lang = 'no'
if type(title) == type(u''):
title = title.encode('utf-8')
title = title.replace(' ', '_')
try:
if lang == 'bn':
title = unicode(title, 'utf-8')
page_nr = re.sub(u'^.*/([০-৯]+)$', '\\1', title)
book_name = re.sub(u'^(.*?)(/[০-৯]+)?$', '\\1', title)
book_name = book_name.encode('utf-8')
result = ord(page_nr[0]) - ord(u'০')
for ch in page_nr[1:]:
result *= 10
result += ord(ch) - ord(u'০')
page_nr = result
else:
page_nr = re.sub('^.*/([0-9]+)$', '\\1', title)
book_name = re.sub('^(.*?)(/[0-9]+)?$', '\\1', title)
page_nr = int(page_nr)
except:
return ret_val(1, "unable to extract page number from page: " + title)
path = cache_path(book_name, lang)
filename = path + 'page_%04d.hocr' % page_nr
    # We support data built with a different compression scheme than the one
    # actually generated by the server.
text = utils.uncompress_file(filename, [ 'bzip2', 'gzip', '' ])
if text == None:
        # not available, add a request to do this hocr so we build the data
        # lazily, but filter out unsupported file types here
if book_name.endswith('.djvu') or book_name.endswith('.pdf'):
import hocr_request
hocr_request.add_hocr_request(lang, book_name, True)
return ret_val(1, "unable to locate file %s for page %s lang %s" % (filename, book_name, lang))
    # Work around https://code.google.com/p/tesseract-ocr/issues/detail?id=690&can=1&q=utf-8
    # A simple patch exists: https://code.google.com/p/tesseract-ocr/source/detail?r=736#
    # but it's easier to do a double conversion to remove invalid utf8 than to
    # maintain a patched version of tesseract.
text = unicode(text, 'utf-8', 'ignore')
text = text.encode('utf-8', 'ignore')
return ret_val(0, text)
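# get_hocr() always answers with the ret_val() dictionary shape, for example
# (illustrative values only):
#   {'error': 0, 'text': '<hocr markup>'} on success
#   {'error': 1, 'text': 'unable to locate file ...'} when data is not built yet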
def default_options():
class Options:
pass
options = Options()
options.book = None
options.lang = None
return options
def main():
options = default_options()
for arg in sys.argv[1:]:
if arg == '-help':
pass
elif arg.startswith('-book:'):
options.book = arg[len('-book:'):]
options.book = options.book.replace(' ', '_')
elif arg.startswith('-lang:'):
options.lang = arg[len('-lang:'):]
else:
print >> sys.stderr, 'unknown option:', sys.argv
exit(1)
if not options.book or not options.lang:
print >> sys.stderr, 'missing option -lang: and/or -book:', sys.argv
exit(1)
ret = is_uptodate(options.lang, options.book)
if ret > 0:
if not hocr(options):
print >> sys.stderr, 'Error, hocr fail'
ret = 2
else:
update_db(options.lang, options.book)
ret = 0
elif ret < 0:
print >> sys.stderr, "Error, file doesn't exist:", ret
ret = 3 + abs(ret)
else:
update_db(options.lang, options.book)
return ret
if __name__ == '__main__':
cache_dir = 'hocr'
if not os.path.exists(os.path.expanduser('~/cache/' + cache_dir)):
os.mkdir(os.path.expanduser('~/cache/' + cache_dir))
try:
ret = main()
except:
utils.print_traceback()
exit(4)
exit(ret)
| gpl-3.0 | 1,838,696,693,971,659,800 | 27.282051 | 297 | 0.586481 | false |
dchaplinsky/garnahata.in.ua | garnahata_site/garnahata_site/urls.py | 1 | 2269 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.sitemaps import GenericSitemap
from django.conf.urls import include, url
from wagtail.core import urls as wagtail_urls
from wagtail.admin import urls as wagtailadmin_urls
from garnahata_site.sitemaps import MainXML, NewsXML, StaticXML
from garnahata_site.feeds import LatestNewsFeed
from catalog import views as catalog_views
from catalog.models import Address
from cms_pages import views as cms_pages_views
urlpatterns = [
# url(r'^ajax/suggest$', catalog_views.suggest, name='suggest'),
url(r'^search/suggest$', catalog_views.SuggestView.as_view(), name="suggest"),
url(r'^a/(?P<slug>.+)$', catalog_views.address_details,
name='address_details'),
url(r'^tag/', include('cms_pages.urls')),
url(r'^latest$', catalog_views.latest_addresses,
name='latest_addresses'),
url(r'^by_city$', catalog_views.addresses_by_city,
name='addresses_by_city'),
url(r'^news$', cms_pages_views.news, name='news'),
url(r'^news/special$', cms_pages_views.news, name='special_news',
kwargs={'special': True}),
url(r'^search$', catalog_views.search, name='search'),
url(r'^sitemap\.xml$', sitemap, {
'sitemaps': {
'main': MainXML,
'adresses': GenericSitemap({
'queryset': Address.objects.all(),
'date_field': "date_added",
}),
'news': NewsXML,
'static': StaticXML,
}},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^admin/fs/', include('fs.urls')),
url(r'^search_ownerships$', catalog_views.search,
name='search_ownerships', kwargs={"sources": ["ownerships"]}),
url(r'^search_addresses$', catalog_views.search,
name='search_addresses', kwargs={"sources": ["addresses"]}),
url(r'^feeds/news/$', LatestNewsFeed(), name="rss_feed"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^admin/', admin.site.urls),
url(r'^cms/', include(wagtailadmin_urls)),
url(r'', include(wagtail_urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | -5,547,407,876,952,480,000 | 31.884058 | 82 | 0.651829 | false |
aaltay/beam | sdks/python/apache_beam/typehints/typed_pipeline_test.py | 1 | 19346 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the type-hint objects and decorators."""
# pytype: skip-file
from __future__ import absolute_import
import sys
import typing
import unittest
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import apache_beam as beam
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.options.pipeline_options import OptionsContext
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.typehints import WithTypeHints
from apache_beam.typehints.decorators import get_signature
# These tests often construct a pipeline as value | PTransform to test side
# effects (e.g. errors).
# pylint: disable=expression-not-assigned
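# A minimal sketch of that idiom (illustrative, mirroring the tests below):
#   ['a', 'bb', 'c'] | beam.Map(str.upper)    # apply a transform directly to a list
#   ['1', '10', '100'] | beam.Map(int, 16)    # extra arguments act as side inputs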
class MainInputTest(unittest.TestCase):
def test_bad_main_input(self):
@typehints.with_input_types(str, int)
def repeat(s, times):
return s * times
with self.assertRaises(typehints.TypeCheckError):
[1, 2, 3] | beam.Map(repeat, 3)
def test_non_function(self):
result = ['a', 'bb', 'c'] | beam.Map(str.upper)
self.assertEqual(['A', 'BB', 'C'], sorted(result))
result = ['xa', 'bbx', 'xcx'] | beam.Map(str.strip, 'x')
self.assertEqual(['a', 'bb', 'c'], sorted(result))
result = ['1', '10', '100'] | beam.Map(int)
self.assertEqual([1, 10, 100], sorted(result))
result = ['1', '10', '100'] | beam.Map(int, 16)
self.assertEqual([1, 16, 256], sorted(result))
@unittest.skipIf(
sys.version_info.major >= 3 and sys.version_info < (3, 7, 0),
'Function signatures for builtins are not available in Python 3 before '
'version 3.7.')
def test_non_function_fails(self):
with self.assertRaises(typehints.TypeCheckError):
[1, 2, 3] | beam.Map(str.upper)
def test_loose_bounds(self):
@typehints.with_input_types(typing.Union[int, float])
@typehints.with_output_types(str)
def format_number(x):
return '%g' % x
result = [1, 2, 3] | beam.Map(format_number)
self.assertEqual(['1', '2', '3'], sorted(result))
def test_typed_dofn_class(self):
@typehints.with_input_types(int)
@typehints.with_output_types(str)
class MyDoFn(beam.DoFn):
def process(self, element):
return [str(element)]
result = [1, 2, 3] | beam.ParDo(MyDoFn())
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
['a', 'b', 'c'] | beam.ParDo(MyDoFn())
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
[1, 2, 3] | (beam.ParDo(MyDoFn()) | 'again' >> beam.ParDo(MyDoFn()))
def test_typed_callable_iterable_output(self):
@typehints.with_input_types(int)
@typehints.with_output_types(typehints.Iterable[typehints.Iterable[str]])
def do_fn(element):
return [[str(element)] * 2]
result = [1, 2] | beam.ParDo(do_fn)
self.assertEqual([['1', '1'], ['2', '2']], sorted(result))
def test_typed_dofn_instance(self):
class MyDoFn(beam.DoFn):
def process(self, element):
return [str(element)]
my_do_fn = MyDoFn().with_input_types(int).with_output_types(str)
result = [1, 2, 3] | beam.ParDo(my_do_fn)
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaises(typehints.TypeCheckError):
['a', 'b', 'c'] | beam.ParDo(my_do_fn)
with self.assertRaises(typehints.TypeCheckError):
[1, 2, 3] | (beam.ParDo(my_do_fn) | 'again' >> beam.ParDo(my_do_fn))
def test_filter_type_hint(self):
@typehints.with_input_types(int)
def filter_fn(data):
return data % 2
self.assertEqual([1, 3], [1, 2, 3] | beam.Filter(filter_fn))
def test_partition(self):
with TestPipeline() as p:
even, odd = (p
| beam.Create([1, 2, 3])
| 'even_odd' >> beam.Partition(lambda e, _: e % 2, 2))
self.assertIsNotNone(even.element_type)
self.assertIsNotNone(odd.element_type)
res_even = (
even
| 'IdEven' >> beam.ParDo(lambda e: [e]).with_input_types(int))
res_odd = (
odd
| 'IdOdd' >> beam.ParDo(lambda e: [e]).with_input_types(int))
assert_that(res_even, equal_to([2]), label='even_check')
assert_that(res_odd, equal_to([1, 3]), label='odd_check')
def test_typed_dofn_multi_output(self):
class MyDoFn(beam.DoFn):
def process(self, element):
if element % 2:
yield beam.pvalue.TaggedOutput('odd', element)
else:
yield beam.pvalue.TaggedOutput('even', element)
with TestPipeline() as p:
res = (
p
| beam.Create([1, 2, 3])
| beam.ParDo(MyDoFn()).with_outputs('odd', 'even'))
self.assertIsNotNone(res[None].element_type)
self.assertIsNotNone(res['even'].element_type)
self.assertIsNotNone(res['odd'].element_type)
res_main = (
res[None]
| 'id_none' >> beam.ParDo(lambda e: [e]).with_input_types(int))
res_even = (
res['even']
| 'id_even' >> beam.ParDo(lambda e: [e]).with_input_types(int))
res_odd = (
res['odd']
| 'id_odd' >> beam.ParDo(lambda e: [e]).with_input_types(int))
assert_that(res_main, equal_to([]), label='none_check')
assert_that(res_even, equal_to([2]), label='even_check')
assert_that(res_odd, equal_to([1, 3]), label='odd_check')
with self.assertRaises(ValueError):
_ = res['undeclared tag']
def test_typed_dofn_multi_output_no_tags(self):
class MyDoFn(beam.DoFn):
def process(self, element):
if element % 2:
yield beam.pvalue.TaggedOutput('odd', element)
else:
yield beam.pvalue.TaggedOutput('even', element)
with TestPipeline() as p:
res = (p | beam.Create([1, 2, 3]) | beam.ParDo(MyDoFn()).with_outputs())
self.assertIsNotNone(res[None].element_type)
self.assertIsNotNone(res['even'].element_type)
self.assertIsNotNone(res['odd'].element_type)
res_main = (
res[None]
| 'id_none' >> beam.ParDo(lambda e: [e]).with_input_types(int))
res_even = (
res['even']
| 'id_even' >> beam.ParDo(lambda e: [e]).with_input_types(int))
res_odd = (
res['odd']
| 'id_odd' >> beam.ParDo(lambda e: [e]).with_input_types(int))
assert_that(res_main, equal_to([]), label='none_check')
assert_that(res_even, equal_to([2]), label='even_check')
assert_that(res_odd, equal_to([1, 3]), label='odd_check')
def test_typed_ptransform_fn_pre_hints(self):
# Test that type hints are propagated to the created PTransform.
# Decorator appears before type hints. This is the more common style.
@beam.ptransform_fn
@typehints.with_input_types(int)
def MyMap(pcoll):
return pcoll | beam.ParDo(lambda x: [x])
self.assertListEqual([1, 2, 3], [1, 2, 3] | MyMap())
with self.assertRaises(typehints.TypeCheckError):
_ = ['a'] | MyMap()
def test_typed_ptransform_fn_post_hints(self):
# Test that type hints are propagated to the created PTransform.
# Decorator appears after type hints. This style is required for Cython
# functions, since they don't accept assigning attributes to them.
@typehints.with_input_types(int)
@beam.ptransform_fn
def MyMap(pcoll):
return pcoll | beam.ParDo(lambda x: [x])
self.assertListEqual([1, 2, 3], [1, 2, 3] | MyMap())
with self.assertRaises(typehints.TypeCheckError):
_ = ['a'] | MyMap()
def test_typed_ptransform_fn_multi_input_types_pos(self):
@beam.ptransform_fn
@beam.typehints.with_input_types(str, int)
def multi_input(pcoll_tuple, additional_arg):
_, _ = pcoll_tuple
assert additional_arg == 'additional_arg'
with TestPipeline() as p:
pcoll1 = p | 'c1' >> beam.Create(['a'])
pcoll2 = p | 'c2' >> beam.Create([1])
_ = (pcoll1, pcoll2) | multi_input('additional_arg')
with self.assertRaises(typehints.TypeCheckError):
_ = (pcoll2, pcoll1) | 'fails' >> multi_input('additional_arg')
def test_typed_ptransform_fn_multi_input_types_kw(self):
@beam.ptransform_fn
@beam.typehints.with_input_types(strings=str, integers=int)
def multi_input(pcoll_dict, additional_arg):
_ = pcoll_dict['strings']
_ = pcoll_dict['integers']
assert additional_arg == 'additional_arg'
with TestPipeline() as p:
pcoll1 = p | 'c1' >> beam.Create(['a'])
pcoll2 = p | 'c2' >> beam.Create([1])
_ = {
'strings': pcoll1, 'integers': pcoll2
} | multi_input('additional_arg')
with self.assertRaises(typehints.TypeCheckError):
_ = {
'strings': pcoll2, 'integers': pcoll1
} | 'fails' >> multi_input('additional_arg')
class NativeTypesTest(unittest.TestCase):
def test_good_main_input(self):
@typehints.with_input_types(typing.Tuple[str, int])
def munge(s_i):
(s, i) = s_i
return (s + 's', i * 2)
result = [('apple', 5), ('pear', 3)] | beam.Map(munge)
self.assertEqual([('apples', 10), ('pears', 6)], sorted(result))
def test_bad_main_input(self):
@typehints.with_input_types(typing.Tuple[str, str])
def munge(s_i):
(s, i) = s_i
return (s + 's', i * 2)
with self.assertRaises(typehints.TypeCheckError):
[('apple', 5), ('pear', 3)] | beam.Map(munge)
def test_bad_main_output(self):
@typehints.with_input_types(typing.Tuple[int, int])
@typehints.with_output_types(typing.Tuple[str, str])
def munge(a_b):
(a, b) = a_b
return (str(a), str(b))
with self.assertRaises(typehints.TypeCheckError):
[(5, 4), (3, 2)] | beam.Map(munge) | 'Again' >> beam.Map(munge)
class SideInputTest(unittest.TestCase):
def _run_repeat_test(self, repeat):
self._run_repeat_test_good(repeat)
self._run_repeat_test_bad(repeat)
@OptionsContext(pipeline_type_check=True)
def _run_repeat_test_good(self, repeat):
# As a positional argument.
result = ['a', 'bb', 'c'] | beam.Map(repeat, 3)
self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result))
# As a keyword argument.
result = ['a', 'bb', 'c'] | beam.Map(repeat, times=3)
self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result))
def _run_repeat_test_bad(self, repeat):
# Various mismatches.
with self.assertRaises(typehints.TypeCheckError):
['a', 'bb', 'c'] | beam.Map(repeat, 'z')
with self.assertRaises(typehints.TypeCheckError):
['a', 'bb', 'c'] | beam.Map(repeat, times='z')
with self.assertRaises(typehints.TypeCheckError):
['a', 'bb', 'c'] | beam.Map(repeat, 3, 4)
if all(param.default == param.empty
for param in get_signature(repeat).parameters.values()):
with self.assertRaisesRegex(typehints.TypeCheckError,
r'(takes exactly|missing a required)'):
['a', 'bb', 'c'] | beam.Map(repeat)
def test_basic_side_input_hint(self):
@typehints.with_input_types(str, int)
def repeat(s, times):
return s * times
self._run_repeat_test(repeat)
def test_keyword_side_input_hint(self):
@typehints.with_input_types(str, times=int)
def repeat(s, times):
return s * times
self._run_repeat_test(repeat)
def test_default_typed_hint(self):
@typehints.with_input_types(str, int)
def repeat(s, times=3):
return s * times
self._run_repeat_test(repeat)
def test_default_untyped_hint(self):
@typehints.with_input_types(str)
def repeat(s, times=3):
return s * times
# No type checking on default arg.
self._run_repeat_test_good(repeat)
@OptionsContext(pipeline_type_check=True)
def test_varargs_side_input_hint(self):
@typehints.with_input_types(str, int)
def repeat(s, *times):
return s * times[0]
result = ['a', 'bb', 'c'] | beam.Map(repeat, 3)
self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result))
if sys.version_info >= (3, ):
with self.assertRaisesRegex(
typehints.TypeCheckError,
r'requires Tuple\[int, ...\] but got Tuple\[str, ...\]'):
['a', 'bb', 'c'] | beam.Map(repeat, 'z')
def test_var_positional_only_side_input_hint(self):
# Test that a lambda that accepts only a VAR_POSITIONAL can accept
# side-inputs.
# TODO(BEAM-8247): There's a bug with trivial_inference inferring the output
# type when side-inputs are used (their type hints are not passed). Remove
# with_output_types(...) when this bug is fixed.
result = (['a', 'b', 'c']
| beam.Map(lambda *args: args, 5).with_input_types(
str, int).with_output_types(typehints.Tuple[str, int]))
self.assertEqual([('a', 5), ('b', 5), ('c', 5)], sorted(result))
if sys.version_info >= (3, ):
with self.assertRaisesRegex(
typehints.TypeCheckError,
r'requires Tuple\[Union\[int, str\], ...\] but got '
r'Tuple\[Union\[float, int\], ...\]'):
_ = [1.2] | beam.Map(lambda *_: 'a', 5).with_input_types(int, str)
def test_var_keyword_side_input_hint(self):
# Test that a lambda that accepts a VAR_KEYWORD can accept
# side-inputs.
result = (['a', 'b', 'c']
| beam.Map(lambda e, **kwargs:
(e, kwargs), kw=5).with_input_types(str, ignored=int))
self.assertEqual([('a', {
'kw': 5
}), ('b', {
'kw': 5
}), ('c', {
'kw': 5
})],
sorted(result))
if sys.version_info >= (3, ):
with self.assertRaisesRegex(
typehints.TypeCheckError,
r'requires Dict\[str, str\] but got Dict\[str, int\]'):
_ = (['a', 'b', 'c']
| beam.Map(lambda e, **_: 'a', kw=5).with_input_types(
str, ignored=str))
def test_deferred_side_inputs(self):
@typehints.with_input_types(str, int)
def repeat(s, times):
return s * times
with TestPipeline() as p:
main_input = p | beam.Create(['a', 'bb', 'c'])
side_input = p | 'side' >> beam.Create([3])
result = main_input | beam.Map(repeat, pvalue.AsSingleton(side_input))
assert_that(result, equal_to(['aaa', 'bbbbbb', 'ccc']))
bad_side_input = p | 'bad_side' >> beam.Create(['z'])
with self.assertRaises(typehints.TypeCheckError):
main_input | 'bis' >> beam.Map(repeat, pvalue.AsSingleton(bad_side_input))
def test_deferred_side_input_iterable(self):
@typehints.with_input_types(str, typing.Iterable[str])
def concat(glue, items):
return glue.join(sorted(items))
with TestPipeline() as p:
main_input = p | beam.Create(['a', 'bb', 'c'])
side_input = p | 'side' >> beam.Create(['x', 'y', 'z'])
result = main_input | beam.Map(concat, pvalue.AsIter(side_input))
assert_that(result, equal_to(['xayaz', 'xbbybbz', 'xcycz']))
bad_side_input = p | 'bad_side' >> beam.Create([1, 2, 3])
with self.assertRaises(typehints.TypeCheckError):
main_input | 'fail' >> beam.Map(concat, pvalue.AsIter(bad_side_input))
class CustomTransformTest(unittest.TestCase):
class CustomTransform(beam.PTransform):
def _extract_input_pvalues(self, pvalueish):
return pvalueish, (pvalueish['in0'], pvalueish['in1'])
def expand(self, pvalueish):
return {'out0': pvalueish['in0'], 'out1': pvalueish['in1']}
# TODO(robertwb): (typecheck) Make these the default?
def with_input_types(self, *args, **kwargs):
return WithTypeHints.with_input_types(self, *args, **kwargs)
def with_output_types(self, *args, **kwargs):
return WithTypeHints.with_output_types(self, *args, **kwargs)
test_input = {'in0': ['a', 'b', 'c'], 'in1': [1, 2, 3]}
def check_output(self, result):
self.assertEqual(['a', 'b', 'c'], sorted(result['out0']))
self.assertEqual([1, 2, 3], sorted(result['out1']))
def test_custom_transform(self):
self.check_output(self.test_input | self.CustomTransform())
def test_keyword_type_hints(self):
self.check_output(
self.test_input
| self.CustomTransform().with_input_types(in0=str, in1=int))
self.check_output(
self.test_input | self.CustomTransform().with_input_types(in0=str))
self.check_output(
self.test_input
| self.CustomTransform().with_output_types(out0=str, out1=int))
with self.assertRaises(typehints.TypeCheckError):
self.test_input | self.CustomTransform().with_input_types(in0=int)
with self.assertRaises(typehints.TypeCheckError):
self.test_input | self.CustomTransform().with_output_types(out0=int)
def test_flat_type_hint(self):
# Type hint is applied to both.
({
'in0': ['a', 'b', 'c'], 'in1': ['x', 'y', 'z']
}
| self.CustomTransform().with_input_types(str))
with self.assertRaises(typehints.TypeCheckError):
self.test_input | self.CustomTransform().with_input_types(str)
with self.assertRaises(typehints.TypeCheckError):
self.test_input | self.CustomTransform().with_input_types(int)
with self.assertRaises(typehints.TypeCheckError):
self.test_input | self.CustomTransform().with_output_types(int)
class AnnotationsTest(unittest.TestCase):
def test_pardo_wrapper_builtin_method(self):
th = beam.ParDo(str.strip).get_type_hints()
if sys.version_info < (3, 7):
self.assertEqual(th.input_types, ((str, ), {}))
else:
# Python 3.7+ has annotations for CPython builtins
# (_MethodDescriptorType).
self.assertEqual(th.input_types, ((str, typehints.Any), {}))
self.assertEqual(th.output_types, ((typehints.Any, ), {}))
def test_pardo_wrapper_builtin_type(self):
th = beam.ParDo(list).get_type_hints()
if sys.version_info < (3, 7):
self.assertEqual(
th.input_types,
((typehints.Any, typehints.decorators._ANY_VAR_POSITIONAL), {
'__unknown__keywords': typehints.decorators._ANY_VAR_KEYWORD
}))
else:
# Python 3.7+ supports signatures for builtins like 'list'.
self.assertEqual(th.input_types, ((typehints.Any, ), {}))
self.assertEqual(th.output_types, ((typehints.Any, ), {}))
def test_pardo_wrapper_builtin_func(self):
th = beam.ParDo(len).get_type_hints()
self.assertIsNone(th.input_types)
self.assertIsNone(th.output_types)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,108,936,589,871,372,300 | 35.709677 | 80 | 0.621834 | false |
apacha/OMR-Datasets | docs/source/conf.py | 1 | 5278 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'OMR Dataset Tools'
copyright = '2020, Alexander Pacha'
author = 'Alexander Pacha'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OmrDatasetToolsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OmrDatasetTools.tex', 'OmrDatasetTools Documentation',
'Alexander Pacha', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'omrdatasettools', 'OmrDatasetTools Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OmrDatasetTools', 'OmrDatasetTools Documentation',
author, 'OmrDatasetTools', 'Tools for working with datasets for Optical Music Recognition',
'Miscellaneous'),
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | mit | 2,584,878,461,333,540,000 | 30.236686 | 96 | 0.65233 | false |
kubeflow/katib | cmd/suggestion/nas/darts/v1beta1/main.py | 1 | 1428 | # Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
from concurrent import futures
import time
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.apis.manager.health.python import health_pb2_grpc
from pkg.suggestion.v1beta1.nas.darts.service import DartsService
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
DEFAULT_PORT = "0.0.0.0:6789"
def serve():
print("Darts Suggestion Service")
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
service = DartsService()
api_pb2_grpc.add_SuggestionServicer_to_server(service, server)
health_pb2_grpc.add_HealthServicer_to_server(service, server)
server.add_insecure_port(DEFAULT_PORT)
print("Listening...")
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
serve()
| apache-2.0 | 558,172,976,097,228,500 | 31.454545 | 74 | 0.727591 | false |
SUNET/eduid-webapp | src/eduid_webapp/orcid/run.py | 1 | 1774 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from eduid_webapp.orcid.app import init_orcid_app
app = init_orcid_app()
if __name__ == '__main__':
app.logger.info(f'Starting {app}...')
app.run()
| bsd-3-clause | -5,975,524,636,354,072,000 | 42.268293 | 72 | 0.739008 | false |
arunkgupta/gramps | gramps/plugins/lib/maps/placeselection.py | 1 | 9938 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2012 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
import re
from gi.repository import GObject
import math
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("maps.placeselection")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.errors import WindowActiveError
from gramps.gui.managedwindow import ManagedWindow
from osmGps import OsmGps
#-------------------------------------------------------------------------
#
# Functions and variables
#
#-------------------------------------------------------------------------
PLACE_REGEXP = re.compile('<span background="green">(.*)</span>')
PLACE_STRING = '<span background="green">%s</span>'
def match(self, lat, lon, radius):
"""
    Return the places whose coordinates lie within 'radius' of (lat, lon).
"""
rds = float(radius)
self.places = []
# place
for entry in self.place_list:
        if math.hypot(lat-float(entry[3]),
                      lon-float(entry[4])) <= rds:
# Do we already have this place ? avoid duplicates
self.get_location(entry[9])
if not [self.country, self.state, self.county] in self.places:
self.places.append([self.country, self.state, self.county])
return self.places
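# Illustrative call (hypothetical coordinates): match(self, 48.85, 2.35, 1.0)
# returns the distinct [country, state, county] triples of every entry of
# self.place_list lying within a radius of 1.0 around (48.85, 2.35).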
#-------------------------------------------------------------------------
#
# PlaceSelection
#
#-------------------------------------------------------------------------
class PlaceSelection(ManagedWindow, OsmGps):
"""
We show a selection box for possible places in a region of the map.
    We can select the diameter of the region, which is a circle.
    Depending on this region, we can show the possible choices.
    We select the value we need, which opens the EditPlace box.
"""
def __init__(self, uistate, dbstate, maps, layer, places, lat, lon,
function, oldvalue=None):
"""
Place Selection initialization
"""
try:
ManagedWindow.__init__(self, uistate, [],
PlaceSelection)
except WindowActiveError:
return
self.uistate = uistate
self.dbstate = dbstate
self.lat = lat
self.lon = lon
self.osm = maps
self.country = None
self.state = None
self.county = None
self.radius = 1.0
self.circle = None
self.oldvalue = oldvalue
self.place_list = places
self.function = function
self.selection_layer = layer
self.layer = layer
alignment = Gtk.Alignment.new(0, 1, 0, 0)
self.set_window(
Gtk.Dialog(_('Place Selection in a region'),
buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)),
None, _('Place Selection in a region'), None)
label = Gtk.Label(label=_('Choose the radius of the selection.\n'
'On the map you should see a circle or an'
' oval depending on the latitude.'))
alignment.add(label)
self.window.vbox.pack_start(alignment, False, True, 0)
adj = Gtk.Adjustment(1.0, 0.1, 3.0, 0.1, 0, 0)
# default value is 1.0, minimum is 0.1 and max is 3.0
slider = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL,
adjustment=adj)
slider.set_digits(1)
slider.set_value_pos(Gtk.PositionType.BOTTOM)
slider.connect('value-changed', self.slider_change, self.lat, self.lon)
self.window.vbox.pack_start(slider, False, True, 0)
self.vadjust = Gtk.Adjustment(page_size=15)
self.scroll = Gtk.ScrolledWindow(self.vadjust)
self.scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.scroll.set_shadow_type(Gtk.ShadowType.IN)
self.plist = Gtk.ListStore(str, str, str)
self.choices = Gtk.TreeView(self.plist)
self.scroll.add(self.choices)
self.renderer = Gtk.CellRendererText()
self.tvcol1 = Gtk.TreeViewColumn(_('Country'), self.renderer, markup=0)
self.tvcol2 = Gtk.TreeViewColumn(_('State'), self.renderer, markup=1)
self.tvcol3 = Gtk.TreeViewColumn(_('County'), self.renderer, markup=2)
self.tvcol1.set_sort_column_id(0)
self.tvcol2.set_sort_column_id(1)
self.tvcol3.set_sort_column_id(2)
self.choices.append_column(self.tvcol1)
self.choices.append_column(self.tvcol2)
self.choices.append_column(self.tvcol3)
self.window.vbox.pack_start(self.scroll, True, True, 0)
self.label2 = Gtk.Label()
self.label2.set_markup('<span background="green" foreground="black"'
'>%s</span>' %
_('The green values in the row correspond '
'to the current place values.'))
alignment = Gtk.Alignment.new(0, 1, 0, 0)
alignment.add(self.label2)
self.window.vbox.pack_start(alignment, False, True, 0)
self.window.set_default_size(400, 300)
self.choices.connect('row-activated', self.selection, function)
self.window.connect('response', self.close)
self.window.show_all()
self.show()
self.label2.hide()
self.slider_change(None, lat, lon)
def close(self, *obj):
"""
Close the selection place editor
"""
self.hide_the_region()
ManagedWindow.close(self, *obj)
def slider_change(self, obj, lat, lon):
"""
Display on the map a circle in which we select all the places inside this region.
"""
self.radius = obj.get_value() if obj else 1.0
self.show_the_region(self.radius)
match(self, lat, lon, self.radius)
self.plist.clear()
if self.oldvalue != None:
# The old values are always in the first row.
# In this case, we change the color of the row.
# display the associated message
self.label2.show()
field1, field2, field3 = self.oldvalue
self.plist.append((PLACE_STRING % field1,
PLACE_STRING % field2,
PLACE_STRING % field3)
)
for place in self.places:
self.plist.append(place)
        # here, we could add values from geographic name services ...
# if we found no place, we must create a default place.
self.plist.append((_("New place with empty fields"), "", "..."))
def hide_the_region(self):
"""
Hide the layer which contains the circle
"""
layer = self.get_selection_layer()
if layer:
self.remove_layer(layer)
def show_the_region(self, rds):
"""
Show a circle in which we select the places.
"""
# circle (rds)
self.hide_the_region()
self.selection_layer = self.add_selection_layer()
self.selection_layer.add_circle(rds, self.lat, self.lon)
def get_location(self, place):
"""
get location values
"""
place = self.dbstate.db.get_place_from_gramps_id(place)
loc = place.get_main_location()
data = loc.get_text_data_list()
# new background or font color on gtk fields ?
self.country = data[6]
self.state = data[5]
self.county = data[4]
return(self.country, self.state, self.county)
def selection(self, obj, index, column, function):
"""
get location values and call the real function : add_place, edit_place
"""
if self.plist[index][2] == "...":
# case with blank values ( New place with empty fields )
self.function( "", "", "", self.lat, self.lon)
elif self.plist[index][0][1:5] == "span":
# case with old values ( keep the old values of the place )
name = PLACE_REGEXP.search(self.plist[index][0], 0)
country = name.group(1)
name = PLACE_REGEXP.search(self.plist[index][1], 0)
state = name.group(1)
name = PLACE_REGEXP.search(self.plist[index][2], 0)
county = name.group(1)
self.function( country, county, state, self.lat, self.lon)
else:
# Set the new values of the country, county and state fields.
self.function( self.plist[index][0], self.plist[index][2],
self.plist[index][1], self.lat, self.lon)
| gpl-2.0 | -5,166,526,136,838,341,000 | 37.972549 | 89 | 0.543872 | false |
p1c2u/openapi-core | tests/integration/contrib/flask/conftest.py | 1 | 1818 | import pytest
from flask.wrappers import Request
from flask.wrappers import Response
from werkzeug.routing import Map
from werkzeug.routing import Rule
from werkzeug.routing import Subdomain
from werkzeug.test import create_environ
@pytest.fixture
def environ_factory():
return create_environ
@pytest.fixture
def map():
return Map(
[
# Static URLs
Rule("/", endpoint="static/index"),
Rule("/about", endpoint="static/about"),
Rule("/help", endpoint="static/help"),
# Knowledge Base
Subdomain(
"kb",
[
Rule("/", endpoint="kb/index"),
Rule("/browse/", endpoint="kb/browse"),
Rule("/browse/<int:id>/", endpoint="kb/browse"),
Rule("/browse/<int:id>/<int:page>", endpoint="kb/browse"),
],
),
],
default_subdomain="www",
)
@pytest.fixture
def request_factory(map, environ_factory):
server_name = "localhost"
def create_request(method, path, subdomain=None, query_string=None):
environ = environ_factory(query_string=query_string)
req = Request(environ)
urls = map.bind_to_environ(
environ, server_name=server_name, subdomain=subdomain
)
req.url_rule, req.view_args = urls.match(
path, method, return_rule=True
)
return req
return create_request
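# Illustrative use inside a test (hypothetical, based on the map fixture above):
#   req = request_factory("GET", "/browse/42/", subdomain="kb")
#   assert req.view_args == {"id": 42}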
@pytest.fixture
def response_factory():
def create_response(
data, status_code=200, headers=None, content_type="application/json"
):
return Response(
data,
status=status_code,
headers=headers,
content_type=content_type,
)
return create_response
| bsd-3-clause | -7,429,355,372,888,731,000 | 25.735294 | 78 | 0.571507 | false |
yograterol/python-ctrldaemon | tests/test_ctrldaemon.py | 1 | 1505 | import sys
sys.path[0:0] = [""]
import unittest
from ctrldaemon import ControlDaemon
class TestCtrlDaemon(unittest.TestCase):
def setUp(self):
self.ctrl_daemon = ControlDaemon("httpd")
self.ctrl_daemon_fail = ControlDaemon("service_fail")
def test_start_service(self):
self.assertTrue(self.ctrl_daemon.start())
def test_stop_service(self):
self.assertTrue(self.ctrl_daemon.stop())
def test_restart_service(self):
self.assertTrue(self.ctrl_daemon.restart())
def test_get_memory_usage(self):
self.ctrl_daemon.start()
self.assertGreater(self.ctrl_daemon.get_memory_usage(), 0)
def test_get_status(self):
self.ctrl_daemon.stop()
        # Is the service stopped?
self.assertFalse(self.ctrl_daemon.get_status())
self.ctrl_daemon.start()
        # Is the service running?
self.assertTrue(self.ctrl_daemon.get_status())
def test_start_service_fail(self):
self.ctrl_daemon_fail.start()
self.assertFalse(self.ctrl_daemon_fail.get_status())
def test_restart_service_fail(self):
self.ctrl_daemon_fail.restart()
self.assertFalse(self.ctrl_daemon_fail.get_status())
def test_stop_service_fail(self):
self.ctrl_daemon_fail.stop()
self.assertFalse(self.ctrl_daemon_fail.get_status())
def test_repr(self):
service = 'httpd'
self.assertEqual(str(service), str(self.ctrl_daemon))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 7,164,116,117,361,461,000 | 27.396226 | 66 | 0.648505 | false |
zenpoy/pokerstats | app.py | 1 | 3503 | import os
from flask import Flask, request, jsonify
from mongoengine import *
import datetime
app = Flask(__name__)
mongodb_uri = os.environ.get('MONGODB_URI', 'localhost:27017')
connect("pokerstats", host=mongodb_uri)
class Player(Document):
name = StringField(required=True, unique=True, max_length=200)
class Record(Document):
player = ReferenceField("Player", required=True)
game = ReferenceField("Game", required=True)
cash_in = FloatField()
    good_all_in = ListField(field=DateTimeField())
    bad_all_in = ListField(field=DateTimeField())
cash_out = FloatField()
class Game(Document):
name = StringField(max_length=200)
date = DateTimeField()
cash = FloatField()
@app.route('/', methods=['GET'])
@app.route('/players', methods=['GET'])
def get_players():
return Player.objects.to_json()
@app.route('/players/<player_id>', methods=['GET'])
def get_player(player_id):
p = Player.objects(id=player_id)
return p.to_json(), 200
@app.route('/players', methods=['POST'])
def create_player():
# TODO: add check for is json
json_data = request.get_json()
p = Player(**json_data)
try:
p.save()
except NotUniqueError as e:
return jsonify({'error' : e.message}), 200
return p.to_json(), 201
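# Illustrative request against create_player() (hypothetical host/port; the
# field names follow the Player model above):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"name": "alice"}' http://localhost:5000/players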
@app.route('/players/<player_id>', methods=['DELETE'])
def delete_player(player_id):
Player.objects(id=player_id).delete()
return jsonify({}), 200
@app.route('/players/<player_id>', methods=['PUT'])
def update_player(player_id):
# TODO: add check for is json
json_data = request.get_json()
p = Player.objects(id=player_id)
p.update(**json_data)
return p.to_json(), 200
@app.route('/games', methods=['GET'])
def get_games():
return Game.objects.to_json()
@app.route('/games/<game_id>', methods=['GET'])
def get_game(game_id):
p = Game.objects(id=game_id)
return p.to_json(), 200
@app.route('/games', methods=['POST'])
def create_game():
# TODO: add check for is json
json_data = request.get_json()
p = Game(**json_data)
try:
p.save()
except NotUniqueError as e:
return jsonify({'error' : e.message}), 200
return p.to_json(), 201
@app.route('/games/<game_id>', methods=['DELETE'])
def delete_game(game_id):
Game.objects(id=game_id).delete()
return jsonify({}), 200
@app.route('/games/<game_id>', methods=['PUT'])
def update_game(game_id):
# TODO: add check for is json
json_data = request.get_json()
p = Game.objects(id=game_id)
p.update(**json_data)
return p.to_json(), 200
@app.route('/records', methods=['GET'])
def get_records():
return Record.objects.to_json()
@app.route('/records/<record_id>', methods=['GET'])
def get_record(record_id):
p = Record.objects(id=record_id)
return p.to_json(), 200
@app.route('/records', methods=['POST'])
def create_record():
# TODO: add check for is json
json_data = request.get_json()
p = Record(**json_data)
try:
p.save()
except NotUniqueError as e:
return jsonify({'error' : e.message}), 200
return p.to_json(), 201
@app.route('/records/<record_id>', methods=['DELETE'])
def delete_record(record_id):
Record.objects(id=record_id).delete()
return jsonify({}), 200
@app.route('/records/<record_id>', methods=['PUT'])
def update_record(record_id):
# TODO: add check for is json
json_data = request.get_json()
p = Record.objects(id=record_id)
p.update(**json_data)
return p.to_json(), 200
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
# connect to the mongodb database | mit | -8,197,957,731,518,686,000 | 24.764706 | 63 | 0.675992 | false |
srajag/nova | nova/objects/ec2.py | 1 | 3208 | # Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields
class EC2InstanceMapping(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, imap, db_imap):
for field in imap.fields:
imap[field] = db_imap[field]
imap._context = context
imap.obj_reset_changes()
return imap
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_imap = db.ec2_instance_create(context, self.uuid)
self._from_db_object(context, self, db_imap)
@base.remotable_classmethod
def get_by_uuid(cls, context, instance_uuid):
db_imap = db.ec2_instance_get_by_uuid(context, instance_uuid)
if db_imap:
return cls._from_db_object(context, cls(), db_imap)
@base.remotable_classmethod
def get_by_id(cls, context, ec2_id):
db_imap = db.ec2_instance_get_by_id(context, ec2_id)
if db_imap:
return cls._from_db_object(context, cls(), db_imap)
class EC2VolumeMapping(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, vmap, db_vmap):
for field in vmap.fields:
vmap[field] = db_vmap[field]
vmap._context = context
vmap.obj_reset_changes()
return vmap
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_vmap = db.ec2_volume_create(context, self.uuid)
self._from_db_object(context, self, db_vmap)
@base.remotable_classmethod
def get_by_uuid(cls, context, volume_uuid):
db_vmap = db.ec2_volume_get_by_uuid(context, volume_uuid)
if db_vmap:
return cls._from_db_object(context, cls(context), db_vmap)
@base.remotable_classmethod
def get_by_id(cls, context, ec2_id):
db_vmap = db.ec2_volume_get_by_id(context, ec2_id)
if db_vmap:
return cls._from_db_object(context, cls(context), db_vmap)
| apache-2.0 | -248,260,300,879,144,450 | 33.12766 | 78 | 0.62687 | false |
geopm/geopm | integration/test/test_geopmagent.py | 1 | 6342 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import unittest
import subprocess
import json
import geopm_context
import geopmpy.io
import geopmpy.hash
class TestIntegrationGeopmagent(unittest.TestCase):
''' Tests of geopmagent.'''
def setUp(self):
self.exec_name = 'geopmagent'
self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
def check_output(self, args, expected):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_json_output(self, args, expected):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
try:
out_json = json.loads(line.decode())
except ValueError:
self.fail('Could not convert json string: {}\n'.format(line))
self.assertEqual(expected, out_json)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_no_error(self, args):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
proc.stdout.close()
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmagent_command_line(self):
'''
Check that geopmagent commandline arguments work.
'''
# no args
agent_names = ['monitor', 'power_balancer', 'power_governor',
'frequency_map']
self.check_output([], agent_names)
# help message
self.check_output(['--help'], ['Usage'])
# version
self.check_no_error(['--version'])
# agent policy and sample names
for agent in agent_names:
self.check_output(['--agent', agent],
['Policy', 'Sample'])
# policy file
self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
{})
self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# default value policy
self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
# unspecified policy values are accepted
self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# hashing works for frequency map agent
self.check_json_output(['--agent', 'frequency_map', '--policy', '1e9,nan,hello,2e9'],
{'FREQ_DEFAULT': 1e9, 'FREQ_UNCORE': 'NAN',
'HASH_0': geopmpy.hash.crc32_str('hello'), 'FREQ_0': 2e9})
# errors
self.check_output(['--agent', 'power_governor', '--policy', 'None'],
['not a valid floating-point number', 'Invalid argument'])
self.check_output(['--agent', 'monitor', '--policy', '300'],
['agent takes no parameters', 'Invalid argument'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,362,926,428,155,289,000 | 43.661972 | 93 | 0.579313 | false |
jbrendel/RESTx | src/python/restx/components/TwitterComponent.py | 1 | 8443 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
A Twitter access component.
"""
# Python imports
import urllib
# RESTx imports
from restx.components.api import *
class TwitterComponent(BaseComponent):
NAME = "TwitterComponent"
PARAM_DEFINITION = {
"account_name" : ParameterDef(PARAM_STRING, "Twitter account name"),
"account_password" : ParameterDef(PARAM_PASSWORD, "Password")
}
DESCRIPTION = "Provides access to a Twitter account."
DOCUMENTATION = \
"""The Twitter component is designed to provide access to a Twitter account.
It can be used to get as well as update status, or to view the timeline of
a Twitter account.
To create the resource, the Twitter account name and password need to be specified.
"""
SERVICES = {
"status" : { "desc" : "You can GET the status or POST a new status to it." },
"timeline" : {
"desc" : "You can GET the timeline of the user.",
"params" : {
"count" : ParameterDef(PARAM_NUMBER, "Number of results", required=False, default=20),
"filter" : ParameterDef(PARAM_BOOL, "If set, only 'important' fields are returned", required=False, default=True),
},
},
"home_timeline" : {
"desc" : "You can GET the home timeline of the user.",
"params" : {
"count" : ParameterDef(PARAM_NUMBER, "Number of results", required=False, default=20),
"filter" : ParameterDef(PARAM_BOOL, "If set, only 'important' fields are returned", required=False, default=True),
},
},
}
def __get_status(self, accountname):
"""
        Get the latest twitter status for the specified account.
@param accountname: Name of the account for which we get the status.
@type accountname: string
@return: The status text.
@rtype: string
"""
# Get the status for this account from Twitter (we get it in JSON format)
code, data = self.httpGet("http://api.twitter.com/1/users/show.json?screen_name=%s" % accountname)
if code == HTTP.OK:
obj = self.fromJson(data)
else:
return "Problem with Twitter: " + data
# Return the requested information, in this case the latest status
return obj['status']['text']
def __post_status(self, accountname, password, input):
"""
        Post the new twitter status for the specified account.
@param accountname: Name of the account for which we post the status.
@type accountname: string
@param password: The password for this account.
@type password: string
@param input: The new status update text.
@type input: string
@return: The status text.
@rtype: string
"""
# Send a new status string to the Twitter account
self.httpSetCredentials(accountname, password)
code, data = self.httpPost("http://api.twitter.com/1/statuses/update.xml",
"status=%s" % input)
data = "Status updated"
# Return the requested information, in this case the latest status
return data
def __result_filter(self, data):
"""
Filter timeline results to contain only the most essential information.
"""
r = list()
for elem in data:
u = elem['user']
user = dict(screen_name=u['screen_name'], name=u['name'], followers=u['followers_count'])
message = dict(date=elem['created_at'], Text=elem['text'], id=elem['id'],
reply="http://twitter.com/?status=@%s&in_reply_to_status_id=%s&in_reply_to=%s" % (u['screen_name'], elem['id'], u['screen_name']))
r.append(dict(message=message, user=user))
return r
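    # Illustrative note (derived from the filtering code above, not from official
    # Twitter documentation): each filtered timeline entry has roughly the shape
    #   { "user":    { "screen_name": ..., "name": ..., "followers": ... },
    #     "message": { "date": ..., "Text": ..., "id": ..., "reply": <reply URL> } }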
def status(self, method, input):
"""
Gets or updates the twitter status for the specified account.
@param method: The HTTP request method.
@type method: string
@param input: Any data that came in the body of the request.
@type input: string
@return: The output data of this service.
@rtype: string
"""
# Get my parameters
if method == HTTP.GET:
return Result.ok(self.__get_status(self.account_name))
elif method == HTTP.POST:
return Result.ok(self.__post_status(self.account_name, self.account_password, input))
else:
return Result.methodNotAllowed("Only supporting GET and POST for this resource")
def timeline(self, method, input, count, filter):
"""
Get the user's timeline.
        @param method: The HTTP request method.
        @type method: string
        @param input: Any data that came in the body of the request.
        @type input: string
        @param count: The number of results to return.
        @type count: number
        @param filter: If set, only 'important' fields are returned.
        @type filter: boolean
@return: The output data of this service.
@rtype: string
"""
# Get my parameters
self.httpSetCredentials(self.account_name, self.account_password)
if count > 0:
count_param = "?count=%s" % count
else:
count_param = ""
code, obj_str = self.httpGet("http://api.twitter.com/1/statuses/user_timeline.json"+count_param)
if code == HTTP.OK:
obj = self.fromJson(obj_str)
else:
obj = obj_str
if filter:
obj = self.__result_filter(obj)
return Result.ok(obj)
def home_timeline(self, method, input, count, filter):
"""
Get the user's home timeline (also contains tweets from friends).
        @param method: The HTTP request method.
        @type method: string
        @param input: Any data that came in the body of the request.
        @type input: string
        @param count: The number of results to return.
        @type count: number
        @param filter: If set, only 'important' fields are returned.
        @type filter: boolean
@return: The output data of this service.
@rtype: string
"""
# Get my parameters
self.httpSetCredentials(self.account_name, self.account_password)
if count > 0:
count_param = "?count=%s" % count
else:
count_param = ""
code, obj_str = self.httpGet("http://api.twitter.com/1/statuses/home_timeline.json"+count_param)
if code == HTTP.OK:
obj = self.fromJson(obj_str)
else:
obj = obj_str
if filter:
obj = self.__result_filter(obj)
return Result.ok(obj)
| gpl-3.0 | -6,494,160,187,986,690,000 | 37.20362 | 160 | 0.540803 | false |
FCP-INDI/C-PAC | CPAC/image_utils/tests/test_smooth.py | 1 | 3611 | import os
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
from CPAC.utils.test_mocks import configuration_strategy_mock
from CPAC.image_utils import spatial_smooth
import CPAC.utils.test_init as test_utils
def test_smooth():
test_name = 'test_smooth_nodes'
c, strat = configuration_strategy_mock(method='FSL')
num_strat = 0
# build the workflow
workflow = pe.Workflow(name=test_name)
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
spatial_smooth(workflow, 'mean_functional', 'functional_brain_mask',
'mean_functional_smooth'.format(num_strat), strat, num_strat, c)
func_node, func_output = strat['mean_functional']
mask_node, mask_output = strat['functional_brain_mask']
spatial_smooth(workflow, (func_node, func_output), (mask_node, mask_output),
'mean_functional_smooth_nodes'.format(num_strat), strat, num_strat, c)
print(workflow.list_node_names())
workflow.run()
correlations = []
for fwhm in c.fwhm:
out_name1 = os.path.join(c.workingDirectory, test_name,
'_fwhm_{0}/mean_functional_smooth_0/'.format(fwhm),
'sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat_maths.nii.gz')
out_name2 = os.path.join(c.workingDirectory, test_name,
'_fwhm_{0}/mean_functional_smooth_nodes_0/'.format(fwhm),
'sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat_maths.nii.gz')
correlations.append(test_utils.pearson_correlation(out_name1, out_name2) > 0.99)
assert all(correlations)
def test_smooth_mapnode():
test_name = 'test_smooth_mapnode'
c, strat = configuration_strategy_mock(method='FSL')
num_strat = 0
# build the workflow
workflow = pe.Workflow(name=test_name)
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
spatial_smooth(workflow, 'dr_tempreg_maps_files', 'functional_brain_mask',
'dr_tempreg_maps_smooth'.format(num_strat), strat, num_strat, c,
input_image_type='func_derivative_multi')
func_node, func_output = strat['dr_tempreg_maps_files']
mask_node, mask_output = strat['functional_brain_mask']
spatial_smooth(workflow, (func_node, func_output), (mask_node, mask_output),
'dr_tempreg_maps_smooth_nodes'.format(num_strat), strat, num_strat, c,
input_image_type='func_derivative_multi')
print(workflow.list_node_names())
workflow.run()
correlations = []
for fwhm in c.fwhm:
dr_spatmaps_after_smooth1=[os.path.join(c.workingDirectory, test_name,
'_fwhm_{0}/dr_tempreg_maps_smooth_multi_0/mapflow'.format(fwhm),
'_dr_tempreg_maps_smooth_multi_0{0}/temp_reg_map_000{0}_maths.nii.gz'.format(n))
for n in range(0,10)]
dr_spatmaps_after_smooth2=[os.path.join(c.workingDirectory, test_name,
'_fwhm_{0}/dr_tempreg_maps_smooth_nodes_multi_0/mapflow'.format(fwhm),
'_dr_tempreg_maps_smooth_nodes_multi_0{0}/temp_reg_map_000{0}_maths.nii.gz'.format(n))
for n in range(0,10)]
correlations += [test_utils.pearson_correlation(file1, file2) > 0.99 \
for file1, file2 in zip(dr_spatmaps_after_smooth1, dr_spatmaps_after_smooth2)]
assert all(correlations)
| bsd-3-clause | 6,502,382,951,621,108,000 | 35.11 | 104 | 0.66159 | false |
matthias-k/pysaliency | tests/test_utils.py | 1 | 7196 | from __future__ import absolute_import, print_function, division
import unittest
import dill
import glob
import os
import numpy as np
from pysaliency.utils import LazyList, TemporaryDirectory, Cache, get_minimal_unique_filenames, atomic_directory_setup, build_padded_2d_array
from test_helpers import TestWithData
def test_minimal_unique_filenames():
assert get_minimal_unique_filenames(['a/b/c.d']) == ['c.d']
filenames = [
'a/b/c/d.e',
'a/b/c/f.g',
'a/b/c/h.i',
]
assert get_minimal_unique_filenames(filenames) == ['d.e', 'f.g', 'h.i']
filenames.append('a/b/C/j.k')
assert get_minimal_unique_filenames(filenames) == ['c/d.e', 'c/f.g', 'c/h.i', 'C/j.k']
class TestLazyList(TestWithData):
def test_lazy_list(self):
calls = []
def gen(i):
calls.append(i)
print('calling with {} yielding {}'.format(i, i**2))
return i**2
length = 20
lazy_list = LazyList(gen, length)
self.assertEqual(len(lazy_list), length)
for i in range(length):
self.assertEqual(lazy_list[i], i**2)
self.assertEqual(calls, list(range(length)))
def test_pickle_no_cache(self):
def gen(i):
print('calling with {} yielding {}'.format(i, i**2))
return i**2
length = 20
lazy_list = LazyList(gen, length)
lazy_list = self.pickle_and_reload(lazy_list, pickler=dill)
self.assertEqual(lazy_list._cache, {})
self.assertEqual(list(lazy_list), [i**2 for i in range(length)])
def test_pickle_with_cache(self):
def gen(i):
print('calling with {} yielding {}'.format(i, i**2))
return i**2
length = 20
lazy_list = LazyList(gen, length, pickle_cache=True)
list(lazy_list) # make sure all list items are generated
lazy_list = self.pickle_and_reload(lazy_list, pickler=dill)
self.assertEqual(lazy_list._cache, {i: i**2 for i in range(length)})
self.assertEqual(list(lazy_list), [i**2 for i in range(length)])
class TestTemporaryDirectory(unittest.TestCase):
def test_temporary_directory(self):
with TemporaryDirectory() as tmp_dir:
self.assertTrue(os.path.isdir(tmp_dir))
self.assertFalse(os.path.isdir(tmp_dir))
self.assertFalse(os.path.exists(tmp_dir))
def test_atomic_directory_setup_success(tmp_path):
directory = tmp_path / 'testdirectory'
assert not directory.exists()
with atomic_directory_setup(str(directory)):
directory.mkdir()
assert directory.exists()
assert directory.exists()
def test_atomic_directory_setup_failure(tmp_path):
directory = tmp_path / 'testdirectory'
assert not directory.exists()
try:
with atomic_directory_setup(str(directory)):
directory.mkdir()
assert directory.exists()
raise ValueError()
except ValueError:
pass
else:
assert False
assert not directory.exists()
def test_atomic_directory_setup_success_no_location():
with atomic_directory_setup(None):
assert True
assert True
def test_atomic_directory_setup_failure_no_location():
try:
with atomic_directory_setup(None):
assert True
raise ValueError()
except ValueError:
pass
else:
assert False
assert True
class TestCache(TestWithData):
def test_basics(self):
cache = Cache()
self.assertEqual(len(cache), 0)
data = np.random.randn(10, 10, 3)
cache['foo'] = data
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
del cache['foo']
self.assertEqual(len(cache), 0)
def test_cache_to_disk(self):
cache = Cache(cache_location=self.data_path)
self.assertEqual(len(cache), 0)
data = np.random.randn(10, 10, 3)
cache['foo'] = data
self.assertEqual(glob.glob(os.path.join(self.data_path, '*.*')),
[os.path.join(self.data_path, 'foo.npy')])
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
cache = Cache(cache_location=self.data_path)
self.assertEqual(cache._cache, {})
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
del cache['foo']
self.assertEqual(len(cache), 0)
self.assertEqual(glob.glob(os.path.join(self.data_path, '*.*')),
[])
def test_cache_to_disk_nonexisting_location(self):
cache_location = os.path.join(self.data_path, 'cache')
cache = Cache(cache_location=cache_location)
self.assertEqual(len(cache), 0)
data = np.random.randn(10, 10, 3)
cache['foo'] = data
self.assertEqual(glob.glob(os.path.join(cache_location, '*.*')),
[os.path.join(cache_location, 'foo.npy')])
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
cache = Cache(cache_location=cache_location)
self.assertEqual(cache._cache, {})
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
del cache['foo']
self.assertEqual(len(cache), 0)
self.assertEqual(glob.glob(os.path.join(cache_location, '*.*')),
[])
def test_pickle_cache(self):
cache = Cache()
self.assertEqual(len(cache), 0)
data = np.random.randn(10, 10, 3)
cache['foo'] = data
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
cache2 = self.pickle_and_reload(cache)
self.assertEqual(cache2._cache, {})
self.assertEqual(len(cache2), 0)
def test_pickle_cache_with_location(self):
cache = Cache(cache_location=self.data_path)
self.assertEqual(len(cache), 0)
data = np.random.randn(10, 10, 3)
cache['foo'] = data
self.assertEqual(glob.glob(os.path.join(self.data_path, '*.*')),
[os.path.join(self.data_path, 'foo.npy')])
self.assertEqual(list(cache.keys()), ['foo'])
np.testing.assert_allclose(cache['foo'], data)
cache2 = self.pickle_and_reload(cache)
self.assertEqual(cache2._cache, {})
self.assertEqual(len(cache2), 1)
np.testing.assert_allclose(cache2['foo'], data)
def test_build_padded_2d_array():
arrays = [
[0.1, 1, 2],
[0, 1],
[0, 1, 2, 4],
[0, 3]
]
expected = np.array([
[0.1, 1, 2, np.nan],
[0, 1, np.nan, np.nan],
[0, 1, 2, 4],
[0, 3, np.nan, np.nan]
])
actual = build_padded_2d_array(arrays)
np.testing.assert_allclose(actual, expected)
expected = np.hstack((actual, np.ones((4, 1)) * np.nan))
actual = build_padded_2d_array(arrays, max_length=5)
np.testing.assert_allclose(actual, expected)
if __name__ == '__main__':
unittest.main()
| mit | 871,037,050,908,416,800 | 27.219608 | 141 | 0.587827 | false |
NhuanTDBK/Kaggle_StackedOverflow | is13/examples/elman-forward.py | 1 | 4796 | import numpy
import time
import sys
import subprocess
import os
import random
import numpy as np
from is13.data import load
from is13.rnn.elman import model
from is13.metrics.accuracy import conlleval
from is13.utils.tools import shuffle, minibatch, contextwin
from gensim.models import Phrases
from sklearn.metrics import f1_score
if __name__ == '__main__':
s = {'fold':3, # 5 folds 0,1,2,3,4
'lr':0.0627142536696559,
'verbose':1,
'decay':False, # decay on the learning rate if improvement stops
'win':7, # number of words in the context window
'bs':9, # number of backprop through time steps
'nhidden':100, # number of hidden units
'seed':345,
'emb_dimension':100, # dimension of word embedding
'nepochs':10}
folder = os.path.basename(__file__).split('.')[0]
if not os.path.exists(folder): os.mkdir(folder)
# load the dataset
# train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
# idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
# idx2word = dict((k,v) for v,k in dic['words2idx'].iteritems())
# train_lex, train_ne, train_y = train_set
# valid_lex, valid_ne, valid_y = valid_set
# test_lex, test_ne, test_y = test_set
dat = np.load("is13/demo_bio.npz")
train_lex = dat['X_train']
train_y = dat['y_train']
valid_lex = dat['X_val']
valid_y = dat['y_val']
trigram = Phrases.load('trigram_model_new.model')
# dict2vec = {word: idx for idx,word in enumerate(trigram.vocab)}
vocsize = len(trigram.vocab)
nclasses = 4
nsentences = len(train_lex)
# instanciate the model
numpy.random.seed(s['seed'])
random.seed(s['seed'])
rnn = model( nh = s['nhidden'],
nc = nclasses,
ne = vocsize,
de = s['emb_dimension'],
cs = s['win'] )
# train with early stopping on validation set
best_f1 = -numpy.inf
s['clr'] = s['lr']
for e in xrange(s['nepochs']):
# shuffle
        shuffle([train_lex, train_y], s['seed'])  # train_ne is not loaded from the npz data
s['ce'] = e
tic = time.time()
for i in xrange(nsentences):
cwords = contextwin(train_lex[i], s['win'])
words = map(lambda x: numpy.asarray(x).astype('int32'),\
minibatch(cwords, s['bs']))
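            # Sketch of the data flow here (based on the reference is13 utilities; an
            # assumption, not verified against this exact checkout): contextwin() turns a
            # sentence of word indices into one fixed-size window of s['win'] indices per
            # word (padded with -1 at the edges), and minibatch() yields the growing /
            # sliding index sequences used for truncated backprop through time.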
labels = train_y[i]
for word_batch , label_last_word in zip(words, labels):
rnn.train(word_batch, label_last_word, s['clr'])
rnn.normalize()
if s['verbose']:
print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
sys.stdout.flush()
# evaluation // back into the real world : idx -> words
predictions_test = [ map(lambda x: idx2label[x], \
rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
for x in test_lex ]
groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]
predictions_valid = [ map(lambda x: idx2label[x], \
rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
for x in valid_lex ]
groundtruth_valid = [ map(lambda x: idx2label[x], y) for y in valid_y ]
words_valid = [ map(lambda x: idx2word[x], w) for w in valid_lex]
# evaluation // compute the accuracy using conlleval.pl
res_test = conlleval(predictions_test, groundtruth_test, words_test, folder + '/current.test.txt')
res_valid = conlleval(predictions_valid, groundtruth_valid, words_valid, folder + '/current.valid.txt')
if res_valid['f1'] > best_f1:
rnn.save(folder)
best_f1 = res_valid['f1']
if s['verbose']:
print 'NEW BEST: epoch', e, 'valid F1', res_valid['f1'], 'best test F1', res_test['f1'], ' '*20
s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid['p'], res_valid['r']
s['tf1'], s['tp'], s['tr'] = res_test['f1'], res_test['p'], res_test['r']
s['be'] = e
subprocess.call(['mv', folder + '/current.test.txt', folder + '/best.test.txt'])
subprocess.call(['mv', folder + '/current.valid.txt', folder + '/best.valid.txt'])
else:
print ''
# learning rate decay if no improvement in 10 epochs
if s['decay'] and abs(s['be']-s['ce']) >= 10: s['clr'] *= 0.5
if s['clr'] < 1e-5: break
print 'BEST RESULT: epoch', e, 'valid F1', s['vf1'], 'best test F1', s['tf1'], 'with the model', folder
| apache-2.0 | 4,744,306,026,209,939,000 | 41.442478 | 130 | 0.555046 | false |
kmackay/emk | modules/c.py | 1 | 22769 | import os
import logging
import shlex
import re
import sys
import traceback
log = logging.getLogger("emk.c")
utils = emk.module("utils")
fix_path_regex = re.compile(r'[\W]+')
class _GccCompiler(object):
"""
Compiler class for using gcc/g++ to compile C/C++ respectively.
In order for the emk c module to use a compiler instance, the compiler class must define the following methods:
load_extra_dependencies
compile_c
compile_cxx
See the documentation for those functions in this class for more details.
Properties (defaults set based on the path prefix passed to the constructor):
c_path -- The path of the C compiler (eg "gcc").
cxx_path -- The path of the C++ compiler (eg "g++").
"""
def __init__(self, path_prefix=""):
"""
Create a new GccCompiler instance.
Arguments:
path_prefix -- The prefix to use for the gcc/g++ executables. For example, if you had a 32-bit Linux cross compiler
installed into /cross/linux, you might use 'c.compiler = c.GccCompiler("/cross/linux/bin/i686-pc-linux-gnu-")'
to configure the c module to use the cross compiler. The default value is "" (ie, use the system gcc/g++).
"""
self.name = "gcc"
self.c_path = path_prefix + "gcc"
self.cxx_path = path_prefix + "g++"
def load_extra_dependencies(self, path):
"""
Load extra dependencies for the given object file path. The extra dependencies could be loaded from a generated
dependency file for that path, or loaded from the emk.scope_cache(path) (or some other mechanism).
Arguments:
path -- The path of the object file to get dependencies for.
Returns a list of paths (strings) of all the extra dependencies.
"""
cache = emk.scope_cache(path)
return cache.get("secondary_deps", [])
def depfile_args(self, dep_file):
"""
Returns a list of arguments to write secondary dependencies to the given dep_file path.
"""
return ["-Wp,-MMD,%s" % (dep_file)]
def compile(self, exe, source, dest, includes, defines, flags):
dep_file = dest + ".dep"
args = [exe]
args.extend(self.depfile_args(dep_file))
args.extend(["-I%s" % (emk.abspath(d)) for d in includes])
args.extend(["-D%s=%s" % (key, value) for key, value in defines.items()])
args.extend(utils.flatten(flags))
args.extend(["-o", dest, "-c", source])
utils.call(args, print_stderr=False)
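        # The -MMD preprocessor flag added above makes the compiler write a make-style
        # dependency file of the form "dest.o: source.c header1.h header2.h ...";
        # the first two tokens (object target and primary source) are skipped below so
        # that only the discovered header dependencies are cached as secondary deps.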
try:
with open(dep_file, "r") as f:
data = f.read()
data = data.replace("\\\n", "")
items = shlex.split(data)
unique_items = [emk.abspath(item) for item in (set(items[2:]) - set([""]))]
# call has_changed to set up rule cache for future builds.
for item in unique_items:
emk.current_rule.has_changed(item)
cache = emk.scope_cache(dest)
cache["secondary_deps"] = unique_items
except IOError:
log.error("Failed to open depfile %s", dep_file)
utils.rm(dep_file)
def compile_c(self, source, dest, includes, defines, flags):
"""
Compile a C source file into an object file.
Arguments:
source -- The C source file path to compile.
dest -- The output object file path.
includes -- A list of extra include directories.
defines -- A dict of <name>: <value> entries to be used as defines; each entry is equivalent to #define <name> <value>.
flags -- A list of additional flags. This list may contain tuples; to flatten the list, you could use the emk utils module:
'flattened = utils.flatten(flags)'.
"""
self.compile(self.c_path, source, dest, includes, defines, flags)
def compile_cxx(self, source, dest, includes, defines, flags):
"""
Compile a C++ source file into an object file.
Arguments:
source -- The C++ source file path to compile.
dest -- The output object file path.
includes -- A list of extra include directories.
defines -- A dict of <name>: <value> entries to be used as defines; each entry is equivalent to #define <name> <value>.
flags -- A list of additional flags. This list may contain tuples; to flatten the list, you could use the emk utils module:
'flattened = utils.flatten(flags)'.
"""
self.compile(self.cxx_path, source, dest, includes, defines, flags)
def obj_ext(self):
"""
Get the extension of object files built by this compiler.
"""
return ".o"
class _ClangCompiler(_GccCompiler):
"""
A compiler class for compiling using clang.
Properties:
lipo_path -- The path of the 'lipo' executable.
libtool_path -- The path of the 'libtool' executable.
"""
def __init__(self, path_prefix=""):
super(_ClangCompiler, self).__init__(path_prefix)
self.name = "clang"
self.c_path = path_prefix + "clang"
self.cxx_path = path_prefix + "clang++"
class _MsvcCompiler(object):
"""
Compiler class for using Microsoft's Visual C++ to compile C/C++.
In order for the emk c module to use a compiler instance, the compiler class must define the following methods:
load_extra_dependencies
compile_c
compile_cxx
See the documentation for those functions in this class for more details.
"""
def __init__(self, path_prefix=None, env_script="vcvarsall.bat", target_arch=None):
"""
Create a new MsvcCompiler instance.
Arguments:
path_prefix -- The prefix to use for the vcvarsall.bat file. The default value is derived from the VS*COMNTOOLS environment variable.
Properties:
cl_exe -- The absolute path to the cl executable.
"""
from link import _MsvcLinker
self.name = "msvc"
self._env = _MsvcLinker.vs_env(path_prefix, env_script, target_arch)
self._dep_re = re.compile(r'Note:\s+including file:\s+([^\s].*)\s*')
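        # This pattern captures the header path from cl.exe /showIncludes output lines,
        # e.g. "Note: including file:   C:\include\header.h" (example path is illustrative).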
self.cl_exe = "cl.exe"
def load_extra_dependencies(self, path):
"""
Load extra dependencies for the given object file path. The extra dependencies could be loaded from a generated
dependency file for that path, or loaded from the emk.scope_cache(path) (or some other mechanism).
Arguments:
path -- The path of the object file to get dependencies for.
Returns a list of paths (strings) of all the extra dependencies.
"""
cache = emk.scope_cache(path)
return cache.get("secondary_deps", [])
def compile(self, source, dest, includes, defines, flags):
args = [self.cl_exe, "/nologo", "/c", "/showIncludes"]
args.extend(['/I%s' % (emk.abspath(d)) for d in includes])
args.extend(['/D%s=%s' % (key, value) for key, value in defines.items()])
args.extend(utils.flatten(flags))
args.extend(['/Fo%s' % dest, source])
stdout, stderr, returncode = utils.call(args, env=self._env, print_stdout=False, print_stderr=False, error_stream="both")
items = []
for l in stdout.splitlines():
m = self._dep_re.match(l)
if m:
items.append(m.group(1))
unique_items = utils.unique_list(items)
# call has_changed to set up rule cache for future builds.
for item in unique_items:
emk.current_rule.has_changed(item)
cache = emk.scope_cache(dest)
cache["secondary_deps"] = unique_items
def compile_c(self, source, dest, includes, defines, flags):
"""
Compile a C source file into an object file.
Arguments:
source -- The C source file path to compile.
dest -- The output object file path.
includes -- A list of extra include directories.
defines -- A dict of <name>: <value> entries to be used as defines; each entry is equivalent to #define <name> <value>.
flags -- A list of additional flags. This list may contain tuples; to flatten the list, you could use the emk utils module:
'flattened = utils.flatten(flags)'.
"""
if "/TC" not in flags:
flags.extend(["/TC"])
self.compile(source, dest, includes, defines, flags)
def compile_cxx(self, source, dest, includes, defines, flags):
"""
Compile a C++ source file into an object file.
Arguments:
source -- The C++ source file path to compile.
dest -- The output object file path.
includes -- A list of extra include directories.
defines -- A dict of <name>: <value> entries to be used as defines; each entry is equivalent to #define <name> <value>.
flags -- A list of additional flags. This list may contain tuples; to flatten the list, you could use the emk utils module:
'flattened = utils.flatten(flags)'.
"""
if "/TP" not in flags:
flags.extend(["/TP"])
self.compile(source, dest, includes, defines, flags)
def obj_ext(self):
"""
Get the extension of object files built by this compiler.
"""
return ".obj"
class Module(object):
"""
emk module for compiling C and C++ code. Depends on the link module (and utils).
This module defines emk rules during the prebuild stage, to allow autodiscovery of generated source files
from rules defined before the prebuild stage (ie, in the post_rules() method of other modules). See the
autodetect and autodetect_from_targets properties for more information about autodiscovery of source files.
This module adds the compiled object files to the link module, which will link them into libraries/executables as desired.
The object files are added to the link module's 'objects' property (each mapped to the source file that the object file
was built from), so that the link module can autodetect main() functions from the source (if link.detect_exe == "simple").
See the link module documentation for details of main() autodetection.
The c module also sets the link module's link_cxx flag if there are any C++ source files being compiled.
Note that the compilation rules are not built automatically; the link module (or other modules/user code)
is responsible for marking the object files as autobuild if desired.
Classes:
GccCompiler -- A compiler class that uses gcc/g++ to compile.
ClangCompiler -- A compiler class that uses clang/clang++ to compile.
MsvcCompiler -- A compiler class that uses MSVC on Windows to compile binaries.
Properties (inherited from parent scope):
compiler -- The compiler instance that is used to load dependencies and compile C/C++ code.
include_dirs -- A list of additional include directories for both C and C++ code.
defines -- A dict of <name>: <value> defines for both C and C++; each entry is equivalent to #define <name> <value>.
flags -- A list of flags for both C and C++. If you have a 'flag' that is more than one argument, pass it as a tuple.
Example: ("-isystem", "/path/to/extra/sys/includes"). Duplicate flags will be removed.
source_files -- A list of files that should be included for compilation. Files will be built as C or C++ depending on the file extension.
c.exts -- The list of file extensions (suffixes) that will be considered as C code. The default is [".c"].
c.include_dirs -- A list of additional include directories for C code.
c.defines -- A dict of <name>: <value> defines for C.
c.flags -- A list of flags for C.
c.source_files -- A list of C files that should be included for compilation (will be built as C code).
cxx.exts -- The list of file extensions (suffixes) that will be considered as C++ code. The default is [".cpp", ".cxx", ".c++", ".cc"].
cxx.include_dirs -- A list of additional include directories for C++ code.
cxx.defines -- A dict of <name>: <value> defines for C++.
cxx.flags -- A list of flags for C++.
cxx.source_files -- A list of C++ files that should be included for compilation (will be built as C++ code).
autodetect -- Whether or not to autodetect files to build from the scope directory. All files that match the c.exts suffixes
will be compiled as C, and all files that match the cxx.exts suffixes will be compiled as C++. Autodetection
does not take place until the prebuild stage, so that autodetection of generated code can gather as many targets
as possible (see autodetect_from_targets). The default value is True.
autodetect_from_targets -- Whether or not to autodetect generated code based on rules defined in the current scope.
The default value is True.
excludes -- A list of source files to exclude from compilation.
non_lib_src -- A list of source files that will not be linked into a library for this directory (passed to the link module).
non_exe_src -- A list of source files that will not be linked into an executable, even if they contain a main() function.
unique_names -- If True, the output object files will be named according to the path from the project directory, to avoid
naming conflicts when the build directory is not a relative path. The default value is False.
If True, the link module's unique_names property will also be set to True.
obj_funcs -- A list of functions that are run for each generated object file path.
obj_ext -- The file extension for object files generated by the compiler (eg ".o" for gcc or ".obj" for MSVC). This property is
read-only as its value is provided by the compiler implementation.
"""
def __init__(self, scope, parent=None):
self.GccCompiler = _GccCompiler
self.ClangCompiler = _ClangCompiler
self.MsvcCompiler = _MsvcCompiler
self.link = emk.module("link")
self.c = emk.Container()
self.cxx = emk.Container()
if parent:
self.compiler = parent.compiler
self.include_dirs = list(parent.include_dirs)
self.defines = parent.defines.copy()
self.flags = list(parent.flags)
self.source_files = list(parent.source_files)
self.c.exts = list(parent.c.exts)
self.c.include_dirs = list(parent.c.include_dirs)
self.c.defines = parent.c.defines.copy()
self.c.flags = list(parent.c.flags)
self.c.source_files = list(parent.c.source_files)
self.cxx.exts = list(parent.cxx.exts)
self.cxx.include_dirs = list(parent.cxx.include_dirs)
self.cxx.defines = parent.cxx.defines.copy()
self.cxx.flags = list(parent.cxx.flags)
self.cxx.source_files = list(parent.cxx.source_files)
self.autodetect = parent.autodetect
self.autodetect_from_targets = parent.autodetect_from_targets
self.excludes = list(parent.excludes)
self.non_lib_src = list(parent.non_lib_src)
self.non_exe_src = list(parent.non_exe_src)
self.obj_funcs = list(parent.obj_funcs)
self.unique_names = parent.unique_names
else:
if sys.platform == "darwin":
self.compiler = self.ClangCompiler()
else:
self.compiler = self.GccCompiler()
self.include_dirs = []
self.defines = {}
self.flags = []
self.source_files = []
self.c.include_dirs = []
self.c.defines = {}
self.c.flags = []
self.c.exts = [".c"]
self.c.source_files = []
self.cxx.include_dirs = []
self.cxx.defines = {}
self.cxx.flags = []
self.cxx.exts = [".cpp", ".cxx", ".c++", ".cc"]
self.cxx.source_files = []
self.autodetect = True
self.autodetect_from_targets = True
self.excludes = []
self.non_lib_src = []
self.non_exe_src = []
self.obj_funcs = []
self.unique_names = False
@property
def obj_ext(self):
return self.compiler.obj_ext()
def new_scope(self, scope):
return Module(scope, parent=self)
def _matches_exts(self, file_path, exts):
for ext in exts:
if file_path.endswith(ext):
return True
return False
def post_rules(self):
if emk.cleaning:
return
emk.do_prebuild(self._prebuild)
if self.unique_names and self.link:
self.link.unique_names = True
def _prebuild(self):
c_sources = set()
cxx_sources = set()
self._non_exe_src = set(self.non_exe_src)
self._non_lib_src = set(self.non_lib_src)
if self.autodetect:
if self.autodetect_from_targets:
target_c_files = [t for t in emk.local_targets.keys() if self._matches_exts(t, self.c.exts)]
if target_c_files:
log.debug("Detected generated C files: %s", target_c_files)
self.c.source_files.extend(target_c_files)
target_cxx_files = [t for t in emk.local_targets.keys() if self._matches_exts(t, self.cxx.exts)]
if target_cxx_files:
log.debug("Detected generated C++ files: %s", target_cxx_files)
self.cxx.source_files.extend(target_cxx_files)
files = set(self.source_files)
files.update([f for f in os.listdir(emk.scope_dir) if os.path.isfile(f)])
for file_path in files:
if self._matches_exts(file_path, self.c.exts):
self.c.source_files.append(file_path)
if self._matches_exts(file_path, self.cxx.exts):
self.cxx.source_files.append(file_path)
for f in self.c.source_files:
if f in self.excludes:
continue
c_sources.add(f)
for f in self.cxx.source_files:
if f in self.excludes:
continue
cxx_sources.add(f)
c_includes = utils.unique_list(self.include_dirs + self.c.include_dirs)
c_flags = utils.unique_list(self.flags + self.c.flags)
c_defines = dict(self.defines)
c_defines.update(self.c.defines)
c_args = (False, c_includes, c_defines, c_flags)
cxx_includes = utils.unique_list(self.include_dirs + self.cxx.include_dirs)
cxx_flags = utils.unique_list(self.flags + self.cxx.flags)
cxx_defines = dict(self.defines)
cxx_defines.update(self.cxx.defines)
cxx_args = (True, cxx_includes, cxx_defines, cxx_flags)
objs = {}
for src in c_sources:
self._add_rule(objs, src, c_args)
for src in cxx_sources:
self._add_rule(objs, src, cxx_args)
if self.link:
self.link.objects.update([(os.path.join(emk.build_dir, obj + self.obj_ext), src) for obj, src in objs.items()])
if cxx_sources:
self.link.link_cxx = True
def _add_rule(self, objs, src, args):
fname = os.path.basename(src)
n, ext = os.path.splitext(fname)
if self.unique_names:
relpath = fix_path_regex.sub('_', os.path.relpath(emk.scope_dir, emk.proj_dir))
n = relpath + "_" + n
name = n
c = 1
while name in objs:
name = "%s_%s" % (n, c)
c += 1
objs[name] = src
if self.link:
objpath = os.path.join(emk.build_dir, name + self.obj_ext)
if src in self._non_exe_src:
self.link.non_exe_objs.append(objpath)
if src in self._non_lib_src:
self.link.non_lib_objs.append(objpath)
dest = os.path.join(emk.build_dir, name + self.obj_ext)
requires = [src]
extra_deps = None
if self.compiler:
extra_deps = self.compiler.load_extra_dependencies(emk.abspath(dest))
if extra_deps is None:
requires.append(emk.ALWAYS_BUILD)
emk.rule(self.do_compile, dest, requires, *args, cwd_safe=True, ex_safe=True)
if extra_deps:
emk.weak_depend(dest, extra_deps)
for f in self.obj_funcs:
f(dest)
def do_compile(self, produces, requires, cxx, includes, defines, flags):
"""
Rule function to compile a source file into an object file.
The compiler instance will also produce an <object file>.dep file that contains additional dependencies (ie, header files).
Arguments:
produces -- The path to the object file that will be produced.
requires -- The list of dependencies; the source file should be first.
cxx -- If True, the source file will be compiled as C++; otherwise it will be compiled as C.
includes -- A list of additional include directories.
defines -- A dict of <name>: <value> entries to be defined (like #define <name> <value>).
flags -- A list of flags to pass to the compiler. Compound flags should be in a tuple, eg: ("-isystem", "/path/to/extra/sys/includes").
"""
if not self.compiler:
raise emk.BuildError("No compiler defined!")
try:
if cxx:
self.compiler.compile_cxx(requires[0], produces[0], includes, defines, flags)
else:
self.compiler.compile_c(requires[0], produces[0], includes, defines, flags)
except:
utils.rm(produces[0])
utils.rm(produces[0] + ".dep")
raise
| bsd-2-clause | 672,571,377,680,635,400 | 43.998024 | 149 | 0.587421 | false |
randy3k/SublimePluginUnitTestHarness | sublime_unittest/suite.py | 1 | 1653 | from unittest import suite
class DeferrableSuite(suite.TestSuite):
r'''Deferrable test suite.
Run method is basically a copy from suite.TestSuite, but it is a
generator. If a generator is returned by a test, there is entered
a consumer loop and for each step there is done a step here.
'''
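    # Illustrative sketch (not from the original source): any test whose call returns a
    # generator -- for example a deferrable test case whose run() yields between steps --
    # is consumed incrementally by the loop below, e.g.
    #
    #   class MyDeferrableTest(DeferrableTestCase):   # hypothetical base class name
    #       def test_something(self):
    #           yield 10          # hand control back (e.g. to Sublime's event loop)
    #           self.assertTrue(True)
    #
    # How the yielded values are interpreted is up to the runner driving this suite.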
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if suite._isnotsuite(test):
self._tearDownPreviousClass(test, result)
yield
self._handleModuleFixture(test, result)
yield
self._handleClassSetUp(test, result)
yield
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if not debug:
#import spdb ; spdb.start()
deferred = test(result)
if deferred is not None:
for x in deferred: yield x
else:
deferred = test.debug()
if deferred is not None:
for x in deferred: yield x
yield
if topLevel:
self._tearDownPreviousClass(None, result)
yield
self._handleModuleTearDown(result)
yield
result._testRunEntered = False
| bsd-2-clause | -9,171,879,996,287,597,000 | 30.188679 | 74 | 0.535995 | false |
JFK422/Hitch | components/Menu/menuEditTab.py | 1 | 2326 | import qtawesome as qta
from components.Menu import menuSeperator
from PyQt5 import QtGui, QtCore, QtWidgets
#Menu widget placed in the stack of vLPart
class MenuEdit(QtWidgets.QWidget):
def setup(self):
vMenu = QtWidgets.QVBoxLayout()
vMenu.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
vMenu.setContentsMargins(QtCore.QMargins(20,20,20,20))
scrollLay = QtWidgets.QVBoxLayout()
scrollLay.setContentsMargins(QtCore.QMargins(10,10,10,10))
scrollLay.setAlignment(QtCore.Qt.AlignTop)
scrollLayWid = QtWidgets.QWidget()
scrollLayWid.setObjectName("scrollMenuLay")
scrollLayWid.setLayout(scrollLay)
fileText = QtWidgets.QLabel("Edit")
fileText.setObjectName("menuTitle")
vMenu.addWidget(fileText)
scroll = QtWidgets.QScrollArea()
scroll.setWidget(scrollLayWid)
scroll.setWidgetResizable(True)
vMenu.addWidget(scroll)
#Add icons later!
undo = QtWidgets.QPushButton("Undo")
undo.setMaximumHeight(50)
undo.setObjectName("scrollMenuItem")
scrollLay.addWidget(undo)
redo = QtWidgets.QPushButton("Redo")
redo.setMaximumHeight(50)
redo.setObjectName("scrollMenuItem")
scrollLay.addWidget(redo)
sep = menuSeperator.MenuSeperator()
sep.setup()
scrollLay.addWidget(sep)
cut = QtWidgets.QPushButton("Cut")
cut.setMaximumHeight(50)
cut.setObjectName("scrollMenuItem")
scrollLay.addWidget(cut)
copy = QtWidgets.QPushButton("Copy")
copy.setMaximumHeight(50)
copy.setObjectName("scrollMenuItem")
scrollLay.addWidget(copy)
paste = QtWidgets.QPushButton("Paste")
paste.setMaximumHeight(50)
paste.setObjectName("scrollMenuItem")
scrollLay.addWidget(paste)
sep2 = menuSeperator.MenuSeperator()
sep2.setup()
scrollLay.addWidget(sep2)
search = QtWidgets.QLineEdit("")
search.setPlaceholderText("Search")
search.setMinimumHeight(50)
search.setObjectName("menuSearch")
scrollLay.addWidget(search)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
self.setLayout(vMenu) | gpl-3.0 | 5,954,955,563,805,491,000 | 31.774648 | 115 | 0.674549 | false |
dkeester/learn_python | file_read_print/file_read_print.py | 1 | 1748 | # /usr/bin/env python3
# -*- coding: utf-8 -*-
# A program to figure out the basics of file I/O
data = """\
I met a traveller from an antique land
Who said: Two vast and trunkless legs of stone
Stand in the desert. Near them, on the sand,
Half sunk, a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them and the heart that fed:
And on the pedestal these words appear:
'My name is Ozymandias, king of kings:
Look on my works, ye Mighty, and despair!'
Nothing beside remains. Round the decay
Of that colossal wreck, boundless and bare
The lone and level sands stretch far away.
-- Ozymandias by Percy Shelley
"""
# write the file
file = open('data_file', mode='w', encoding='utf-8')
file.write(data)
file.close()
# get some info about the file
file = open('data_file', mode='r', encoding='utf-8')
print("After open...")
print("name: " + file.name)
print("encoding: " + file.encoding)
print("mode: " + file.mode)
file.close()
print("After close...")
print("name: " + file.name)
print("encoding: " + file.encoding)
print("mode: " + file.mode)
# print the file and close automatically
with open('data_file', mode='r', encoding='utf-8') as file:
for line in file:
print(line, end='')
# print the file in reverse, close automatically
with open('data_file', mode='r', encoding='utf-8') as file:
lines = file.read()
for line in reversed(lines):
print(line, end='')
print('\n')
# print the file line-by-line in reverse
with open('data_file', mode='r', encoding='utf-8') as file:
lines = list(file)
for line in reversed(lines):
print(line, end='')
| apache-2.0 | 6,787,229,724,184,578,000 | 29.666667 | 59 | 0.688787 | false |
jhamman/storylines | setup.py | 1 | 4509 | #!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
DISTNAME = 'storylines'
LICENSE = 'GNU General Public License v3.0'
AUTHOR = 'Joseph Hamman'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/jhamman/storylines'
CLASSIFIERS = [
'Development Status :: 1 - Planning',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
INSTALL_REQUIRES = ['xarray >= 0.8.2']
TESTS_REQUIRE = ['pytest >= 3.0.3']
DESCRIPTION = "Quantitative hydrologic storylines to assess climate impacts"
LONG_DESCRIPTION = """
**storylines** is a framework for characterizing uncertainty in traditional
hydroloic climate impacts modeling chains (climate models, downscaling methods,
hydrologic models). It includes tools for evaluating model fidelity and culling
models accordingly to reduce these uncertainties, and finally distilling
projections into a discrete set of quantitative hydrologic storylines that
represent key, impact-focused, features from the full range of future
scenarios.
**storylines** is being developed at the National Center for Atmospheric
Research (NCAR_), Research Applications Laboratory (RAL_) - Hydrometeorological
Applications Program (HAP_) under the support of USACE.
.. _NCAR: http://ncar.ucar.edu/
.. _RAL: https://www.ral.ucar.edu
.. _HAP: https://www.ral.ucar.edu/hap
Important links
---------------
- HTML documentation: http://storylines.readthedocs.io
- Issue tracker: http://github.com/jhamman/storylines/issues
- Source code: http://github.com/jhamman/storylines
"""
# code to extract and write the version copied from pandas
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git', 'git.cmd']:
try:
pipe = subprocess.Popen(
[cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so, serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('storylines/version.py'):
warnings.warn("Couldn't get git revision, using existing"
"storylines/version.py")
write_version = False
else:
warnings.warn(
"Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone
# (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}", rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev = "v%s.dev-%s" % (VERSION, rev)
        # Strip leading v from tags format "vx.y.z" to get the version string
FULLVERSION = rev.lstrip('v')
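        # e.g. a describe string like "v0.0.0-42-gabc1234" becomes "0.0.0-42-gabc1234"
        # (the revision shown here is a made-up example).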
else:
FULLVERSION += QUALIFIER
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'storylines', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
if write_version:
write_version_py()
setup(name=DISTNAME,
version=FULLVERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=['storylines', 'storylines.tools'],
package_data={'storylines': ['test/data/*']})
| gpl-3.0 | 5,283,064,456,767,113,000 | 29.883562 | 79 | 0.642271 | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/views/ui_results_browser.py | 1 | 12710 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/ckla/Documents/workspace/opus_trunk/opus_gui/results_manager/views/results_browser.ui'
#
# Created: Sun May 10 17:20:29 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_ResultsBrowser(object):
def setupUi(self, ResultsBrowser):
ResultsBrowser.setObjectName("ResultsBrowser")
ResultsBrowser.resize(819, 744)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ResultsBrowser.sizePolicy().hasHeightForWidth())
ResultsBrowser.setSizePolicy(sizePolicy)
self.gridLayout_4 = QtGui.QGridLayout(ResultsBrowser)
self.gridLayout_4.setObjectName("gridLayout_4")
self.splitter_2 = QtGui.QSplitter(ResultsBrowser)
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.groupBox_3 = QtGui.QGroupBox(self.splitter_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setAutoFillBackground(False)
self.groupBox_3.setFlat(False)
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setObjectName("verticalLayout")
self.configSplitter = QtGui.QSplitter(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.configSplitter.sizePolicy().hasHeightForWidth())
self.configSplitter.setSizePolicy(sizePolicy)
self.configSplitter.setOrientation(QtCore.Qt.Horizontal)
self.configSplitter.setHandleWidth(12)
self.configSplitter.setObjectName("configSplitter")
self.groupBox = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setBaseSize(QtCore.QSize(0, 100))
self.groupBox.setFlat(True)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setMargin(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.lst_available_runs = QtGui.QListWidget(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lst_available_runs.sizePolicy().hasHeightForWidth())
self.lst_available_runs.setSizePolicy(sizePolicy)
self.lst_available_runs.setMinimumSize(QtCore.QSize(0, 0))
self.lst_available_runs.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lst_available_runs.setBaseSize(QtCore.QSize(100, 50))
self.lst_available_runs.setAlternatingRowColors(True)
self.lst_available_runs.setObjectName("lst_available_runs")
self.verticalLayout_4.addWidget(self.lst_available_runs)
self.groupBox_2 = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setBaseSize(QtCore.QSize(20, 0))
self.groupBox_2.setFlat(True)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.lst_years = QtGui.QListWidget(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lst_years.sizePolicy().hasHeightForWidth())
self.lst_years.setSizePolicy(sizePolicy)
self.lst_years.setMinimumSize(QtCore.QSize(0, 0))
self.lst_years.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lst_years.setBaseSize(QtCore.QSize(20, 50))
self.lst_years.setAlternatingRowColors(True)
self.lst_years.setObjectName("lst_years")
self.verticalLayout_3.addWidget(self.lst_years)
self.groupBox_4 = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_4.sizePolicy().hasHeightForWidth())
self.groupBox_4.setSizePolicy(sizePolicy)
self.groupBox_4.setBaseSize(QtCore.QSize(500, 0))
self.groupBox_4.setFlat(True)
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.indicator_table = QtGui.QTableWidget(self.groupBox_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(7)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.indicator_table.sizePolicy().hasHeightForWidth())
self.indicator_table.setSizePolicy(sizePolicy)
self.indicator_table.setMinimumSize(QtCore.QSize(0, 0))
self.indicator_table.setBaseSize(QtCore.QSize(500, 50))
self.indicator_table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.indicator_table.setDragDropOverwriteMode(False)
self.indicator_table.setAlternatingRowColors(True)
self.indicator_table.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.indicator_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.indicator_table.setTextElideMode(QtCore.Qt.ElideNone)
self.indicator_table.setShowGrid(True)
self.indicator_table.setColumnCount(3)
self.indicator_table.setObjectName("indicator_table")
self.indicator_table.setColumnCount(3)
self.indicator_table.setRowCount(0)
self.verticalLayout_2.addWidget(self.indicator_table)
self.verticalLayout.addWidget(self.configSplitter)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 2, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.cb_auto_gen = QtGui.QCheckBox(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_auto_gen.sizePolicy().hasHeightForWidth())
self.cb_auto_gen.setSizePolicy(sizePolicy)
self.cb_auto_gen.setTristate(False)
self.cb_auto_gen.setObjectName("cb_auto_gen")
self.horizontalLayout.addWidget(self.cb_auto_gen)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lbl_current_selection = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.lbl_current_selection.setFont(font)
self.lbl_current_selection.setObjectName("lbl_current_selection")
self.horizontalLayout_2.addWidget(self.lbl_current_selection)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.pb_generate_results = QtGui.QPushButton(self.groupBox_3)
self.pb_urbancanvas = QtGui.QPushButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pb_generate_results.sizePolicy().hasHeightForWidth())
self.pb_generate_results.setSizePolicy(sizePolicy)
self.pb_generate_results.setMinimumSize(QtCore.QSize(0, 0))
self.pb_generate_results.setObjectName("pb_generate_results")
self.verticalLayout_5.addWidget(self.pb_generate_results)
sizePolicy.setHeightForWidth(self.pb_urbancanvas.sizePolicy().hasHeightForWidth())
self.pb_urbancanvas.setSizePolicy(sizePolicy)
self.pb_urbancanvas.setMinimumSize(QtCore.QSize(0, 0))
self.pb_urbancanvas.setObjectName("pb_urbancanvas")
self.verticalLayout_5.addWidget(self.pb_urbancanvas)
self.horizontalLayout_2.addLayout(self.verticalLayout_5)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tabwidget_visualizations = QtGui.QTabWidget(self.splitter_2)
self.tabwidget_visualizations.setMinimumSize(QtCore.QSize(0, 200))
self.tabwidget_visualizations.setObjectName("tabwidget_visualizations")
self.starttab = QtGui.QWidget()
self.starttab.setObjectName("starttab")
self.tabwidget_visualizations.addTab(self.starttab, "")
self.gridLayout_4.addWidget(self.splitter_2, 0, 0, 1, 1)
self.retranslateUi(ResultsBrowser)
self.tabwidget_visualizations.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(ResultsBrowser)
def retranslateUi(self, ResultsBrowser):
ResultsBrowser.setWindowTitle(QtGui.QApplication.translate("ResultsBrowser", "Result Browser", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Configure an indicator to view", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Simulation Runs", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Years", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_4.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Indicators", None, QtGui.QApplication.UnicodeUTF8))
self.indicator_table.setSortingEnabled(False)
self.cb_auto_gen.setToolTip(QtGui.QApplication.translate("ResultsBrowser", "Automatically generate and view the indicator when it\'s selected", None, QtGui.QApplication.UnicodeUTF8))
self.cb_auto_gen.setText(QtGui.QApplication.translate("ResultsBrowser", "Automatically generate", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_current_selection.setText(QtGui.QApplication.translate("ResultsBrowser", "current selection", None, QtGui.QApplication.UnicodeUTF8))
self.pb_generate_results.setText(QtGui.QApplication.translate("ResultsBrowser", "Generate results", None, QtGui.QApplication.UnicodeUTF8))
self.pb_urbancanvas.setText(QtGui.QApplication.translate("ResultsBrowser", "View in UrbanCanvas", None, QtGui.QApplication.UnicodeUTF8))
self.tabwidget_visualizations.setTabText(self.tabwidget_visualizations.indexOf(self.starttab), QtGui.QApplication.translate("ResultsBrowser", "starttab", None, QtGui.QApplication.UnicodeUTF8))
################################
self.cb_auto_gen.setText(QtGui.QApplication.translate("ResultsBrowser", "Uncertainty options generate", None, QtGui.QApplication.UnicodeUTF8))
| agpl-3.0 | -1,741,197,974,735,579,600 | 61.925743 | 200 | 0.735877 | false |
rherlt/GoodVibrations | src/GoodVibrations.Listener/speech_recognition/examples/microphone_recognition.py | 1 | 3228 | #!/usr/bin/env python3
# NOTE: this example requires PyAudio because it uses the Microphone class
import speech_recognition as sr
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# recognize speech using Sphinx
try:
print("Sphinx thinks you said " + r.recognize_sphinx(audio))
except sr.UnknownValueError:
print("Sphinx could not understand audio")
except sr.RequestError as e:
print("Sphinx error; {0}".format(e))
# recognize speech using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
print("Google Speech Recognition thinks you said " + r.recognize_google(audio))
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
# recognize speech using Wit.ai
WIT_AI_KEY = "INSERT WIT.AI API KEY HERE" # Wit.ai keys are 32-character uppercase alphanumeric strings
try:
print("Wit.ai thinks you said " + r.recognize_wit(audio, key=WIT_AI_KEY))
except sr.UnknownValueError:
print("Wit.ai could not understand audio")
except sr.RequestError as e:
print("Could not request results from Wit.ai service; {0}".format(e))
# recognize speech using Microsoft Bing Voice Recognition
BING_KEY = "INSERT BING API KEY HERE" # Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings
try:
print("Microsoft Bing Voice Recognition thinks you said " + r.recognize_bing(audio, key=BING_KEY))
except sr.UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
# recognize speech using Houndify
HOUNDIFY_CLIENT_ID = "INSERT HOUNDIFY CLIENT ID HERE" # Houndify client IDs are Base64-encoded strings
HOUNDIFY_CLIENT_KEY = "INSERT HOUNDIFY CLIENT KEY HERE" # Houndify client keys are Base64-encoded strings
try:
print("Houndify thinks you said " + r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID, client_key=HOUNDIFY_CLIENT_KEY))
except sr.UnknownValueError:
print("Houndify could not understand audio")
except sr.RequestError as e:
print("Could not request results from Houndify service; {0}".format(e))
# recognize speech using IBM Speech to Text
IBM_USERNAME = "INSERT IBM SPEECH TO TEXT USERNAME HERE" # IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
IBM_PASSWORD = "INSERT IBM SPEECH TO TEXT PASSWORD HERE" # IBM Speech to Text passwords are mixed-case alphanumeric strings
try:
print("IBM Speech to Text thinks you said " + r.recognize_ibm(audio, username=IBM_USERNAME, password=IBM_PASSWORD))
except sr.UnknownValueError:
print("IBM Speech to Text could not understand audio")
except sr.RequestError as e:
print("Could not request results from IBM Speech to Text service; {0}".format(e))
| mit | 1,320,618,614,434,476,300 | 46.470588 | 148 | 0.757125 | false |
ThiefMaster/jinja2 | scripts/make-release.py | 2 | 4375 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
try:
import wheel
except ImportError:
wheel = None
_date_strip_re = re.compile(r'(?<=\d)(st|nd|rd|th)')
def parse_changelog():
with open('CHANGES') as f:
lineiter = iter(f)
for line in lineiter:
match = re.search('^Version\s+(.*)', line.strip())
if match is None:
continue
length = len(match.group(1))
version = match.group(1).strip()
if lineiter.next().count('-') != len(match.group(0)):
continue
while 1:
change_info = lineiter.next().strip()
if change_info:
break
match = re.search(r'(?:codename (.*),\s*)?'
r'released on (\w+\s+\d+\w+\s+\d+)(?i)',
change_info)
if match is None:
continue
codename, datestr = match.groups()
return version, parse_date(datestr), codename
def bump_version(version):
try:
parts = map(int, version.split('.'))
except ValueError:
fail('Current version is not numeric')
parts[-1] += 1
return '.'.join(map(str, parts))
def parse_date(string):
string = _date_strip_re.sub('', string)
return datetime.strptime(string, '%B %d %Y')
def set_filename_version(filename, version_number, pattern):
changed = []
def inject_version(match):
before, old, after = match.groups()
changed.append(True)
return before + version_number + after
with open(filename) as f:
contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
inject_version, f.read())
if not changed:
fail('Could not find %s in %s', pattern, filename)
with open(filename, 'w') as f:
f.write(contents)
def set_init_version(version):
info('Setting __init__.py version to %s', version)
set_filename_version('jinja2/__init__.py', version, '__version__')
def set_setup_version(version):
info('Setting setup.py version to %s', version)
set_filename_version('setup.py', version, 'version')
def build_and_upload():
    cmd = [sys.executable, 'setup.py', 'sdist', 'upload']
    if wheel is not None:
        cmd.insert(3, 'bdist_wheel')
Popen(cmd).wait()
def fail(message, *args):
print >> sys.stderr, 'Error:', message % args
sys.exit(1)
def info(message, *args):
print >> sys.stderr, message % args
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
rv = parse_changelog()
if rv is None:
fail('Could not parse changelog')
version, release_date, codename = rv
dev_version = bump_version(version) + '.dev'
info('Releasing %s (codename %s, release date %s)',
version, codename, release_date.strftime('%d/%m/%Y'))
tags = get_git_tags()
if version in tags:
fail('Version "%s" is already tagged', version)
if release_date.date() != date.today():
fail('Release date is not today (%s != %s)', release_date.date(), date.today())
if not git_is_clean():
fail('You have uncommitted changes in git')
if wheel is None:
print ('Warning: You need to install the wheel package '
'to upload a wheel distribution.')
set_init_version(version)
set_setup_version(version)
make_git_commit('Bump version number to %s', version)
make_git_tag(version)
build_and_upload()
set_init_version(dev_version)
set_setup_version(dev_version)
if __name__ == '__main__':
main()
| bsd-3-clause | -7,196,274,055,336,344,000 | 25.676829 | 87 | 0.573714 | false |
ReactiveX/RxPY | tests/test_observable/test_elementat.py | 1 | 4126 | import unittest
import rx
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestElementAt(unittest.TestCase):
def test_elementat_first(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at(0))
results = scheduler.start(create=create)
assert results.messages == [on_next(280, 42), on_completed(280)]
assert xs.subscriptions == [subscribe(200, 280)]
def test_elementat_other(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at(2))
results = scheduler.start(create=create)
assert results.messages == [on_next(470, 44), on_completed(470)]
assert xs.subscriptions == [subscribe(200, 470)]
def test_elementat_outofrange(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at(3))
results = scheduler.start(create=create)
self.assertEqual(1, len(results.messages))
self.assertEqual(600, results.messages[0].time)
self.assertEqual('E', results.messages[0].value.kind)
assert(results.messages[0].value.exception)
def test_elementat_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_error(420, ex))
def create():
return xs.pipe(ops.element_at(3))
results = scheduler.start(create=create)
assert results.messages == [on_error(420, ex)]
assert xs.subscriptions == [subscribe(200, 420)]
def test_element_at_or_default_first(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at_or_default(0))
results = scheduler.start(create=create)
assert results.messages == [on_next(280, 42), on_completed(280)]
assert xs.subscriptions == [subscribe(200, 280)]
def test_element_at_or_default_other(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at_or_default(2))
results = scheduler.start(create=create)
assert results.messages == [on_next(470, 44), on_completed(470)]
assert xs.subscriptions == [subscribe(200, 470)]
def test_element_at_or_default_outofrange(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_next(470, 44), on_completed(600))
def create():
return xs.pipe(ops.element_at_or_default(3, 0))
results = scheduler.start(create=create)
assert results.messages == [on_next(600, 0), on_completed(600)]
assert xs.subscriptions == [subscribe(200, 600)]
def test_element_at_or_default_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_error(420, ex))
def create():
return xs.pipe(ops.element_at_or_default(3))
results = scheduler.start(create=create)
assert results.messages == [on_error(420, ex)]
assert xs.subscriptions == [subscribe(200, 420)]
if __name__ == '__main__':
unittest.main()
| mit | -7,060,937,041,788,435,000 | 34.568966 | 117 | 0.639118 | false |
renzon/pswdclient | test/manager_tests.py | 1 | 2487 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from pswdclient.model import SignSecret
from pswdclient.manager import FindOrCreateSecrets, RenewSecrets, RevokeSecrets
class TestSignSecret(GAETestCase):
def test_find_or_create(self):
# Secret creation
command=FindOrCreateSecrets()
command.execute()
sign_secrets= SignSecret.query().order(-SignSecret.creation).fetch()
self.assertEqual(1,len(sign_secrets))
# Last secret reuse
command2=FindOrCreateSecrets()
command2.execute()
sign_secrets= SignSecret.query().order(-SignSecret.creation).fetch()
self.assertEqual(1,len(sign_secrets))
self.assertIsNotNone(command.result)
self.assertEqual(command.result,command2.result)
class TestRenewSecrets(GAETestCase):
def test_simple_without_invalidation(self):
find_command=FindOrCreateSecrets()
find_command.execute()
RenewSecrets().execute()
find_after_renew=FindOrCreateSecrets()
find_after_renew.execute()
self.assertEqual(2,len(find_after_renew.result))
self.assertEqual(find_command.result[0],find_after_renew.result[1])
RenewSecrets().execute()
find_after_renew2=FindOrCreateSecrets()
find_after_renew2.execute()
self.assertEqual(2,len(find_after_renew2.result))
self.assertEqual(find_after_renew.result[0],find_after_renew2.result[1])
self.assertNotEqual(find_after_renew.result[1],find_after_renew2.result[0])
def test_simple_with_invalidation(self):
find_command=FindOrCreateSecrets()
find_command.execute()
RevokeSecrets().execute()
find_after_revoke=FindOrCreateSecrets()
find_after_revoke.execute()
self.assertEqual(2,len(find_after_revoke.result))
self.assertNotEqual(find_command.result[0],find_after_revoke.result[1])
RevokeSecrets().execute()
find_after_revoke2=FindOrCreateSecrets()
find_after_revoke2.execute()
self.assertEqual(2,len(find_after_revoke2.result))
self.assertNotEqual(find_after_revoke.result[1],find_after_revoke2.result[0])
self.assertNotEqual(find_after_revoke.result[0],find_after_revoke2.result[0])
self.assertNotEqual(find_after_revoke.result[0],find_after_revoke2.result[1])
self.assertNotEqual(find_after_revoke.result[1],find_after_revoke2.result[1])
| gpl-2.0 | 4,326,890,402,456,468,500 | 40.45 | 85 | 0.697226 | false |
Metaleer/hexchat-scripts | regexkb.py | 1 | 2158 | from __future__ import print_function
__module_name__ = 'Regex Kickban'
__module_version__ = '0.2'
__module_description__ = 'Kickbans clients from specified channels on regex match against their message or notice to channel'
__author__ = 'Daniel A. J.'
# TODO:
# When ChanServ-type services are available, ask for ops if not opped
# If client is signed into account, ban accountname instead of host
import hexchat
import re
re = re.compile(r'\bfoo\b') # regex pattern to be matched against in user's message or notice
check_channels = ['#test', '#fooness'] # channel(s) where script is active
net = 'Bouncer' # network where script is active
def msg_search(word, word_eol, userdata):
if word[2].startswith('#') == False:
return
user_message = ' '.join(word[3:])[1:]
channel = word[2]
user_nickname = ''.join(word[0][1:word[0].index('!')])
user_host = ''.join(word[0][word[0].index('@'):])
for x in check_channels:
if re.search(user_message) != None and channel == x and hexchat.get_info("network") == net:
hexchat.command("mode %s +b *!*%s" % (channel, user_host))
hexchat.command("kick %s regex pattern detected" % user_nickname)
return hexchat.EAT_ALL
def notice_search(word, word_eol, userdata):
if word[2].startswith('#') == False:
return
user_message = ' '.join(word[3:])[1:]
channel = word[2]
user_nickname = ''.join(word[0][1:word[0].index('!')])
user_host = ''.join(word[0][word[0].index('@'):])
for x in check_channels:
if re.search(user_message) != None and channel == x and hexchat.get_info("network") == net:
hexchat.command("mode %s +b *!*%s" % (channel, user_host))
hexchat.command("kick %s regex pattern detected" % user_nickname)
return hexchat.EAT_ALL
def unload_regexkb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
hexchat.hook_server("PRIVMSG", msg_search)
hexchat.hook_server("NOTICE", notice_search)
hexchat.hook_unload(unload_regexkb)
print(__module_name__, 'version', __module_version__, 'loaded.')
| mit | 3,880,300,969,970,562,600 | 36.206897 | 125 | 0.629286 | false |
andreimaximov/algorithms | leetcode/algorithms/wildcard-matching/solution.py | 1 | 1743 | #!/usr/bin/env python
class Solution(object):
def isMatch(self, s, p):
"""
Returns a boolean indicating if the pattern p matches string s. See
LeetCode problem description for full pattern spec.
"""
n = len(s)
m = len(p)
# If the pattern has more non-star chars than s has total chars, there
# is no way we can match the pattern even if we ignore all stars.'
if (m - p.count('*') > n):
return False
# Each lastDP[i] represents isMatch(s[:i], p[:j]) for previous j. We do
# not need a full 2D matrix since the recursive relation only depends
# on a sub-problem that is one level lower.
lastDP = [False] * (n + 1)
lastDP[0] = True
for j in range(1, m + 1):
# Create DP for the current j.
nextDP = [False] * (n + 1)
# Empty s matches p prefix if prefix contains only *'s.
nextDP[0] = lastDP[0] and p[j - 1] == '*'
for i in range(1, n + 1):
if p[j - 1] == '*':
# Skip * or current character.
nextDP[i] = lastDP[i] or nextDP[i - 1]
elif p[j - 1] == '?':
# Skip current character and ?.
nextDP[i] = lastDP[i - 1]
else:
# Ensure characters match and that s[:i] matches p[:j].
nextDP[i] = (s[i - 1] == p[j - 1]) and \
lastDP[i - 1]
lastDP = nextDP
return lastDP[-1]
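# A few expected results for the matcher above (standard wildcard cases,
# checked by hand against the DP recurrence):
#   Solution().isMatch("aa", "a")       -> False
#   Solution().isMatch("aa", "*")       -> True
#   Solution().isMatch("cb", "?a")      -> False
#   Solution().isMatch("adceb", "*a*b") -> True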
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/wildcard-matching/')
if __name__ == '__main__':
main()
| mit | -6,150,173,116,118,105,000 | 33.176471 | 79 | 0.491681 | false |
caspar/PhysicsLab | 21_Midterm/plack2.py | 1 | 1726 | # Lab 0
# Linear Least Squares Fit
# Author Caspar Lant
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# load csv file
DATA = "data.csv";
frequency, voltage = np.loadtxt(DATA, skiprows=1 , unpack=True, delimiter=',');
# plot temperature vs. pressure + error bars
plt.ylabel("Voltage (V)");
plt.xlabel("Frequency ($10^{14}$ Hz)");
plt.title("Voltage vs. Frequency");
plt.plot(frequency, voltage, 'ro', linestyle = '', mec='r', ms=5 );
# linear least squares fit line
def least_squares_fit (x, y):
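    # Ordinary least squares for y = slope*x + intercept, in the centered form
    # used below: slope = sum(y*(x - xavg)) / sum(x*(x - xavg)),
    # intercept = yavg - slope*xavg.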
xavg = x.mean()
slope = ( y * ( x - xavg)).sum() / (x*(x-xavg)).sum()
intercept = y.mean()-slope*xavg
return slope, intercept
slope, intercept = least_squares_fit(frequency, voltage);
# create arrays to plot
y1 = slope * 7 + intercept; # y1 = m(x1) + b
y2 = slope * 0 + intercept; # y2 = m(x2) + b
x_range = [0, 7]; # array of x values
y_range = [y2, y1]; # array of y values
PLANCK = slope* 1.60217662
print("plancks constant:", PLANCK)
print("or", 1/PLANCK)
# show the graph
plt.plot(x_range, y_range, color="blue", linestyle = '-', label="Actual");
slope = 0.413566766
y1 = slope * 7 + intercept; # y1 = m(x1) + b
y2 = slope * 0 + intercept; # y2 = m(x2) + b
x_range = [0, 7]; # array of x values
y_range = [y2, y1]; # array of y values
PLANCK = slope * 1.60217662
# print("plancks constant:", PLANCK)
# print("or", 1/PLANCK)
# show the graph
plt.plot(x_range, y_range, color="grey",linestyle = ':', label="Expected");
plt.legend(loc='best')
plt.annotate("Slope = $6.14 * 10^{-34}$", xy=(2.27, -0.32), xytext=(2.5, -.7), arrowprops=dict(arrowstyle="->"))
# plt.legend(["slope = 1"])
plt.show();
| mit | 2,794,208,592,931,171,300 | 30.381818 | 112 | 0.614716 | false |
pouzzler/IED-Logic-Simulator | doc/source/conf.py | 1 | 8475 | # -*- coding: utf-8 -*-
#
# magic_circuit_simulator documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 7 09:13:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'magic_circuit_simulator'
copyright = u'2014, Sebastien Magnien & Mathieu Fourcroy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'magic_circuit_simulatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'magic_circuit_simulator.tex', u'magic\\_circuit\\_simulator Documentation',
u'Sebastien Magnien \\& Mathieu Fourcroy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'magic_circuit_simulator', u'magic_circuit_simulator Documentation',
[u'Sebastien Magnien & Mathieu Fourcroy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'magic_circuit_simulator', u'magic_circuit_simulator Documentation',
u'Sebastien Magnien & Mathieu Fourcroy', 'magic_circuit_simulator', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | 66,632,025,201,985,880 | 31.347328 | 106 | 0.710206 | false |
Czxck001/opensb | backend/logic.py | 1 | 6629 | # encoding: utf-8
''' Core logics to arrange the word flashcards
Wordbook
A wordbook contains words with their explanations.
Memory
The memory refers to the user's proficiency of words. It records the number of
times the user had recognize each word.
Progress (of task)
Every time the user starts server (more precisely, initialize the core logic),
a task is arranged. The task contains a subset of words in the wordbook.
When the task is ongoing by the user, progress is made. The progress is
depicted by assigning a ProgressStatus for each word. When a word is either
recognized or not recognized by the user, its ProgressStatus will transfer.
Group
Words is taken by user in groups. Each group consists several words. After
each group's recognition, the user will get a chance to review the words. At
the same time, the core logic should update the progress of task into the
memory.
'''
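# Illustrative shapes of the structures described above (example values only):
#   wordbook: {'ubiquitous': 'existing everywhere', ...}   word -> explanation text
#   memory:   {'ubiquitous': 2, ...}                        word -> proficiency count
#   progress: OrderedDict(word -> ProgressStatus), rebuilt per task in make_task()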
from collections import OrderedDict
class ProgressStatus:
UNKNOWN = 0
GOOD = 1
BAD = 2
WANTING = 3
class CoreLogicConfig:
group_size = 7
task_size = 100
num_new_word = 50
max_prof = 3
class CoreLogic:
know_trans = {
ProgressStatus.UNKNOWN: ProgressStatus.GOOD,
ProgressStatus.BAD: ProgressStatus.WANTING,
ProgressStatus.WANTING: ProgressStatus.GOOD,
}
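    # Transition table applied in _i_know(): a recognized word moves
    # UNKNOWN -> GOOD, BAD -> WANTING, WANTING -> GOOD. _i_dont_know() always
    # resets a word to BAD, so after a miss the word must be recognized twice
    # in a row before it reaches GOOD again.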
def __init__(self,
wordbook,
mdb,
cmudict=None,
config=None):
self._wordbook = wordbook
self._mdb = mdb
self._cmudict = cmudict or {}
self._config = config or CoreLogicConfig()
memory = self._mdb.get_memory()
# only consider memory of words in wordbook
self._memory = {word: prof for word, prof in memory.items()
if word in self._wordbook}
# take new words in wordbook into memory if they aren't in there
for word in self._wordbook:
if word not in self._memory:
self._memory[word] = 0
# empty progress
self._progress = OrderedDict()
self._progress_updated = set() # has been marked as GOOD today
def make_task(self,
max_prof=None,
num_new_word=None,
task_size=None):
# if not specified, use default value in _config
max_prof = max_prof or self._config.max_prof
num_new_word = num_new_word or self._config.num_new_word
task_size = task_size or self._config.task_size
from random import shuffle
old_words = [word for word, prof in self._memory.items()
if 0 < prof < max_prof]
new_words = [word for word, prof in self._memory.items()
if 0 == prof]
shuffle(old_words)
shuffle(new_words)
new_words = new_words[:num_new_word]
old_words = old_words[:(task_size-len(new_words))]
task_words = new_words + old_words
shuffle(task_words)
self._progress = OrderedDict(
(word, ProgressStatus.UNKNOWN) for word in task_words
)
self._progress_updated = set() # has been marked as GOOD today
@property
def wordbook(self):
''' wordbook = {word: text}
'''
return self._wordbook
@property
def memory(self):
''' memory = {word: proficiency}
proficiency is number of times the word reaching GOOD
'''
return self._memory
@property
def progress(self):
''' progress = {word: status}
status in ProgressStatus
'''
return self._progress
@property
def config(self):
return self._config
def _update_memory(self):
''' Update the memory according to current progress of task, and then
sync with mdb
progress -> memory -> mdb
'''
for word, status in self.progress.items():
if status == ProgressStatus.GOOD \
and word not in self._progress_updated:
self._memory[word] += 1
self._progress_updated.add(word)
self._mdb.update_memory(self._memory)
def _i_know(self, word):
assert self.progress[word] != ProgressStatus.GOOD
self.progress[word] = self.know_trans[self.progress[word]]
self._mdb.log_word(word, True)
def _i_dont_know(self, word):
self.progress[word] = ProgressStatus.BAD
self._mdb.log_word(word, False)
def count_memory(self):
''' Count and stat the memory
'''
from collections import Counter
return dict(Counter(v for _, v in self._memory.items()))
def count_progress(self):
''' Count the number of words in each status
'''
from collections import Counter
counter = Counter(v for _, v in self.progress.items())
return {
'unknown': counter[ProgressStatus.UNKNOWN],
'good': counter[ProgressStatus.GOOD],
'bad': counter[ProgressStatus.BAD],
'wanting': counter[ProgressStatus.WANTING],
}
def update_group(self, know_status):
        ''' Get the know-status from the frontend, apply the corresponding
        actions, then update the memory.
'''
for word, know in know_status.items():
if know:
self._i_know(word)
else:
self._i_dont_know(word)
# update memory
self._update_memory()
def next_group(self):
''' return: [(word, test), (word, test), ... ]
'''
pc = self.count_progress() # progress counter
if pc['bad'] == pc['wanting'] == pc['unknown'] == 0:
return [], pc
# if there is too many bad words, focus on the bad words
elif pc['bad'] > self._config.group_size \
or pc['wanting'] == pc['unknown'] == 0:
words = [word for word, status in self.progress.items()
if status == ProgressStatus.BAD]
else:
words = [word for word, status in self.progress.items()
if status
in {ProgressStatus.UNKNOWN, ProgressStatus.WANTING}]
from random import shuffle
shuffle(words)
group = []
for k, word in enumerate(words):
if k == self._config.group_size:
break
kk = self._cmudict[word] if word in self._cmudict else ''
group.append({'word': word,
'text': self.wordbook[word],
'kk': kk}) # Kenyon & Knott phonetic
return group, pc
| mit | -6,712,567,408,817,578,000 | 31.179612 | 78 | 0.57701 | false |
csueiras/rednaskela | rednaskela/core.py | 1 | 1903 | #
# core.py
#
# REDNASKELA: The Mobile Mini-RTS
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Christian A. Sueiras
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Core classes"""
class Singleton(object):
""" Singleton Decorator """
def __init__(self, decorated):
self._decorated = decorated
def get_instance(self, *args, **kwargs):
"""Returns the unique instance of this singleton (singleton enfocement)"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated(*args, **kwargs)
return self._instance
def __call__(self, *args, **kwargs):
"""Disables calling an instance of a Singleton directly (enforces get_instance)"""
raise TypeError("Singleton must be accessed through get_instance")
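# Illustrative usage of the decorator above (hypothetical class name):
#
#   @Singleton
#   class Registry(object):
#       pass
#
#   registry = Registry.get_instance()   # always returns the same instance
#   Registry()                            # raises TypeError, see __call__ above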
class ServerErrorException(Exception):
pass
| mit | -5,758,576,920,749,183,000 | 37.06 | 90 | 0.711508 | false |
raulhigueras/LolaVA | speech/input.py | 1 | 2363 | # -*- coding: utf-8 -*-
# Collects the input, either via Google's speech-to-text or via the Telegram bot
import speech_recognition as sr
import time, datetime, telepot, os
from config import get_config
bot = telepot.Bot('BOT_KEY')
def ask():
modo = get_config.get_profile()["modo"]
os.system("aplay resources/sound2.wav")
if modo == "texto":
print "Esperando mensaje"
response = bot.getUpdates(offset=-5)
length = len(response)
print length
if(length > 0):
last_id = response[-1]["update_id"]
while last_id == bot.getUpdates(offset=-5)[-1]["update_id"]:
time.sleep(3)
else:
while length == len(response):
response = bot.getUpdates(offset=-5)
time.sleep(3)
print "---"
response = bot.getUpdates()
respuesta = clean_input(response[-1]["message"]["text"].lower())
print respuesta
return respuesta
elif modo == "audio":
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source)
try:
rec = r.recognize_google(audio, language="es-ES")
print ("Has dicho " + rec)
return rec.lower()
except sr.UnknownValueError:
print( "No se ha entendido el audio")
except sr.RequestError as e:
print("Ha habido un error con el reconocimiento de voz {0}".format(e))
else:
print "Hay un error con la configuración"
def listen():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Escuchando")
audio = r.listen(source)
try:
rec = r.recognize_google(audio, language="es-ES")
if (get_config.get_profile()['asistente'] in rec.lower()):
return True
else:
return False
except sr.UnknownValueError:
print( "No se ha entendido el audio")
except sr.RequestError as e:
print("Ha habido un error con el reconocimiento de voz {0}".format(e))
def clean_input(frase):
caracteres_especiales = {
'á':'a',
'é':'e',
'í':'i',
'ó':'o',
'ú':'u',
'ü':'u',
'ñ':'n',
'¿':'?',
'¡':'!',
}
frase = list(frase)
for i in range(0, len(frase)):
if caracteres_especiales.has_key(frase[i].encode("utf-8")):
frase[i] = caracteres_especiales[frase[i].encode("utf-8")]
if frase[0] == "?" or frase[0] == "!":
del frase[0]
if frase[-1] == "?" or frase[-1] == "!":
del frase[-1]
return "".join(frase)
| mit | 6,502,096,092,672,122,000 | 27.011905 | 86 | 0.59966 | false |
OCA/purchase-workflow | setup/_metapackage/setup.py | 1 | 3639 | import setuptools
with open('VERSION.txt', 'r') as f:
version = f.read().strip()
setuptools.setup(
name="odoo12-addons-oca-purchase-workflow",
description="Meta package for oca-purchase-workflow Odoo addons",
version=version,
install_requires=[
'odoo12-addon-procurement_purchase_no_grouping',
'odoo12-addon-product_form_purchase_link',
'odoo12-addon-product_supplier_code_purchase',
'odoo12-addon-purchase_allowed_product',
'odoo12-addon-purchase_analytic_global',
'odoo12-addon-purchase_blanket_order',
'odoo12-addon-purchase_commercial_partner',
'odoo12-addon-purchase_date_planned_manual',
'odoo12-addon-purchase_default_terms_conditions',
'odoo12-addon-purchase_delivery_split_date',
'odoo12-addon-purchase_deposit',
'odoo12-addon-purchase_discount',
'odoo12-addon-purchase_exception',
'odoo12-addon-purchase_force_invoiced',
'odoo12-addon-purchase_invoice_plan',
'odoo12-addon-purchase_landed_cost',
'odoo12-addon-purchase_last_price_info',
'odoo12-addon-purchase_line_procurement_group',
'odoo12-addon-purchase_location_by_line',
'odoo12-addon-purchase_manual_delivery',
'odoo12-addon-purchase_minimum_amount',
'odoo12-addon-purchase_open_qty',
'odoo12-addon-purchase_order_analytic_search',
'odoo12-addon-purchase_order_approval_block',
'odoo12-addon-purchase_order_approved',
'odoo12-addon-purchase_order_archive',
'odoo12-addon-purchase_order_general_discount',
'odoo12-addon-purchase_order_line_deep_sort',
'odoo12-addon-purchase_order_line_description',
'odoo12-addon-purchase_order_line_price_history',
'odoo12-addon-purchase_order_line_price_history_discount',
'odoo12-addon-purchase_order_line_sequence',
'odoo12-addon-purchase_order_line_stock_available',
'odoo12-addon-purchase_order_product_recommendation',
'odoo12-addon-purchase_order_product_recommendation_brand',
'odoo12-addon-purchase_order_product_recommendation_secondary_unit',
'odoo12-addon-purchase_order_secondary_unit',
'odoo12-addon-purchase_order_type',
'odoo12-addon-purchase_order_uninvoiced_amount',
'odoo12-addon-purchase_picking_state',
'odoo12-addon-purchase_price_recalculation',
'odoo12-addon-purchase_product_usage',
'odoo12-addon-purchase_propagate_qty',
'odoo12-addon-purchase_quick',
'odoo12-addon-purchase_reception_notify',
'odoo12-addon-purchase_reception_status',
'odoo12-addon-purchase_request',
'odoo12-addon-purchase_request_department',
'odoo12-addon-purchase_request_order_approved',
'odoo12-addon-purchase_request_product_usage',
'odoo12-addon-purchase_request_tier_validation',
'odoo12-addon-purchase_request_usage_department',
'odoo12-addon-purchase_requisition_auto_rfq',
'odoo12-addon-purchase_requisition_line_description',
'odoo12-addon-purchase_requisition_tier_validation',
'odoo12-addon-purchase_security',
'odoo12-addon-purchase_stock_price_unit_sync',
'odoo12-addon-purchase_stock_return_request',
'odoo12-addon-purchase_tier_validation',
'odoo12-addon-purchase_triple_discount',
'odoo12-addon-purchase_work_acceptance',
'odoo12-addon-subcontracted_service',
'odoo12-addon-supplier_calendar',
],
classifiers=[
'Programming Language :: Python',
'Framework :: Odoo',
]
)
| agpl-3.0 | 6,702,452,264,549,585,000 | 45.063291 | 76 | 0.677109 | false |
Strassengezwitscher/Strassengezwitscher | crowdgezwitscher/contact/mail.py | 1 | 5814 | # pylint: disable=bad-builtin
from email.utils import formatdate
from tempfile import TemporaryDirectory
from django.core.mail import EmailMessage, SafeMIMEText, SafeMIMEMultipart, make_msgid
from django.core.mail.utils import DNS_NAME
from django.core.mail.backends.locmem import EmailBackend as LocMemEmailBackend
from django.utils.encoding import smart_text, force_text
from django.conf import settings
from django.core import mail
from gnupg import GPG
from crowdgezwitscher.log import logger
from contact.models import Key
from contact.utils import GPGException, handle_gpg_error
class GPGEmailMessage(EmailMessage):
"""
Django's default email class on paranoia.
The email is encrypted (but not signed) during send() using GPG in PGP/MIME format.
"""
encrypted_subtype = 'encrypted'
gpg_attachment_filename = 'encrypted.asc'
def _encrypt(self, plain):
# test if we have public keys for all recipients
available_recipients = []
keys = []
for key in Key.objects.all():
keys.append(key)
available_recipients.extend(key.addresses.split(', '))
logger.debug("available_recipients: %s", available_recipients)
if not all(recipient in available_recipients for recipient in self.recipients()):
logger.error("Public key not present for at least one of these recipients: %s", self.recipients())
raise GPGException("Public key not present for at least one recipient")
# encryption
with TemporaryDirectory() as temp_dir:
gpg = GPG(gnupghome=temp_dir)
for key in keys:
gpg.import_keys(key.key)
res = gpg.encrypt(plain, self.recipients(), always_trust=True)
if not res:
handle_gpg_error(res, 'encryption')
return smart_text(res)
def message(self):
"""
Returns the final message to be sent, including all headers etc. Content and attachments are encrypted using
GPG in PGP/MIME format (RFC 3156).
"""
def build_plain_message():
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
return msg
def build_version_attachment():
version_attachment = SafeMIMEText('Version: 1\n', self.content_subtype, encoding)
del version_attachment['Content-Type']
version_attachment.add_header('Content-Type', 'application/pgp-encrypted')
version_attachment.add_header('Content-Description', 'PGP/MIME Versions Identification')
return version_attachment
def build_gpg_attachment():
gpg_attachment = SafeMIMEText(encrypted_msg, self.content_subtype, encoding)
del gpg_attachment['Content-Type']
gpg_attachment.add_header('Content-Type', 'application/octet-stream', name=self.gpg_attachment_filename)
gpg_attachment.add_header('Content-Disposition', 'inline', filename=self.gpg_attachment_filename)
gpg_attachment.add_header('Content-Description', 'OpenPGP encrypted message')
return gpg_attachment
encoding = self.encoding or settings.DEFAULT_CHARSET
# build message including attachments as it would also be built without GPG
msg = build_plain_message()
# encrypt whole message including attachments
encrypted_msg = self._encrypt(str(msg))
# build new message object wrapping the encrypted message
msg = SafeMIMEMultipart(_subtype=self.encrypted_subtype,
encoding=encoding,
protocol='application/pgp-encrypted')
version_attachment = build_version_attachment()
gpg_attachment = build_gpg_attachment()
msg.attach(version_attachment)
msg.attach(gpg_attachment)
self.extra_headers['Content-Transfer-Encoding'] = '7bit'
# add headers
# everything below this line has not been modified when overriding message()
############################################################################
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
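# Illustrative call site (assumed values; GPGEmailMessage subclasses Django's
# EmailMessage, so construction and send() follow the standard Django API, and
# every recipient must have a matching public key stored in contact.models.Key):
#
#   msg = GPGEmailMessage(subject='Hello', body='secret text',
#                         to=['alice@example.org'])
#   msg.send()  # the generated MIME message is the PGP/MIME structure built above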
class GPGLocMemEmailBackend(LocMemEmailBackend):
"""
An email backend for use during test sessions.
Emails are prepared for final sending, so they include all headers etc.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def send_messages(self, messages):
"""Redirect final messages to the dummy outbox"""
messages = [message.message() for message in messages]
mail.outbox.extend(messages)
return len(messages)
| mit | -1,930,372,365,059,580,700 | 40.528571 | 116 | 0.642071 | false |
MuffinMedic/CloudBot | plugins/horoscope.py | 1 | 2411 | # Plugin by Infinity - <https://github.com/infinitylabs/UguuBot>
import requests
from bs4 import BeautifulSoup
from sqlalchemy import Table, String, Column, select
from cloudbot import hook
from cloudbot.util import database
table = Table(
'horoscope',
database.metadata,
Column('nick', String, primary_key=True),
Column('sign', String)
)
def get_sign(db, nick):
row = db.execute(select([table.c.sign]).where(table.c.nick == nick.lower())).fetchone()
if not row:
return None
return row[0]
def set_sign(db, nick, sign):
res = db.execute(table.update().values(sign=sign.lower()).where(table.c.nick == nick.lower()))
if res.rowcount == 0:
db.execute(table.insert().values(nick=nick.lower(), sign=sign.lower()))
db.commit()
@hook.command(autohelp=False)
def horoscope(text, db, bot, nick, notice, notice_doc, reply, message):
"""[sign] - get your horoscope"""
signs = {
'aries': '1',
'taurus': '2',
'gemini': '3',
'cancer': '4',
'leo': '5',
'virgo': '6',
'libra': '7',
'scorpio': '8',
'sagittarius': '9',
'capricorn': '10',
'aquarius': '11',
'pisces': '12'
}
headers = {'User-Agent': bot.user_agent}
# check if the user asked us not to save his details
dontsave = text.endswith(" dontsave")
if dontsave:
sign = text[:-9].strip().lower()
else:
sign = text.strip().lower()
if not sign:
sign = get_sign(db, nick)
if not sign:
notice_doc()
return
sign = sign.strip().lower()
if sign not in signs:
notice("Unknown sign: {}".format(sign))
return
params = {
"sign": signs[sign]
}
url = "http://www.horoscope.com/us/horoscopes/general/horoscope-general-daily-today.aspx"
try:
request = requests.get(url, params=params, headers=headers)
request.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
reply("Could not get horoscope: {}. URL Error".format(e))
raise
soup = BeautifulSoup(request.text)
horoscope_text = soup.find("main", class_="main-horoscope").find("p").text
result = "\x02{}\x02 {}".format(sign, horoscope_text)
if text and not dontsave:
set_sign(db, nick, sign)
message(result)
| gpl-3.0 | -6,390,855,012,608,246,000 | 24.648936 | 98 | 0.591456 | false |
alexanderad/pony-standup-bot | tests/test_tasks_send_report_summary.py | 1 | 7806 | from __future__ import absolute_import
from datetime import datetime
from flexmock import flexmock
import pony.tasks
from tests.test_base import BaseTest
class SendReportSummaryTest(BaseTest):
def setUp(self):
super(SendReportSummaryTest, self).setUp()
self.bot.storage.set('report', {})
self.bot.plugin_config = {
'_dummy_team': {
'post_summary_to': '#dummy-channel',
'name': 'Dummy Team'
}
}
(flexmock(self.bot)
.should_receive('get_user_by_id')
.with_args('_user_id')
.and_return({
'id': '_user_id',
'color': 'aabbcc',
'profile': {
'real_name': 'Dummy User'
}
}))
(flexmock(self.bot)
.should_receive('get_user_by_name')
.with_args('@user')
.and_return({
'id': '_user_id',
'color': 'aabbcc',
'profile': {
'real_name': 'Dummy User'
}
}))
def test_get_user_avatar_is_failsafe(self):
(flexmock(self.slack)
.should_receive('api_call')
.with_args('users.list')
.and_return(dict(members=[])))
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.get_user_avatar(self.slack, '_user_id'))
def test_get_user_avatar(self):
(flexmock(self.slack)
.should_receive('api_call')
.with_args('users.list')
.and_return({
'members': [{
'id': '_user_id',
'profile': {
'image_192': '_image_192_url',
}
}]
}))
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertEqual(
task.get_user_avatar(self.slack, '_user_id'), '_image_192_url')
def test_get_user_avatar_lazy_loads_profiles(self):
(flexmock(self.slack)
.should_receive('api_call')
.with_args('users.list')
.and_return(dict(members=[]))
.times(1))
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.get_user_avatar(self.slack, '_user_id'))
self.assertIsNone(task.get_user_avatar(self.slack, '_user_id'))
self.assertIsNone(task.get_user_avatar(self.slack, '_user_id'))
def test_execute_no_reports(self):
self.bot.storage.set('report', {})
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.execute(self.bot, self.slack))
def test_execute_no_report_for_this_team(self):
self.bot.storage.set('report', {
datetime.utcnow().date(): {}
})
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.execute(self.bot, self.slack))
def test_execute_report_already_sent(self):
self.bot.storage.set('report', {
datetime.utcnow().date(): {
'_dummy_team': {
'reported_at': datetime.utcnow()
}
}
})
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.execute(self.bot, self.slack))
self.assertEqual(len(self.bot.fast_queue), 0)
def test_execute_user_not_seen_online(self):
self.bot.plugin_config['_dummy_team']['users'] = ['@user']
self.bot.storage.set('report', {
datetime.utcnow().date(): {
'_dummy_team': {
'reports': {
'_user_id': {
'seen_online': False
}
}
}
}
})
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.execute(self.bot, self.slack))
report = self.bot.fast_queue.pop()
self.assertIsInstance(report, pony.tasks.SendMessage)
self.assertEqual(report.to, '#dummy-channel')
self.assertIn('Summary for Dummy Team', report.text)
self.assertIn(
{'color': '#ccc', 'title': 'Offline', 'text': 'Dummy User'},
report.attachments
)
def test_execute_user_returned_no_response(self):
self.bot.plugin_config['_dummy_team']['users'] = ['@user']
self.bot.storage.set('report', {
datetime.utcnow().date(): {
'_dummy_team': {
'reports': {
'_user_id': {
'seen_online': True
}
}
}
}
})
task = pony.tasks.SendReportSummary('_dummy_team')
self.assertIsNone(task.execute(self.bot, self.slack))
report = self.bot.fast_queue.pop()
self.assertIsInstance(report, pony.tasks.SendMessage)
self.assertEqual(report.to, '#dummy-channel')
self.assertIn('Summary for Dummy Team', report.text)
self.assertIn(
{'color': '#ccc', 'title': 'No Response', 'text': 'Dummy User'},
report.attachments
)
def test_execute(self):
self.bot.plugin_config['_dummy_team']['users'] = ['@user']
self.bot.storage.set('report', {
datetime.utcnow().date(): {
'_dummy_team': {
'reports': {
'_user_id': {
'seen_online': True,
'reported_at': datetime.utcnow(),
'report': [
'line1',
'line2'
]
}
}
}
}
})
task = pony.tasks.SendReportSummary('_dummy_team')
(flexmock(task)
.should_receive('get_user_avatar')
.with_args(self.slack, '_user_id')
.and_return('_dummy_user_avatar_url'))
self.assertIsNone(task.execute(self.bot, self.slack))
report = self.bot.fast_queue.pop()
self.assertIsInstance(report, pony.tasks.SendMessage)
self.assertEqual(report.to, '#dummy-channel')
self.assertIn('Summary for Dummy Team', report.text)
report_line = report.attachments.pop()
self.assertEqual(report_line['title'], 'Dummy User')
self.assertEqual(report_line['text'], 'line1\nline2')
self.assertEqual(report_line['color'], '#aabbcc')
self.assertEqual(report_line['thumb_url'], '_dummy_user_avatar_url')
self.assertIsNotNone(report_line['ts'])
def test_execute_when_user_has_department_assigned(self):
self.bot.plugin_config['_dummy_team']['users'] = ['@user']
self.bot.storage.set('report', {
datetime.utcnow().date(): {
'_dummy_team': {
'reports': {
'_user_id': {
'seen_online': True,
'department': 'Dev Department',
'reported_at': datetime.utcnow(),
'report': [
'line1',
'line2'
]
}
}
}
}
})
task = pony.tasks.SendReportSummary('_dummy_team')
(flexmock(task)
.should_receive('get_user_avatar')
.with_args(self.slack, '_user_id')
.and_return('_dummy_user_avatar_url'))
self.assertIsNone(task.execute(self.bot, self.slack))
report = self.bot.fast_queue.pop()
report_line = report.attachments.pop()
self.assertEqual(report_line['footer'], 'Dev Department')
| mit | 8,936,455,861,183,878,000 | 32.646552 | 76 | 0.492954 | false |
DirectXMan12/datanozzle | datanozzle/client.py | 1 | 5713 | # datagrepper-client -- A Python client for datagrepper
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import copy
import datetime
import collections
import urllib.parse as urlparse
import requests
def _filter_arg(name, multiple=True):
if multiple:
def filter_on_arg_mult(self, *args):
g = copy.deepcopy(self)
g._args.setdefault(name, [])
g._args[name].extend(args)
return g
return filter_on_arg_mult
else:
def filter_on_arg(self, arg):
g = copy.deepcopy(self)
g._args[name] = arg
return g
return filter_on_arg
class Entry(collections.Mapping):
__slots__ = ('certificate', 'signature', 'meta',
'index', 'timestamp', 'topic', '_msg')
def __init__(self, json):
self.certificate = json['certificate']
self.signature = json['signature']
self.meta = json.get('meta', {})
self.index = json['i']
self.timestamp = datetime.datetime.fromtimestamp(
float(json['timestamp']))
self.topic = json['topic']
self._msg = json['msg']
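    # Illustrative shape of one raw datagrepper message consumed above
    # (field values are made up):
    #   {'i': 1, 'timestamp': 1420070400.0, 'topic': 'org.example.topic',
    #    'certificate': '...', 'signature': '...', 'meta': {}, 'msg': {...}}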
def __getitem__(self, key):
return self._msg[key]
def __iter__(self):
return iter(self._msg)
def __len__(self):
        return len(self._msg)
def __repr__(self):
return ('<Entry[{topic} -- {ind} @ {ts}] {msg}>').format(
topic=self.topic, ind=self.index,
cert=self.certificate, sig=self.signature,
ts=self.timestamp, msg=self._msg)
# TODO(directxman12): write from_id
class Grepper(object):
def __init__(self, target='https://apps.fedoraproject.org/datagrepper/'):
self._base = target
if self._base[-1] != '/':
self._base += '/'
self._args = {}
self._page_limit = None
def _req(self):
return requests.get(self._base + '/raw', params=self._args)
def _parse_json(self, json):
for msg in json['raw_messages']:
yield Entry(msg)
# TODO(directxman12): define a __repr__
# def __repr__(self):
def __iter__(self):
g = copy.deepcopy(self)
pg = g._args.get('page', 1)
r = g._req()
json = r.json()
yield from g._parse_json(json)
total_pages = json['pages']
if g._page_limit is not None and total_pages > g._page_limit:
total_pages = g._page_limit
pg += 1
max_pg = total_pages + 1
while pg < max_pg:
g._args['page'] = pg
r = g._req()
json = r.json()
yield from g._parse_json(json)
pg += 1
# formatting
def take(self, pages):
if pages is None:
raise ValueError("You must specify a number of pages.")
g = copy.deepcopy(self)
g._page_limit = pages
return g
def skip(self, pages):
if pages is None:
pages = 0
g = copy.deepcopy(self)
g._args['page'] = pages + 1
return g
# TODO(directxman12): with_chrome? with_size?
@property
def ascending(self):
g = copy.deepcopy(self)
g._args['order'] = 'asc'
return g
@property
def descending(self):
g = copy.deepcopy(self)
g._args['order'] = 'desc'
return g
@property
def grouped(self):
g = copy.deepcopy(self)
g._args['grouped'] = 'true'
return g
# pagination
def paginate(self, rows):
g = copy.deepcopy(self)
g._args['rows_per_page'] = rows
return g
def starting_at(self, start):
if isinstance(start, datetime.datetime):
start = start.timestamp()
g = copy.deepcopy(self)
g._args['start'] = start
return g
def ending_at(self, end):
if isinstance(end, datetime.datetime):
end = end.timestamp()
g = copy.deepcopy(self)
g._args['end'] = end
return g
def delta_seconds(self, delta):
g = copy.deepcopy(self)
g._args['delta'] = delta
return g
_ALT_NAMES = {'containing': 'contains', 'rows': 'rows_per_page',
'paginate': 'rows_per_page', 'skip': 'page',
'starting_at': 'start', 'ending_at': 'end',
'delta_seconds': 'delta'}
def reset(self, name):
g = copy.deepcopy(self)
if name == 'take':
g._page_limit = None
else:
name = self._ALT_NAMES.get(name, name)
del g._args[name]
return g
# query
by_user = _filter_arg('user')
by_package = _filter_arg('package')
by_category = _filter_arg('category')
by_topic = _filter_arg('topic')
containing = _filter_arg('contains')
without_user = _filter_arg('not_user')
without_package = _filter_arg('not_package')
without_category = _filter_arg('not_category')
without_topic = _filter_arg('not_topic')
with_meta = _filter_arg('meta')
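# --- Illustrative usage sketch (editor addition). ---
# The username and topic are placeholders; any datagrepper-compatible values work.
# Note that iterating a Grepper issues real HTTP requests against the target URL.
def _grepper_example():
    g = Grepper()
    query = (g.by_user('example-user')
              .by_topic('org.fedoraproject.prod.git.receive')
              .descending
              .take(2))
    # Every filter returns a fresh copy, so the original 'g' is left untouched.
    return [entry.topic for entry in query]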
| gpl-2.0 | -5,897,578,573,471,983,000 | 25.821596 | 77 | 0.560126 | false |
camptocamp/QGIS | python/plugins/processing/grass/ext/v_normal.py | 1 | 1237 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_normal.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.grass.ext import HtmlReportPostProcessor
def postProcessResults(alg):
HtmlReportPostProcessor.postProcessResults(alg)
| gpl-2.0 | 7,058,238,745,516,098,000 | 41.655172 | 75 | 0.421989 | false |
electricity345/Full.Text.Classification.Thesis | src/classify.phrase.match/src/text.py | 1 | 13082 | import codecs
import htmlentitydefs
import json
import logging
import nltk # Uses NLTK Version 2.0b9
import os
import re
import unicodedata
class positive_match:
def __init__(self, match, offset):
self.match = match
self.offset = offset
def getMatch(self):
return self.match
def getOffset(self):
return self.offset
def printMatch(self):
log = logging.getLogger('classify')
log.debug("match = %s ;; offset = %s" % (self.match, self.offset))
class text:
def __init__(self, gdbm_files, filter_file, path, category):
self.category = category
self.filter_file = filter_file
self.gdbm_files = gdbm_files
self.path = path
return
def processUnicodeText(self, tokens):
log = logging.getLogger('classify')
log.debug("text.processUnicodeText()")
log.debug("tokens = %s" % tokens)
symbols = [".", "\&", "'", "-", "/", ","] # Punctuation that will be removed individually from each token
punctuation = {0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22, 0x2014:0x2D} # Unicode to ASCII equivalent
matches = [] # All matches found in the document
# Takes a list of tokenized words and adds them into a hash with the key = token and value = location of token in text (offset)
for index in range(len(tokens)):
token_possibilities = []
log.debug("unmodifed token = %s ;; index = %s" % (tokens[index], index))
# Converts Unicode Punctuation to ASCII equivalent - ADD ENTRIES AS NECESSARY
token = tokens[index].translate(punctuation).encode('ascii', 'ignore')
log.debug("token translate = %s" % token)
token_possibilities.append(token)
# Converts Unicode to ASCII equivalent - If no equivalent is found, it ignores the unicode
token1 = unicodedata.normalize('NFKD', tokens[index]).encode('ascii', 'ignore')
log.debug("token normalize = %s" % token1)
if token != token1:
log.debug("token != token1")
token_possibilities.append(token1)
log.debug("token possibilities = %s" % token_possibilities)
for token in token_possibilities:
potential_match = []
offset_match = []
token = re.sub("[^\&/\w\d.',-]", "", token) # Removes all characters that aren't words, digits, ', ".", "-", "/", "&", or ","
token = token.lower()
log.debug("token = %s ;; index = %s" % (token, index))
if token == "":
log.debug("token is empty string")
continue
                # If the chosen category is "geography", we optimize by requiring the initial word to have its first letter upper-cased.
# This helps to reduce the number of false positives found.
# Case: City of Industry ;; (London)
if self.category == "geography" and tokens[index][0].isupper() == False:
if len(tokens[index]) > 1 and tokens[index][1].isupper() == False:
continue
                # Peeks at the next 4 words following the current key's location, appending one word at a time to see if the
                # resulting phrase is found in a related category dbm file
for offset in range(5):
if index + offset >= len(tokens):
break
single_word_possibilities = [] # Possible variants for a given word
# Gets word from text without any modifications to it
word = tokens[index + offset].lower()
word1 = word.translate(punctuation).encode('ascii', 'ignore')
log.debug("word 1 translate = %s" % word1)
if word1 != "":
single_word_possibilities.append(word1)
word2 = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore')
log.debug("word 2 normalize = %s" % word2)
if word1 != word2:
log.debug("word1 != word2")
single_word_possibilities.append(word2)
offset_match.append(index + offset)
log.debug("word = %s ;; offset = %s" % (word, index + offset))
possible_words = single_word_possibilities[:] # Copies list
for word in single_word_possibilities:
# Removes all symbols except ".", ', "/", "-", and "," from the word in question
new_word = re.sub("[^\&/\w\d.',-]", "", word)
if new_word != word:
log.debug("[new_word != word] = %s" % new_word)
possible_words.append(new_word)
                        # Checks whether the word contains any of the specified punctuation. If it does, it removes each punctuation mark individually and
                        # adds the newly created words back to the single_word_possibilities list for re-evaluation.
if re.search("[\&/.',-]", new_word):
for element in symbols:
regular_expression = "[%s]" % element
if re.search(regular_expression, new_word):
new_words = re.split(regular_expression, new_word)
log.debug("new words = %s ;; re = %s" % (new_words, regular_expression))
for w in new_words:
new_word1 = w.rstrip().lstrip()
if new_word1 == "":
log.debug("new word is empty string")
continue
elif len(new_word1) < 2:
log.debug("new word has less than 2 characters = %s" % new_word1)
continue
element_seen = 0
for e in possible_words:
if new_word1 == e:
element_seen = 1
break
if element_seen == 0:
possible_words.append(new_word1)
single_word_possibilities.append(new_word1)
single_word_possibilities = possible_words[:]
log.debug("potential match - before = %s" % potential_match)
if not potential_match:
for word in single_word_possibilities:
potential_match.append(word)
elif single_word_possibilities:
tmp = []
for phrase in potential_match:
for word in single_word_possibilities:
potential_word = phrase + " " + word
tmp.append(potential_word)
potential_match = tmp
log.debug("potential match - after = %s" % potential_match)
# Iterates through all of the related category dbm files and sees if the potential match is found in any of them
# gdbm_files contains a list of gdbm_file objects that contain [path, gdbm_obj]
for gdbm_obj in self.gdbm_files:
for phrase in potential_match:
if phrase in gdbm_obj[1]:
log.debug("phrase matches = %s" % phrase)
log.debug("match offset = %s" % offset_match)
# Ignore matches that are just numbers
if phrase.isdigit():
log.debug("phrase match are digits = %s" % phrase)
continue
# If the chosen category is "geography," ignore matches that are found in the filter dbm file
if self.category == "geography" and phrase in self.filter_file:
log.debug("phrase match is in filter dbm = %s" % phrase)
continue
match_offset = offset_match[:] # Makes copy of offset_match
match_found = positive_match(phrase, match_offset)
matches.append(match_found)
        # Eliminates duplicates found among all the matches by making sure that no two matches have the same offset
matches = sorted(matches, key=lambda positive_match: positive_match.offset)
all_matches = []
for match in matches:
found = 0
if not all_matches:
all_matches.append(match)
continue
match_offset = match.getOffset()
log.debug("match offset = %s" % match_offset)
for element in all_matches:
element_offset = element.getOffset()
for index in element_offset:
if match_offset[0] == index:
# The case where the offset is found in the previous stored entry and the current match has MORE words than the previous match
# (Ex) chicago and offset = [923] versus chicago bears and offset = [923, 924]
if len(match_offset) > len(element_offset):
found = 1
# The case where the offset is found in the previous stored entry and the current match has LESS words than the previous match
# (Ex) baltimore ravens and offset = [880, 881] versus ravens and offset = [881]
elif len(match_offset) < len(element_offset):
found = 2
# The case where the offset is found in previous stored entry and current match has the SAME number of words as the previous match
# (Ex) dallas and offset = [24] versus dallas and offset = [24]
elif len(match_offset) == len(element_offset) and match.getMatch() == element.getMatch():
found = 2
if found == 0: # The offsets have not been seen yet
all_matches.append(match)
elif found == 1:
all_matches[-1] = match
elif found == 2:
continue
return all_matches
    # Processes an html file. Assumes the html file contains html entities that need to be unescaped and converted to unicode.
    # The function converts html entities to unicode, tokenizes the entire text, and sends it for processing.
def processUnicodeString(self, string):
log = logging.getLogger('classify')
log.debug("text.processUnicodeString()")
# Html entities consist of the format &...; What we want is the ... portion. That is why we separated into a group in the RE.
string_unicode = re.sub("&(#?\\w+);", self.substituteEntity, string)
log.debug("string unicode = %s" % string_unicode)
token = nltk.tokenize.WhitespaceTokenizer().tokenize(string_unicode)
#token = nltk.wordpunct_tokenize(string_unicode)
matches = self.processUnicodeText(token)
return matches
    # Processes a text file. Assumes that the text file contains raw UTF-8 encoded text.
    # The function decodes the text into unicode, tokenizes the entire text, and sends it for processing.
def processUTFString(self, string):
log = logging.getLogger('classify')
log.debug("text.processUTFString()")
log.debug("string = %s" % string)
string_utf = string.decode("utf-8")
log.debug("string utf = %s" % string_utf)
token = nltk.tokenize.WhitespaceTokenizer().tokenize(string_utf)
#token = nltk.wordpunct_tokenize(string_ascii)
matches = self.processUnicodeText(token)
return matches
    # Converts every html entity to its corresponding unicode character
def substituteEntity(self, match):
log = logging.getLogger('classify')
name = match.group(1)
if name in htmlentitydefs.name2codepoint:
return unichr(htmlentitydefs.name2codepoint[name])
elif name.startswith("#"):
try:
return unichr(int(name[1:]))
except:
pass
log.debug("Cannot replace html entities with corresponding UTF-8 characters")
return '?'
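# --- Illustrative usage sketch (editor addition). ---
# The gdbm handles, filter file, path and category below are assumptions for
# demonstration; in the real pipeline they are prepared by the classification driver.
def _text_example(gdbm_files, filter_file):
    classifier = text(gdbm_files, filter_file, "/tmp/doc.html", "geography")
    handle = codecs.open(classifier.path, "r", "utf-8")
    matches = classifier.processUnicodeString(handle.read())
    handle.close()
    for match in matches:
        match.printMatch()
    return matches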
| mit | -7,547,081,844,341,308,000 | 47.273063 | 154 | 0.515365 | false |
daniele-athome/kontalk-legacy-xmppserver | kontalk/xmppserver/component/resolver.py | 1 | 10926 | # -*- coding: utf-8 -*-
"""Kontalk XMPP resolver component."""
"""
Kontalk XMPP server
Copyright (C) 2014 Kontalk Devteam <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import base64
from datetime import datetime
from copy import deepcopy
from twisted.python import failure
from twisted.internet import defer, reactor, task, threads
from twisted.words.protocols.jabber.xmlstream import XMPPHandler
from twisted.words.xish import domish
from twisted.words.protocols.jabber import jid, error, xmlstream
from wokkel import component
from kontalk.xmppserver import log, storage, util, xmlstream2, version, keyring
class IQHandler(XMPPHandler):
"""
Handles IQ stanzas.
@type parent: L{Resolver}
"""
def connectionInitialized(self):
self.xmlstream.addObserver("/iq[@type='get']/query[@xmlns='%s']" % (xmlstream2.NS_IQ_LAST, ), self.last_activity, 100)
self.xmlstream.addObserver("/iq[@type='get']/query[@xmlns='%s']" % (xmlstream2.NS_IQ_VERSION, ), self.version, 100)
self.xmlstream.addObserver("/iq[@type='result']", self.parent.bounce, 100)
self.xmlstream.addObserver("/iq/query", self.parent.error, 80)
def version(self, stanza):
if not stanza.consumed:
stanza.consumed = True
if stanza['to'] == self.parent.network:
response = xmlstream.toResponse(stanza, 'result')
query = domish.Element((xmlstream2.NS_IQ_VERSION, 'query'))
query.addElement((None, 'name'), content=version.NAME)
query.addElement((None, 'version'), content=version.VERSION)
response.addChild(query)
self.send(response)
else:
# send resolved stanza to router
self.send(stanza)
def last_activity(self, stanza):
if not stanza.consumed:
stanza.consumed = True
if stanza['to'] == self.parent.network:
# server uptime
seconds = self.parent.uptime()
response = xmlstream.toResponse(stanza, 'result')
response.addChild(domish.Element((xmlstream2.NS_IQ_LAST, 'query'), attribs={'seconds': str(int(seconds))}))
self.send(response)
else:
# seconds ago user was last seen
to = jid.JID(stanza['to'])
def found_latest(latest, stanza):
if latest:
log.debug("found latest! %r" % (latest, ))
response = xmlstream.toResponse(stanza, 'result')
query = domish.Element((xmlstream2.NS_IQ_LAST, 'query'), attribs={ 'seconds' : str(latest[1]) })
response.addChild(query)
self.send(response)
log.debug("response sent: %s" % (response.toXml(), ))
else:
log.debug("no latest found! sending back error")
# TODO send error
def _abort(stanzaId, callback, data):
log.debug("iq/last broadcast request timed out!")
self.xmlstream.removeObserver("/iq[@id='%s']" % stanza['id'], find_latest)
if not callback.called:
#callback.errback(failure.Failure(internet_error.TimeoutError()))
callback.callback(data['latest'])
def find_latest(stanza, data, callback, timeout):
log.debug("iq/last: %s" % (stanza.toXml(), ))
data['count'] += 1
seconds = int(stanza.query['seconds'])
if not data['latest'] or seconds < data['latest'][1]:
# no need to parse JID here
data['latest'] = (stanza['from'], seconds)
if int(stanza.query['seconds']) == 0 or data['count'] >= data['max']:
log.debug("all replies received, stop watching iq %s" % (stanza['id'], ))
timeout.cancel()
self.xmlstream.removeObserver("/iq[@id='%s']" % stanza['id'], find_latest)
if not callback.called:
callback.callback(data['latest'])
tmpTo = jid.JID(tuple=(to.user, to.host, to.resource))
lastIq = domish.Element((None, 'iq'))
lastIq['id'] = stanza['id']
lastIq['type'] = 'get'
lastIq['from'] = self.parent.network
lastIq.addElement((xmlstream2.NS_IQ_LAST, 'query'))
# data
data = {
# max replies that can be received
'max': len(self.parent.keyring.hostlist()),
# number of replies received so far
'count': 0,
# contains a tuple with JID and timestamp of latest seen user
'latest': None,
}
# final callback
callback = defer.Deferred()
callback.addCallback(found_latest, stanza)
# timeout of request
timeout = reactor.callLater(self.parent.cache.MAX_LOOKUP_TIMEOUT, _abort, stanzaId=lastIq['id'], data=data, callback=callback)
# request observer
self.xmlstream.addObserver("/iq[@id='%s']" % lastIq['id'], find_latest, 100, data=data, callback=callback, timeout=timeout)
# send iq last activity to the network
for server in self.parent.keyring.hostlist():
tmpTo.host = server
lastIq['to'] = tmpTo.full()
self.send(lastIq)
class Resolver(xmlstream2.SocketComponent):
"""
Kontalk resolver XMPP handler.
This component resolves network JIDs in stanzas (kontalk.net) into server
JIDs (prime.kontalk.net), altering the "to" attribute, then it bounces the
stanza back to the router.
@ivar subscriptions: a map of active user subscriptions (key=watched, value=subscribers)
@type subscriptions: L{dict}
@ivar whitelists: a map of user whitelists (key=user, value=list(allowed_users))
@type whitelists: L{dict}
@ivar blacklists: a map of user blacklists (key=user, value=list(blocked_users))
@type blacklists: L{dict}
@ivar cache: a local JID cache
@type cache: L{JIDCache}
"""
protocolHandlers = (
JIDCache,
PresenceHandler,
RosterHandler,
IQHandler,
PrivacyListHandler,
MessageHandler,
)
WHITELIST = 1
BLACKLIST = 2
def __init__(self, config):
router_cfg = config['router']
for key in ('socket', 'host', 'port'):
if key not in router_cfg:
router_cfg[key] = None
router_jid = '%s.%s' % (router_cfg['jid'], config['host'])
xmlstream2.SocketComponent.__init__(self, router_cfg['socket'], router_cfg['host'], router_cfg['port'], router_jid, router_cfg['secret'])
self.config = config
# this is for queueing keyring thread requests
reactor.suggestThreadPoolSize(1)
self.logTraffic = config['debug']
self.network = config['network']
self.servername = config['host']
self.start_time = time.time()
storage.init(config['database'])
self.keyring = keyring.Keyring(storage.MySQLNetworkStorage(), config['fingerprint'], self.network, self.servername, True)
self.presencedb = storage.MySQLPresenceStorage()
self.subscriptions = {}
self.whitelists = {}
self.blacklists = {}
# protocol handlers here!!
for handler in self.protocolHandlers:
inst = handler()
if handler == JIDCache:
self.cache = inst
inst.setHandlerParent(self)
def uptime(self):
return time.time() - self.start_time
def _authd(self, xs):
component.Component._authd(self, xs)
log.debug("connected to router")
xs.addObserver("/iq", self.iq, 500)
xs.addObserver("/presence", self.presence, 500)
# bind to network route
bind = domish.Element((None, 'bind'))
bind['name'] = self.network
bind.addElement((None, 'private'))
xs.send(bind)
def _disconnected(self, reason):
component.Component._disconnected(self, reason)
log.debug("lost connection to router (%s)" % (reason, ))
def iq(self, stanza):
to = stanza.getAttribute('to')
if to is not None:
to = jid.JID(to)
# sending to full JID, forward to router
if to.resource is not None:
self.bounce(stanza)
# sending to bare JID: handled by handlers
def presence(self, stanza):
to = stanza.getAttribute('to')
if to is not None:
to = jid.JID(to)
# sending to full JID, forward to router
if to.resource is not None:
self.bounce(stanza)
# sending to bare JID: handled by handlers
def error(self, stanza, condition='service-unavailable'):
if not stanza.consumed:
log.debug("error %s" % (stanza.toXml(), ))
stanza.consumed = True
e = error.StanzaError(condition, 'cancel')
self.send(e.toResponse(stanza))
def bounce(self, stanza, *args, **kwargs):
"""Send the stanza to the router."""
if not stanza.consumed:
stanza.consumed = True
self.send(stanza, *args, **kwargs)
def result(self, stanza):
"""Sends back a result response stanza. Used for IQ stanzas."""
stanza = xmlstream.toResponse(stanza, 'result')
self.send(stanza)
def build_vcard(self, userid, iq):
"""Adds a vCard to the given iq stanza."""
fpr = self.keyring.get_fingerprint(userid)
keydata = self.keyring.get_key(userid, fpr)
# add vcard
vcard = iq.addElement((xmlstream2.NS_XMPP_VCARD4, 'vcard'))
vcard_key = vcard.addElement((None, 'key'))
vcard_data = vcard_key.addElement((None, 'uri'))
vcard_data.addContent(xmlstream2.DATA_PGP_PREFIX + base64.b64encode(keydata))
return iq
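# --- Illustrative configuration sketch (editor addition). ---
# Only the keys below are taken from what Resolver.__init__ actually reads
# (router, network, host, database, fingerprint, debug); every value is a
# placeholder, not a recommended deployment setting.
def _example_resolver_config():
    return {
        'router': {
            'jid': 'resolver',
            'secret': 'changeme',
            'socket': None,
            'host': '127.0.0.1',
            'port': 5347,
        },
        'network': 'kontalk.net',
        'host': 'prime.kontalk.net',
        'database': {},
        'fingerprint': 'PLACEHOLDER-FINGERPRINT',
        'debug': False,
    }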
| gpl-3.0 | 7,805,729,909,477,799,000 | 38.444043 | 145 | 0.576515 | false |
dwdii/emotional-faces | src/emotion_model.py | 1 | 20888 |
import csv
import os
import time
from scipy import misc
import keras.callbacks as cb
import keras.utils.np_utils as np_utils
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import MaxPooling2D, ZeroPadding2D
from keras.preprocessing.image import ImageDataGenerator
def imageDataGenTransform(img, y):
# Using keras ImageDataGenerator to generate random images
datagen = ImageDataGenerator(
featurewise_std_normalization=False,
rotation_range = 20,
width_shift_range = 0.10,
height_shift_range = 0.10,
shear_range = 0.1,
zoom_range = 0.1,
horizontal_flip = True)
#x = img_to_array(img)
x = img.reshape(1, 1, img.shape[0], img.shape[1])
j = 0
for imgT, yT in datagen.flow(x, y, batch_size = 1, save_to_dir = None):
img2 = imgT
break
return img2
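# --- Illustrative usage sketch (editor addition). ---
# Assumes 'img' is a single-channel numpy array (height x width) as produced by
# the image loading elsewhere in this project; the label array is a placeholder.
def _augment_example(img):
    import numpy as np  # local import so the sketch stays self-contained
    y = np.zeros((1, 1))
    augmented = imageDataGenTransform(img, y)
    # flow() yields batches shaped (1, 1, height, width); drop the batch and channel axes.
    return augmented.reshape(img.shape[0], img.shape[1])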
def emotion_model_v1(outputClasses, verbose=False):
"""https://www.kaggle.com/somshubramajumdar/digit-recognizer/deep-convolutional-network-using-keras"""
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 64
nb_filters_3 = 128
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v2(outputClasses, verbose=False):
"""https://www.kaggle.com/somshubramajumdar/digit-recognizer/deep-convolutional-network-using-keras"""
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 64
nb_filters_3 = 128
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v3(outputClasses, verbose=False):
"""https://www.kaggle.com/somshubramajumdar/digit-recognizer/deep-convolutional-network-using-keras"""
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 64
nb_filters_3 = 128
nb_filters_4 = 128
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_4, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v3_1(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 64
nb_filters_3 = 128
nb_filters_4 = 128
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
#model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", input_shape=(1, 350, 350)))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_4, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(64, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v3_2(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 32
nb_filters_3 = 64
nb_filters_4 = 128
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
#model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", input_shape=(1, 350, 350)))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_4, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.3))
model.add(Dense(64, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v4(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 3
nb_filters_1 = 32
nb_filters_2 = 64
nb_filters_3 = 128
nb_filters_4 = 256
nb_filters_5 = 256
dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_4, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(nb_filters_5, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v5(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 20
nb_filters_1 = 32
#nb_filters_2 = 64
#nb_filters_3 = 128
#nb_filters_4 = 256
#nb_filters_5 = 512
#dropout = 0.25
#nb_classes = 10
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((5, 5), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((10, 10)))
#model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
#model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
#model.add(Dropout(0.25))
#model.add(Dense(nb_filters_5, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v6(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 30 # up from 20 to 30
nb_filters_1 = 32
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((5, 5), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((10, 10)))
#model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
#model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
#model.add(Dropout(0.25))
#model.add(Dense(nb_filters_5, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v7(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 40 # up from 30 to 40
nb_filters_1 = 32
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((5, 5), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
#model.add(ZeroPadding2D((10, 10)))
#model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
#model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
#model.add(Dropout(0.25))
#model.add(Dense(nb_filters_5, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v8(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 30 # Back to 30 from 40
nb_filters_1 = 32
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((5, 5), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(5, 5))) # 5,5 from 2,2
#model.add(ZeroPadding2D((10, 10)))
#model.add(Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu"))
#model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
#model.add(Dropout(0.25))
#model.add(Dense(nb_filters_5, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def emotion_model_v9(outputClasses, verbose=False):
nb_pool = 2
nb_conv = 30 # up from 20 to 30
nb_filters_1 = 32
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(ZeroPadding2D((5, 5), input_shape=(1, 350, 350), ))
model.add(Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu"))
#model.add(MaxPooling2D(strides=(2, 2)))
model.add(ZeroPadding2D((5, 5)))
model.add(Convolution2D(32, nb_conv, nb_conv, activation="relu"))
model.add(MaxPooling2D(strides=(2, 2)))
model.add(Flatten())
#model.add(Dropout(0.25))
#model.add(Dense(nb_filters_5, activation="relu"))
model.add(Dense(outputClasses, activation="softmax"))
if verbose:
print (model.summary())
# rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def cnn_model_jhamski(outputClasses, input_shape=(3, 150, 150), verbose=False):
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(outputClasses))
model.add(Activation('softmax'))
if verbose:
print (model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def emotion_model_jh_v2(outputClasses, input_shape=(3, 150, 150), verbose=False):
model = Sequential()
model.add(Convolution2D(32, 5, 5, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(outputClasses))
model.add(Activation('softmax'))
if verbose:
print (model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def emotion_model_jh_v3(outputClasses, input_shape=(3, 150, 150), verbose=False):
model = Sequential()
model.add(Convolution2D(32, 5, 5, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(outputClasses))
model.add(Activation('softmax'))
if verbose:
print (model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def emotion_model_jh_v4(outputClasses, input_shape=(3, 150, 150), verbose=False):
model = Sequential()
model.add(Convolution2D(32, 8, 8, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 4, 4))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 1, 1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(outputClasses))
model.add(Activation('softmax'))
if verbose:
print (model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def emotion_model_jh_v5(outputClasses, input_shape=(3, 150, 150), verbose=False):
model = Sequential()
model.add(Convolution2D(32, 8, 8, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
#model.add(Dropout(0.4))
model.add(Dense(outputClasses))
model.add(Activation('softmax'))
if verbose:
print (model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
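# --- Illustrative end-to-end sketch (editor addition). ---
# The 8 output classes, array shapes and train/test split are assumptions; the
# real data loading lives elsewhere in this project. run_network() is defined below.
def _train_example(X_train, X_test, y_train, y_test):
    model = emotion_model_jh_v5(8, input_shape=X_train.shape[1:], verbose=True)
    model, losses = run_network((X_train, X_test, y_train, y_test), model,
                                epochs=10, batch=32)
    return model, losses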
class LossHistory(cb.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
batch_loss = logs.get('loss')
self.losses.append(batch_loss)
def run_network(data, model, epochs=20, batch=256, verbosity=2):
"""
    :param data: tuple of (X_train, X_test, y_train, y_test)
    :param model: compiled Keras model to train
    :param epochs: number of training epochs
    :param batch: batch size used during fitting
    :param verbosity: Keras fit() verbosity level
    :return: (trained model, list of per-batch training losses)
"""
try:
start_time = time.time()
history = LossHistory()
X_train, X_test, y_train, y_test = data
        y_trainC = np_utils.to_categorical(y_train)
y_testC = np_utils.to_categorical(y_test)
print y_trainC.shape
print y_testC.shape
print 'Training model...'
model.fit(X_train, y_trainC, nb_epoch=epochs, batch_size=batch,
callbacks=[history],
validation_data=(X_test, y_testC), verbose=verbosity)
print "Training duration : {0}".format(time.time() - start_time)
score = model.evaluate(X_test, y_testC, batch_size=16, verbose=0)
print "Network's test score [loss, accuracy]: {0}".format(score)
return model, history.losses
except KeyboardInterrupt:
print ' KeyboardInterrupt'
return model, history.losses | apache-2.0 | 5,617,332,157,175,504,000 | 31.84434 | 106 | 0.632756 | false |
mikesname/ehri-collections | ehriportal/portal/migrations/0014_auto__add_field_collection_notes__add_unique_collection_identifier_rep.py | 1 | 17462 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Collection.notes'
db.add_column('portal_collection', 'notes',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding unique constraint on 'Collection', fields ['identifier', 'repository']
db.create_unique('portal_collection', ['identifier', 'repository_id'])
def backwards(self, orm):
# Removing unique constraint on 'Collection', fields ['identifier', 'repository']
db.delete_unique('portal_collection', ['identifier', 'repository_id'])
# Deleting field 'Collection.notes'
db.delete_column('portal_collection', 'notes')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'portal.authority': {
'Meta': {'object_name': 'Authority', '_ormbases': ['portal.Resource']},
'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type_of_entity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'portal.collection': {
'Meta': {'unique_together': "(('identifier', 'repository'),)", 'object_name': 'Collection', '_ormbases': ['portal.Resource']},
'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'accruals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'acquisition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'alternate_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'appraisal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'archival_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'arrangement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Authority']", 'null': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extent_and_medium': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'languages_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'location_of_copies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'location_of_originals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'physical_characteristics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_units_of_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
'reproduction_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scope_and_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'scripts_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'portal.contact': {
'Meta': {'object_name': 'Contact'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'portal.fuzzydate': {
'Meta': {'object_name': 'FuzzyDate'},
'circa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'date_set'", 'to': "orm['portal.Collection']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'precision': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'})
},
'portal.othername': {
'Meta': {'object_name': 'OtherName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'portal.place': {
'Meta': {'object_name': 'Place'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
},
'portal.property': {
'Meta': {'object_name': 'Property'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'portal.relation': {
'Meta': {'object_name': 'Relation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'portal.repository': {
'Meta': {'object_name': 'Repository', '_ormbases': ['portal.Resource']},
'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'buildings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'collecting_policies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disabled_access': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geocultural_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'holdings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('portal.thumbs.ImageWithThumbsField', [], {'name': "'logo'", 'sizes': '((100, 100), (300, 300))', 'max_length': '100', 'blank': 'True', 'null': 'True'}),
'maintenance_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opening_times': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reproduction_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'research_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type_of_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'portal.resource': {
'Meta': {'object_name': 'Resource'},
'created_on': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'portal.resourceimage': {
'Meta': {'object_name': 'ResourceImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('portal.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'name': "'image'", 'sizes': '((100, 100), (300, 300))'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['portal']
| mit | 374,222,808,844,784,830 | 82.152381 | 174 | 0.542664 | false |
brendano/twitter_geo_preproc | geo2_pipeline/preproc8/45_make_vocab/standalone_wc.py | 1 | 1387 | import re,sys,os,itertools
import ujson as json
## Umm, try to replicate config in 50_quadify ...
OPTS = {}
OPTS['min_date'] = '2009-08-03'
OPTS['max_date'] = '2012-09-30'
OPTS['msa_county_file'] = os.path.join(os.path.dirname(__file__), '../../../geo_metro/msa_counties.tsv')
OPTS['num_msas'] = 200
countyfips2regionid = {}
for line in open(OPTS['msa_county_file']).readlines()[1:]:
rank,name,countyfips = line.split('\t')
rank = int(rank)
if rank > OPTS['num_msas']: continue
countyfips = countyfips.strip().split(',')
for fips in countyfips:
countyfips2regionid[fips] = rank
def get_region(geodict):
county_fips = geodict['us_county']['geoid10']
return countyfips2regionid.get(county_fips, None)
def iterate_tweets():
for line in sys.stdin:
parts = line.rstrip('\n').split('\t')
date,user,geo,tweet = parts
date = date.split('T')[0]
if date < OPTS['min_date'] or date > OPTS['max_date']:
continue
region = get_region(json.loads(geo))
if region is None:
continue
yield user, tweet.split()
def stuff():
for user, tweets in itertools.groupby(iterate_tweets(), key=lambda (u,t): u):
wordset = set()
for _,toks in tweets:
for tok in toks:
wordset.add(tok)
for word in wordset:
print word
stuff()
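# Usage note (editor addition): the script expects tab-separated
# "date<TAB>user<TAB>geojson<TAB>tweet" lines on stdin and emits each distinct
# token once per user; e.g. (file names are assumptions):
#   cat tweets.tsv | python standalone_wc.py | sort | uniq -c > word_counts.txt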
| mit | -2,551,247,292,016,847,400 | 30.522727 | 104 | 0.599856 | false |
pierg75/pier-sosreport | sos/plugins/selinux.py | 1 | 2104 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
class SELinux(Plugin, RedHatPlugin):
"""SELinux access control
"""
plugin_name = 'selinux'
profiles = ('system', 'security', 'openshift')
option_list = [("fixfiles", 'Print incorrect file context labels',
'slow', False)]
packages = ('libselinux',)
def setup(self):
self.add_copy_spec([
'/etc/sestatus.conf',
'/etc/selinux'
])
self.add_cmd_output('sestatus')
state = self.get_command_output('getenforce')['output']
        if state.strip() != 'Disabled':
self.add_cmd_output([
'ps auxZww',
'sestatus -v',
'sestatus -b',
'selinuxdefcon root',
'selinuxconlist root',
'selinuxexeccon /bin/passwd',
'semanage -o' # deprecated, may disappear at some point
])
subcmds = [
'fcontext',
'user',
'port',
'login',
'node',
'interface',
'module'
]
for subcmd in subcmds:
self.add_cmd_output("semanage %s -l" % subcmd)
if self.get_option('fixfiles'):
self.add_cmd_output("restorecon -Rvn /", stderr=False)
# vim: set et ts=4 sw=4 :
| gpl-2.0 | -5,123,982,218,898,091,000 | 31.875 | 73 | 0.574144 | false |
chipx86/the-cure | thecure/layers.py | 1 | 5377 | import pygame
class SpriteQuadTree(object):
def __init__(self, rect, depth=6, parent=None):
depth -= 1
self.rect = rect
self.sprites = []
self.parent = parent
self.depth = depth
self.cx = self.rect.centerx
self.cy = self.rect.centery
self._moved_cnxs = {}
self._next_stamp = 1
if depth == 0:
self.nw_tree = None
self.ne_tree = None
self.sw_tree = None
self.se_tree = None
else:
quad_size = (rect.width / 2, rect.height / 2)
self.nw_tree = SpriteQuadTree(
pygame.Rect(rect.x, rect.y, *quad_size),
depth, self)
self.ne_tree = SpriteQuadTree(
pygame.Rect(self.cx, rect.y, *quad_size),
depth, self)
self.sw_tree = SpriteQuadTree(
pygame.Rect(rect.x, self.cy, *quad_size),
depth, self)
self.se_tree = SpriteQuadTree(
pygame.Rect(self.cx, self.cy, *quad_size),
depth, self)
def add(self, sprite):
if not self.parent and sprite.can_move:
self._moved_cnxs[sprite] = sprite.moved.connect(
lambda dx, dy: self._recompute_sprite(sprite))
# If this is a leaf node or the sprite is overlapping all quadrants,
# store it in this QuadTree's list of sprites. If it's in fewer
# quadrants, go through and add to each that it touches.
        if self.depth > 0:
trees = list(self._get_trees(sprite.rect))
assert len(trees) > 0
if len(trees) < 4:
for tree in trees:
tree.add(sprite)
return
assert sprite not in self.sprites
self.sprites.append(sprite)
sprite.quad_trees.add(self)
def remove(self, sprite):
if self.parent:
self.parent.remove(sprite)
return
assert sprite.quad_trees
for tree in sprite.quad_trees:
tree.sprites.remove(sprite)
sprite.quad_trees.clear()
if sprite.can_move:
cnx = self._moved_cnxs.pop(sprite)
cnx.disconnect()
def get_sprites(self, rect=None, stamp=None):
if stamp is None:
stamp = self._next_stamp
self._next_stamp += 1
for sprite in self.sprites:
if (getattr(sprite, '_quadtree_stamp', None) != stamp and
(rect is None or rect.colliderect(sprite.rect))):
sprite._quadtree_stamp = stamp
yield sprite
for tree in self._get_trees(rect):
for sprite in tree.get_sprites(rect, stamp):
yield sprite
def __iter__(self):
return self.get_sprites()
def _get_trees(self, rect):
if self.depth > 0:
if not rect or (rect.left <= self.cx and rect.top <= self.cy):
yield self.nw_tree
if not rect or (rect.right >= self.cx and rect.top <= self.cy):
yield self.ne_tree
if not rect or (rect.left <= self.cx and rect.bottom >= self.cy):
yield self.sw_tree
if not rect or (rect.right >= self.cx and rect.bottom >= self.cy):
yield self.se_tree
def _get_leaf_trees(self, rect):
trees = list(self._get_trees(rect))
if not trees or len(trees) == 4:
yield self
else:
for tree in trees:
for leaf in tree._get_leaf_trees(rect):
yield leaf
def _recompute_sprite(self, sprite):
assert sprite.quad_trees
if sprite.quad_trees != set(self._get_leaf_trees(sprite.rect)):
self.remove(sprite)
self.add(sprite)
class Layer(object):
def __init__(self, name, index, parent):
self.name = name
self.index = index
self.parent = parent
self.quad_tree = SpriteQuadTree(pygame.Rect(0, 0, *self.parent.size))
self.tick_sprites = []
def add(self, *objs):
for obj in objs:
obj.layer = self
self.update_sprite(obj)
if obj.use_quadtrees:
self.quad_tree.add(obj)
obj.on_added(self)
def remove(self, *objs):
for obj in objs:
self.update_sprite(obj, True)
if obj.use_quadtrees:
self.quad_tree.remove(obj)
obj.on_removed(self)
def update_sprite(self, sprite, force_remove=False):
assert sprite.layer == self
sprite.update_image()
if sprite.NEED_TICKS:
if sprite.visible and not force_remove:
self.tick_sprites.append(sprite)
else:
try:
self.tick_sprites.remove(sprite)
except ValueError:
# It may be gone now.
pass
def __iter__(self):
return iter(self.quad_tree)
def iterate_in_rect(self, rect):
return self.quad_tree.get_sprites(rect)
def tick(self):
for sprite in self.tick_sprites:
sprite.tick()
def start(self):
for sprite in self.quad_tree:
sprite.start()
def stop(self):
for sprite in self.quad_tree:
sprite.stop()
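# Minimal self-check sketch (not part of the game code): it exercises
# SpriteQuadTree on its own with a hypothetical stub sprite that only provides
# the attributes the tree actually touches (rect, can_move, quad_trees); real
# sprites in this project carry far more state.
if __name__ == '__main__':
    class _StubSprite(object):
        def __init__(self, rect):
            self.rect = rect
            self.can_move = False
            self.quad_trees = set()

    world = SpriteQuadTree(pygame.Rect(0, 0, 800, 600))
    stub = _StubSprite(pygame.Rect(10, 10, 32, 32))
    world.add(stub)

    # Query only the top-left corner of the world; the stub lives there.
    hits = list(world.get_sprites(pygame.Rect(0, 0, 100, 100)))
    assert hits == [stub]

    world.remove(stub)
    assert list(world.get_sprites()) == []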
| mit | 5,568,242,331,991,032,000 | 28.222826 | 78 | 0.525386 | false |
SnabbCo/neutron | neutron/db/l3_db.py | 1 | 46915 | # Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
# Maps API field to DB column
# API parameter name and Database column names may differ.
# Useful to keep the filtering between API and Database.
API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'}
class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron router."""
name = sa.Column(sa.String(255))
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
gw_port = orm.relationship(models_v2.Port, lazy='joined')
class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a floating IP address.
This IP address may or may not be allocated to a tenant, and may or
may not be associated with an internal port/ip address/router.
"""
floating_ip_address = sa.Column(sa.String(64), nullable=False)
floating_network_id = sa.Column(sa.String(36), nullable=False)
floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'),
nullable=False)
fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
fixed_ip_address = sa.Column(sa.String(64))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
# Additional attribute for keeping track of the router where the floating
# ip was associated in order to be able to ensure consistency even if an
    # asynchronous backend is unavailable when the floating IP is disassociated
last_known_router_id = sa.Column(sa.String(36))
status = sa.Column(sa.String(16))
class L3_NAT_db_mixin(l3.RouterPluginBase):
"""Mixin class to add L3/NAT router methods to db_plugin_base_v2."""
l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotify
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def _get_router(self, context, id):
try:
router = self._get_by_id(context, Router, id)
except exc.NoResultFound:
raise l3.RouterNotFound(router_id=id)
return router
def _make_router_dict(self, router, fields=None,
process_extensions=True):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
EXTERNAL_GW_INFO: None,
'gw_port_id': router['gw_port_id']}
if router['gw_port_id']:
nw_id = router.gw_port['network_id']
res[EXTERNAL_GW_INFO] = {'network_id': nw_id}
# NOTE(salv-orlando): The following assumes this mixin is used in a
# class inheriting from CommonDbMixin, which is true for all existing
# plugins.
if process_extensions:
self._apply_dict_extend_functions(
l3.ROUTERS, res, router)
return self._fields(res, fields)
def create_router(self, context, router):
r = router['router']
has_gw_info = False
if EXTERNAL_GW_INFO in r:
has_gw_info = True
gw_info = r[EXTERNAL_GW_INFO]
del r[EXTERNAL_GW_INFO]
tenant_id = self._get_tenant_id_for_create(context, r)
with context.session.begin(subtransactions=True):
# pre-generate id so it will be available when
# configuring external gw port
router_db = Router(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status="ACTIVE")
context.session.add(router_db)
if has_gw_info:
self._update_router_gw_info(context, router_db['id'], gw_info)
return self._make_router_dict(router_db, process_extensions=False)
def update_router(self, context, id, router):
r = router['router']
has_gw_info = False
gw_info = None
if EXTERNAL_GW_INFO in r:
has_gw_info = True
gw_info = r[EXTERNAL_GW_INFO]
del r[EXTERNAL_GW_INFO]
# check whether router needs and can be rescheduled to the proper
# l3 agent (associated with given external network);
# do check before update in DB as an exception will be raised
# in case no proper l3 agent found
candidates = None
if has_gw_info:
candidates = self._check_router_needs_rescheduling(
context, id, gw_info)
with context.session.begin(subtransactions=True):
if has_gw_info:
self._update_router_gw_info(context, id, gw_info)
router_db = self._get_router(context, id)
# Ensure we actually have something to update
if r.keys():
router_db.update(r)
if candidates:
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
l3_plugin.reschedule_router(context, id, candidates)
self.l3_rpc_notifier.routers_updated(
context, [router_db['id']])
return self._make_router_dict(router_db)
def _check_router_needs_rescheduling(self, context, router_id, gw_info):
"""Checks whether router's l3 agent can handle the given network
When external_network_bridge is set, each L3 agent can be associated
        with at most one external network. If the router's new external gateway
        is on another network, then the router needs to be rescheduled to the
        proper l3 agent.
If external_network_bridge is not set then the agent
can support multiple external networks and rescheduling is not needed
:return: list of candidate agents if rescheduling needed,
None otherwise; raises exception if there is no eligible l3 agent
associated with target external network
"""
# TODO(obondarev): rethink placement of this func as l3 db manager is
# not really a proper place for agent scheduling stuff
network_id = gw_info.get('network_id') if gw_info else None
if not network_id:
return
nets = self._core_plugin.get_networks(
context, {external_net.EXTERNAL: [True]})
# nothing to do if there is only one external network
if len(nets) <= 1:
return
# first get plugin supporting l3 agent scheduling
# (either l3 service plugin or core_plugin)
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if (not utils.is_extension_supported(
l3_plugin,
l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or
l3_plugin.router_scheduler is None):
# that might mean that we are dealing with non-agent-based
# implementation of l3 services
return
cur_agents = l3_plugin.list_l3_agents_hosting_router(
context, router_id)['agents']
for agent in cur_agents:
ext_net_id = agent['configurations'].get(
'gateway_external_network_id')
ext_bridge = agent['configurations'].get(
'external_network_bridge', 'br-ex')
if (ext_net_id == network_id or
(not ext_net_id and not ext_bridge)):
return
# otherwise find l3 agent with matching gateway_external_network_id
active_agents = l3_plugin.get_l3_agents(context, active=True)
router = {
'id': router_id,
'external_gateway_info': {'network_id': network_id}
}
candidates = l3_plugin.get_l3_agent_candidates(
router, active_agents)
if not candidates:
msg = (_('No eligible l3 agent associated with external network '
'%s found') % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
return candidates
def _create_router_gw_port(self, context, router, network_id):
# Port has no 'tenant-id', as it is hidden from user
gw_port = self._core_plugin.create_port(context.elevated(), {
'port': {'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_id': router['id'],
'device_owner': DEVICE_OWNER_ROUTER_GW,
'admin_state_up': True,
'name': ''}})
if not gw_port['fixed_ips']:
self._core_plugin.delete_port(context.elevated(), gw_port['id'],
l3_port_check=False)
msg = (_('No IPs available for external network %s') %
network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
router.gw_port = self._core_plugin._get_port(context.elevated(),
gw_port['id'])
context.session.add(router)
def _update_router_gw_info(self, context, router_id, info, router=None):
# TODO(salvatore-orlando): guarantee atomic behavior also across
# operations that span beyond the model classes handled by this
# class (e.g.: delete_port)
router = router or self._get_router(context, router_id)
gw_port = router.gw_port
# network_id attribute is required by API, so it must be present
network_id = info['network_id'] if info else None
if network_id:
network_db = self._core_plugin._get_network(context, network_id)
if not network_db.external:
msg = _("Network %s is not a valid external "
"network") % network_id
raise n_exc.BadRequest(resource='router', msg=msg)
# figure out if we need to delete existing port
if gw_port and gw_port['network_id'] != network_id:
fip_count = self.get_floatingips_count(context.elevated(),
{'router_id': [router_id]})
if fip_count:
raise l3.RouterExternalGatewayInUseByFloatingIp(
router_id=router_id, net_id=gw_port['network_id'])
with context.session.begin(subtransactions=True):
router.gw_port = None
context.session.add(router)
self._core_plugin.delete_port(context.elevated(),
gw_port['id'],
l3_port_check=False)
if network_id is not None and (gw_port is None or
gw_port['network_id'] != network_id):
subnets = self._core_plugin._get_subnets_by_network(context,
network_id)
for subnet in subnets:
self._check_for_dup_router_subnet(context, router_id,
network_id, subnet['id'],
subnet['cidr'])
self._create_router_gw_port(context, router, network_id)
def delete_router(self, context, id):
with context.session.begin(subtransactions=True):
router = self._get_router(context, id)
# Ensure that the router is not used
fips = self.get_floatingips_count(context.elevated(),
filters={'router_id': [id]})
if fips:
raise l3.RouterInUse(router_id=id)
device_filter = {'device_id': [id],
'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
ports = self._core_plugin.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=id)
#TODO(nati) Refactor here when we have router insertion model
vpnservice = manager.NeutronManager.get_service_plugins().get(
constants.VPN)
if vpnservice:
vpnservice.check_router_in_use(context, id)
context.session.delete(router)
# Delete the gw port after the router has been removed to
# avoid a constraint violation.
device_filter = {'device_id': [id],
'device_owner': [DEVICE_OWNER_ROUTER_GW]}
ports = self._core_plugin.get_ports(context.elevated(),
filters=device_filter)
if ports:
self._core_plugin._delete_port(context.elevated(),
ports[0]['id'])
self.l3_rpc_notifier.router_deleted(context, id)
def get_router(self, context, id, fields=None):
router = self._get_router(context, id)
return self._make_router_dict(router, fields)
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'router', limit, marker)
return self._get_collection(context, Router,
self._make_router_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_routers_count(self, context, filters=None):
return self._get_collection_count(context, Router,
filters=filters)
def _check_for_dup_router_subnet(self, context, router_id,
network_id, subnet_id, subnet_cidr):
try:
rport_qry = context.session.query(models_v2.Port)
rports = rport_qry.filter_by(device_id=router_id)
# It's possible these ports are on the same network, but
# different subnets.
new_ipnet = netaddr.IPNetwork(subnet_cidr)
for p in rports:
for ip in p['fixed_ips']:
if ip['subnet_id'] == subnet_id:
msg = (_("Router already has a port on subnet %s")
% subnet_id)
raise n_exc.BadRequest(resource='router', msg=msg)
sub_id = ip['subnet_id']
cidr = self._core_plugin._get_subnet(context.elevated(),
sub_id)['cidr']
ipnet = netaddr.IPNetwork(cidr)
match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr])
if match1 or match2:
data = {'subnet_cidr': subnet_cidr,
'subnet_id': subnet_id,
'cidr': cidr,
'sub_id': sub_id}
msg = (_("Cidr %(subnet_cidr)s of subnet "
"%(subnet_id)s overlaps with cidr %(cidr)s "
"of subnet %(sub_id)s") % data)
raise n_exc.BadRequest(resource='router', msg=msg)
except exc.NoResultFound:
pass
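    # Illustrative note (comment only, never executed): the overlap test above
    # leans on netaddr.all_matching_cidrs(); a quick sketch of the check with
    # made-up CIDRs:
    #
    #   >>> import netaddr
    #   >>> netaddr.all_matching_cidrs(netaddr.IPNetwork('10.0.1.0/24'),
    #   ...                            ['10.0.0.0/16'])
    #   [IPNetwork('10.0.0.0/16')]
    #
    # A non-empty result in either direction means the candidate subnet and an
    # existing router port's subnet overlap, so BadRequest is raised above.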
def add_router_interface(self, context, router_id, interface_info):
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
# make sure port update is committed
with context.session.begin(subtransactions=True):
if 'subnet_id' in interface_info:
msg = _("Cannot specify both subnet-id and port-id")
raise n_exc.BadRequest(resource='router', msg=msg)
port = self._core_plugin._get_port(context,
interface_info['port_id'])
if port['device_id']:
raise n_exc.PortInUse(net_id=port['network_id'],
port_id=port['id'],
device_id=port['device_id'])
fixed_ips = [ip for ip in port['fixed_ips']]
if len(fixed_ips) != 1:
msg = _('Router port must have exactly one fixed IP')
raise n_exc.BadRequest(resource='router', msg=msg)
subnet_id = fixed_ips[0]['subnet_id']
subnet = self._core_plugin._get_subnet(context, subnet_id)
self._check_for_dup_router_subnet(context, router_id,
port['network_id'],
subnet['id'],
subnet['cidr'])
port.update({'device_id': router_id,
'device_owner': DEVICE_OWNER_ROUTER_INTF})
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._core_plugin._get_subnet(context, subnet_id)
# Ensure the subnet has a gateway
if not subnet['gateway_ip']:
msg = _('Subnet for router interface must have a gateway IP')
raise n_exc.BadRequest(resource='router', msg=msg)
self._check_for_dup_router_subnet(context, router_id,
subnet['network_id'],
subnet_id,
subnet['cidr'])
fixed_ip = {'ip_address': subnet['gateway_ip'],
'subnet_id': subnet['id']}
port = self._core_plugin.create_port(context, {
'port':
{'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'fixed_ips': [fixed_ip],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': router_id,
'device_owner': DEVICE_OWNER_ROUTER_INTF,
'name': ''}})
self.l3_rpc_notifier.routers_updated(
context, [router_id], 'add_router_interface')
info = {'id': router_id,
'tenant_id': subnet['tenant_id'],
'port_id': port['id'],
'subnet_id': port['fixed_ips'][0]['subnet_id']}
notifier_api.notify(context,
notifier_api.publisher_id('network'),
'router.interface.create',
notifier_api.CONF.default_notification_level,
{'router_interface': info})
return info
def _confirm_router_interface_not_in_use(self, context, router_id,
subnet_id):
subnet_db = self._core_plugin._get_subnet(context, subnet_id)
subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
fip_qry = context.session.query(FloatingIP)
for fip_db in fip_qry.filter_by(router_id=router_id):
if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr:
raise l3.RouterInterfaceInUseByFloatingIP(
router_id=router_id, subnet_id=subnet_id)
def remove_router_interface(self, context, router_id, interface_info):
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port_id = interface_info['port_id']
port_db = self._core_plugin._get_port(context, port_id)
if not (port_db['device_owner'] == DEVICE_OWNER_ROUTER_INTF and
port_db['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
if 'subnet_id' in interface_info:
port_subnet_id = port_db['fixed_ips'][0]['subnet_id']
if port_subnet_id != interface_info['subnet_id']:
raise n_exc.SubnetMismatchForPort(
port_id=port_id,
subnet_id=interface_info['subnet_id'])
subnet_id = port_db['fixed_ips'][0]['subnet_id']
subnet = self._core_plugin._get_subnet(context, subnet_id)
self._confirm_router_interface_not_in_use(
context, router_id, subnet_id)
self._core_plugin.delete_port(context, port_db['id'],
l3_port_check=False)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
self._confirm_router_interface_not_in_use(context, router_id,
subnet_id)
subnet = self._core_plugin._get_subnet(context, subnet_id)
found = False
try:
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
self._core_plugin.delete_port(context, p['id'],
l3_port_check=False)
found = True
break
except exc.NoResultFound:
pass
if not found:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
self.l3_rpc_notifier.routers_updated(
context, [router_id], 'remove_router_interface')
info = {'id': router_id,
'tenant_id': subnet['tenant_id'],
'port_id': port_id,
'subnet_id': subnet_id}
notifier_api.notify(context,
notifier_api.publisher_id('network'),
'router.interface.delete',
notifier_api.CONF.default_notification_level,
{'router_interface': info})
return info
def _get_floatingip(self, context, id):
try:
floatingip = self._get_by_id(context, FloatingIP, id)
except exc.NoResultFound:
raise l3.FloatingIPNotFound(floatingip_id=id)
return floatingip
def _make_floatingip_dict(self, floatingip, fields=None):
res = {'id': floatingip['id'],
'tenant_id': floatingip['tenant_id'],
'floating_ip_address': floatingip['floating_ip_address'],
'floating_network_id': floatingip['floating_network_id'],
'router_id': floatingip['router_id'],
'port_id': floatingip['fixed_port_id'],
'fixed_ip_address': floatingip['fixed_ip_address'],
'status': floatingip['status']}
return self._fields(res, fields)
def _get_router_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
subnet_db = self._core_plugin._get_subnet(context,
internal_subnet_id)
if not subnet_db['gateway_ip']:
msg = (_('Cannot add floating IP to port on subnet %s '
'which has no gateway_ip') % internal_subnet_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
# find router interface ports on this network
router_intf_qry = context.session.query(models_v2.Port)
router_intf_ports = router_intf_qry.filter_by(
network_id=internal_port['network_id'],
device_owner=DEVICE_OWNER_ROUTER_INTF)
for intf_p in router_intf_ports:
if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id:
router_id = intf_p['device_id']
router_gw_qry = context.session.query(models_v2.Port)
has_gw_port = router_gw_qry.filter_by(
network_id=external_network_id,
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_GW).count()
if has_gw_port:
return router_id
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet_id,
external_network_id=external_network_id,
port_id=internal_port['id'])
def _internal_fip_assoc_data(self, context, fip):
"""Retrieve internal port data for floating IP.
Retrieve information concerning the internal port where
the floating IP should be associated to.
"""
internal_port = self._core_plugin._get_port(context, fip['port_id'])
if not internal_port['tenant_id'] == fip['tenant_id']:
port_id = fip['port_id']
if 'id' in fip:
floatingip_id = fip['id']
data = {'port_id': port_id,
'floatingip_id': floatingip_id}
msg = (_('Port %(port_id)s is associated with a different '
'tenant than Floating IP %(floatingip_id)s and '
'therefore cannot be bound.') % data)
else:
msg = (_('Cannot create floating IP and bind it to '
'Port %s, since that port is owned by a '
'different tenant.') % port_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
internal_subnet_id = None
if 'fixed_ip_address' in fip and fip['fixed_ip_address']:
internal_ip_address = fip['fixed_ip_address']
for ip in internal_port['fixed_ips']:
if ip['ip_address'] == internal_ip_address:
internal_subnet_id = ip['subnet_id']
if not internal_subnet_id:
msg = (_('Port %(id)s does not have fixed ip %(address)s') %
{'id': internal_port['id'],
'address': internal_ip_address})
raise n_exc.BadRequest(resource='floatingip', msg=msg)
else:
ips = [ip['ip_address'] for ip in internal_port['fixed_ips']]
if not ips:
                msg = (_('Cannot add floating IP to port %s that has '
                         'no fixed IP addresses') % internal_port['id'])
raise n_exc.BadRequest(resource='floatingip', msg=msg)
if len(ips) > 1:
msg = (_('Port %s has multiple fixed IPs. Must provide'
' a specific IP when assigning a floating IP') %
internal_port['id'])
raise n_exc.BadRequest(resource='floatingip', msg=msg)
internal_ip_address = internal_port['fixed_ips'][0]['ip_address']
internal_subnet_id = internal_port['fixed_ips'][0]['subnet_id']
return internal_port, internal_subnet_id, internal_ip_address
def get_assoc_data(self, context, fip, floating_network_id):
"""Determine/extract data associated with the internal port.
When a floating IP is associated with an internal port,
we need to extract/determine some data associated with the
internal port, including the internal_ip_address, and router_id.
We also need to confirm that this internal port is owned by the
tenant who owns the floating IP.
"""
(internal_port, internal_subnet_id,
internal_ip_address) = self._internal_fip_assoc_data(context, fip)
router_id = self._get_router_for_floatingip(context,
internal_port,
internal_subnet_id,
floating_network_id)
# confirm that this router has a floating
# ip enabled gateway with support for this floating IP network
try:
port_qry = context.elevated().session.query(models_v2.Port)
port_qry.filter_by(
network_id=floating_network_id,
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_GW).one()
except exc.NoResultFound:
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet_id,
port_id=internal_port['id'])
return (fip['port_id'], internal_ip_address, router_id)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
previous_router_id = floatingip_db.router_id
port_id = internal_ip_address = router_id = None
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
if 'port_id' in fip and fip['port_id']:
port_id, internal_ip_address, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
fip_qry = context.session.query(FloatingIP)
try:
fip_qry.filter_by(
fixed_port_id=fip['port_id'],
floating_network_id=floatingip_db['floating_network_id'],
fixed_ip_address=internal_ip_address).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=internal_ip_address,
net_id=floatingip_db['floating_network_id'])
except exc.NoResultFound:
pass
floatingip_db.update({'fixed_ip_address': internal_ip_address,
'fixed_port_id': port_id,
'router_id': router_id,
'last_known_router_id': previous_router_id})
def create_floatingip(
self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
fip = floatingip['floatingip']
tenant_id = self._get_tenant_id_for_create(context, fip)
fip_id = uuidutils.generate_uuid()
f_net_id = fip['floating_network_id']
if not self._core_plugin._network_is_external(context, f_net_id):
msg = _("Network %s is not a valid external network") % f_net_id
raise n_exc.BadRequest(resource='floatingip', msg=msg)
with context.session.begin(subtransactions=True):
# This external port is never exposed to the tenant.
# it is used purely for internal system and admin use when
# managing floating IPs.
external_port = self._core_plugin.create_port(context.elevated(), {
'port':
{'tenant_id': '', # tenant intentionally not set
'network_id': f_net_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': fip_id,
'device_owner': DEVICE_OWNER_FLOATINGIP,
'name': ''}})
# Ensure IP addresses are allocated on external port
if not external_port['fixed_ips']:
raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
floating_fixed_ip = external_port['fixed_ips'][0]
floating_ip_address = floating_fixed_ip['ip_address']
floatingip_db = FloatingIP(
id=fip_id,
tenant_id=tenant_id,
status=initial_status,
floating_network_id=fip['floating_network_id'],
floating_ip_address=floating_ip_address,
floating_port_id=external_port['id'])
fip['tenant_id'] = tenant_id
# Update association with internal port
# and define external IP address
self._update_fip_assoc(context, fip,
floatingip_db, external_port)
context.session.add(floatingip_db)
router_id = floatingip_db['router_id']
if router_id:
self.l3_rpc_notifier.routers_updated(
context, [router_id],
'create_floatingip')
return self._make_floatingip_dict(floatingip_db)
def update_floatingip(self, context, id, floatingip):
fip = floatingip['floatingip']
with context.session.begin(subtransactions=True):
floatingip_db = self._get_floatingip(context, id)
fip['tenant_id'] = floatingip_db['tenant_id']
fip['id'] = id
fip_port_id = floatingip_db['floating_port_id']
before_router_id = floatingip_db['router_id']
self._update_fip_assoc(context, fip, floatingip_db,
self._core_plugin.get_port(
context.elevated(), fip_port_id))
router_ids = []
if before_router_id:
router_ids.append(before_router_id)
router_id = floatingip_db['router_id']
if router_id and router_id != before_router_id:
router_ids.append(router_id)
if router_ids:
self.l3_rpc_notifier.routers_updated(
context, router_ids, 'update_floatingip')
return self._make_floatingip_dict(floatingip_db)
def update_floatingip_status(self, context, floatingip_id, status):
"""Update operational status for floating IP in neutron DB."""
fip_query = self._model_query(context, FloatingIP).filter(
FloatingIP.id == floatingip_id)
fip_query.update({'status': status}, synchronize_session=False)
def delete_floatingip(self, context, id):
floatingip = self._get_floatingip(context, id)
router_id = floatingip['router_id']
with context.session.begin(subtransactions=True):
context.session.delete(floatingip)
self._core_plugin.delete_port(context.elevated(),
floatingip['floating_port_id'],
l3_port_check=False)
if router_id:
self.l3_rpc_notifier.routers_updated(
context, [router_id],
'delete_floatingip')
def get_floatingip(self, context, id, fields=None):
floatingip = self._get_floatingip(context, id)
return self._make_floatingip_dict(floatingip, fields)
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'floatingip', limit,
marker)
if filters is not None:
for key, val in API_TO_DB_COLUMN_MAP.iteritems():
if key in filters:
filters[val] = filters.pop(key)
return self._get_collection(context, FloatingIP,
self._make_floatingip_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def delete_disassociated_floatingips(self, context, network_id):
query = self._model_query(context, FloatingIP)
query = query.filter_by(floating_network_id=network_id,
fixed_port_id=None,
router_id=None)
for fip in query:
self.delete_floatingip(context, fip.id)
def get_floatingips_count(self, context, filters=None):
return self._get_collection_count(context, FloatingIP,
filters=filters)
def prevent_l3_port_deletion(self, context, port_id):
"""Checks to make sure a port is allowed to be deleted.
Raises an exception if this is not the case. This should be called by
any plugin when the API requests the deletion of a port, since some
ports for L3 are not intended to be deleted directly via a DELETE
to /ports, but rather via other API calls that perform the proper
deletion checks.
"""
port_db = self._core_plugin._get_port(context, port_id)
if port_db['device_owner'] in [DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_ROUTER_GW,
DEVICE_OWNER_FLOATINGIP]:
# Raise port in use only if the port has IP addresses
# Otherwise it's a stale port that can be removed
fixed_ips = port_db['fixed_ips']
if fixed_ips:
raise l3.L3PortInUse(port_id=port_id,
device_owner=port_db['device_owner'])
else:
LOG.debug(_("Port %(port_id)s has owner %(port_owner)s, but "
"no IP address, so it can be deleted"),
{'port_id': port_db['id'],
'port_owner': port_db['device_owner']})
def disassociate_floatingips(self, context, port_id):
router_ids = set()
with context.session.begin(subtransactions=True):
fip_qry = context.session.query(FloatingIP)
floating_ips = fip_qry.filter_by(fixed_port_id=port_id)
for floating_ip in floating_ips:
router_ids.add(floating_ip['router_id'])
floating_ip.update({'fixed_port_id': None,
'fixed_ip_address': None,
'router_id': None})
if router_ids:
self.l3_rpc_notifier.routers_updated(
context, list(router_ids),
'disassociate_floatingips')
def _build_routers_list(self, routers, gw_ports):
gw_port_id_gw_port_dict = dict((gw_port['id'], gw_port)
for gw_port in gw_ports)
for router in routers:
gw_port_id = router['gw_port_id']
if gw_port_id:
router['gw_port'] = gw_port_id_gw_port_dict[gw_port_id]
return routers
def _get_sync_routers(self, context, router_ids=None, active=None):
"""Query routers and their gw ports for l3 agent.
Query routers with the router_ids. The gateway ports, if any,
will be queried too.
        The l3 agent has an option to deal with only one router id. In
        addition, when we need to notify the agent about only one router
        (e.g. on modification of the router, its interfaces, gw_port or
        floatingips), we will have router_ids.
@param router_ids: the list of router ids which we want to query.
if it is None, all of routers will be queried.
@return: a list of dicted routers with dicted gw_port populated if any
"""
filters = {'id': router_ids} if router_ids else {}
if active is not None:
filters['admin_state_up'] = [active]
router_dicts = self.get_routers(context, filters=filters)
gw_port_ids = []
if not router_dicts:
return []
for router_dict in router_dicts:
gw_port_id = router_dict['gw_port_id']
if gw_port_id:
gw_port_ids.append(gw_port_id)
gw_ports = []
if gw_port_ids:
gw_ports = self.get_sync_gw_ports(context, gw_port_ids)
return self._build_routers_list(router_dicts, gw_ports)
def _get_sync_floating_ips(self, context, router_ids):
"""Query floating_ips that relate to list of router_ids."""
if not router_ids:
return []
return self.get_floatingips(context, {'router_id': router_ids})
def get_sync_gw_ports(self, context, gw_port_ids):
if not gw_port_ids:
return []
filters = {'id': gw_port_ids}
gw_ports = self._core_plugin.get_ports(context, filters)
if gw_ports:
self._populate_subnet_for_ports(context, gw_ports)
return gw_ports
def get_sync_interfaces(self, context, router_ids,
device_owner=DEVICE_OWNER_ROUTER_INTF):
"""Query router interfaces that relate to list of router_ids."""
if not router_ids:
return []
filters = {'device_id': router_ids,
'device_owner': [device_owner]}
interfaces = self._core_plugin.get_ports(context, filters)
if interfaces:
self._populate_subnet_for_ports(context, interfaces)
return interfaces
def _populate_subnet_for_ports(self, context, ports):
"""Populate ports with subnet.
These ports already have fixed_ips populated.
"""
if not ports:
return
subnet_id_ports_dict = {}
for port in ports:
fixed_ips = port.get('fixed_ips', [])
if len(fixed_ips) > 1:
LOG.info(_("Ignoring multiple IPs on router port %s"),
port['id'])
continue
elif not fixed_ips:
# Skip ports without IPs, which can occur if a subnet
# attached to a router is deleted
LOG.info(_("Skipping port %s as no IP is configure on it"),
port['id'])
continue
fixed_ip = fixed_ips[0]
my_ports = subnet_id_ports_dict.get(fixed_ip['subnet_id'], [])
my_ports.append(port)
subnet_id_ports_dict[fixed_ip['subnet_id']] = my_ports
if not subnet_id_ports_dict:
return
filters = {'id': subnet_id_ports_dict.keys()}
fields = ['id', 'cidr', 'gateway_ip']
subnet_dicts = self._core_plugin.get_subnets(context, filters, fields)
for subnet_dict in subnet_dicts:
ports = subnet_id_ports_dict.get(subnet_dict['id'], [])
for port in ports:
# TODO(gongysh) stash the subnet into fixed_ips
# to make the payload smaller.
port['subnet'] = {'id': subnet_dict['id'],
'cidr': subnet_dict['cidr'],
'gateway_ip': subnet_dict['gateway_ip']}
def _process_sync_data(self, routers, interfaces, floating_ips):
routers_dict = {}
for router in routers:
routers_dict[router['id']] = router
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(l3_constants.FLOATINGIP_KEY,
[])
router_floatingips.append(floating_ip)
router[l3_constants.FLOATINGIP_KEY] = router_floatingips
for interface in interfaces:
router = routers_dict.get(interface['device_id'])
if router:
router_interfaces = router.get(l3_constants.INTERFACE_KEY, [])
router_interfaces.append(interface)
router[l3_constants.INTERFACE_KEY] = router_interfaces
return routers_dict.values()
def get_sync_data(self, context, router_ids=None, active=None):
"""Query routers and their related floating_ips, interfaces."""
with context.session.begin(subtransactions=True):
routers = self._get_sync_routers(context,
router_ids=router_ids,
active=active)
router_ids = [router['id'] for router in routers]
floating_ips = self._get_sync_floating_ips(context, router_ids)
interfaces = self.get_sync_interfaces(context, router_ids)
return self._process_sync_data(routers, interfaces, floating_ips)
| apache-2.0 | -7,870,026,785,524,489,000 | 46.581136 | 79 | 0.544431 | false |
SunPower/Carousel | simkit/contrib/readers.py | 1 | 6137 | """
Custom data readers including :class:`simkit.contrib.readers.ArgumentReader`,
:class:`simkit.contrib.readers.DjangoModelReader` and
:class:`simkit.contrib.readers.HDF5Reader`.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import numpy as np
import h5py
from simkit.core.data_readers import DataReader
from simkit.core.data_sources import DataParameter
from simkit.core import Q_
import logging
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
def copy_model_instance(obj):
"""
Copy Django model instance as a dictionary excluding automatically created
fields like an auto-generated sequence as a primary key or an auto-created
many-to-one reverse relation.
:param obj: Django model object
:return: copy of model instance as dictionary
"""
meta = getattr(obj, '_meta') # make pycharm happy
# dictionary of model values excluding auto created and related fields
return {f.name: getattr(obj, f.name)
for f in meta.get_fields(include_parents=False)
if not f.auto_created}
# TODO: make parameters consistent for all readers
# TODO: parameters set by attributes in data source model fields
# EG: ghi = FloatField('GHI', units='W/m**2')
# EG: solar_azimuth = FloatField('solar azimuth', units='degrees')
# TODO: some parameters set in class Meta
# EG: class Meta: args = ['GHI', 'azimuth']
class ArgumentReader(DataReader):
"""
Read arguments passed directly to a simulation.
    The argument parameters dictionary maps each argument name to its
    attributes; positional arguments additionally carry an ``argpos`` attribute
    giving their position, while the remaining parameters are treated as
    keyword arguments. For example::
{
'GHI': {'units': 'W/m**2', 'isconstant': False, 'argpos': 0},
'azimuth': {'units': 'degrees', 'isconstant': False, 'argpos': 1},
'DNI': {'units': 'W/m**2', 'isconstant': False},
'zenith': {'units': 'degrees', 'isconstant': False}
}
"""
#: True if reader accepts ``filename`` argument
is_file_reader = False # not a file reader
def load_data(self, *args, **kwargs):
"""
Collects positional and keyword arguments into `data` and applies units.
:return: data
"""
# get positional argument names from parameters and apply them to args
# update data with additional kwargs
argpos = {
v['extras']['argpos']: k for k, v in self.parameters.iteritems()
if 'argpos' in v['extras']
}
data = dict(
{argpos[n]: a for n, a in enumerate(args)}, **kwargs
)
return self.apply_units_to_cache(data)
def apply_units_to_cache(self, data):
"""
Applies units to data when a proxy reader is used. For example if the
data is cached as JSON and retrieved using the
:class:`~simkit.core.data_readers.JSONReader`, then units can be
applied from the original parameter schema.
:param data: Data read by proxy reader.
:return: data with units applied
"""
# if units key exists then apply
for k, v in self.parameters.iteritems():
if v and v.get('units'):
data[k] = Q_(data[k], v.get('units'))
return data
class DjangoModelReader(ArgumentReader):
"""
Reads arguments that are Django objects or lists of objects.
"""
def __init__(self, parameters=None, meta=None):
#: Django model
self.model = meta.model
model_meta = getattr(self.model, '_meta') # make pycharm happy
# model fields excluding AutoFields and related fields like one-to-many
all_model_fields = [
f for f in model_meta.get_fields(include_parents=False)
if not f.auto_created
]
all_field_names = [f.name for f in all_model_fields] # field names
# use all fields if no parameters given
if parameters is None:
parameters = DataParameter.fromkeys(
all_field_names, {}
)
fields = getattr(meta, 'fields', all_field_names) # specified fields
LOGGER.debug('fields:\n%r', fields)
exclude = getattr(meta, 'exclude', []) # specifically excluded fields
for f in all_model_fields:
# skip any fields not specified in data source
if f.name not in fields or f.name in exclude:
LOGGER.debug('skipping %s', f.name)
continue
# add field to parameters or update parameters with field type
param_dict = {'ftype': f.get_internal_type()}
if f.name in parameters:
parameters[f.name]['extras'].update(param_dict)
else:
parameters[f.name] = DataParameter(**param_dict)
super(DjangoModelReader, self).__init__(parameters, meta)
def load_data(self, model_instance, *args, **kwargs):
"""
Apply units to model.
:return: data
"""
model_dict = copy_model_instance(model_instance)
return super(DjangoModelReader, self).load_data(**model_dict)
class HDF5Reader(ArgumentReader):
"""
Reads data from an HDF5 file
"""
#: True if reader accepts ``filename`` argument
is_file_reader = True # is a file reader
def load_data(self, h5file, *args, **kwargs):
with h5py.File(h5file) as h5f:
h5data = dict.fromkeys(self.parameters)
for param, attrs in self.parameters.iteritems():
LOGGER.debug('parameter:\n%r', param)
node = attrs['extras']['node'] # full name of node
# composite datatype member
member = attrs['extras'].get('member')
if member is not None:
# if node is a table then get column/field/description
h5data[param] = np.asarray(h5f[node][member]) # copy member
else:
h5data[param] = np.asarray(h5f[node]) # copy array
return super(HDF5Reader, self).load_data(**h5data)
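# Illustrative sketch (hypothetical names, comment only): a data source using
# HDF5Reader would describe each parameter with a 'node' extra pointing at the
# HDF5 path, plus an optional 'member' naming a column of a compound table:
#
#   parameters = {
#       'ghi': {'units': 'W/m**2',
#               'extras': {'node': '/weather/irradiance', 'member': 'ghi'}},
#       'times': {'extras': {'node': '/weather/times'}}
#   }
#
# Exactly how such a mapping is wrapped into DataParameter objects is handled
# by the base DataReader/DataSource machinery, so treat the literal layout as
# an approximation rather than the canonical simkit API.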
| bsd-3-clause | -144,540,593,695,385,950 | 37.35625 | 80 | 0.61447 | false |
quantextive/qtx | examples/QuantextiveAEX.py | 1 | 1548 |
# coding: utf-8
# In[1]:
import sys
from qtx import qtx
# In[2]:
import pandas as pd
# In[3]:
import warnings
warnings.filterwarnings('ignore')
# In[4]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u'pylab inline')
pylab.rcParams['figure.figsize'] = (15, 9)
# #### Select your API Endpoint
# In[5]:
api = "market-data-eod"
# #### Enter your API Token
# You can request your trial key at https://quantextive.com/request_aex/ or via the AEX portal (https://quantextive.com/dashboard)
# In[6]:
api_key = '*****************'
# #### Enter your Endpoint parameters
# In[7]:
params = {
'securityId': 'NSE:7UP',
'startDate': '2016-11-01',
'endDate': '2017-03-18'
}
# See a list of all companies accessible with the API at https://quantextive.com/api_company_list/
# See a list of all available endpoints (https://quantextive.com/api_home/)
# #### Initialize the module
# In[8]:
client = qtx.ApiClient()
# #### Return data based on your parameters
# In[9]:
data = client.get(api_key, api, params).data_frame()
# #### View data sample and datatypes
# In[10]:
data.head()
# In[11]:
data.dtypes
# #### Convert integer-like columns to numeric format
# In[12]:
data2 = data.convert_objects(convert_numeric=True, copy=True)
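# Note: convert_objects() is deprecated in newer pandas releases; on recent
# versions an (untested here) equivalent would be something like:
# data2 = data.apply(pd.to_numeric, errors='ignore')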
# #### Re-index the dataframe (Set date as index)
# In[13]:
data3 = data2.set_index('date')
# In[14]:
data3.head()
# #### Visualize data
# In[15]:
data3["close_value"].plot(grid = True)
# In[ ]:
| mit | 2,279,717,237,688,171,000 | 11 | 130 | 0.632429 | false |
huggingface/transformers | src/transformers/modelcard.py | 1 | 29548 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import requests
import yaml
from huggingface_hub import HfApi
from . import __version__
from .file_utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_datasets_available,
is_offline_mode,
is_remote_url,
is_tokenizers_available,
is_torch_available,
)
from .training_args import ParallelMode
from .utils import logging
from .utils.modeling_auto_mapping import (
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_MASKED_LM_MAPPING_NAMES,
MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
)
TASK_MAPPING = {
"text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES,
"token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
}
logger = logging.get_logger(__name__)
class ModelCard:
r"""
Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards.
Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by
Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer,
Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993
Note: A model card can be loaded and saved to disk.
Parameters:
"""
def __init__(self, **kwargs):
warnings.warn(
"The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning
)
# Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers)
self.model_details = kwargs.pop("model_details", {})
self.intended_use = kwargs.pop("intended_use", {})
self.factors = kwargs.pop("factors", {})
self.metrics = kwargs.pop("metrics", {})
self.evaluation_data = kwargs.pop("evaluation_data", {})
self.training_data = kwargs.pop("training_data", {})
self.quantitative_analyses = kwargs.pop("quantitative_analyses", {})
self.ethical_considerations = kwargs.pop("ethical_considerations", {})
self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {})
# Open additional attributes
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
def save_pretrained(self, save_directory_or_file):
"""Save a model card object to the directory or file `save_directory_or_file`."""
if os.path.isdir(save_directory_or_file):
# If we save using the predefined names, we can load using `from_pretrained`
output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME)
else:
output_model_card_file = save_directory_or_file
self.to_json_file(output_model_card_file)
logger.info(f"Model card saved in {output_model_card_file}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r"""
Instantiate a :class:`~transformers.ModelCard` from a pre-trained model model card.
Parameters:
pretrained_model_name_or_path: either:
- a string, the `model id` of a pretrained model card hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a
user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a model card file saved using the
:func:`~transformers.ModelCard.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved model card JSON `file`, e.g.: ``./my_model_directory/modelcard.json``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache
should not be used.
kwargs: (`optional`) dict: key/value pairs with which to update the ModelCard object after loading.
- The values in kwargs of any keys which are model card attributes will be used to override the loaded
values.
- Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the
`return_unused_kwargs` keyword parameter.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.
find_from_standard_name: (`optional`) boolean, default True:
If the pretrained_model_name_or_path ends with our standard model or config filenames, replace them
with our standard modelcard filename. Can be used to directly feed a model/config url and access the
colocated modelcard.
return_unused_kwargs: (`optional`) bool:
- If False, then this function returns just the final model card object.
- If True, then this functions returns a tuple `(model card, unused_kwargs)` where `unused_kwargs` is a
dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of
kwargs which has not been used to update `ModelCard` and is otherwise ignored.
Examples::
modelcard = ModelCard.from_pretrained('bert-base-uncased') # Download model card from huggingface.co and cache.
modelcard = ModelCard.from_pretrained('./test/saved_model/') # E.g. model card was saved using `save_pretrained('./test/saved_model/')`
modelcard = ModelCard.from_pretrained('./test/saved_model/modelcard.json')
modelcard = ModelCard.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
"""
# This imports every model so let's do it dynamically here.
from transformers.models.auto.configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP
cache_dir = kwargs.pop("cache_dir", None)
proxies = kwargs.pop("proxies", None)
find_from_standard_name = kwargs.pop("find_from_standard_name", True)
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
from_pipeline = kwargs.pop("_from_pipeline", None)
user_agent = {"file_type": "model_card"}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if pretrained_model_name_or_path in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
# For simplicity we use the same pretrained url than the configuration files
# but with a different suffix (modelcard.json). This suffix is replaced below.
model_card_file = ALL_PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
model_card_file = os.path.join(pretrained_model_name_or_path, MODEL_CARD_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
model_card_file = pretrained_model_name_or_path
else:
model_card_file = hf_bucket_url(pretrained_model_name_or_path, filename=MODEL_CARD_NAME, mirror=None)
if find_from_standard_name or pretrained_model_name_or_path in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
model_card_file = model_card_file.replace(CONFIG_NAME, MODEL_CARD_NAME)
model_card_file = model_card_file.replace(WEIGHTS_NAME, MODEL_CARD_NAME)
model_card_file = model_card_file.replace(TF2_WEIGHTS_NAME, MODEL_CARD_NAME)
try:
# Load from URL or cache if already cached
resolved_model_card_file = cached_path(
model_card_file, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent
)
if resolved_model_card_file == model_card_file:
logger.info(f"loading model card file {model_card_file}")
else:
logger.info(f"loading model card file {model_card_file} from cache at {resolved_model_card_file}")
# Load model card
modelcard = cls.from_json_file(resolved_model_card_file)
except (EnvironmentError, json.JSONDecodeError):
# We fall back on creating an empty model card
modelcard = cls()
# Update model card with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(modelcard, key):
setattr(modelcard, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model card: {modelcard}")
if return_unused_kwargs:
return modelcard, kwargs
else:
return modelcard
@classmethod
def from_dict(cls, json_object):
"""Constructs a `ModelCard` from a Python dictionary of parameters."""
return cls(**json_object)
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `ModelCard` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
dict_obj = json.loads(text)
return cls(**dict_obj)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
"""Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
AUTOGENERATED_COMMENT = """
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
"""
TASK_TAG_TO_NAME_MAPPING = {
"fill-mask": "Masked Language Modeling",
"image-classification": "Image Classification",
"multiple-choice": "Multiple Choice",
"object-detection": "Object Detection",
"question-answering": "Question Answering",
"summarization": "Summarization",
"table-question-answering": "Table Question Answering",
"text-classification": "Text Classification",
"text-generation": "Causal Language Modeling",
"text2text-generation": "Sequence-to-sequence Language Modeling",
"token-classification": "Token Classification",
"translation": "Translation",
"zero-shot-classification": "Zero Shot Classification",
}
METRIC_TAGS = [
"accuracy",
"bleu",
"f1",
"matthews_correlation",
"pearsonr",
"precision",
"recall",
"rouge",
"sacrebleu",
"spearmanr",
]
def _listify(obj):
if obj is None:
return []
elif isinstance(obj, str):
return [obj]
else:
return obj
def _insert_values_as_list(metadata, name, values):
if values is None:
return metadata
if isinstance(values, str):
values = [values]
if len(values) == 0:
return metadata
metadata[name] = values
return metadata
def infer_metric_tags_from_eval_results(eval_results):
if eval_results is None:
return {}
result = {}
for key in eval_results.keys():
if key.lower().replace(" ", "_") in METRIC_TAGS:
result[key.lower().replace(" ", "_")] = key
elif key.lower() == "rouge1":
result["rouge"] = key
return result
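# Illustrative example: for eval results such as
#   {"Accuracy": 0.91, "Rouge1": 44.2, "eval_runtime": 12.0}
# only keys whose normalized form is a known metric tag are kept, giving
#   {"accuracy": "Accuracy", "rouge": "Rouge1"}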
def _insert_value(metadata, name, value):
if value is None:
return metadata
metadata[name] = value
return metadata
def is_hf_dataset(dataset):
if not is_datasets_available():
return False
from datasets import Dataset
return isinstance(dataset, Dataset)
def _get_mapping_values(mapping):
result = []
for v in mapping.values():
if isinstance(v, (tuple, list)):
result += list(v)
else:
result.append(v)
return result
@dataclass
class TrainingSummary:
model_name: str
language: Optional[Union[str, List[str]]] = None
license: Optional[str] = None
tags: Optional[Union[str, List[str]]] = None
finetuned_from: Optional[str] = None
tasks: Optional[Union[str, List[str]]] = None
dataset: Optional[Union[str, List[str]]] = None
dataset_tags: Optional[Union[str, List[str]]] = None
dataset_args: Optional[Union[str, List[str]]] = None
eval_results: Optional[Dict[str, float]] = None
eval_lines: Optional[List[str]] = None
hyperparameters: Optional[Dict[str, Any]] = None
def __post_init__(self):
# Infer default license from the checkpoint used, if possible.
if (
self.license is None
and not is_offline_mode()
and self.finetuned_from is not None
and len(self.finetuned_from) > 0
):
try:
model_info = HfApi().model_info(self.finetuned_from)
for tag in model_info.tags:
if tag.startswith("license:"):
self.license = tag[8:]
except requests.exceptions.HTTPError:
pass
def create_model_index(self, metric_mapping):
model_index = {"name": self.model_name}
# Dataset mapping tag -> name
dataset_names = _listify(self.dataset)
dataset_tags = _listify(self.dataset_tags)
dataset_args = _listify(self.dataset_args)
if len(dataset_args) < len(dataset_tags):
dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)}
dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)}
task_mapping = {
task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING
}
if len(task_mapping) == 0 and len(dataset_mapping) == 0:
return model_index
if len(task_mapping) == 0:
task_mapping = {None: None}
if len(dataset_mapping) == 0:
dataset_mapping = {None: None}
model_index["results"] = []
# One entry per dataset and per task
all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
for task_tag, ds_tag in all_possibilities:
result = {}
if task_tag is not None:
result["task"] = {"name": task_mapping[task_tag], "type": task_tag}
if ds_tag is not None:
result["dataset"] = {"name": dataset_mapping[ds_tag], "type": ds_tag}
if dataset_arg_mapping[ds_tag] is not None:
result["dataset"]["args"] = dataset_arg_mapping[ds_tag]
if len(metric_mapping) > 0:
for metric_tag, metric_name in metric_mapping.items():
result["metric"] = {
"name": metric_name,
"type": metric_tag,
"value": self.eval_results[metric_name],
}
model_index["results"].append(result)
return [model_index]
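    # Illustrative example: with tasks="summarization", dataset="cnn_dailymail",
    # dataset_tags="cnn_dailymail" and a metric mapping {"rouge": "Rouge1"},
    # the returned structure is roughly
    #   [{"name": <model_name>,
    #     "results": [{"task": {"name": "Summarization", "type": "summarization"},
    #                  "dataset": {"name": "cnn_dailymail", "type": "cnn_dailymail"},
    #                  "metric": {"name": "Rouge1", "type": "rouge",
    #                             "value": <eval_results["Rouge1"]>}}]}]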
def create_metadata(self):
metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
metadata = {}
metadata = _insert_values_as_list(metadata, "language", self.language)
metadata = _insert_value(metadata, "license", self.license)
metadata = _insert_values_as_list(metadata, "tags", self.tags)
metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags)
metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys()))
metadata["model_index"] = self.create_model_index(metric_mapping)
return metadata
def to_model_card(self):
model_card = ""
metadata = yaml.dump(self.create_metadata(), sort_keys=False)
if len(metadata) > 0:
model_card = f"---\n{metadata}---\n"
# Now the model card for realsies.
model_card += AUTOGENERATED_COMMENT
model_card += f"\n# {self.model_name}\n\n"
if self.finetuned_from is None:
model_card += "This model was trained from scratch on "
else:
model_card += f"This model is a fine-tuned version of [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on "
if self.dataset is None:
model_card += "an unkown dataset."
else:
if isinstance(self.dataset, str):
model_card += f"the {self.dataset} dataset."
elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1:
model_card += f"the {self.dataset[0]} dataset."
else:
model_card += (
", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets."
)
if self.eval_results is not None:
model_card += "\nIt achieves the following results on the evaluation set:\n"
model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()])
model_card += "\n"
model_card += "\n## Model description\n\nMore information needed\n"
model_card += "\n## Intended uses & limitations\n\nMore information needed\n"
model_card += "\n## Training and evaluation data\n\nMore information needed\n"
model_card += "\n## Training procedure\n"
model_card += "\n### Training hyperparameters\n"
if self.hyperparameters is not None:
model_card += "\nThe following hyperparameters were used during training:\n"
model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()])
model_card += "\n"
else:
model_card += "\nMore information needed\n"
if self.eval_lines is not None:
model_card += "\n### Training results\n\n"
model_card += make_markdown_table(self.eval_lines)
model_card += "\n"
model_card += "\n### Framework versions\n\n"
model_card += f"- Transformers {__version__}\n"
if is_torch_available():
import torch
model_card += f"- Pytorch {torch.__version__}\n"
if is_datasets_available():
import datasets
model_card += f"- Datasets {datasets.__version__}\n"
if is_tokenizers_available():
import tokenizers
model_card += f"- Tokenizers {tokenizers.__version__}\n"
return model_card
@classmethod
def from_trainer(
cls,
trainer,
language=None,
license=None,
tags=None,
model_name=None,
finetuned_from=None,
tasks=None,
dataset_tags=None,
dataset=None,
dataset_args=None,
):
# Infer default from dataset
one_dataset = trainer.train_dataset if trainer.train_dataset is not None else trainer.eval_dataset
if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None):
default_tag = one_dataset.builder_name
# Those are not real datasets from the Hub so we exclude them.
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
if dataset_tags is None:
dataset_tags = [default_tag]
if dataset_args is None:
dataset_args = [one_dataset.config_name]
if dataset is None and dataset_tags is not None:
dataset = dataset_tags
# Infer default finetuned_from
if (
finetuned_from is None
and hasattr(trainer.model.config, "_name_or_path")
and not os.path.isdir(trainer.model.config._name_or_path)
):
finetuned_from = trainer.model.config._name_or_path
# Infer default task tag:
if tasks is None:
model_class_name = trainer.model.__class__.__name__
for task, mapping in TASK_MAPPING.items():
if model_class_name in _get_mapping_values(mapping):
tasks = task
if model_name is None:
model_name = Path(trainer.args.output_dir).name
# Add `generated_from_trainer` to the tags
if tags is None:
tags = ["generated_from_trainer"]
elif isinstance(tags, str) and tags != "generated_from_trainer":
tags = [tags, "generated_from_trainer"]
elif "generated_from_trainer" not in tags:
tags.append("generated_from_trainer")
_, eval_lines, eval_results = parse_log_history(trainer.state.log_history)
hyperparameters = extract_hyperparameters_from_trainer(trainer)
return cls(
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
eval_results=eval_results,
eval_lines=eval_lines,
hyperparameters=hyperparameters,
)
def parse_log_history(log_history):
"""
Parse the `log_history` of a Trainer to get the intermediate and final evaluation results.
"""
idx = 0
while idx < len(log_history) and "train_runtime" not in log_history[idx]:
idx += 1
# If there are no training logs
if idx == len(log_history):
idx -= 1
while idx >= 0 and "eval_loss" not in log_history[idx]:
idx -= 1
if idx > 0:
return None, None, log_history[idx]
else:
return None, None, None
    # From now on, we can assume we have training logs:
train_log = log_history[idx]
lines = []
training_loss = "No log"
for i in range(idx):
if "loss" in log_history[i]:
training_loss = log_history[i]["loss"]
if "eval_loss" in log_history[i]:
metrics = log_history[i].copy()
_ = metrics.pop("total_flos", None)
epoch = metrics.pop("epoch", None)
step = metrics.pop("step", None)
_ = metrics.pop("eval_runtime", None)
_ = metrics.pop("eval_samples_per_second", None)
_ = metrics.pop("eval_steps_per_second", None)
values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step}
for k, v in metrics.items():
if k == "eval_loss":
values["Validation Loss"] = v
else:
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
values[name] = v
lines.append(values)
idx = len(log_history) - 1
while idx >= 0 and "eval_loss" not in log_history[idx]:
idx -= 1
if idx > 0:
eval_results = {}
for key, value in log_history[idx].items():
if key.startswith("eval_"):
key = key[5:]
if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]:
camel_cased_key = " ".join([part.capitalize() for part in key.split("_")])
eval_results[camel_cased_key] = value
return train_log, lines, eval_results
else:
return train_log, lines, None
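# Illustrative example: for a log history like
#   [{"loss": 1.2, "epoch": 1.0, "step": 10},
#    {"eval_loss": 0.9, "eval_accuracy": 0.8, "epoch": 1.0, "step": 10},
#    {"train_runtime": 42.0}]
# the function returns the entry holding "train_runtime" as train_log, one table
# line such as
#   {"Training Loss": 1.2, "Epoch": 1.0, "Step": 10,
#    "Validation Loss": 0.9, "Accuracy": 0.8}
# and the final eval results taken from the last entry containing "eval_loss".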
def _maybe_round(v, decimals=4):
if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals:
return f"{v:.{decimals}f}"
return str(v)
def _regular_table_line(values, col_widths):
values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)]
return "".join(values_with_space) + "|\n"
def _second_table_line(col_widths):
values = ["|:" + "-" * w + ":" for w in col_widths]
return "".join(values) + "|\n"
def make_markdown_table(lines):
"""
Create a nice Markdown table from the results in `lines`.
"""
if lines is None or len(lines) == 0:
return ""
col_widths = {key: len(str(key)) for key in lines[0].keys()}
for line in lines:
for key, value in line.items():
if col_widths[key] < len(_maybe_round(value)):
col_widths[key] = len(_maybe_round(value))
table = _regular_table_line(list(lines[0].keys()), list(col_widths.values()))
table += _second_table_line(list(col_widths.values()))
for line in lines:
table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values()))
return table
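# Illustrative example: make_markdown_table([{"Epoch": 1, "Loss": 0.123456}])
# yields a pipe-delimited table with a header row, a ":---:" separator row and
# one value row in which the loss is rounded to "0.1235" by _maybe_round.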
_TRAINING_ARGS_KEYS = [
"learning_rate",
"train_batch_size",
"eval_batch_size",
"seed",
]
def extract_hyperparameters_from_trainer(trainer):
hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS}
if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]:
hyperparameters["distributed_type"] = (
"multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value
)
if trainer.args.world_size > 1:
hyperparameters["num_devices"] = trainer.args.world_size
if trainer.args.gradient_accumulation_steps > 1:
hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps
total_train_batch_size = (
trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps
)
if total_train_batch_size != hyperparameters["train_batch_size"]:
hyperparameters["total_train_batch_size"] = total_train_batch_size
total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size
if total_eval_batch_size != hyperparameters["eval_batch_size"]:
hyperparameters["total_eval_batch_size"] = total_eval_batch_size
if trainer.args.adafactor:
hyperparameters["optimizer"] = "Adafactor"
else:
hyperparameters[
"optimizer"
] = f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and epsilon={trainer.args.adam_epsilon}"
hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value
if trainer.args.warmup_ratio != 0.0:
hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio
if trainer.args.warmup_steps != 0.0:
hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps
if trainer.args.max_steps != -1:
hyperparameters["training_steps"] = trainer.args.max_steps
else:
hyperparameters["num_epochs"] = trainer.args.num_train_epochs
if trainer.args.fp16:
if trainer.use_amp:
hyperparameters["mixed_precision_training"] = "Native AMP"
elif trainer.use_apex:
hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}"
if trainer.args.label_smoothing_factor != 0.0:
hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor
return hyperparameters
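# Illustrative example: for a plain single-device run the returned dictionary
# typically holds learning_rate, train_batch_size, eval_batch_size and seed,
# plus an "optimizer" description, the "lr_scheduler_type" and either
# "training_steps" or "num_epochs"; distributed and mixed-precision details are
# only added when the corresponding trainer arguments are set.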
| apache-2.0 | -3,755,412,001,345,164,300 | 38.608579 | 148 | 0.615405 | false |
haiweiosu/Padding_Oracle_Attacks | paddingoracle.py | 1 | 3421 | import os
from cryptography.hazmat.primitives import hashes, padding, ciphers
from cryptography.hazmat.backends import default_backend
import base64
import binascii
def xor(a,b):
"""
xors two raw byte streams.
"""
assert len(a) == len(b), "Lengths of two strings are not same. a = {}, b = {}".format(len(a), len(b))
return ''.join(chr(ord(ai)^ord(bi)) for ai,bi in zip(a,b))
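# Illustrative example: xor("abc", "abc") == "\x00\x00\x00", and for equal-length
# strings xor(xor(a, b), b) recovers a, which is the property CBC tampering relies on.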
def split_into_blocks(msg, l):
while msg:
yield msg[:l]
msg = msg[l:]
class PaddingOracle(object):
def __init__(self, msg_len=0):
self._backend = default_backend()
self._block_size_bytes = ciphers.algorithms.AES.block_size/8
self._key = os.urandom(self._block_size_bytes)
if msg_len>0:
self._msg = os.urandom(msg_len)
else:
self._msg = "Top-secret message!!!!"
def test(self, msg):
"""
        Test whether your attack succeeded or not!
"""
return msg in [self._msg, self._padded_msg]
def setup(self):
padder = padding.PKCS7(ciphers.algorithms.AES.block_size).padder()
self._padded_msg = padded_msg = padder.update(self._msg) + padder.finalize()
# print padded_msg.encode('hex')
iv = os.urandom(self._block_size_bytes)
encryptor = ciphers.Cipher(ciphers.algorithms.AES(self._key),
ciphers.modes.CBC(iv),
self._backend).encryptor()
return iv + encryptor.update(padded_msg) + encryptor.finalize()
@property
def block_length(self):
return self._block_size_bytes
def encrypt(self, msg):
raise Exception("Encrypt is not allowed in this oracle!")
def decrypt(self, ctx):
iv, ctx = ctx[:self._block_size_bytes], ctx[self._block_size_bytes:]
unpadder = padding.PKCS7(ciphers.algorithms.AES.block_size).unpadder()
decryptor = ciphers.Cipher(ciphers.algorithms.AES(self._key),
ciphers.modes.CBC(iv),
self._backend).decryptor()
padded_msg = decryptor.update(ctx) + decryptor.finalize()
try:
msg = unpadder.update(padded_msg) + unpadder.finalize()
return True # Successful decryption
except ValueError:
return False # Error!!
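# Illustrative sketch of how the oracle is typically driven (the attack loop
# itself is not part of this module): an attacker who can only observe the
# True/False result of decrypt() tweaks one byte of the preceding ciphertext
# block at a time and watches whether the padding check passes, e.g.
#   oracle = PaddingOracle()
#   ctx = oracle.setup()
#   block = oracle.block_length
#   # mutate ctx[:block] byte by byte and call
#   # oracle.decrypt(mutated_iv + ctx[block:2*block])
# Each accepted guess reveals one byte of the CBC-decrypted plaintext.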
################################################################################
## The following code provides API to access padding oracle server.
################################################################################
from urllib2 import urlopen
import json
url = 'https://paddingoracle.herokuapp.com/'
class PaddingOracleServer(object):
def __init__(self, msg_len=0):
pass
@property
def block_length(self):
return ciphers.algorithms.AES.block_size/8
def decrypt(self, ctx):
dec_url = url + "decrypt/{}".format(base64.urlsafe_b64encode(ctx))
ret = json.load(urlopen(dec_url))
return ret['return'] == 0
def ciphertext(self):
ctx_url = url + "ctx"
ret = json.load(urlopen(ctx_url))
return base64.urlsafe_b64decode(str(ret['ctx']))
def test(self, msg):
test_url = url + "test/{}".format(base64.urlsafe_b64encode(msg))
ret = json.load(urlopen(test_url))
return ret['return'] == 0
def setup(self):
return self.ciphertext() | apache-2.0 | 7,257,267,256,434,626,000 | 32.881188 | 105 | 0.566793 | false |
anthonyng2/Machine-Learning-For-Finance | Regression Based Machine Learning for Algorithmic Trading/Pairs Trading scikit-learn Linear.py | 1 | 5074 | '''
Anthony NG
@ 2017
## MIT License
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import numpy as np
import pandas as pd
from zipline.utils import tradingcalendar
import pytz
from sklearn.linear_model import LinearRegression
reg = LinearRegression(fit_intercept=True)
def initialize(context):
# Quantopian backtester specific variables
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerTrade(cost=1))
set_symbol_lookup_date('2014-01-01')
context.stock_pairs = [(sid(5885), sid(4283))]
# set_benchmark(context.y)
context.num_pairs = len(context.stock_pairs)
# strategy specific variables
context.lookback = 20 # used for regression
context.z_window = 20 # used for zscore calculation, must be <= lookback
context.spread = np.ndarray((context.num_pairs, 0))
# context.hedgeRatioTS = np.ndarray((context.num_pairs, 0))
context.inLong = [False] * context.num_pairs
context.inShort = [False] * context.num_pairs
# Only do work 30 minutes before close
schedule_function(func=check_pair_status, date_rule=date_rules.every_day(), time_rule=time_rules.market_close(minutes=30))
# Will be called on every trade event for the securities you specify.
def handle_data(context, data):
# Our work is now scheduled in check_pair_status
pass
def check_pair_status(context, data):
if get_open_orders():
return
prices = history(35, '1d', 'price').iloc[-context.lookback::]
new_spreads = np.ndarray((context.num_pairs, 1))
for i in range(context.num_pairs):
(stock_y, stock_x) = context.stock_pairs[i]
Y = prices[stock_y]
X = prices[stock_x]
try:
hedge = hedge_ratio(Y, X, add_const=True)
except ValueError as e:
log.debug(e)
return
# context.hedgeRatioTS = np.append(context.hedgeRatioTS, hedge)
new_spreads[i, :] = Y[-1] - hedge * X[-1]
if context.spread.shape[1] > context.z_window:
# Keep only the z-score lookback period
spreads = context.spread[i, -context.z_window:]
zscore = (spreads[-1] - spreads.mean()) / spreads.std()
if context.inShort[i] and zscore < 0.0:
order_target(stock_y, 0)
order_target(stock_x, 0)
context.inShort[i] = False
context.inLong[i] = False
record(X_pct=0, Y_pct=0)
return
if context.inLong[i] and zscore > 0.0:
order_target(stock_y, 0)
order_target(stock_x, 0)
context.inShort[i] = False
context.inLong[i] = False
record(X_pct=0, Y_pct=0)
return
if zscore < -1.0 and (not context.inLong[i]):
# Only trade if NOT already in a trade
y_target_shares = 1
X_target_shares = -hedge
context.inLong[i] = True
context.inShort[i] = False
(y_target_pct, x_target_pct) = computeHoldingsPct( y_target_shares,X_target_shares, Y[-1], X[-1] )
order_target_percent( stock_y, y_target_pct * (1.0/context.num_pairs) )
order_target_percent( stock_x, x_target_pct * (1.0/context.num_pairs) )
record(Y_pct=y_target_pct, X_pct=x_target_pct)
return
if zscore > 1.0 and (not context.inShort[i]):
# Only trade if NOT already in a trade
y_target_shares = -1
X_target_shares = hedge
context.inShort[i] = True
context.inLong[i] = False
(y_target_pct, x_target_pct) = computeHoldingsPct( y_target_shares, X_target_shares, Y[-1], X[-1] )
order_target_percent( stock_y, y_target_pct * (1.0/context.num_pairs))
order_target_percent( stock_x, x_target_pct * (1.0/context.num_pairs))
record(Y_pct=y_target_pct, X_pct=x_target_pct)
context.spread = np.hstack([context.spread, new_spreads])
def hedge_ratio(Y, X, add_const=True):
reg.fit(X.reshape(-1,1), Y)
return reg.coef_
def computeHoldingsPct(yShares, xShares, yPrice, xPrice):
yDol = yShares * yPrice
xDol = xShares * xPrice
notionalDol = abs(yDol) + abs(xDol)
y_target_pct = yDol / notionalDol
x_target_pct = xDol / notionalDol
return (y_target_pct, x_target_pct)
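# Illustrative example: computeHoldingsPct(1, -0.8, 50.0, 40.0) gives
# yDol = 50.0, xDol = -32.0, notionalDol = 82.0, so roughly (0.61, -0.39);
# the absolute weights always sum to 1, i.e. the full pair notional.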
| mit | -402,640,299,320,053,200 | 37.333333 | 460 | 0.58987 | false |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/access_and_security/floating_ips/workflows.py | 1 | 6184 | # Copyright 2012 Nebula, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.utils import filters
from openstack_dashboard.openstack.common.log import operate_log
ALLOCATE_URL = "horizon:project:access_and_security:floating_ips:allocate"
class AssociateIPAction(workflows.Action):
ip_id = forms.DynamicTypedChoiceField(label=_("IP Address"),
coerce=filters.get_int_or_uuid,
empty_value=None,
add_item_link=ALLOCATE_URL)
instance_id = forms.ChoiceField(label=_("Instance"))
class Meta:
name = _("IP Address")
help_text = _("Select the IP address you wish to associate with "
"the selected instance.")
def __init__(self, *args, **kwargs):
super(AssociateIPAction, self).__init__(*args, **kwargs)
if api.base.is_service_enabled(self.request, 'network'):
label = _("Port to be associated")
else:
label = _("Instance to be associated")
self.fields['instance_id'].label = label
        # If AssociateIP is invoked from the instance menu, the instance_id
        # parameter is passed in the URL. In a Neutron-based floating IP
        # implementation an association target is not an instance but a port,
        # so we need to get an association target based on the received
        # instance_id and set the initial value of the instance_id ChoiceField.
q_instance_id = self.request.GET.get('instance_id')
if q_instance_id:
targets = self._get_target_list()
target_id = api.network.floating_ip_target_get_by_instance(
self.request, q_instance_id, targets)
self.initial['instance_id'] = target_id
def populate_ip_id_choices(self, request, context):
ips = []
redirect = reverse('horizon:project:access_and_security:index')
try:
ips = api.network.tenant_floating_ip_list(self.request)
except neutron_exc.ConnectionFailed:
exceptions.handle(self.request, redirect=redirect)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve floating IP addresses.'),
redirect=redirect)
options = sorted([(ip.id, ip.ip) for ip in ips if not ip.port_id])
if options:
options.insert(0, ("", _("Select an IP address")))
else:
options = [("", _("No floating IP addresses allocated"))]
return options
@memoized.memoized_method
def _get_target_list(self):
targets = []
try:
targets = api.network.floating_ip_target_list(self.request)
except Exception:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to retrieve instance list.'),
redirect=redirect)
return targets
def populate_instance_id_choices(self, request, context):
targets = self._get_target_list()
instances = []
for target in targets:
instances.append((target.id, target.name))
# Sort instances for easy browsing
instances = sorted(instances, key=lambda x: x[1])
neutron_enabled = api.base.is_service_enabled(request, 'network')
if instances:
if neutron_enabled:
label = _("Select a port")
else:
label = _("Select an instance")
instances.insert(0, ("", label))
else:
if neutron_enabled:
label = _("No ports available")
else:
label = _("No instances available")
instances = (("", label),)
return instances
class AssociateIP(workflows.Step):
action_class = AssociateIPAction
contributes = ("ip_id", "instance_id", "ip_address")
def contribute(self, data, context):
context = super(AssociateIP, self).contribute(data, context)
ip_id = data.get('ip_id', None)
if ip_id:
ip_choices = dict(self.action.fields['ip_id'].choices)
context["ip_address"] = ip_choices.get(ip_id, None)
return context
class IPAssociationWorkflow(workflows.Workflow):
slug = "ip_association"
name = _("Manage Floating IP Associations")
finalize_button_name = _("Associate")
success_message = _('IP address %s associated.')
failure_message = _('Unable to associate IP address %s.')
success_url = "horizon:project:access_and_security:index"
default_steps = (AssociateIP,)
def format_status_message(self, message):
return message % self.context.get('ip_address', 'unknown IP address')
def handle(self, request, data):
try:
api.network.floating_ip_associate(request,
data['ip_id'],
data['instance_id'])
operate_log(request.user.username,
request.user.roles,
"floating ip associate")
except Exception:
exceptions.handle(request)
return False
return True
| gpl-2.0 | 661,193,687,322,583,200 | 38.139241 | 78 | 0.602523 | false |
Storj/dataserv-client | py2exe_MediaCollector.py | 1 | 1123 | import os
import glob
import lib2to3
from py2exe.build_exe import py2exe as build_exe
class MediaCollector(build_exe):
"""
    This class adds
    the jsonschema files draft3.json and draft4.json and
    the lib2to3 files Grammar.txt and PatternGrammar.txt
    to the list of compiled files so they will be included in the zipfile.
"""
def copy_extensions(self, extensions):
build_exe.copy_extensions(self, extensions)
# lib2to3 files Grammar.txt and PatternGrammar.txt
# Define the data path where the files reside.
data_path = os.path.join(lib2to3.__path__[0],'*.txt')
        # Create the subdir where the grammar files are collected.
media = os.path.join('lib2to3')
full = os.path.join(self.collect_dir, media)
self.mkpath(full)
        # Copy the grammar files to the collection dir. Also add each copied file
# to the list of compiled files so it will be included in the zipfile.
for f in glob.glob(data_path):
name = os.path.basename(f)
self.copy_file(f, os.path.join(full, name))
self.compiled_files.append(os.path.join(media, name))
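# Illustrative usage (assumed, not part of this module): the collector is wired
# into a py2exe build by overriding the "py2exe" command class, e.g.
#   from distutils.core import setup
#   setup(console=["some_script.py"], cmdclass={"py2exe": MediaCollector})
# where "some_script.py" is a placeholder for the real entry point.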
| mit | 7,924,434,134,639,487,000 | 33.030303 | 76 | 0.674978 | false |
dbarbier/privot | python/test/t_DistFunc_beta.py | 1 | 1686 | #! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
# Beta related functions
# pBeta
p1Min = 0.2
p1Max = 5.0
n1 = 5
p2Min = 0.2
p2Max = 5.0
n2 = 5
xMin = 0.1
xMax = 0.9
nX = 5
for i1 in range(n1):
p1 = p1Min + (p1Max - p1Min) * i1 / (n1 - 1)
for i2 in range(n2):
p2 = p2Min + (p2Max - p2Min) * i2 / (n2 - 1)
for iX in range(nX):
x = xMin + (xMax - xMin) * iX / (nX - 1)
print "pBeta(", p1, ", ", p2, ", ", x, ")=%.6g" % DistFunc.pBeta(p1, p2, x), ", complementary=%.6g" % DistFunc.pBeta(p1, p2, x, True)
# qBeta
p1Min = 0.2
p1Max = 5.0
n1 = 5
p2Min = 0.2
p2Max = 5.0
n2 = 5
qMin = 0.1
qMax = 0.9
nQ = 5
for i1 in range(n1):
p1 = p1Min + (p1Max - p1Min) * i1 / (n1 - 1)
for i2 in range(n2):
p2 = p2Min + (p2Max - p2Min) * i2 / (n2 - 1)
for iQ in range(nQ):
q = qMin + (qMax - qMin) * iQ / (nQ - 1)
print "qBeta(", p1, ", ", p2, ", ", q, ")=%.6g" % DistFunc.qBeta(p1, p2, q), ", complementary=%.6g" % DistFunc.qBeta(p1, p2, q, True)
# rBeta
p1Min = 0.2
p1Max = 5.0
n1 = 5
p2Min = 0.2
p2Max = 5.0
n2 = 5
nR = 5
for i1 in range(n1):
p1 = p1Min + (p1Max - p1Min) * i1 / (n1 - 1)
for i2 in range(n2):
p2 = p2Min + (p2Max - p2Min) * i2 / (n2 - 1)
for iR in range(nR):
print "rBeta(", p1, ", ", p2, ")=%.6g" % DistFunc.rBeta(p1, p2)
except :
import sys
print "t_DistFunc_beta.py", sys.exc_type, sys.exc_value
| lgpl-3.0 | 495,374,394,030,962,700 | 26.639344 | 149 | 0.456109 | false |
ganeti-github-testing/ganeti-test-1 | lib/bootstrap.py | 1 | 44097 | #
#
# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions to bootstrap a new cluster.
"""
import os
import os.path
import re
import logging
import time
from ganeti.cmdlib import cluster
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils
from ganeti import runtime
from ganeti import vcluster
# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"
#: After how many seconds a daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0
def GenerateHmacKey(file_name):
"""Writes a new HMAC key.
@type file_name: str
@param file_name: Path to output file
"""
utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
backup=True)
# pylint: disable=R0913
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
new_confd_hmac_key, new_cds,
rapi_cert_pem=None, spice_cert_pem=None,
spice_cacert_pem=None, cds=None,
nodecert_file=pathutils.NODED_CERT_FILE,
rapicert_file=pathutils.RAPI_CERT_FILE,
spicecert_file=pathutils.SPICE_CERT_FILE,
spicecacert_file=pathutils.SPICE_CACERT_FILE,
hmackey_file=pathutils.CONFD_HMAC_KEY,
cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
"""Updates the cluster certificates, keys and secrets.
@type new_cluster_cert: bool
@param new_cluster_cert: Whether to generate a new cluster certificate
@type new_rapi_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
@type new_spice_cert: bool
@param new_spice_cert: Whether to generate a new SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
@param new_cds: Whether to generate a new cluster domain secret
@type rapi_cert_pem: string
@param rapi_cert_pem: New RAPI certificate in PEM format
@type spice_cert_pem: string
@param spice_cert_pem: New SPICE certificate in PEM format
@type spice_cacert_pem: string
@param spice_cacert_pem: Certificate of the CA that signed the SPICE
certificate, in PEM format
@type cds: string
@param cds: New cluster domain secret
@type nodecert_file: string
@param nodecert_file: optional override of the node cert file path
@type rapicert_file: string
@param rapicert_file: optional override of the rapi cert file path
@type spicecert_file: string
@param spicecert_file: optional override of the spice cert file path
@type spicecacert_file: string
@param spicecacert_file: optional override of the spice CA cert file path
@type hmackey_file: string
@param hmackey_file: optional override of the hmac key file path
"""
# pylint: disable=R0913
# noded SSL certificate
utils.GenerateNewSslCert(
new_cluster_cert, nodecert_file, 1,
"Generating new cluster certificate at %s" % nodecert_file)
# confd HMAC key
if new_confd_hmac_key or not os.path.exists(hmackey_file):
logging.debug("Writing new confd HMAC key to %s", hmackey_file)
GenerateHmacKey(hmackey_file)
if rapi_cert_pem:
# Assume rapi_pem contains a valid PEM-formatted certificate and key
logging.debug("Writing RAPI certificate at %s", rapicert_file)
utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
else:
utils.GenerateNewSslCert(
new_rapi_cert, rapicert_file, 1,
"Generating new RAPI certificate at %s" % rapicert_file)
# SPICE
spice_cert_exists = os.path.exists(spicecert_file)
spice_cacert_exists = os.path.exists(spicecacert_file)
if spice_cert_pem:
# spice_cert_pem implies also spice_cacert_pem
logging.debug("Writing SPICE certificate at %s", spicecert_file)
utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
elif new_spice_cert or not spice_cert_exists:
if spice_cert_exists:
utils.CreateBackup(spicecert_file)
if spice_cacert_exists:
utils.CreateBackup(spicecacert_file)
logging.debug("Generating new self-signed SPICE certificate at %s",
spicecert_file)
(_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)
# Self-signed certificate -> the public certificate is also the CA public
# certificate
logging.debug("Writing the public certificate to %s",
spicecert_file)
utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)
# Cluster domain secret
if cds:
logging.debug("Writing cluster domain secret to %s", cds_file)
utils.WriteFile(cds_file, data=cds, backup=True)
elif new_cds or not os.path.exists(cds_file):
logging.debug("Generating new cluster domain secret at %s", cds_file)
GenerateHmacKey(cds_file)
def _InitGanetiServerSetup(master_name):
"""Setup the necessary configuration for the initial node daemon.
This creates the nodepass file containing the shared password for
the cluster, generates the SSL certificate and starts the node daemon.
@type master_name: str
@param master_name: Name of the master node
"""
# Generate cluster secrets
GenerateClusterCrypto(True, False, False, False, False)
result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
if result.failed:
raise errors.OpExecError("Could not start the node daemon, command %s"
" had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
_WaitForNodeDaemon(master_name)
def _WaitForNodeDaemon(node_name):
"""Wait for node daemon to become responsive.
"""
def _CheckNodeDaemon():
# Pylint bug <http://www.logilab.org/ticket/35642>
# pylint: disable=E1101
result = rpc.BootstrapRunner().call_version([node_name])[node_name]
if result.fail_msg:
raise utils.RetryAgain()
try:
utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("Node daemon on %s didn't answer queries within"
" %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
def _WaitForMasterDaemon():
"""Wait for master daemon to become responsive.
"""
def _CheckMasterDaemon():
try:
cl = luxi.Client()
(cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
except Exception:
raise utils.RetryAgain()
logging.debug("Received cluster name %s from master", cluster_name)
try:
utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("Master daemon didn't answer queries within"
" %s seconds" % _DAEMON_READY_TIMEOUT)
def _WaitForSshDaemon(hostname, port):
"""Wait for SSH daemon to become responsive.
"""
family = ssconf.SimpleStore().GetPrimaryIPFamily()
hostip = netutils.GetHostname(name=hostname, family=family).ip
def _CheckSshDaemon():
if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
logging.debug("SSH daemon on %s:%s (IP address %s) has become"
" responsive", hostname, port, hostip)
else:
raise utils.RetryAgain()
try:
utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
" become responsive within %s seconds" %
(hostname, port, hostip, _DAEMON_READY_TIMEOUT))
def _InitFileStorageDir(file_storage_dir):
"""Initialize if needed the file storage.
@param file_storage_dir: the user-supplied value
@return: either empty string (if file storage was disabled at build
time) or the normalized path to the storage directory
"""
file_storage_dir = os.path.normpath(file_storage_dir)
if not os.path.isabs(file_storage_dir):
raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
" path" % file_storage_dir, errors.ECODE_INVAL)
if not os.path.exists(file_storage_dir):
try:
os.makedirs(file_storage_dir, 0750)
except OSError, err:
raise errors.OpPrereqError("Cannot create file storage directory"
" '%s': %s" % (file_storage_dir, err),
errors.ECODE_ENVIRON)
if not os.path.isdir(file_storage_dir):
raise errors.OpPrereqError("The file storage directory '%s' is not"
" a directory." % file_storage_dir,
errors.ECODE_ENVIRON)
return file_storage_dir
def _PrepareFileBasedStorage(
enabled_disk_templates, file_storage_dir,
default_dir, file_disk_template, _storage_path_acceptance_fn,
init_fn=_InitFileStorageDir, acceptance_fn=None):
"""Checks if a file-base storage type is enabled and inits the dir.
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of enabled disk templates
@type file_storage_dir: string
@param file_storage_dir: the file storage directory
@type default_dir: string
@param default_dir: default file storage directory when C{file_storage_dir}
is 'None'
@type file_disk_template: string
@param file_disk_template: a disk template whose storage type is 'ST_FILE',
'ST_SHARED_FILE' or 'ST_GLUSTER'
@type _storage_path_acceptance_fn: function
@param _storage_path_acceptance_fn: checks whether the given file-based
storage directory is acceptable
@see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details
@rtype: string
@returns: the name of the actual file storage directory
"""
assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
))
if file_storage_dir is None:
file_storage_dir = default_dir
if not acceptance_fn:
acceptance_fn = \
lambda path: filestorage.CheckFileStoragePathAcceptance(
path, exact_match_ok=True)
_storage_path_acceptance_fn(logging.warning, file_storage_dir,
enabled_disk_templates)
file_storage_enabled = file_disk_template in enabled_disk_templates
if file_storage_enabled:
try:
acceptance_fn(file_storage_dir)
except errors.FileStoragePathError as e:
raise errors.OpPrereqError(str(e))
result_file_storage_dir = init_fn(file_storage_dir)
else:
result_file_storage_dir = file_storage_dir
return result_file_storage_dir
def _PrepareFileStorage(
enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
acceptance_fn=None):
"""Checks if file storage is enabled and inits the dir.
@see: C{_PrepareFileBasedStorage}
"""
return _PrepareFileBasedStorage(
enabled_disk_templates, file_storage_dir,
pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
cluster.CheckFileStoragePathVsEnabledDiskTemplates,
init_fn=init_fn, acceptance_fn=acceptance_fn)
def _PrepareSharedFileStorage(
enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
acceptance_fn=None):
"""Checks if shared file storage is enabled and inits the dir.
@see: C{_PrepareFileBasedStorage}
"""
return _PrepareFileBasedStorage(
enabled_disk_templates, file_storage_dir,
pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates,
init_fn=init_fn, acceptance_fn=acceptance_fn)
def _PrepareGlusterStorage(
enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
acceptance_fn=None):
"""Checks if gluster storage is enabled and inits the dir.
@see: C{_PrepareFileBasedStorage}
"""
return _PrepareFileBasedStorage(
enabled_disk_templates, file_storage_dir,
pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER,
cluster.CheckGlusterStoragePathVsEnabledDiskTemplates,
init_fn=init_fn, acceptance_fn=acceptance_fn)
def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
"""Checks the sanity of the enabled disk templates.
"""
if not enabled_disk_templates:
raise errors.OpPrereqError("Enabled disk templates list must contain at"
" least one member", errors.ECODE_INVAL)
invalid_disk_templates = \
set(enabled_disk_templates) - constants.DISK_TEMPLATES
if invalid_disk_templates:
raise errors.OpPrereqError("Enabled disk templates list contains invalid"
" entries: %s" % invalid_disk_templates,
errors.ECODE_INVAL)
def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
"""Restricts the ipolicy's disk templates to the enabled ones.
This function clears the ipolicy's list of allowed disk templates from the
ones that are not enabled by the cluster.
@type ipolicy: dict
@param ipolicy: the instance policy
@type enabled_disk_templates: list of string
@param enabled_disk_templates: the list of cluster-wide enabled disk
templates
"""
assert constants.IPOLICY_DTS in ipolicy
allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
restricted_disk_templates = list(set(allowed_disk_templates)
.intersection(set(enabled_disk_templates)))
ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
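# Illustrative example: with ipolicy[constants.IPOLICY_DTS] == ["drbd", "plain",
# "file"] and enabled_disk_templates == ["plain", "file"], the allowed list is
# narrowed in place to the intersection ["plain", "file"] (set-based, so the
# resulting order is not guaranteed).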
def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
"""Checks the DRBD usermode helper.
@type drbd_helper: string
@param drbd_helper: name of the DRBD usermode helper that the system should
use
"""
if not drbd_enabled:
return
if drbd_helper is not None:
try:
curr_helper = drbd.DRBD8.GetUsermodeHelper()
except errors.BlockDeviceError, err:
raise errors.OpPrereqError("Error while checking drbd helper"
" (disable drbd with --enabled-disk-templates"
" if you are not using drbd): %s" % str(err),
errors.ECODE_ENVIRON)
if drbd_helper != curr_helper:
raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
" is the current helper" % (drbd_helper,
curr_helper),
errors.ECODE_INVAL)
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
master_netmask, master_netdev, file_storage_dir,
shared_file_storage_dir, gluster_storage_dir,
candidate_pool_size, secondary_ip=None,
vg_name=None, beparams=None, nicparams=None, ndparams=None,
hvparams=None, diskparams=None, enabled_hypervisors=None,
modify_etc_hosts=True, modify_ssh_setup=True,
maintain_node_health=False, drbd_helper=None, uid_pool=None,
default_iallocator=None, default_iallocator_params=None,
primary_ip_version=None, ipolicy=None,
prealloc_wipe_disks=False, use_external_mip_script=False,
hv_state=None, disk_state=None, enabled_disk_templates=None,
install_image=None, zeroing_image=None, compression_tools=None,
enabled_user_shutdown=False):
"""Initialise the cluster.
@type candidate_pool_size: int
@param candidate_pool_size: master candidate pool size
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of disk_templates to be used in this
cluster
@type enabled_user_shutdown: bool
@param enabled_user_shutdown: whether user shutdown is enabled cluster
wide
"""
# TODO: complete the docstring
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised",
errors.ECODE_STATE)
data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
for ddir in [queue_dir, data_dir, archive_dir]:
if os.path.isdir(ddir):
for entry in os.listdir(ddir):
if not os.path.isdir(os.path.join(ddir, entry)):
raise errors.OpPrereqError(
"%s contains non-directory enries like %s. Remove left-overs of an"
" old cluster before initialising a new one" % (ddir, entry),
errors.ECODE_STATE)
if not enabled_hypervisors:
raise errors.OpPrereqError("Enabled hypervisors list must contain at"
" least one member", errors.ECODE_INVAL)
invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
if invalid_hvs:
raise errors.OpPrereqError("Enabled hypervisors contains invalid"
" entries: %s" % invalid_hvs,
errors.ECODE_INVAL)
_InitCheckEnabledDiskTemplates(enabled_disk_templates)
try:
ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
except errors.ProgrammerError:
raise errors.OpPrereqError("Invalid primary ip version: %d." %
primary_ip_version, errors.ECODE_INVAL)
hostname = netutils.GetHostname(family=ipcls.family)
if not ipcls.IsValid(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
" address." % (hostname.ip, primary_ip_version),
errors.ECODE_INVAL)
if ipcls.IsLoopback(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
" address. Please fix DNS or %s." %
(hostname.ip, pathutils.ETC_HOSTS),
errors.ECODE_ENVIRON)
if not ipcls.Own(hostname.ip):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host" %
hostname.ip, errors.ECODE_ENVIRON)
clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)
if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
raise errors.OpPrereqError("Cluster IP already active",
errors.ECODE_NOTUNIQUE)
if not secondary_ip:
if primary_ip_version == constants.IP6_VERSION:
raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
" IPv4 address must be given as secondary",
errors.ECODE_INVAL)
secondary_ip = hostname.ip
if not netutils.IP4Address.IsValid(secondary_ip):
raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
" IPv4 address." % secondary_ip,
errors.ECODE_INVAL)
if not netutils.IP4Address.Own(secondary_ip):
raise errors.OpPrereqError("You gave %s as secondary IP,"
" but it does not belong to this host." %
secondary_ip, errors.ECODE_ENVIRON)
if master_netmask is not None:
if not ipcls.ValidateNetmask(master_netmask):
raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
(master_netmask, primary_ip_version),
errors.ECODE_INVAL)
else:
master_netmask = ipcls.iplen
if vg_name:
# Check if volume group is valid
vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
constants.MIN_VG_SIZE)
if vgstatus:
raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)
drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
_InitCheckDrbdHelper(drbd_helper, drbd_enabled)
logging.debug("Stopping daemons (if any are running)")
result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
if result.failed:
raise errors.OpExecError("Could not stop daemons, command %s"
" had exitcode %s and error '%s'" %
(result.cmd, result.exit_code, result.output))
file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
file_storage_dir)
shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
shared_file_storage_dir)
gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
gluster_storage_dir)
if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
errors.ECODE_INVAL)
if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
# Do not do this check if mode=openvswitch, since the openvswitch is not
# created yet
result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
if result.failed:
raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
(master_netdev,
result.output.strip()), errors.ECODE_INVAL)
dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
utils.EnsureDirs(dirs)
objects.UpgradeBeParams(beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
objects.NIC.CheckParameterSyntax(nicparams)
full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
_RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
else:
ndparams = dict(constants.NDC_DEFAULTS)
# This is ugly, as we modify the dict itself
# FIXME: Make utils.ForceDictType pure functional or write a wrapper
# around it
if hv_state:
for hvname, hvs_data in hv_state.items():
utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
else:
hv_state = dict((hvname, constants.HVST_DEFAULTS)
for hvname in enabled_hypervisors)
# FIXME: disk_state has no default values yet
if disk_state:
for storage, ds_data in disk_state.items():
if storage not in constants.DS_VALID_TYPES:
raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
storage, errors.ECODE_INVAL)
for ds_name, state in ds_data.items():
utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)
# hvparams is a mapping of hypervisor->hvparams dict
for hv_name, hv_params in hvparams.iteritems():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class = hypervisor.GetHypervisor(hv_name)
hv_class.CheckParameterSyntax(hv_params)
# diskparams is a mapping of disk-template->diskparams dict
for template, dt_params in diskparams.items():
param_keys = set(dt_params.keys())
default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
if not (param_keys <= default_param_keys):
unknown_params = param_keys - default_param_keys
raise errors.OpPrereqError("Invalid parameters for disk template %s:"
" %s" % (template,
utils.CommaJoin(unknown_params)),
errors.ECODE_INVAL)
utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
if template == constants.DT_DRBD8 and vg_name is not None:
# The default METAVG value is equal to the VG name set at init time,
# if provided
dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name
try:
utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
except errors.OpPrereqError, err:
raise errors.OpPrereqError("While verify diskparam options: %s" % err,
errors.ECODE_INVAL)
# set up ssh config and /etc/hosts
rsa_sshkey = ""
dsa_sshkey = ""
if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
rsa_sshkey = sshline.split(" ")[1]
if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
dsa_sshkey = sshline.split(" ")[1]
if not rsa_sshkey and not dsa_sshkey:
raise errors.OpPrereqError("Failed to find SSH public keys",
errors.ECODE_ENVIRON)
if modify_etc_hosts:
utils.AddHostToEtcHosts(hostname.name, hostname.ip)
if modify_ssh_setup:
ssh.InitSSHSetup()
if default_iallocator is not None:
alloc_script = utils.FindFile(default_iallocator,
constants.IALLOCATOR_SEARCH_PATH,
os.path.isfile)
if alloc_script is None:
raise errors.OpPrereqError("Invalid default iallocator script '%s'"
" specified" % default_iallocator,
errors.ECODE_INVAL)
else:
# default to htools
if utils.FindFile(constants.IALLOC_HAIL,
constants.IALLOCATOR_SEARCH_PATH,
os.path.isfile):
default_iallocator = constants.IALLOC_HAIL
# check if we have all the users we need
try:
runtime.GetEnts()
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Required system user/group missing: %s" %
err, errors.ECODE_ENVIRON)
candidate_certs = {}
now = time.time()
if compression_tools is not None:
cluster.CheckCompressionTools(compression_tools)
initial_dc_config = dict(active=True,
interval=int(constants.MOND_TIME_INTERVAL * 1e6))
data_collectors = dict(
(name, initial_dc_config.copy())
for name in constants.DATA_COLLECTOR_NAMES)
# init of cluster config file
cluster_config = objects.Cluster(
serial_no=1,
rsahostkeypub=rsa_sshkey,
dsahostkeypub=dsa_sshkey,
highest_used_port=(constants.FIRST_DRBD_PORT - 1),
mac_prefix=mac_prefix,
volume_group_name=vg_name,
tcpudp_port_pool=set(),
master_ip=clustername.ip,
master_netmask=master_netmask,
master_netdev=master_netdev,
cluster_name=clustername.name,
file_storage_dir=file_storage_dir,
shared_file_storage_dir=shared_file_storage_dir,
gluster_storage_dir=gluster_storage_dir,
enabled_hypervisors=enabled_hypervisors,
beparams={constants.PP_DEFAULT: beparams},
nicparams={constants.PP_DEFAULT: nicparams},
ndparams=ndparams,
hvparams=hvparams,
diskparams=diskparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
uid_pool=uid_pool,
ctime=now,
mtime=now,
maintain_node_health=maintain_node_health,
data_collectors=data_collectors,
drbd_usermode_helper=drbd_helper,
default_iallocator=default_iallocator,
default_iallocator_params=default_iallocator_params,
primary_ip_family=ipcls.family,
prealloc_wipe_disks=prealloc_wipe_disks,
use_external_mip_script=use_external_mip_script,
ipolicy=full_ipolicy,
hv_state_static=hv_state,
disk_state_static=disk_state,
enabled_disk_templates=enabled_disk_templates,
candidate_certs=candidate_certs,
osparams={},
osparams_private_cluster={},
install_image=install_image,
zeroing_image=zeroing_image,
compression_tools=compression_tools,
enabled_user_shutdown=enabled_user_shutdown,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
secondary_ip=secondary_ip,
serial_no=1,
master_candidate=True,
offline=False, drained=False,
ctime=now, mtime=now,
)
InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
cfg = config.ConfigWriter(offline=True)
ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
cfg.Update(cfg.GetClusterInfo(), logging.error)
ssconf.WriteSsconfFiles(cfg.GetSsconfValues())
master_uuid = cfg.GetMasterNode()
if modify_ssh_setup:
ssh.InitPubKeyFile(master_uuid)
# set up the inter-node password and certificate
_InitGanetiServerSetup(hostname.name)
logging.debug("Starting daemons")
result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
if result.failed:
raise errors.OpExecError("Could not start daemons, command %s"
" had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
_WaitForMasterDaemon()
def InitConfig(version, cluster_config, master_node_config,
cfg_file=pathutils.CLUSTER_CONF_FILE):
"""Create the initial cluster configuration.
It will contain the current node, which will also be the master
node, and no instances.
@type version: int
@param version: configuration version
@type cluster_config: L{objects.Cluster}
@param cluster_config: cluster configuration
@type master_node_config: L{objects.Node}
@param master_node_config: master node configuration
@type cfg_file: string
@param cfg_file: configuration file path
"""
uuid_generator = config.TemporaryReservationManager()
cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
_INITCONF_ECID)
master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
_INITCONF_ECID)
cluster_config.master_node = master_node_config.uuid
nodes = {
master_node_config.uuid: master_node_config,
}
default_nodegroup = objects.NodeGroup(
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.uuid],
diskparams={},
)
nodegroups = {
default_nodegroup.uuid: default_nodegroup,
}
now = time.time()
config_data = objects.ConfigData(version=version,
cluster=cluster_config,
nodegroups=nodegroups,
nodes=nodes,
instances={},
networks={},
disks={},
filters={},
serial_no=1,
ctime=now, mtime=now)
utils.WriteFile(cfg_file,
data=serializer.Dump(config_data.ToDict()),
mode=0600)
def FinalizeClusterDestroy(master_uuid):
"""Execute the last steps of cluster destroy
This function shuts down all the daemons, completing the destroy
begun in cmdlib.LUDestroyOpcode.
"""
livelock = utils.livelock.LiveLock("bootstrap_destroy")
cfg = config.GetConfig(None, livelock)
modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
runner = rpc.BootstrapRunner()
master_name = cfg.GetNodeName(master_uuid)
master_params = cfg.GetMasterNetworkParameters()
master_params.uuid = master_uuid
ems = cfg.GetUseExternalMipScript()
result = runner.call_node_deactivate_master_ip(master_name, master_params,
ems)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master IP: %s", msg)
result = runner.call_node_stop_master(master_name)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s", msg)
result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
" the node: %s", msg)
def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
to the remote node, copy the needed files, and start ganeti-noded,
allowing the master to do the rest via normal rpc calls.
@param cluster_name: the cluster name
@param node: the name of the new node
@param ssh_port: the SSH port of the new node
"""
data = {
constants.NDS_CLUSTER_NAME: cluster_name,
constants.NDS_NODE_DAEMON_CERTIFICATE:
utils.ReadFile(pathutils.NODED_CERT_FILE),
constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
constants.NDS_START_NODE_DAEMON: True,
}
ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
ssh_port, data,
debug=opts.debug, verbose=opts.verbose,
use_cluster_key=True, ask_key=opts.ssh_key_check,
strict_host_check=opts.ssh_key_check,
ensure_version=True)
_WaitForSshDaemon(node, ssh_port)
_WaitForNodeDaemon(node)
def MasterFailover(no_voting=False):
"""Failover the master node.
This checks that we are not already the master, and will cause the
  current master to cease being master, and this node to become the
  new master.
@type no_voting: boolean
@param no_voting: force the operation without remote nodes agreement
(dangerous)
"""
sstore = ssconf.SimpleStore()
old_master, new_master = ssconf.GetMasterAndMyself(sstore)
node_names = sstore.GetNodeList()
mc_list = sstore.GetMasterCandidates()
if old_master == new_master:
raise errors.OpPrereqError("This commands must be run on the node"
" where you want the new master to be."
" %s is already the master" %
old_master, errors.ECODE_INVAL)
if new_master not in mc_list:
mc_no_master = [name for name in mc_list if name != old_master]
raise errors.OpPrereqError("This node is not among the nodes marked"
" as master candidates. Only these nodes"
" can become masters. Current list of"
" master candidates is:\n"
"%s" % ("\n".join(mc_no_master)),
errors.ECODE_STATE)
if not no_voting:
vote_list = GatherMasterVotes(node_names)
if vote_list:
voted_master = vote_list[0][0]
if voted_master is None:
raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
" not respond.", errors.ECODE_ENVIRON)
elif voted_master != old_master:
raise errors.OpPrereqError("I have a wrong configuration, I believe"
" the master is %s but the other nodes"
" voted %s. Please resync the configuration"
" of this node." %
(old_master, voted_master),
errors.ECODE_STATE)
# end checks
rcode = 0
logging.info("Setting master to %s, old master: %s", new_master, old_master)
try:
# Forcefully start WConfd so that we can access the configuration
result = utils.RunCmd([pathutils.DAEMON_UTIL,
"start", constants.WCONFD, "--force-node",
"--no-voting", "--yes-do-it"])
if result.failed:
raise errors.OpPrereqError("Could not start the configuration daemon,"
" command %s had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output),
errors.ECODE_NOENT)
# instantiate a real config writer, as we now know we have the
# configuration data
livelock = utils.livelock.LiveLock("bootstrap_failover")
cfg = config.GetConfig(None, livelock, accept_foreign=True)
old_master_node = cfg.GetNodeInfoByName(old_master)
if old_master_node is None:
raise errors.OpPrereqError("Could not find old master node '%s' in"
" cluster configuration." % old_master,
errors.ECODE_NOENT)
cluster_info = cfg.GetClusterInfo()
new_master_node = cfg.GetNodeInfoByName(new_master)
if new_master_node is None:
raise errors.OpPrereqError("Could not find new master node '%s' in"
" cluster configuration." % new_master,
errors.ECODE_NOENT)
cluster_info.master_node = new_master_node.uuid
# this will also regenerate the ssconf files, since we updated the
# cluster info
cfg.Update(cluster_info, logging.error)
    # if cfg.Update worked, then it means the old master daemon won't be
    # able to write its own config file anymore (we rely on locking in
    # both backend.UploadFile() and ConfigWriter._Write()); hence the next
    # step is to kill the old master
logging.info("Stopping the master daemon on node %s", old_master)
runner = rpc.BootstrapRunner()
master_params = cfg.GetMasterNetworkParameters()
master_params.uuid = old_master_node.uuid
ems = cfg.GetUseExternalMipScript()
result = runner.call_node_deactivate_master_ip(old_master,
master_params, ems)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master IP: %s", msg)
result = runner.call_node_stop_master(old_master)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
except errors.ConfigurationError, err:
logging.error("Error while trying to set the new master: %s",
str(err))
return 1
finally:
# stop WConfd again:
result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
if result.failed:
logging.error("Could not stop the configuration daemon,"
" command %s had exitcode %s and error %s",
result.cmd, result.exit_code, result.output)
logging.info("Checking master IP non-reachability...")
master_ip = sstore.GetMasterIP()
total_timeout = 30
# Here we have a phase where no master should be running
def _check_ip():
if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
raise utils.RetryAgain()
try:
utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
except utils.RetryTimeout:
logging.warning("The master IP is still reachable after %s seconds,"
" continuing but activating the master on the current"
" node will probably fail", total_timeout)
if jstore.CheckDrainFlag():
logging.info("Undraining job queue")
jstore.SetDrainFlag(False)
logging.info("Starting the master daemons on the new master")
result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
no_voting)
msg = result.fail_msg
if msg:
logging.error("Could not start the master role on the new master"
" %s, please check: %s", new_master, msg)
rcode = 1
logging.info("Master failed over from %s to %s", old_master, new_master)
return rcode
def GetMaster():
"""Returns the current master node.
This is a separate function in bootstrap since it's needed by
gnt-cluster, and instead of importing directly ssconf, it's better
to abstract it in bootstrap, where we do use ssconf in other
functions too.
"""
sstore = ssconf.SimpleStore()
old_master, _ = ssconf.GetMasterAndMyself(sstore)
return old_master
def GatherMasterVotes(node_names):
"""Check the agreement on who is the master.
This function will return a list of (node, number of votes), ordered
by the number of votes. Errors will be denoted by the key 'None'.
Note that the sum of votes is the number of nodes this machine
knows, whereas the number of entries in the list could be different
(if some nodes vote for another master).
  We remove ourselves from the list since we know that (bugs aside),
  because we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.
@type node_names: list
@param node_names: the list of nodes to query for master info; the current
node will be removed if it is in the list
@rtype: list
@return: list of (node, votes)
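  Illustrative example (hypothetical node names, not from the original
  code): if three nodes are queried, two report "node1" as master and one
  fails to respond, the returned list would be [("node1", 2), (None, 1)].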
"""
myself = netutils.Hostname.GetSysName()
try:
node_names.remove(myself)
except ValueError:
pass
if not node_names:
    # no nodes left (possibly after removing myself)
return []
results = rpc.BootstrapRunner().call_master_node_name(node_names)
if not isinstance(results, dict):
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
return [(None, len(node_names))]
votes = {}
for node_name in results:
nres = results[node_name]
msg = nres.fail_msg
if msg:
logging.warning("Error contacting node %s: %s", node_name, msg)
node = None
else:
node = nres.payload
if node not in votes:
votes[node] = 1
else:
votes[node] += 1
vote_list = [v for v in votes.items()]
# sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes are not responding and the other
  # half all vote for the same master
vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
return vote_list
| bsd-2-clause | -1,674,988,445,099,428,400 | 37.580052 | 80 | 0.651722 | false |
badele/home-assistant | homeassistant/components/light/tellstick.py | 1 | 4248 | """
homeassistant.components.light.tellstick
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Tellstick lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tellstick/
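Example configuration.yaml entry (illustrative sketch only; the linked
documentation above is authoritative for the exact syntax and options):
    light:
      platform: tellstick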
"""
from homeassistant.components.light import Light, ATTR_BRIGHTNESS
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP,
ATTR_FRIENDLY_NAME)
REQUIREMENTS = ['tellcore-py==1.1.2']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Find and return Tellstick lights. """
import tellcore.telldus as telldus
from tellcore.library import DirectCallbackDispatcher
import tellcore.constants as tellcore_constants
core = telldus.TelldusCore(callback_dispatcher=DirectCallbackDispatcher())
switches_and_lights = core.devices()
lights = []
for switch in switches_and_lights:
if switch.methods(tellcore_constants.TELLSTICK_DIM):
lights.append(TellstickLight(switch))
def _device_event_callback(id_, method, data, cid):
""" Called from the TelldusCore library to update one device """
for light_device in lights:
if light_device.tellstick_device.id == id_:
# Execute the update in another thread
light_device.update_ha_state(True)
break
callback_id = core.register_device_event(_device_event_callback)
def unload_telldus_lib(event):
""" Un-register the callback bindings """
if callback_id is not None:
core.unregister_callback(callback_id)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, unload_telldus_lib)
add_devices_callback(lights)
class TellstickLight(Light):
""" Represents a Tellstick light. """
def __init__(self, tellstick_device):
import tellcore.constants as tellcore_constants
self.tellstick_device = tellstick_device
self.state_attr = {ATTR_FRIENDLY_NAME: tellstick_device.name}
self._brightness = 0
self.last_sent_command_mask = (tellcore_constants.TELLSTICK_TURNON |
tellcore_constants.TELLSTICK_TURNOFF |
tellcore_constants.TELLSTICK_DIM |
tellcore_constants.TELLSTICK_UP |
tellcore_constants.TELLSTICK_DOWN)
@property
def name(self):
""" Returns the name of the switch if any. """
return self.tellstick_device.name
@property
def is_on(self):
""" True if switch is on. """
return self._brightness > 0
@property
def brightness(self):
""" Brightness of this light between 0..255. """
return self._brightness
def turn_off(self, **kwargs):
""" Turns the switch off. """
self.tellstick_device.turn_off()
self._brightness = 0
self.update_ha_state()
def turn_on(self, **kwargs):
""" Turns the switch on. """
brightness = kwargs.get(ATTR_BRIGHTNESS)
if brightness is None:
self._brightness = 255
else:
self._brightness = brightness
self.tellstick_device.dim(self._brightness)
self.update_ha_state()
def update(self):
""" Update state of the light. """
import tellcore.constants as tellcore_constants
last_command = self.tellstick_device.last_sent_command(
self.last_sent_command_mask)
if last_command == tellcore_constants.TELLSTICK_TURNON:
self._brightness = 255
elif last_command == tellcore_constants.TELLSTICK_TURNOFF:
self._brightness = 0
elif (last_command == tellcore_constants.TELLSTICK_DIM or
last_command == tellcore_constants.TELLSTICK_UP or
last_command == tellcore_constants.TELLSTICK_DOWN):
last_sent_value = self.tellstick_device.last_sent_value()
if last_sent_value is not None:
self._brightness = last_sent_value
@property
def should_poll(self):
""" Tells Home Assistant not to poll this entity. """
return False
| mit | 1,873,232,507,389,760,800 | 33.819672 | 78 | 0.621469 | false |
juliantaylor/scipy | scipy/interpolate/fitpack2.py | 1 | 55084 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied:
sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
If None (default), s=len(w) which should be a good value if 1/w[i] is
an estimate of the standard deviation of y[i]. If 0, spline will
interpolate through all data points.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = linspace(-3, 3, 100)
>>> y = exp(-x**2) + randn(100)/10
>>> s = UnivariateSpline(x, y, s=1)
>>> xs = linspace(-3, 3, 1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y.
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
s - positive smoothing factor defined for
estimation condition:
sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
Default s=len(w) which should be a good value
if 1/w[i] is an estimate of the standard
deviation of y[i].
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
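        Examples
        --------
        A minimal sketch with illustrative data:
        >>> x = np.linspace(-3., 3., 50)
        >>> spl = UnivariateSpline(x, np.exp(-x**2))
        >>> spl.set_smoothing_factor(0.5)  # refit with s=0.5, reusing the current knots
        >>> ys = spl(x)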
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0):
""" Evaluate spline (or its nu-th derivative) at positions x.
Note: x can be unordered but the evaluation is more efficient
if x is (partially) ordered.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
return fitpack.splev(x, self._eval_args, der=nu)
def get_knots(self):
""" Return positions of (boundary and interior) knots of the spline.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline
approximation: ``sum((w[i] * (y[i]-s(x[i])))**2, axis=0)``.
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x."""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
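        Examples
        --------
        A minimal sketch with illustrative data:
        >>> x = np.linspace(0.5, 10., 50)
        >>> spl = UnivariateSpline(x, np.sin(x), s=0)
        >>> zeros = spl.roots()  # approximately [pi, 2*pi, 3*pi]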
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
.. versionadded:: 0.13.0
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
.. versionadded:: 0.13.0
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), bbox=[x[0],x[-1]].
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = linspace(-3, 3, 100)
>>> y = exp(-x**2) + randn(100)/10
>>> s = InterpolatedUnivariateSpline(x, y)
>>> xs = linspace(-3, 3, 1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order
and bbox[0]<t[0]<...<t[-1]<bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), bbox=[x[0],x[-1]].
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> from scipy.interpolate import LSQUnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = linspace(-3,3,100)
>>> y = exp(-x**2) + randn(100)/10
>>> t = [-1,0,1]
>>> s = LSQUnivariateSpline(x,y,t)
>>> xs = linspace(-3,3,1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
with knots [-3,-1,0,1,3]
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
t - 1-d sequence of the positions of user-defined
interior knots of the spline (t must be in strictly
ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1),t,[xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0,axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe)
self._data = data[:-3] + (None,None,data[-1])
self._reset_class()
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array-like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
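        Examples
        --------
        A minimal sketch (illustrative data) showing the difference between
        grid and pointwise evaluation:
        >>> from scipy.interpolate import RectBivariateSpline
        >>> x = y = np.arange(0., 6., 1.)
        >>> z = x[:, None] * y[None, :]
        >>> spl = RectBivariateSpline(x, y, z)
        >>> zgrid = spl([1., 2.], [1., 2., 3.])          # grid=True: shape (2, 3)
        >>> zpts = spl([1., 2.], [1., 2.], grid=False)   # pointwise: shape (2,)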
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
        bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array-like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
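        Examples
        --------
        A minimal sketch (illustrative data); the exact double integral of
        x*y over [0, 2] x [0, 2] is 4, and the returned value should be
        close to it:
        >>> x = y = np.arange(0., 5., 1.)
        >>> z = x[:, None] * y[None, :]
        >>> spl = RectBivariateSpline(x, y, z)
        >>> val = spl.integral(0., 2., 0., 2.)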
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    LSQBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
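    Examples
    --------
    A rough sketch with illustrative scattered data:
    >>> x = np.random.uniform(0., 3., 100)
    >>> y = np.random.uniform(0., 3., 100)
    >>> z = x * y
    >>> spl = SmoothBivariateSpline(x, y, z)
    >>> znew = spl(np.linspace(0., 3., 10), np.linspace(0., 3., 10))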
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was to small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
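    Examples
    --------
    A rough sketch with illustrative scattered data and hand-picked
    interior knots:
    >>> x = np.random.uniform(0., 4., 200)
    >>> y = np.random.uniform(0., 4., 200)
    >>> z = np.exp(-((x - 2.)**2 + (y - 2.)**2))
    >>> tx = ty = [1., 2., 3.]  # interior knots strictly inside the data range
    >>> spl = LSQBivariateSpline(x, y, z, tx, ty)
    >>> znew = spl(np.linspace(0., 4., 25), np.linspace(0., 4., 25))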
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
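    Examples
    --------
    A minimal sketch with illustrative gridded data:
    >>> x = np.arange(0., 5., 1.)
    >>> y = np.arange(0., 6., 1.)
    >>> z = np.cos(x[:, None]) * np.sin(y[None, :])  # shape (x.size, y.size)
    >>> spl = RectBivariateSpline(x, y, z)
    >>> znew = spl(np.linspace(0., 4., 20), np.linspace(0., 5., 30))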
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if not ier in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    SmoothSphereBivariateSpline :
        to create a BivariateSpline through the given points
    LSQSphereBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array-like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array-like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                   data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        if isinstance(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        if isinstance(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
    ...               np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
    Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in range(len(s)):
    ...     lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
    ...     data_interp = lut.ev(new_lats.ravel(),
    ...                          new_lons.ravel()).reshape((360, 180)).T
    ...     ax = fig2.add_subplot(2, 2, ii+1)
    ...     ax.imshow(data_interp, interpolation='nearest')
    ...     ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if not ier in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| bsd-3-clause | -6,407,795,155,517,847,000 | 35.845485 | 81 | 0.563176 | false |
apanda/modeling | tests/examples/lsrr_example.py | 1 | 2652 | import components
def LSRRExample ():
ctx = components.Context(['e0' , 'e1', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], \
['ip_e0', 'ip_e1', 'ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_e', 'ip_f', 'ip_g', 'ip_h'])
net = components.Network(ctx)
# Register something that tells us about LSR
ip_lsr_field = components.LSRROption ('ip_lsr', ctx)
ctx.AddPolicy (ip_lsr_field)
e0 = components.EndHost(ctx.e0, net, ctx)
e1 = components.EndHost(ctx.e1, net, ctx)
# Yeah I can put this in a list etc., doing it this way mostly for no good reason.
a = components.LSRRRouter (ctx.a, ip_lsr_field, net, ctx)
b = components.LSRRRouter (ctx.b, ip_lsr_field, net, ctx)
c = components.LSRRRouter (ctx.c, ip_lsr_field, net, ctx)
d = components.LSRRRouter (ctx.d, ip_lsr_field, net, ctx)
e = components.LSRRRouter (ctx.e, ip_lsr_field, net, ctx)
f = components.LSRRRouter (ctx.f, ip_lsr_field, net, ctx)
g = components.LSRRRouter (ctx.g, ip_lsr_field, net, ctx)
h = components.LSRRRouter (ctx.h, ip_lsr_field, net, ctx)
net.setAddressMappings([(e0, ctx.ip_e0), \
(e1, ctx.ip_e1), \
(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(e, ctx.ip_e), \
(f, ctx.ip_f), \
(g, ctx.ip_g), \
(h, ctx.ip_h)])
routing_table = [(ctx.ip_e0, e0), \
(ctx.ip_e1, e1), \
(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d), \
(ctx.ip_e, e), \
(ctx.ip_f, f), \
(ctx.ip_g, g), \
(ctx.ip_h, h)]
nodes = [e0, e1, a, b, c, d, e, f, g, h]
node_dict = {'a': a, \
'b': b, \
'c': c, \
'd': d, \
'e': e, \
'f': f, \
'g': g, \
'h': h}
for n in nodes:
net.RoutingTable(n, routing_table)
net.Attach(*nodes)
class LSRRReturn (object):
def __init__ (self, net, ctx, e0, e1, **nodes):
self.net = net
self.ctx = ctx
self.e0 = e0
self.e1 = e1
for k, v in nodes.iteritems():
setattr(self, k, v)
self.check = components.PropertyChecker (ctx, net)
return LSRRReturn (net, ctx, e0, e1, **node_dict)
| bsd-3-clause | -5,174,499,600,245,879,000 | 41.774194 | 111 | 0.424962 | false |
hjanime/VisTrails | vistrails/db/versions/v1_0_2/domain/vistrail.py | 1 | 9546 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
import hashlib
from auto_gen import DBVistrail as _DBVistrail
from auto_gen import DBAdd, DBChange, DBDelete, DBAbstraction, DBGroup, \
DBModule, DBAnnotation, DBActionAnnotation
from id_scope import IdScope
class DBVistrail(_DBVistrail):
def __init__(self, *args, **kwargs):
_DBVistrail.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAdd.vtType: 'operation',
DBChange.vtType: 'operation',
DBDelete.vtType: 'operation',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType,
DBActionAnnotation.vtType: \
DBAnnotation.vtType})
self.idScope.setBeginId('action', 1)
self.db_objects = {}
# keep a reference to the current logging information here
self.db_log_filename = None
self.log = None
def __copy__(self):
return DBVistrail.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBVistrail.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBVistrail
cp.idScope = copy.copy(self.idScope)
cp.db_objects = copy.copy(self.db_objects)
cp.db_log_filename = self.db_log_filename
if self.log is not None:
cp.log = copy.copy(self.log)
else:
cp.log = None
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBVistrail()
new_obj = _DBVistrail.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
if hasattr(old_obj, 'db_log_filename'):
new_obj.db_log_filename = old_obj.db_log_filename
if hasattr(old_obj, 'log'):
new_obj.log = old_obj.log
return new_obj
def update_id_scope(self):
def getOldObjId(operation):
if operation.vtType == 'change':
return operation.db_oldObjId
return operation.db_objectId
def getNewObjId(operation):
if operation.vtType == 'change':
return operation.db_newObjId
return operation.db_objectId
for action in self.db_actions:
self.idScope.updateBeginId('action', action.db_id+1)
if action.db_session is not None:
self.idScope.updateBeginId('session', action.db_session + 1)
for operation in action.db_operations:
self.idScope.updateBeginId('operation', operation.db_id+1)
if operation.vtType == 'add' or operation.vtType == 'change':
# update ids of data
self.idScope.updateBeginId(operation.db_what,
getNewObjId(operation)+1)
if operation.db_data is None:
if operation.vtType == 'change':
operation.db_objectId = operation.db_oldObjId
self.db_add_object(operation.db_data)
for annotation in action.db_annotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
for annotation in self.db_annotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
for annotation in self.db_actionAnnotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
def db_add_object(self, obj):
self.db_objects[(obj.vtType, obj.db_id)] = obj
def db_get_object(self, type, id):
return self.db_objects.get((type, id), None)
def db_update_object(self, obj, **kwargs):
# want to swap out old object with a new version
# need this for updating aliases...
# hack it using setattr...
real_obj = self.db_objects[(obj.vtType, obj.db_id)]
for (k, v) in kwargs.iteritems():
if hasattr(real_obj, k):
setattr(real_obj, k, v)
def update_checkout_version(self, app=''):
checkout_key = "__checkout_version_"
action_key = checkout_key + app
annotation_key = action_key + '_annotationhash'
action_annotation_key = action_key + '_actionannotationhash'
# delete previous checkout annotations
deletekeys = [action_key,annotation_key,action_annotation_key]
for key in deletekeys:
while self.db_has_annotation_with_key(key):
a = self.db_get_annotation_by_key(key)
self.db_delete_annotation(a)
# annotation hash - requires annotations to be clean
value = self.hashAnnotations()
if self.db_has_annotation_with_key(annotation_key):
annotation = self.db_get_annotation_by_key(annotation_key)
annotation.db_value = value
else:
annotation=DBAnnotation(self.idScope.getNewId(DBAnnotation.vtType),
annotation_key, value)
self.db_add_annotation(annotation)
# action annotation hash
value = self.hashActionAnnotations()
if self.db_has_annotation_with_key(action_annotation_key):
annotation = self.db_get_annotation_by_key(action_annotation_key)
annotation.db_value = value
else:
annotation=DBAnnotation(self.idScope.getNewId(DBAnnotation.vtType),
action_annotation_key, value)
self.db_add_annotation(annotation)
# last action id hash
if len(self.db_actions) == 0:
value = 0
else:
value = max(v.db_id for v in self.db_actions)
if self.db_has_annotation_with_key(action_key):
annotation = self.db_get_annotation_by_key(action_key)
annotation.db_value = str(value)
else:
annotation=DBAnnotation(self.idScope.getNewId(DBAnnotation.vtType),
action_key, str(value))
self.db_add_annotation(annotation)
def hashAnnotations(self):
annotations = {}
for annotation in self.db_annotations:
if annotation._db_key not in annotations:
annotations[annotation._db_key] = []
if annotation._db_value not in annotations[annotation._db_key]:
annotations[annotation._db_key].append(annotation._db_value)
keys = annotations.keys()
keys.sort()
m = hashlib.md5()
for k in keys:
m.update(str(k))
annotations[k].sort()
for v in annotations[k]:
m.update(str(v))
return m.hexdigest()
def hashActionAnnotations(self):
action_annotations = {}
for action_id, key, value in [[aa.db_action_id, aa.db_key,
aa.db_value] for aa in self.db_actionAnnotations]:
index = (str(action_id), key)
if index not in action_annotations:
action_annotations[index] = []
if value not in action_annotations[index]:
action_annotations[index].append(value)
keys = action_annotations.keys()
keys.sort()
m = hashlib.md5()
for k in keys:
m.update(k[0] + k[1])
action_annotations[k].sort()
for v in action_annotations[k]:
m.update(str(v))
return m.hexdigest()
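# Usage sketch (illustrative, based only on the methods defined above): make a
# working copy of a vistrail and stamp it with checkout bookkeeping for an app.
#
#     vt_copy = db_vistrail.do_copy()
#     vt_copy.update_checkout_version(app='myapp')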
| bsd-3-clause | -2,098,377,584,519,645,000 | 42.589041 | 80 | 0.593233 | false |
fake-name/ChromeController | ChromeController/manager.py | 1 | 35830 |
import distutils.spawn
import os.path
import sys
import subprocess
import pprint
import types
import json
import base64
import signal
import pprint
import time
import http.cookiejar
import urllib.parse
import ChromeController.filter_funcs as filter_funcs
from ChromeController.cr_exceptions import ChromeResponseNotReceived
from ChromeController.cr_exceptions import ChromeNavigateTimedOut
from ChromeController.cr_exceptions import ChromeError
from ChromeController.resources import js
# We use the generated wrapper. If you want a different version, use the CLI interface to update.
from ChromeController.Generator.Generated import ChromeRemoteDebugInterface as ChromeRemoteDebugInterface_base
DEFAULT_TIMEOUT_SECS = 10
class RemoteObject():
def __init__(self, object_meta):
self.object_meta = object_meta
	# TODO: Allow retrieving/interacting with these.
def __repr__(self):
return "<(Unimplemented) RemoteObject for JS object: '%s'>" % (self.object_meta, )
class ChromeRemoteDebugInterface(ChromeRemoteDebugInterface_base):
'''
Remote control class for Chromium.
'''
def __init__(self,
binary = None,
dbg_port = None,
use_execution_manager = None,
additional_options = [],
visible_size = None,
disable_page = False,
disable_dom = False,
disable_network = False,
*args,
**kwargs):
super().__init__(
binary = binary,
dbg_port = dbg_port,
use_execution_manager = use_execution_manager,
additional_options = additional_options,
*args, **kwargs)
if disable_page:
self.log.debug("Not enabling page debug interface")
else:
self.Page_enable()
if disable_dom:
self.log.debug("Not enabling DOM debug interface")
else:
self.DOM_enable()
if disable_network:
self.log.debug("Not enabling Network debug interface")
else:
self.Network_enable()
if visible_size:
assert isinstance(visible_size, tuple), "visible_size must be a 2-tuple containing 2 integers"
assert len(visible_size) == 2, "visible_size must be a 2-tuple containing 2 integers"
assert all([isinstance(val, int) for val in visible_size]), "visible_size must be a 2-tuple containing 2 integers"
self.log.debug("Visible size overridden to %sx%s" % visible_size)
self.Emulation_setVisibleSize(*visible_size)
else:
self.Emulation_setVisibleSize(1024, 1366)
self.__new_tab_scripts = []
# cr_ver = self.Browser_getVersion()
# self.log.debug("Remote browser version info:")
# self.log.debug(str(cr_ver))
# 'protocolVersion'
# 'product'
# 'revision'
# 'userAgent'
# 'jsVersion'
def update_headers(self, header_args):
'''
Given a set of headers, update both the user-agent
and additional headers for the remote browser.
header_args must be a dict. Keys are the names of
the corresponding HTTP header.
return value is a 2-tuple of the results of the user-agent
update, as well as the extra headers update.
If no 'User-Agent' key is present in the new headers,
the first item in the tuple will be None
'''
assert isinstance(header_args, dict), "header_args must be a dict, passed type was %s" \
% (type(header_args), )
ua = header_args.pop('User-Agent', None)
ret_1 = None
if ua:
ret_1 = self.Network_setUserAgentOverride(userAgent=ua)
ret_2 = self.Network_setExtraHTTPHeaders(headers = header_args)
return (ret_1, ret_2)
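	# Usage sketch (header values are illustrative, `cr` is an already-created
	# ChromeRemoteDebugInterface instance):
	#
	#     cr.update_headers({
	#         'User-Agent'      : 'Mozilla/5.0 (X11; Linux x86_64)',
	#         'Accept-Language' : 'en-US,en;q=0.9',
	#     })
	#
	# The 'User-Agent' key (if present) goes through Network_setUserAgentOverride(),
	# everything else through Network_setExtraHTTPHeaders().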
	def __remove_default_members(self, js_object):
		# Strip the default object members and DOMException constants that
		# clutter every property listing, keeping only the interesting entries.
		default_names = {
			'__defineGetter__', '__defineSetter__', '__lookupGetter__',
			'__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty',
			'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString',
			'toString', 'valueOf',
			'ABORT_ERR', 'DATA_CLONE_ERR', 'INUSE_ATTRIBUTE_ERR',
			'INVALID_ACCESS_ERR', 'INVALID_CHARACTER_ERR',
			'INVALID_MODIFICATION_ERR', 'INVALID_NODE_TYPE_ERR',
			'INVALID_STATE_ERR', 'NAMESPACE_ERR', 'NETWORK_ERR',
			'NO_DATA_ALLOWED_ERR', 'NO_MODIFICATION_ALLOWED_ERR',
			'NOT_FOUND_ERR', 'NOT_SUPPORTED_ERR', 'QUOTA_EXCEEDED_ERR',
			'SECURITY_ERR', 'SYNTAX_ERR', 'TIMEOUT_ERR', 'TYPE_MISMATCH_ERR',
			'URL_MISMATCH_ERR', 'VALIDATION_ERR', 'WRONG_DOCUMENT_ERR',
			'DOMSTRING_SIZE_ERR', 'HIERARCHY_REQUEST_ERR', 'INDEX_SIZE_ERR',
		}
		return [
			item for item in js_object
			if not ('name' in item and item['name'] in default_names)
		]
def __unpack_object(self, object):
assert isinstance(object, dict), "Object values must be a dict! Passed %s (%s)" % (type(object), object)
ret = {}
for key, value in object.items():
assert isinstance(key, str)
if isinstance(value, str):
ret[key] = value
elif isinstance(value, int):
ret[key] = value
elif isinstance(value, float):
ret[key] = value
elif value is None: # Dammit, NoneType isn't exposed
ret[key] = value
elif value in (True, False):
ret[key] = value
elif isinstance(value, dict):
ret[key] = self.__unpack_object(value)
else:
raise ValueError("Unknown type in object: %s (%s)" % (type(value), value))
return ret
def __decode_serialized_value(self, value):
assert 'type' in value, "Missing 'type' key from value: '%s'" % (value, )
if 'get' in value and 'set' in value:
self.log.debug("Unserializable remote script object")
return RemoteObject(value['objectId'])
if value['type'] == 'object' and 'objectId' in value:
self.log.debug("Unserializable remote script object")
return RemoteObject(value['objectId'])
assert 'value' in value, "Missing 'value' key from value: '%s'" % (value, )
if value['type'] == 'number':
return float(value['value'])
if value['type'] == 'string':
return value['value']
if value['type'] == 'object':
return self.__unpack_object(value['value'])
# Special case for null/none objects
if (
'subtype' in value
and
value['subtype'] == 'null'
and
value['type'] == 'object'
and
value['value'] is None):
return None
self.log.warning("Unknown serialized javascript value of type %s", value['type'])
self.log.warning("Complete value: %s", value)
return value
def _unpack_xhr_resp(self, values):
ret = {}
# Handle single objects without all the XHR stuff.
# This seems to be a chrome 84 change.
if set(values.keys()) == set(['type', 'value']):
if values['type'] == 'object':
return self.__decode_serialized_value(values)
for entry in values:
# assert 'configurable' in entry, "'configurable' missing from entry (%s, %s)" % (entry, values)
# assert 'enumerable' in entry, "'enumerable' missing from entry (%s, %s)" % (entry, values)
# assert 'isOwn' in entry, "'isOwn' missing from entry (%s, %s)" % (entry, values)
assert 'name' in entry, "'name' missing from entry (%s, %s)" % (entry, values)
assert 'value' in entry, "'value' missing from entry (%s, %s)" % (entry, values)
# assert 'writable' in entry, "'writable' missing from entry (%s, %s)" % (entry, values)
if 'isOwn' in entry and entry['isOwn'] is False:
continue
assert entry['name'] not in ret
ret[entry['name']] = self.__decode_serialized_value(entry['value'])
return ret
def xhr_fetch(self, url, headers=None, post_data=None, post_type=None):
'''
Execute a XMLHttpRequest() for content at `url`. If
`headers` are specified, they must be a dict of string:string
keader:values. post_data must also be pre-encoded.
Note that this will be affected by the same-origin policy of the current
page, so it can fail if you are requesting content from another domain and
the current site has restrictive same-origin policies (which is very common).
'''
'''
If you're thinking this is kind of a hack, well, it is.
		We also cheat a bunch and use synchronous XMLHttpRequest()s, because it
		is SO much easier.
'''
js_script = '''
function (url, headers, post_data, post_type){
var req = new XMLHttpRequest();
// We use sync calls, since we want to wait until the call completes
			// This will probably be deprecated at some point.
if (post_data)
{
req.open("POST", url, false);
if (post_type)
req.setRequestHeader("Content-Type", post_type);
}
else
req.open("GET", url, false);
if (headers)
{
let entries = Object.entries(headers);
for (let idx = 0; idx < entries.length; idx += 1)
{
req.setRequestHeader(entries[idx][0], entries[idx][1]);
}
}
if (post_data)
req.send(post_data);
else
req.send();
return {
url : url,
headers : headers,
resp_headers : req.getAllResponseHeaders(),
post : post_data,
response : req.responseText,
mimetype : req.getResponseHeader("Content-Type"),
code : req.status
};
}
'''
ret = self.execute_javascript_function(js_script, [url, headers, post_data, post_type])
# print()
# print()
# print("XHR Response")
# pprint.pprint(ret)
# print()
# print()
ret = self._unpack_xhr_resp(ret)
return ret
# if
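	# Usage sketch for xhr_fetch() (URL, headers and body are illustrative):
	#
	#     resp = cr.xhr_fetch("https://example.com/api/item",
	#                         headers   = {'Accept' : 'application/json'},
	#                         post_data = "key=value",
	#                         post_type = "application/x-www-form-urlencoded")
	#     print(resp['code'], resp['mimetype'])
	#     print(resp['response'])
	#
	# Remember the same-origin caveat in the docstring: the request runs in the
	# context of the currently loaded page.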
def __unwrap_object_return(self, ret):
if "result" in ret and 'result' in ret['result']:
res = ret['result']['result']
if 'objectId' in res:
resp4 = self.Runtime_getProperties(res['objectId'])
if "result" in resp4 and 'result' in resp4['result']:
res_full = resp4['result']['result']
return self.__remove_default_members(res_full)
# Direct POD type return, just use it directly.
if "type" in res and "value" in res:
return res
self.log.error("Failed fetching results from call!")
return ret
def __exec_js(self, script, should_call=False, args=None):
'''
Execute the passed javascript function/statement, optionally with passed
arguments.
Note that if args is not False, or should_call is True the passed script
will be treated as a function definition and called via
`(script).apply(null, args)`. Otherwise, the passed script will simply
be evaluated.
Note that if `script` is not a function, it must be a single statement.
The presence of semicolons not enclosed in a bracket scope will produce
an error.
'''
if args is None:
args = {}
# How chromedriver does this:
# std::unique_ptr<base::Value>* result) {
# std::string json;
# base::JSONWriter::Write(args, &json);
# // TODO(zachconrad): Second null should be array of shadow host ids.
# std::string expression = base::StringPrintf(
# "(%s).apply(null, [null, %s, %s])",
# kCallFunctionScript,
# function.c_str(),
# json.c_str());
if args or should_call:
expression = "({script}).apply(null, JSON.parse({args}))".format(
script=script,
args=repr(json.dumps(args))
)
else:
expression = "({script})".format(
script=script,
)
resp3 = self.Runtime_evaluate(expression=expression, returnByValue=True)
resp4 = self.__unwrap_object_return(resp3)
return resp4
# Interact with http.cookiejar.Cookie() instances
def get_cookies(self):
'''
		Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes.
'''
ret = self.Network_getAllCookies()
assert 'result' in ret, "No return value in function response!"
assert 'cookies' in ret['result'], "No 'cookies' key in function response"
cookies = []
for raw_cookie in ret['result']['cookies']:
# Chromium seems to support the following key values for the cookie dict:
# "name"
# "value"
# "domain"
# "path"
# "expires"
# "httpOnly"
# "session"
# "secure"
#
# This seems supported by the fact that the underlying chromium cookie implementation has
# the following members:
# std::string name_;
# std::string value_;
# std::string domain_;
# std::string path_;
# base::Time creation_date_;
# base::Time expiry_date_;
# base::Time last_access_date_;
# bool secure_;
# bool httponly_;
# CookieSameSite same_site_;
# CookiePriority priority_;
#
# See chromium/net/cookies/canonical_cookie.h for more.
#
# I suspect the python cookie implementation is derived exactly from the standard, while the
# chromium implementation is more of a practically derived structure.
# Network.setCookie
baked_cookie = http.cookiejar.Cookie(
# We assume V0 cookies, principally because I don't think I've /ever/ actually encountered a V1 cookie.
# Chromium doesn't seem to specify it.
version = 0,
name = raw_cookie['name'],
value = raw_cookie['value'],
port = None,
port_specified = False,
domain = raw_cookie['domain'],
domain_specified = True,
domain_initial_dot = False,
path = raw_cookie['path'],
path_specified = False,
secure = raw_cookie['secure'],
expires = raw_cookie['expires'],
discard = raw_cookie['session'],
comment = None,
comment_url = None,
rest = {"httponly":"%s" % raw_cookie['httpOnly']},
rfc2109 = False
)
cookies.append(baked_cookie)
return cookies
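	# Usage sketch: persisting the remote browser's cookies with the stdlib
	# cookiejar classes (the file name is illustrative):
	#
	#     cj = http.cookiejar.LWPCookieJar("cookies.txt")
	#     for cookie in cr.get_cookies():
	#         cj.set_cookie(cookie)
	#     cj.save(ignore_discard=True)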
def set_cookie(self, cookie):
'''
Add a cookie to the remote chromium instance.
Passed value `cookie` must be an instance of `http.cookiejar.Cookie()`.
'''
# Function path: Network.setCookie
# Domain: Network
# Method name: setCookie
# WARNING: This function is marked 'Experimental'!
# Parameters:
# Required arguments:
# 'url' (type: string) -> The request-URI to associate with the setting of the cookie. This value can affect the default domain and path values of the created cookie.
# 'name' (type: string) -> The name of the cookie.
# 'value' (type: string) -> The value of the cookie.
# Optional arguments:
# 'domain' (type: string) -> If omitted, the cookie becomes a host-only cookie.
# 'path' (type: string) -> Defaults to the path portion of the url parameter.
		# 'secure' (type: boolean) -> Defaults to false.
# 'httpOnly' (type: boolean) -> Defaults to false.
# 'sameSite' (type: CookieSameSite) -> Defaults to browser default behavior.
# 'expirationDate' (type: Timestamp) -> If omitted, the cookie becomes a session cookie.
# Returns:
# 'success' (type: boolean) -> True if successfully set cookie.
# Description: Sets a cookie with the given cookie data; may overwrite equivalent cookies if they exist.
assert isinstance(cookie, http.cookiejar.Cookie), 'The value passed to `set_cookie` must be an instance of http.cookiejar.Cookie().' + \
' Passed: %s ("%s").' % (type(cookie), cookie)
# Yeah, the cookielib stores this attribute as a string, despite it containing a
# boolean value. No idea why.
is_http_only = str(cookie.get_nonstandard_attr('httponly', 'False')).lower() == "true"
# I'm unclear what the "url" field is actually for. A cookie only needs the domain and
# path component to be fully defined. Considering the API apparently allows the domain and
		# path parameters to be unset, I think it is partially redundant, with some
# strange interactions with mode-changing between host-only and more general
# cookies depending on what's set where.
# Anyways, given we need a URL for the API to work properly, we produce a fake
# host url by building it out of the relevant cookie properties.
fake_url = urllib.parse.urlunsplit((
"http" if is_http_only else "https", # Scheme
cookie.domain, # netloc
cookie.path, # path
'', # query
'', # fragment
))
params = {
'url' : fake_url,
'name' : cookie.name,
'value' : cookie.value if cookie.value else "",
'domain' : cookie.domain,
'path' : cookie.path,
'secure' : cookie.secure,
'expires' : float(cookie.expires) if cookie.expires else float(2**32),
'httpOnly' : is_http_only,
# The "sameSite" flag appears to be a chromium-only extension for controlling
# cookie sending in non-first-party contexts. See:
# https://bugs.chromium.org/p/chromium/issues/detail?id=459154
# Anyways, we just use the default here, whatever that is.
# sameSite = cookie.xxx
}
ret = self.Network_setCookie(**params)
return ret
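	# Usage sketch: loading cookies back into the browser from any iterable of
	# http.cookiejar.Cookie() objects (e.g. a jar populated elsewhere):
	#
	#     for cookie in saved_cookiejar:
	#         cr.set_cookie(cookie)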
def clear_cookies(self):
'''
At this point, this is just a thin shim around the Network_clearBrowserCookies() operation.
That function postdates the clear_cookies() call here.
'''
self.Network_clearBrowserCookies()
def navigate_to(self, url):
'''
Trigger a page navigation to url `url`.
Note that this is done via javascript injection, and as such results in
		the `referer` header being sent with the URL of the currently loaded page.
This is useful when a page's navigation is stateful, or for simple
cases of referrer spoofing.
'''
assert "'" not in url
return self.__exec_js("window.location.href = '{}'".format(url))
def get_current_url(self):
'''
Probe the remote session for the current window URL.
This is primarily used to do things like unwrap redirects,
or circumvent outbound url wrappers.
'''
res = self.Page_getNavigationHistory()
assert 'result' in res
assert 'currentIndex' in res['result']
assert 'entries' in res['result']
return res['result']['entries'][res['result']['currentIndex']]['url']
def get_page_url_title(self):
'''
Get the title and current url from the remote session.
Return is a 2-tuple: (page_title, page_url).
'''
cr_tab_id = self.transport._get_cr_tab_meta_for_key(self.tab_id)['id']
targets = self.Target_getTargets()
assert 'result' in targets
assert 'targetInfos' in targets['result']
for tgt in targets['result']['targetInfos']:
if tgt['targetId'] == cr_tab_id:
# {
# 'title': 'Page Title 1',
# 'targetId': '9d2c503c-e39e-42cc-b950-96db073918ee',
# 'attached': True,
# 'url': 'http://localhost:47181/with_title_1',
# 'type': 'page'
# }
title = tgt['title']
cur_url = tgt['url']
return title, cur_url
def click_link_containing_url(self, url):
'''
TODO
'''
# exec_func =
self.__exec_js("window.location.href = '/test'")
# js.kCallFunctionScript
# "window.history.back();"
# elem = self.find_element("//a".format(url))
# print(elem)
def execute_javascript_statement(self, script):
'''
Execute a javascript string in the context of the browser tab.
This only works for simple JS statements. More complex usage should
be via execute_javascript_function().
This can also be used to interrogate the JS interpreter, as simply passing
variable names of interest will return the variable value.
'''
ret = self.__exec_js(script=script)
return ret
def execute_javascript_function(self, script, args=None):
'''
Execute a javascript function in the context of the browser tab.
The passed script must be a single function definition, which will
be called via ({script}).apply(null, {args}).
'''
ret = self.__exec_js(script=script, should_call=True, args=args)
return ret
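	# Usage sketch: the script is a single function definition, and args are
	# applied positionally (function body and args are illustrative):
	#
	#     ret = cr.execute_javascript_function(
	#         "function(a, b){ return {'sum' : a + b}; }", args=[1, 2])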
def find_element(self, search):
'''
DOM_performSearch(self, query, includeUserAgentShadowDOM)
Python Function: DOM_performSearch
Domain: DOM
Method name: performSearch
WARNING: This function is marked 'Experimental'!
Parameters:
'query' (type: string) -> Plain text or query selector or XPath search query.
'includeUserAgentShadowDOM' (type: boolean) -> True to search in user agent shadow DOM.
Returns:
'searchId' (type: string) -> Unique search session identifier.
'resultCount' (type: integer) -> Number of search results.
Description: Searches for a given string in the DOM tree. Use <code>getSearchResults</code> to access search results or <code>cancelSearch</code> to end this search session.
Python Function: DOM_getSearchResults
Domain: DOM
Method name: getSearchResults
WARNING: This function is marked 'Experimental'!
Parameters:
'searchId' (type: string) -> Unique search session identifier.
'fromIndex' (type: integer) -> Start index of the search result to be returned.
'toIndex' (type: integer) -> End index of the search result to be returned.
Returns:
'nodeIds' (type: array) -> Ids of the search result nodes.
		Description: Returns search results from given <code>fromIndex</code> to given <code>toIndex</code> from the search with the given identifier.
DOM_discardSearchResults(self, searchId)
Python Function: DOM_discardSearchResults
Domain: DOM
Method name: discardSearchResults
WARNING: This function is marked 'Experimental'!
Parameters:
'searchId' (type: string) -> Unique search session identifier.
No return value.
Description: Discards search results from the session with the given id. <code>getSearchResults</code> should no longer be called for that search.
'''
res = self.DOM_performSearch(search, includeUserAgentShadowDOM=False)
assert 'result' in res
assert 'searchId' in res['result']
searchid = res['result']['searchId']
res_cnt = res['result']['resultCount']
self.log.debug("%s", res)
self.log.debug("%s", searchid)
if res_cnt == 0:
return None
items = self.DOM_getSearchResults(searchId=searchid, fromIndex=0, toIndex=res_cnt)
self.log.debug("Results:")
self.log.debug("%s", items)
# DOM_getSearchResults
def click_element(self, contains_url):
'''
TODO
ChromeDriver source for how to click an element:
Status ExecuteClickElement(Session* session,
WebView* web_view,
const std::string& element_id,
const base::DictionaryValue& params,
std::unique_ptr<base::Value>* value) {
std::string tag_name;
Status status = GetElementTagName(session, web_view, element_id, &tag_name);
if (status.IsError())
return status;
if (tag_name == "option") {
bool is_toggleable;
status = IsOptionElementTogglable(
session, web_view, element_id, &is_toggleable);
if (status.IsError())
return status;
if (is_toggleable)
return ToggleOptionElement(session, web_view, element_id);
else
return SetOptionElementSelected(session, web_view, element_id, true);
} else {
WebPoint location;
status = GetElementClickableLocation(
session, web_view, element_id, &location);
if (status.IsError())
return status;
std::list<MouseEvent> events;
events.push_back(
MouseEvent(kMovedMouseEventType, kNoneMouseButton,
location.x, location.y, session->sticky_modifiers, 0));
events.push_back(
MouseEvent(kPressedMouseEventType, kLeftMouseButton,
location.x, location.y, session->sticky_modifiers, 1));
events.push_back(
MouseEvent(kReleasedMouseEventType, kLeftMouseButton,
location.x, location.y, session->sticky_modifiers, 1));
status =
web_view->DispatchMouseEvents(events, session->GetCurrentFrameId());
if (status.IsOk())
session->mouse_position = location;
return status;
}
}
'''
pass
def get_unpacked_response_body(self, requestId, mimetype="application/unknown"):
'''
		Return an unpacked, decoded response body from Network_getResponseBody()
'''
content = self.Network_getResponseBody(requestId)
assert 'result' in content
result = content['result']
assert 'base64Encoded' in result
assert 'body' in result
if result['base64Encoded']:
content = base64.b64decode(result['body'])
else:
content = result['body']
self.log.info("Navigate complete. Received %s byte response with type %s.", len(content), mimetype)
return {'binary' : result['base64Encoded'], 'mimetype' : mimetype, 'content' : content}
def handle_page_location_changed(self, timeout=None):
'''
If the chrome tab has internally redirected (generally because jerberscript), this
will walk the page navigation responses and attempt to fetch the response body for
the tab's latest location.
'''
# In general, this is often called after other mechanisms have confirmed
# that the tab has already navigated. As such, we want to not wait a while
# to discover something went wrong, so use a timeout that basically just
# results in checking the available buffer, and nothing else.
if not timeout:
timeout = 0.1
self.log.debug("We may have redirected. Checking.")
messages = self.transport.recv_all_filtered(filter_funcs.capture_loading_events, tab_key=self.tab_id)
if not messages:
raise ChromeError("Couldn't track redirect! No idea what to do!")
last_message = messages[-1]
self.log.info("Probably a redirect! New content url: '%s'", last_message['params']['documentURL'])
resp = self.transport.recv_filtered(filter_funcs.network_response_recieved_for_url(last_message['params']['documentURL'], last_message['params']['frameId']), tab_key=self.tab_id)
resp = resp['params']
ctype = 'application/unknown'
resp_response = resp['response']
if 'mimeType' in resp_response:
ctype = resp_response['mimeType']
if 'headers' in resp_response and 'content-type' in resp_response['headers']:
ctype = resp_response['headers']['content-type'].split(";")[0]
# We assume the last document request was the redirect.
# This is /probably/ kind of a poor practice, but what the hell.
# I have no idea what this would do if there are non-html documents (or if that can even happen.)
return self.get_unpacked_response_body(last_message['params']['requestId'], mimetype=ctype)
def blocking_navigate_and_get_source(self, url, timeout=DEFAULT_TIMEOUT_SECS):
'''
Do a blocking navigate to url `url`, and then extract the
response body and return that.
This effectively returns the *unrendered* page content that's sent over the wire. As such,
if the page does any modification of the contained markup during rendering (via javascript), this
function will not reflect the changes made by the javascript.
		The rendered page content can be retrieved by calling `get_rendered_page_source()`.
Due to the remote api structure, accessing the raw content after the content has been loaded
is not possible, so any task requiring the raw content must be careful to request it
before it actually navigates to said content.
		Return value is a dictionary with three keys:
		{
			'binary'   : (boolean, true if content is binary, false if not)
			'mimetype' : (string, the reported content-type of the response)
			'content'  : (string or bytestring, depending on whether `binary` is true or not)
}
'''
resp = self.blocking_navigate(url, timeout)
assert 'requestId' in resp
assert 'response' in resp
# self.log.debug('blocking_navigate Response %s', pprint.pformat(resp))
ctype = 'application/unknown'
resp_response = resp['response']
if 'mimeType' in resp_response:
ctype = resp_response['mimeType']
if 'headers' in resp_response and 'content-type' in resp_response['headers']:
ctype = resp_response['headers']['content-type'].split(";")[0]
self.log.debug("Trying to get response body")
try:
ret = self.get_unpacked_response_body(resp['requestId'], mimetype=ctype)
except ChromeError:
ret = self.handle_page_location_changed(timeout)
return ret
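	# Usage sketch tying the two source-fetching paths together (URL is
	# illustrative):
	#
	#     raw = cr.blocking_navigate_and_get_source("http://example.com")
	#     if raw['binary'] is False:
	#         wire_markup = raw['content']          # markup as sent over the wire
	#     rendered_markup = cr.get_rendered_page_source()  # markup after JS has run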
def get_rendered_page_source(self, dom_idle_requirement_secs=3, max_wait_timeout=30):
'''
Get the HTML markup for the current page.
This is done by looking up the root DOM node, and then requesting the outer HTML
for that node ID.
		This call's return value will reflect any modifications made by javascript to the
page. For unmodified content, use `blocking_navigate_and_get_source()`
dom_idle_requirement_secs specifies the period of time for which there must have been no
DOM modifications before treating the rendered output as "final". This call will therefore block for
at least dom_idle_requirement_secs seconds.
'''
# There are a bunch of events which generally indicate a page is still doing *things*.
# I have some concern about how this will handle things like advertisements, which
# basically load crap forever. That's why we have the max_wait_timeout.
target_events = [
"Page.frameResized",
"Page.frameStartedLoading",
"Page.frameNavigated",
"Page.frameAttached",
"Page.frameStoppedLoading",
"Page.frameScheduledNavigation",
"Page.domContentEventFired",
"Page.frameClearedScheduledNavigation",
"Page.loadEventFired",
"DOM.documentUpdated",
"DOM.childNodeInserted",
"DOM.childNodeRemoved",
"DOM.childNodeCountUpdated",
]
start_time = time.time()
try:
while 1:
				if time.time() - start_time > max_wait_timeout:
					self.log.debug("Page was not idle after waiting %s seconds. Giving up and extracting content now.", max_wait_timeout)
					break
self.transport.recv_filtered(
filter_funcs.wait_for_methods(target_events),
tab_key = self.tab_id,
timeout = dom_idle_requirement_secs
)
except ChromeResponseNotReceived:
# We timed out, the DOM is probably idle.
pass
# We have to find the DOM root node ID
dom_attr = self.DOM_getDocument(depth=-1, pierce=False)
assert 'result' in dom_attr
assert 'root' in dom_attr['result']
assert 'nodeId' in dom_attr['result']['root']
# Now, we have the root node ID.
root_node_id = dom_attr['result']['root']['nodeId']
# Use that to get the HTML for the specified node
response = self.DOM_getOuterHTML(nodeId=root_node_id)
assert 'result' in response
assert 'outerHTML' in response['result']
return response['result']['outerHTML']
def take_screeshot(self):
'''
Take a screenshot of the virtual viewport content.
Return value is a png image as a bytestring.
'''
resp = self.Page_captureScreenshot()
assert 'result' in resp
assert 'data' in resp['result']
imgdat = base64.b64decode(resp['result']['data'])
return imgdat
def blocking_navigate(self, url, timeout=DEFAULT_TIMEOUT_SECS):
'''
Do a blocking navigate to url `url`.
This function triggers a navigation, and then waits for the browser
to claim the page has finished loading.
Roughly, this corresponds to the javascript `DOMContentLoaded` event,
meaning the dom for the page is ready.
Internals:
A navigation command results in a sequence of events:
- Page.frameStartedLoading" (with frameid)
- Page.frameStoppedLoading" (with frameid)
- Page.loadEventFired" (not attached to an ID)
		Therefore, this call triggers a navigation operation,
and then waits for the expected set of response event messages.
'''
self.transport.flush(tab_key=self.tab_id)
self.log.debug("Blocking navigate to URL: '%s'", url)
ret = self.Page_navigate(url = url)
assert("result" in ret), "Missing return content"
assert("frameId" in ret['result']), "Missing 'frameId' in return content"
assert("loaderId" in ret['result']), "Missing 'loaderId' in return content"
expected_id = ret['result']['frameId']
loader_id = ret['result']['loaderId']
try:
self.log.debug("Waiting for frame navigated command response.")
self.transport.recv_filtered(filter_funcs.check_frame_navigated_command(expected_id), tab_key=self.tab_id, timeout=timeout)
self.log.debug("Waiting for frameStartedLoading response.")
self.transport.recv_filtered(filter_funcs.check_frame_load_command("Page.frameStartedLoading"), tab_key=self.tab_id, timeout=timeout)
self.log.debug("Waiting for frameStoppedLoading response.")
self.transport.recv_filtered(filter_funcs.check_frame_load_command("Page.frameStoppedLoading"), tab_key=self.tab_id, timeout=timeout)
# self.transport.recv_filtered(check_load_event_fired, tab_key=self.tab_id, timeout=timeout)
self.log.debug("Waiting for responseReceived response.")
resp = self.transport.recv_filtered(filter_funcs.network_response_recieved_for_url(url=None, expected_id=expected_id), tab_key=self.tab_id, timeout=timeout)
if resp is None:
raise ChromeNavigateTimedOut("Blocking navigate timed out!")
return resp['params']
# The `Page.frameNavigated ` event does not get fired for non-markup responses.
# Therefore, if we timeout on waiting for that, check to see if we received a binary response.
except ChromeResponseNotReceived:
# So this is basically broken, fix is https://bugs.chromium.org/p/chromium/issues/detail?id=831887
# but that bug report isn't fixed yet.
# Siiiigh.
self.log.warning("Failed to receive expected response to navigate command. Checking if response is a binary object.")
resp = self.transport.recv_filtered(
keycheck = filter_funcs.check_frame_loader_command(
method_name = "Network.responseReceived",
loader_id = loader_id
),
tab_key = self.tab_id,
timeout = timeout)
return resp['params']
def new_tab(self, *args, **kwargs):
tab = super().new_tab(*args, **kwargs)
for script in self.__new_tab_scripts:
tab.Page_addScriptToEvaluateOnNewDocument(script)
return tab
def install_evasions(self):
'''
Load headless detection evasions from the puppeteer-extra repository (
https://github.com/berstend/puppeteer-extra/tree/master/packages/puppeteer-extra-plugin-stealth/evasions).
'''
from ChromeController.resources import evasions
scripts = evasions.load_evasions()
self.__new_tab_scripts.extend(scripts.values())
for script, contents in scripts.items():
print("Loading '%s'" % script)
ret = self.Page_addScriptToEvaluateOnNewDocument(contents)
pprint.pprint(ret)
ret2 = self.execute_javascript_function("function()" + contents)
pprint.pprint(ret2)
# ret3 = self.execute_javascript_statement(contents)
# pprint.pprint(ret3)
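if __name__ == "__main__":
	# Minimal end-to-end sketch, not part of the library proper. The binary name
	# and debug port are assumptions - adjust them for your system.
	import logging
	logging.basicConfig(level=logging.INFO)
	cr = ChromeRemoteDebugInterface(binary="google-chrome", dbg_port=9222)
	cr.update_headers({'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64)'})
	page = cr.blocking_navigate_and_get_source("http://example.com")
	print(page['mimetype'], len(page['content']))
	print(cr.get_page_url_title())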
| bsd-3-clause | -1,907,269,585,833,294,600 | 31.221223 | 184 | 0.661792 | false |
bdang2012/taiga-back-casting | taiga/projects/signals.py | 1 | 3787 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.conf import settings
from taiga.projects.services.tags_colors import update_project_tags_colors_handler, remove_unused_tags
from taiga.projects.notifications.services import create_notify_policy_if_not_exists
from taiga.base.utils.db import get_typename_for_model_class
####################################
# Signals over project items
####################################
## TAGS
def tags_normalization(sender, instance, **kwargs):
if isinstance(instance.tags, (list, tuple)):
instance.tags = list(map(str.lower, instance.tags))
def update_project_tags_when_create_or_edit_taggable_item(sender, instance, **kwargs):
update_project_tags_colors_handler(instance)
def update_project_tags_when_delete_taggable_item(sender, instance, **kwargs):
remove_unused_tags(instance.project)
instance.project.save()
def membership_post_delete(sender, instance, using, **kwargs):
instance.project.update_role_points()
def create_notify_policy(sender, instance, using, **kwargs):
if instance.user:
create_notify_policy_if_not_exists(instance.project, instance.user)
def project_post_save(sender, instance, created, **kwargs):
"""
    Populate a new project with its dependent default data
"""
if not created:
return
if instance._importing:
return
template = getattr(instance, "creation_template", None)
if template is None:
ProjectTemplate = apps.get_model("projects", "ProjectTemplate")
template = ProjectTemplate.objects.get(slug=settings.DEFAULT_PROJECT_TEMPLATE)
template.apply_to_project(instance)
instance.save()
Role = apps.get_model("users", "Role")
try:
owner_role = instance.roles.get(slug=template.default_owner_role)
except Role.DoesNotExist:
owner_role = instance.roles.first()
if owner_role:
Membership = apps.get_model("projects", "Membership")
Membership.objects.create(user=instance.owner, project=instance, role=owner_role,
is_owner=True, email=instance.owner.email)
def try_to_close_or_open_user_stories_when_edit_us_status(sender, instance, created, **kwargs):
from taiga.projects.userstories import services
for user_story in instance.user_stories.all():
if services.calculate_userstory_is_closed(user_story):
services.close_userstory(user_story)
else:
services.open_userstory(user_story)
def try_to_close_or_open_user_stories_when_edit_task_status(sender, instance, created, **kwargs):
from taiga.projects.userstories import services
UserStory = apps.get_model("userstories", "UserStory")
for user_story in UserStory.objects.filter(tasks__status=instance).distinct():
if services.calculate_userstory_is_closed(user_story):
services.close_userstory(user_story)
else:
services.open_userstory(user_story)
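# Connection sketch (not part of this module): these receivers are normally
# wired up in the app configuration. The sender models below are illustrative
# of the taiga models referenced above:
#
#     from django.db.models import signals
#     signals.post_save.connect(project_post_save, sender=Project)
#     signals.post_delete.connect(membership_post_delete, sender=Membership)
#     signals.pre_save.connect(tags_normalization, sender=UserStory)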
| agpl-3.0 | -6,576,361,779,421,681,000 | 35.747573 | 102 | 0.705416 | false |
reinvantveer/edna-ld | etl/lib/MIFparser.py | 1 | 2473 | from osgeo import gdal
import signal
from osgeo import ogr
import json
gdal.UseExceptions()
"""
This module has a heavy dependency on the python GDAL package, which can be
a total pain in the ass to install, depending on your platform. But it is needed
for parsing the Mapinfo Interchange Format (MIF) files...
Support for Windows is easiest through the OSGeo4W installer
"""
class MIFparser:
"""
This class is responsible for reading MapInfo Interchange Format files.
They are recognizable by the .mif (upper or lowercase) file extension.
"""
# Catch segmentation faults
@staticmethod
def _sig_handler(signum, frame):
raise ValueError("segfault")
@staticmethod
def to_dict(file_path):
# TODO: write code to actually handle the error!
# signal.signal(signal.SIGSEGV, MIFparser._sig_handler)
wkt_features = [] # Initialize empty array of target features
try:
data_source = ogr.Open(file_path, 0)
except Exception as e:
raise ValueError(e)
err = gdal.GetLastErrorMsg()
if err:
raise ValueError(err + ' on ' + file_path)
if not data_source:
raise ValueError('Unable to read data from file %s' % file_path)
layer = data_source.GetLayer()
err = gdal.GetLastErrorMsg()
if err:
raise ValueError(err + ' on ' + file_path)
for feature in layer:
# shortcut to dumping non-geometry attributes from feature to our dictionary
try:
geojson = feature.ExportToJson()
except Exception as e:
raise ValueError('Unable to extract features from file %s due to %s' % (file_path, e))
geojson_as_dict = json.loads(geojson)
wkt_feature = geojson_as_dict['properties']
# tack on the geometry as well-known text
geom = feature.GetGeometryRef()
err = gdal.GetLastErrorMsg()
if err:
raise ValueError(err + ' on ' + file_path)
if not geom:
raise ValueError('Unable to extract geometries from %s' % file_path)
wkt_feature['WKT'] = geom.ExportToWkt()
wkt_features.append(wkt_feature)
if not wkt_features:
raise ValueError('Unable to extract features from %s' % file_path)
return wkt_features
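if __name__ == '__main__':
    # Minimal usage sketch: parse the MIF file named on the command line and
    # print each feature's WKT geometry. The file path comes from the caller;
    # nothing here assumes a particular dataset.
    import sys
    for parsed_feature in MIFparser.to_dict(sys.argv[1]):
        print(parsed_feature['WKT'])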
| mit | 209,170,776,351,899,870 | 29.9125 | 102 | 0.615042 | false |
stephanie-wang/ray | python/ray/tune/schedulers/async_hyperband.py | 1 | 7105 | import logging
import numpy as np
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
logger = logging.getLogger(__name__)
class AsyncHyperBandScheduler(FIFOScheduler):
"""Implements the Async Successive Halving.
This should provide similar theoretical performance as HyperBand but
avoid straggler issues that HyperBand faces. One implementation detail
    is that when using multiple brackets, trial allocation to a bracket is done
    randomly over a softmax probability distribution.
See https://arxiv.org/abs/1810.05934
Args:
time_attr (str): A training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric (str): The training result objective value attribute. Stopping
procedures will use this attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
max_t (float): max time units per trial. Trials will be stopped after
max_t time units (determined by time_attr) have passed.
grace_period (float): Only stop trials at least this old in time.
The units are the same as the attribute named by `time_attr`.
reduction_factor (float): Used to set halving rate and amount. This
is simply a unit-less scalar.
brackets (int): Number of brackets. Each bracket has a different
halving rate, specified by the reduction factor.
"""
def __init__(self,
time_attr="training_iteration",
reward_attr=None,
metric="episode_reward_mean",
mode="max",
max_t=100,
grace_period=1,
reduction_factor=4,
brackets=1):
assert max_t > 0, "Max (time_attr) not valid!"
assert max_t >= grace_period, "grace_period must be <= max_t!"
assert grace_period > 0, "grace_period must be positive!"
assert reduction_factor > 1, "Reduction Factor not valid!"
assert brackets > 0, "brackets must be positive!"
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
if reward_attr is not None:
mode = "max"
metric = reward_attr
logger.warning(
"`reward_attr` is deprecated and will be removed in a future "
"version of Tune. "
"Setting `metric={}` and `mode=max`.".format(reward_attr))
FIFOScheduler.__init__(self)
self._reduction_factor = reduction_factor
self._max_t = max_t
self._trial_info = {} # Stores Trial -> Bracket
# Tracks state for new trial add
self._brackets = [
_Bracket(grace_period, max_t, reduction_factor, s)
for s in range(brackets)
]
        self._counter = 0  # for bookkeeping
self._num_stopped = 0
self._metric = metric
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._time_attr = time_attr
def on_trial_add(self, trial_runner, trial):
sizes = np.array([len(b._rungs) for b in self._brackets])
probs = np.e**(sizes - sizes.max())
normalized = probs / probs.sum()
idx = np.random.choice(len(self._brackets), p=normalized)
self._trial_info[trial.trial_id] = self._brackets[idx]
def on_trial_result(self, trial_runner, trial, result):
action = TrialScheduler.CONTINUE
if self._time_attr not in result or self._metric not in result:
return action
if result[self._time_attr] >= self._max_t:
action = TrialScheduler.STOP
else:
bracket = self._trial_info[trial.trial_id]
action = bracket.on_result(trial, result[self._time_attr],
self._metric_op * result[self._metric])
if action == TrialScheduler.STOP:
self._num_stopped += 1
return action
def on_trial_complete(self, trial_runner, trial, result):
if self._time_attr not in result or self._metric not in result:
return
bracket = self._trial_info[trial.trial_id]
bracket.on_result(trial, result[self._time_attr],
self._metric_op * result[self._metric])
del self._trial_info[trial.trial_id]
def on_trial_remove(self, trial_runner, trial):
del self._trial_info[trial.trial_id]
def debug_string(self):
out = "Using AsyncHyperBand: num_stopped={}".format(self._num_stopped)
out += "\n" + "\n".join([b.debug_str() for b in self._brackets])
return out
class _Bracket():
"""Bookkeeping system to track the cutoffs.
Rungs are created in reversed order so that we can more easily find
the correct rung corresponding to the current iteration of the result.
Example:
>>> b = _Bracket(1, 10, 2, 3)
>>> b.on_result(trial1, 1, 2) # CONTINUE
>>> b.on_result(trial2, 1, 4) # CONTINUE
>>> b.cutoff(b._rungs[-1][1]) == 3.0 # rungs are reversed
>>> b.on_result(trial3, 1, 1) # STOP
>>> b.cutoff(b._rungs[0][1]) == 2.0
"""
def __init__(self, min_t, max_t, reduction_factor, s):
self.rf = reduction_factor
MAX_RUNGS = int(np.log(max_t / min_t) / np.log(self.rf) - s + 1)
self._rungs = [(min_t * self.rf**(k + s), {})
for k in reversed(range(MAX_RUNGS))]
def cutoff(self, recorded):
if not recorded:
return None
return np.nanpercentile(
list(recorded.values()), (1 - 1 / self.rf) * 100)
def on_result(self, trial, cur_iter, cur_rew):
action = TrialScheduler.CONTINUE
for milestone, recorded in self._rungs:
if cur_iter < milestone or trial.trial_id in recorded:
continue
else:
cutoff = self.cutoff(recorded)
if cutoff is not None and cur_rew < cutoff:
action = TrialScheduler.STOP
if cur_rew is None:
logger.warning("Reward attribute is None! Consider"
" reporting using a different field.")
else:
recorded[trial.trial_id] = cur_rew
break
return action
def debug_str(self):
iters = " | ".join([
"Iter {:.3f}: {}".format(milestone, self.cutoff(recorded))
for milestone, recorded in self._rungs
])
return "Bracket: " + iters
ASHAScheduler = AsyncHyperBandScheduler
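# Typical use with Tune (sketch; `my_trainable` stands in for a user-defined
# trainable, and the metric/mode values are illustrative):
#
#     from ray import tune
#     scheduler = AsyncHyperBandScheduler(metric="episode_reward_mean",
#                                         mode="max", grace_period=1, max_t=100)
#     tune.run(my_trainable, num_samples=20, scheduler=scheduler)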
if __name__ == "__main__":
sched = AsyncHyperBandScheduler(
grace_period=1, max_t=10, reduction_factor=2)
print(sched.debug_string())
bracket = sched._brackets[0]
print(bracket.cutoff({str(i): i for i in range(20)}))
| apache-2.0 | -8,235,873,777,404,223,000 | 38.692737 | 79 | 0.580859 | false |
Bjwebb/detecting-clouds | test.py | 1 | 1519 | from utils import get_sidereal_time
from process import open_fits, flatten_max, DataProcessor
import dateutil.parser
import os, shutil
dp = DataProcessor()
dp.outdir = 'test/out'
dp.verbose = 1
#date_obs = '2011-05-25T06:00:10'
date_obs = '2012-02-29T10:37:12'
name = date_obs + '.fits'
path = os.path.join('sym', name[0:4], name[5:7], name[8:10])
dp.process_file(os.path.join(path, name))
"""
dt = dateutil.parser.parse(name.split('.')[0])
s = get_sidereal_time(dt).seconds
path_end = os.path.join(*[ unicode(x).zfill(2) for x in [ s/3600, (s/60)%60 ] ])
fname = os.path.join('out', 'fits', 'sid', path_end, 'total.fits')
tdata = open_fits(fname)
night = os.listdir(os.path.join('sid', path_end))
for i in [100, 250, 500, 1000, 3000, 4000, 5000, 2000]:
dp.output('total', tdata, image_filter=flatten_max(i*len(night)))
shutil.copyfile(os.path.join('test','out','png','total.png'),
os.path.join('test', 'total{0}.png').format(i))
"""
from django.template import Context, Template
t = Template(open(os.path.join('clouds','templates','clouds','image.html')).read())
from catlib import parse_cat
point_list = map(lambda (i,row):row, parse_cat(os.path.join('test','out','cat',path,date_obs+'.cat')).iterrows())
with open(os.path.join('test',date_obs+'.html'), 'w') as out:
out.write(t.render(Context({'point_list': point_list,
'object': {'get_url': 'sym/'+date_obs[:4]+'/'+date_obs[5:7]+'/'+date_obs[8:10]+'/'+date_obs }
})))
| mit | -2,582,954,058,109,025,000 | 36.04878 | 125 | 0.626728 | false |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/reshape.py | 1 | 38451 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse import SparseDataFrame, SparseSeries
from pandas.sparse.array import SparseArray
from pandas._sparse import IntIndex
from pandas.core.categorical import Categorical
from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote,
isnull)
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.common as com
import pandas.algos as algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = com.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
values = [ Categorical.from_array(values[:,i],
categories=self.is_categorical.categories,
ordered=True)
for i in range(values.shape[-1]) ]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width: (i + 1) * width]
mask_chunk = new_mask[:, i * width: (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels,
labels=result_labels,
names=self.new_index_names,
verify_integrity=False)
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids,
obs_ids, shape, clabels, xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = Series(data.values, index=dummy_index)
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [val if i > val else val - 1 for val in clocs]
return result
dummy = DataFrame(data.values, index=dummy_index,
columns=data.columns)
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
indexed = self.set_index([index, columns])
return indexed.unstack(columns)
else:
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([self[index],
self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
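# --- Illustrative sketch (added for exposition, not part of the original pandas
# source): pivot_simple lays three aligned arrays out as an index x columns
# frame. The helper name below is hypothetical and only demonstrates the call.
def _example_pivot_simple():
    index = np.array(['one', 'one', 'two', 'two'], dtype=object)
    columns = np.array(['a', 'b', 'a', 'b'], dtype=object)
    values = np.array([1.0, 2.0, 3.0, 4.0])
    # Expected layout: rows 'one'/'two', columns 'a'/'b', cells taken from `values`.
    return pivot_simple(index, columns, values)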
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
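# --- Illustrative sketch (added for exposition, not part of the original
# source): _slow_pivot builds a nested {column: {index: value}} dict and hands
# it to the DataFrame constructor. A minimal hypothetical call:
def _example_slow_pivot():
    # 'a' gets values for rows 'one' and 'two'; 'b' only for row 'one'.
    return _slow_pivot(['one', 'one', 'two'], ['a', 'b', 'a'], [1, 2, 3])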
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
def _unstack_frame(obj, level):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_levels.append(frame.columns)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
new_labels.append(np.tile(np.arange(K), N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
ilabels = np.arange(N).repeat(K)
clabels = np.tile(np.arange(K), N).ravel()
new_index = MultiIndex(levels=[frame.index, frame.columns],
labels=[ilabels, clabels],
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
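# --- Illustrative sketch (added for exposition, not part of the original
# source): stack() moves the columns of a DataFrame into the innermost level of
# a MultiIndex on a Series. The helper name is hypothetical.
def _example_stack():
    df = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['one', 'two'])
    # Result is a Series indexed by (row label, column label), e.g. ('one', 'a') -> 1.
    return stack(df)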
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level numbers, "
"not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something
we can safely pass to swaplevel:
We generally want to convert the level number into
a level name, except when columns do not have names,
in which case we must leave as a level number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[
lev.take(lab) for lev, lab in
zip(this.columns.levels[:-1], this.columns.labels[:-1])
]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None,
var_name=None, value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i for i in
range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
mdata[target] = com._concat_compat([data[col].values for col in names])
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
        String to prepend to the generated dummy column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the returned DataFrame should be sparse or not.
Returns
-------
dummies : DataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
    >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                    'C': [1, 2, 3]})
    >>> get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
See also ``Series.str.get_dummies``.
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(include=['object',
'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did "
"not match the length of the columns "
"being encoded ({2}).")
if com.is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
result = data.drop(columns_to_encode, axis=1)
with_dummies = [result]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False, sparse=False):
# Series avoids inconsistent NaN handling
cat = Categorical.from_array(Series(data), ordered=True)
levels = cat.categories
# if all NaN
if not dummy_na and len(levels) == 0:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index)
codes = cat.codes.copy()
if dummy_na:
codes[codes == -1] = len(cat.categories)
levels = np.append(cat.categories, np.nan)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v)
for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [ [] for _ in range(len(dummy_cols)) ]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs)), sparse_index=IntIndex(N, ixs),
fill_value=0)
sparse_series[col] = SparseSeries(data=sarr, index=index)
return SparseDataFrame(sparse_series, index=index, columns=dummy_cols)
else:
dummy_mat = np.eye(number_of_cols).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {
'major': 0,
'minor': 1
}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
cat = Categorical.from_array(mapped_items.take(labels), ordered=True)
labels = cat.codes
items = cat.categories
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| mit | 5,223,546,128,094,792,000 | 32.233362 | 88 | 0.565369 | false |
projectatomic/osbs-client | tests/cli/test_capture.py | 1 | 1660 | """
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
import json
import os
import pytest
from osbs.constants import DEFAULT_NAMESPACE
from osbs.cli.capture import setup_json_capture
from tests.constants import TEST_BUILD
@pytest.fixture # noqa
def osbs_with_capture(osbs, tmpdir):
setup_json_capture(osbs, osbs.os_conf, str(tmpdir))
return osbs
def test_json_capture_no_watch(osbs_with_capture, tmpdir):
for visit in ["000", "001"]:
osbs_with_capture.list_builds()
filename = "get-build.openshift.io_v1_namespaces_{n}_builds_-{v}.json"
path = os.path.join(str(tmpdir), filename.format(n=DEFAULT_NAMESPACE,
v=visit))
assert os.access(path, os.R_OK)
with open(path) as fp:
obj = json.load(fp)
assert obj
def test_json_capture_watch(osbs_with_capture, tmpdir):
# Take the first two yielded values (fresh object, update)
# PyCQA/pylint#2731 fixed in 2.4.4, so noqa
for _ in zip(range(2), # pylint: disable=W1638
osbs_with_capture.os.watch_resource('builds', TEST_BUILD)):
pass
filename = "get-build.openshift.io_v1_watch_namespaces_{n}_builds_{b}_-000-000.json"
path = os.path.join(str(tmpdir), filename.format(n=DEFAULT_NAMESPACE,
b=TEST_BUILD))
assert os.access(path, os.R_OK)
with open(path) as fp:
obj = json.load(fp)
assert obj
| bsd-3-clause | -4,635,834,311,523,012,000 | 30.320755 | 88 | 0.633133 | false |
qPCR4vir/orange3 | Orange/widgets/visualize/owboxplot.py | 1 | 30964 | # -*- coding: utf-8 -*-
import sys
import math
import itertools
import numpy as np
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtGui import QSizePolicy
import scipy.special
import Orange.data
from Orange.statistics import contingency, distribution
from Orange.widgets import widget, gui
from Orange.widgets.settings import (Setting, DomainContextHandler,
ContextSetting)
from Orange.widgets.utils import datacaching, vartype
def compute_scale(min_, max_):
if min_ == max_:
return math.floor(min_), 1
magnitude = int(3 * math.log10(abs(max_ - min_)) + 1)
if magnitude % 3 == 0:
first_place = 1
elif magnitude % 3 == 1:
first_place = 2
else:
first_place = 5
magnitude = magnitude // 3 - 1
step = first_place * pow(10, magnitude)
first_val = math.ceil(min_ / step) * step
return first_val, step
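# --- Illustrative note (added for exposition, not part of the original widget):
# compute_scale picks the first axis tick and a step equal to 1, 2 or 5 times a
# power of ten. Two worked examples, assuming the function defined above:
#
#     compute_scale(0.0, 7.3)     # -> (0, 1)     ticks at 0, 1, 2, ...
#     compute_scale(12.0, 478.0)  # -> (100, 100) ticks at 100, 200, ...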
class BoxData:
def __init__(self, dist):
self.dist = dist
self.N = N = np.sum(dist[1])
if N == 0:
return
self.a_min = float(dist[0, 0])
self.a_max = float(dist[0, -1])
self.mean = float(np.sum(dist[0] * dist[1]) / N)
self.var = float(np.sum(dist[1] * (dist[0] - self.mean) ** 2) / N)
self.dev = math.sqrt(self.var)
s = 0
thresholds = [N / 4, N / 2, N / 4 * 3]
thresh_i = 0
q = []
for i, e in enumerate(dist[1]):
s += e
if s >= thresholds[thresh_i]:
if s == thresholds[thresh_i] and i + 1 < dist.shape[1]:
q.append(float((dist[0, i] + dist[0, i + 1]) / 2))
else:
q.append(float(dist[0, i]))
thresh_i += 1
if thresh_i == 3:
break
while len(q) < 3:
q.append(q[-1])
self.q25, self.median, self.q75 = q
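# --- Illustrative sketch (added for exposition, not part of the original
# widget): BoxData expects a 2 x N distribution array -- row 0 holds the sorted
# values, row 1 the count of each value -- which is the shape the distribution
# and contingency helpers above feed it. A hand-built hypothetical example:
def _example_boxdata():
    dist = np.array([[1.0, 2.0, 3.0, 4.0],   # observed values
                     [2.0, 3.0, 3.0, 2.0]])  # how often each value occurs
    stat = BoxData(dist)
    # stat.N == 10.0 and stat.mean == 2.5; the quartiles are read off the
    # cumulative counts in the second row.
    return stat.N, stat.mean, stat.q25, stat.median, stat.q75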
# noinspection PyUnresolvedReferences
class OWBoxPlot(widget.OWWidget):
"""
Here's how the widget's functions call each other:
- `set_data` is a signal handler fills the list boxes and calls `attr_changed`.
- `attr_changed` handles changes of attribute or grouping (callbacks for
list boxes). It recomputes box data by calling `compute_box_data`, shows
the appropriate display box (discrete/continuous) and then calls
`layout_changed`
- `layout_changed` constructs all the elements for the scene (as lists of
QGraphicsItemGroup) and calls `display_changed`. It is called when the
attribute or grouping is changed (by attr_changed) and on resize event.
- `display_changed` puts the elements corresponding to the current display
settings on the scene. It is called when the elements are reconstructed
(layout is changed due to selection of attributes or resize event), or
when the user changes display settings or colors.
For discrete attributes, the flow is a bit simpler: the elements are not
constructed in advance (by layout_changed). Instead, layout_changed and
display_changed call display_changed_disc that draws everything.
"""
name = "Box Plot"
description = "Visualize the distribution of feature values in a box plot."
icon = "icons/BoxPlot.svg"
priority = 100
inputs = [("Data", Orange.data.Table, "set_data")]
#: Comparison types for continuous variables
CompareNone, CompareMedians, CompareMeans = 0, 1, 2
settingsHandler = DomainContextHandler()
attributes_select = ContextSetting([0])
grouping_select = ContextSetting([0])
show_annotations = Setting(True)
compare = Setting(CompareMedians)
stattest = Setting(0)
sig_threshold = Setting(0.05)
stretched = Setting(True)
_sorting_criteria_attrs = {
CompareNone: "", CompareMedians: "median", CompareMeans: "mean"
}
_pen_axis_tick = QtGui.QPen(QtCore.Qt.white, 5)
_pen_axis = QtGui.QPen(QtCore.Qt.darkGray, 3)
_pen_median = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0xff, 0xff, 0x00)), 2)
_pen_paramet = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0x33, 0x00, 0xff)), 2)
_pen_dotted = QtGui.QPen(QtGui.QBrush(QtGui.QColor(0x33, 0x00, 0xff)), 1)
_pen_dotted.setStyle(QtCore.Qt.DotLine)
_post_line_pen = QtGui.QPen(QtCore.Qt.lightGray, 2)
_post_grp_pen = QtGui.QPen(QtCore.Qt.lightGray, 4)
for pen in (_pen_paramet, _pen_median, _pen_dotted,
_pen_axis, _pen_axis_tick, _post_line_pen, _post_grp_pen):
pen.setCosmetic(True)
pen.setCapStyle(QtCore.Qt.RoundCap)
pen.setJoinStyle(QtCore.Qt.RoundJoin)
_pen_axis_tick.setCapStyle(QtCore.Qt.FlatCap)
_box_brush = QtGui.QBrush(QtGui.QColor(0x33, 0x88, 0xff, 0xc0))
_axis_font = QtGui.QFont()
_axis_font.setPixelSize(12)
_label_font = QtGui.QFont()
_label_font.setPixelSize(11)
_attr_brush = QtGui.QBrush(QtGui.QColor(0x33, 0x00, 0xff))
graph_name = "box_scene"
def __init__(self):
super().__init__()
self.grouping = []
self.attributes = []
self.stats = []
self.dataset = None
self.posthoc_lines = []
self.label_txts = self.mean_labels = self.boxes = self.labels = \
self.label_txts_all = self.attr_labels = self.order = []
self.p = -1.0
self.scale_x = self.scene_min_x = self.scene_width = 0
self.label_width = 0
self.attr_list_box = gui.listBox(
self.controlArea, self, "attributes_select", "attributes",
box="Variable", callback=self.attr_changed,
sizeHint=QtCore.QSize(200, 150))
self.attr_list_box.setSizePolicy(QSizePolicy.Fixed,
QSizePolicy.MinimumExpanding)
box = gui.vBox(self.controlArea, "Grouping")
self.group_list_box = gui.listBox(
box, self, 'grouping_select', "grouping",
callback=self.attr_changed,
sizeHint=QtCore.QSize(200, 100))
self.group_list_box.setSizePolicy(QSizePolicy.Fixed,
QSizePolicy.MinimumExpanding)
# TODO: move Compare median/mean to grouping box
self.display_box = gui.vBox(self.controlArea, "Display")
gui.checkBox(self.display_box, self, "show_annotations", "Annotate",
callback=self.display_changed)
self.compare_rb = gui.radioButtonsInBox(
self.display_box, self, 'compare',
btnLabels=["No comparison", "Compare medians", "Compare means"],
callback=self.display_changed)
self.stretching_box = gui.checkBox(
self.controlArea, self, 'stretched', "Stretch bars", box='Display',
callback=self.display_changed).box
gui.vBox(self.mainArea, addSpace=True)
self.box_scene = QtGui.QGraphicsScene()
self.box_view = QtGui.QGraphicsView(self.box_scene)
self.box_view.setRenderHints(QtGui.QPainter.Antialiasing |
QtGui.QPainter.TextAntialiasing |
QtGui.QPainter.SmoothPixmapTransform)
self.box_view.viewport().installEventFilter(self)
self.mainArea.layout().addWidget(self.box_view)
e = gui.hBox(self.mainArea, addSpace=False)
self.infot1 = gui.widgetLabel(e, "<center>No test results.</center>")
self.mainArea.setMinimumWidth(650)
self.stats = self.dist = self.conts = []
self.is_continuous = False
self.update_display_box()
def eventFilter(self, obj, event):
if obj is self.box_view.viewport() and \
event.type() == QtCore.QEvent.Resize:
self.layout_changed()
return super().eventFilter(obj, event)
# noinspection PyTypeChecker
def set_data(self, dataset):
if dataset is not None and (
not bool(dataset) or not len(dataset.domain)):
dataset = None
self.closeContext()
self.dataset = dataset
self.dist = self.stats = self.conts = []
self.grouping_select = []
self.attributes_select = []
self.attr_list_box.clear()
self.group_list_box.clear()
if dataset:
domain = dataset.domain
self.attributes = [(a.name, vartype(a)) for a in domain.variables +
domain.metas if a.is_primitive()]
self.grouping = ["None"] + [(a.name, vartype(a)) for a in
domain.variables + domain.metas
if a.is_discrete]
self.grouping_select = [0]
self.attributes_select = [0]
self.openContext(self.dataset)
self.attr_changed()
else:
self.reset_all_data()
def reset_all_data(self):
self.attr_list_box.clear()
self.group_list_box.clear()
self.clear_scene()
self.infot1.setText("")
def attr_changed(self):
self.compute_box_data()
self.update_display_box()
self.layout_changed()
if self.is_continuous:
heights = 90 if self.show_annotations else 60
self.box_view.centerOn(self.scene_min_x + self.scene_width / 2,
-30 - len(self.stats) * heights / 2 + 45)
else:
self.box_view.centerOn(self.scene_width / 2,
-30 - len(self.boxes) * 40 / 2 + 45)
def compute_box_data(self):
dataset = self.dataset
if dataset is None:
self.stats = self.dist = self.conts = []
return
attr = self.attributes[self.attributes_select[0]][0]
attr = dataset.domain[attr]
self.is_continuous = attr.is_continuous
group_by = self.grouping_select[0]
if group_by:
group = self.grouping[group_by][0]
self.dist = []
self.conts = datacaching.getCached(
dataset, contingency.get_contingency,
(dataset, attr, group))
if self.is_continuous:
self.stats = [BoxData(cont) for cont in self.conts]
self.label_txts_all = dataset.domain[group].values
else:
self.dist = datacaching.getCached(
dataset, distribution.get_distribution, (dataset, attr))
self.conts = []
if self.is_continuous:
self.stats = [BoxData(self.dist)]
self.label_txts_all = [""]
self.label_txts = [txts for stat, txts in zip(self.stats,
self.label_txts_all)
if stat.N > 0]
self.stats = [stat for stat in self.stats if stat.N > 0]
def update_display_box(self):
if self.is_continuous:
self.stretching_box.hide()
self.display_box.show()
group_by = self.grouping_select[0]
self.compare_rb.setEnabled(group_by != 0)
else:
self.stretching_box.show()
self.display_box.hide()
def clear_scene(self):
self.box_scene.clear()
self.attr_labels = []
self.labels = []
self.boxes = []
self.mean_labels = []
self.posthoc_lines = []
def layout_changed(self):
self.clear_scene()
if self.dataset is None or len(self.conts) == len(self.dist) == 0:
return
if not self.is_continuous:
return self.display_changed_disc()
attr = self.attributes[self.attributes_select[0]][0]
attr = self.dataset.domain[attr]
self.mean_labels = [self.mean_label(stat, attr, lab)
for stat, lab in zip(self.stats, self.label_txts)]
self.draw_axis()
self.boxes = [self.box_group(stat) for stat in self.stats]
self.labels = [self.label_group(stat, attr, mean_lab)
for stat, mean_lab in zip(self.stats, self.mean_labels)]
self.attr_labels = [QtGui.QGraphicsSimpleTextItem(lab)
for lab in self.label_txts]
for it in itertools.chain(self.labels, self.boxes, self.attr_labels):
self.box_scene.addItem(it)
self.display_changed()
def display_changed(self):
if self.dataset is None:
return
if not self.is_continuous:
return self.display_changed_disc()
self.order = list(range(len(self.stats)))
criterion = self._sorting_criteria_attrs[self.compare]
if criterion:
self.order = sorted(
self.order, key=lambda i: getattr(self.stats[i], criterion))
heights = 90 if self.show_annotations else 60
for row, box_index in enumerate(self.order):
y = (-len(self.stats) + row) * heights + 10
self.boxes[box_index].setY(y)
labels = self.labels[box_index]
if self.show_annotations:
labels.show()
labels.setY(y)
else:
labels.hide()
label = self.attr_labels[box_index]
label.setY(y - 15 - label.boundingRect().height())
if self.show_annotations:
label.hide()
else:
stat = self.stats[box_index]
if self.compare == OWBoxPlot.CompareMedians:
pos = stat.median + 5 / self.scale_x
elif self.compare == OWBoxPlot.CompareMeans:
pos = stat.mean + 5 / self.scale_x
else:
pos = stat.q25
label.setX(pos * self.scale_x)
label.show()
r = QtCore.QRectF(self.scene_min_x, -30 - len(self.stats) * heights,
self.scene_width, len(self.stats) * heights + 90)
self.box_scene.setSceneRect(r)
self.compute_tests()
self.show_posthoc()
def display_changed_disc(self):
self.clear_scene()
self.attr_labels = [QtGui.QGraphicsSimpleTextItem(lab)
for lab in self.label_txts_all]
if not self.stretched:
if self.grouping_select[0]:
self.labels = [QtGui.QGraphicsTextItem("{}".format(int(sum(cont))))
for cont in self.conts]
else:
self.labels = [QtGui.QGraphicsTextItem(str(int(sum(self.dist))))]
self.draw_axis_disc()
if self.grouping_select[0]:
self.boxes = [self.strudel(cont) for cont in self.conts]
else:
self.boxes = [self.strudel(self.dist)]
selected_grouping = self.grouping[self.grouping_select[0]][0]
selected_attribute = self.attributes[self.attributes_select[0]][0]
for row, box in enumerate(self.boxes):
y = (-len(self.boxes) + row) * 40 + 10
self.box_scene.addItem(box)
box.setPos(0, y)
label = self.attr_labels[row]
b = label.boundingRect()
label.setPos(-b.width() - 10, y - b.height() / 2)
self.box_scene.addItem(label)
if not self.stretched:
label = self.labels[row]
b = label.boundingRect()
if self.grouping_select[0]:
right = self.scale_x * sum(self.conts[row])
else:
right = self.scale_x * sum(self.dist)
label.setPos(right + 10, y - b.height() / 2)
self.box_scene.addItem(label)
if selected_attribute != selected_grouping:
attr = self.attributes[self.attributes_select[0]][0]
selected_attr = self.dataset.domain[attr]
for label_text, bar_part in zip(selected_attr.values,
box.childItems()):
label = QtGui.QGraphicsSimpleTextItem(label_text)
label.setPos(bar_part.boundingRect().x(),
y - label.boundingRect().height() - 8)
self.box_scene.addItem(label)
self.box_scene.setSceneRect(-self.label_width - 5,
-30 - len(self.boxes) * 40,
self.scene_width, len(self.boxes * 40) + 90)
self.infot1.setText("")
# noinspection PyPep8Naming
def compute_tests(self):
# The t-test and ANOVA are implemented here since they efficiently use
# the widget-specific data in self.stats.
# The non-parametric tests can't do this, so we use statistics.tests
def stat_ttest():
d1, d2 = self.stats
pooled_var = d1.var / d1.N + d2.var / d2.N
df = pooled_var ** 2 / \
((d1.var / d1.N) ** 2 / (d1.N - 1) +
(d2.var / d2.N) ** 2 / (d2.N - 1))
t = abs(d1.mean - d2.mean) / math.sqrt(pooled_var)
p = 2 * (1 - scipy.special.stdtr(df, t))
return t, p
# TODO: Check this function
# noinspection PyPep8Naming
def stat_ANOVA():
N = sum(stat.N for stat in self.stats)
grand_avg = sum(stat.N * stat.mean for stat in self.stats) / N
var_between = sum(stat.N * (stat.mean - grand_avg) ** 2
for stat in self.stats)
df_between = len(self.stats) - 1
var_within = sum(stat.N * stat.var for stat in self.stats)
df_within = N - len(self.stats)
F = (var_between / df_between) / (var_within / df_within)
p = 1 - scipy.special.fdtr(df_between, df_within, F)
return F, p
if self.compare == OWBoxPlot.CompareNone or len(self.stats) < 2:
t = ""
elif any(s.N <= 1 for s in self.stats):
t = "At least one group has just one instance, " \
"cannot compute significance"
elif len(self.stats) == 2:
if self.compare == OWBoxPlot.CompareMedians:
t = ""
# z, self.p = tests.wilcoxon_rank_sum(
# self.stats[0].dist, self.stats[1].dist)
# t = "Mann-Whitney's z: %.1f (p=%.3f)" % (z, self.p)
else:
t, self.p = stat_ttest()
t = "Student's t: %.3f (p=%.3f)" % (t, self.p)
else:
if self.compare == OWBoxPlot.CompareMedians:
t = ""
# U, self.p = -1, -1
# t = "Kruskal Wallis's U: %.1f (p=%.3f)" % (U, self.p)
else:
F, self.p = stat_ANOVA()
t = "ANOVA: %.3f (p=%.3f)" % (F, self.p)
self.infot1.setText("<center>%s</center>" % t)
def mean_label(self, stat, attr, val_name):
label = QtGui.QGraphicsItemGroup()
t = QtGui.QGraphicsSimpleTextItem(
"%.*f" % (attr.number_of_decimals + 1, stat.mean), label)
t.setFont(self._label_font)
bbox = t.boundingRect()
w2, h = bbox.width() / 2, bbox.height()
t.setPos(-w2, -h)
tpm = QtGui.QGraphicsSimpleTextItem(
" \u00b1 " + "%.*f" % (attr.number_of_decimals + 1, stat.dev),
label)
tpm.setFont(self._label_font)
tpm.setPos(w2, -h)
if val_name:
vnm = QtGui.QGraphicsSimpleTextItem(val_name + ": ", label)
vnm.setFont(self._label_font)
vnm.setBrush(self._attr_brush)
vb = vnm.boundingRect()
label.min_x = -w2 - vb.width()
vnm.setPos(label.min_x, -h)
else:
label.min_x = -w2
return label
def draw_axis(self):
"""Draw the horizontal axis and sets self.scale_x"""
bottom = min(stat.a_min for stat in self.stats)
top = max(stat.a_max for stat in self.stats)
first_val, step = compute_scale(bottom, top)
while bottom <= first_val:
first_val -= step
bottom = first_val
no_ticks = math.ceil((top - first_val) / step) + 1
top = max(top, first_val + no_ticks * step)
gbottom = min(bottom, min(stat.mean - stat.dev for stat in self.stats))
gtop = max(top, max(stat.mean + stat.dev for stat in self.stats))
bv = self.box_view
viewrect = bv.viewport().rect().adjusted(15, 15, -15, -30)
self.scale_x = scale_x = viewrect.width() / (gtop - gbottom)
# In principle we should repeat this until convergence since the new
# scaling is too conservative. (No chance am I doing this.)
mlb = min(stat.mean + mean_lab.min_x / scale_x
for stat, mean_lab in zip(self.stats, self.mean_labels))
if mlb < gbottom:
gbottom = mlb
self.scale_x = scale_x = viewrect.width() / (gtop - gbottom)
self.scene_min_x = gbottom * scale_x
self.scene_width = (gtop - gbottom) * scale_x
val = first_val
attr = self.attributes[self.attributes_select[0]][0]
attr_desc = self.dataset.domain[attr]
while True:
l = self.box_scene.addLine(val * scale_x, -1, val * scale_x, 1,
self._pen_axis_tick)
l.setZValue(100)
t = self.box_scene.addSimpleText(
attr_desc.repr_val(val), self._axis_font)
t.setFlags(t.flags() |
QtGui.QGraphicsItem.ItemIgnoresTransformations)
r = t.boundingRect()
t.setPos(val * scale_x - r.width() / 2, 8)
if val >= top:
break
val += step
self.box_scene.addLine(bottom * scale_x - 4, 0,
top * scale_x + 4, 0, self._pen_axis)
def draw_axis_disc(self):
"""
Draw the horizontal axis and sets self.scale_x for discrete attributes
"""
if self.stretched:
step = steps = 10
else:
if self.grouping_select[0]:
max_box = max(float(np.sum(dist)) for dist in self.conts)
else:
max_box = float(np.sum(self.dist))
if max_box == 0:
self.scale_x = 1
return
_, step = compute_scale(0, max_box)
step = int(step) if step > 1 else 1
steps = int(math.ceil(max_box / step))
max_box = step * steps
bv = self.box_view
viewrect = bv.viewport().rect().adjusted(15, 15, -15, -30)
self.scene_width = viewrect.width()
lab_width = max(lab.boundingRect().width() for lab in self.attr_labels)
lab_width = max(lab_width, 40)
lab_width = min(lab_width, self.scene_width / 3)
self.label_width = lab_width
right_offset = 0 # offset for the right label
if not self.stretched and self.labels:
if self.grouping_select[0]:
rows = list(zip(self.conts, self.labels))
else:
rows = [(self.dist, self.labels[0])]
# available space left of the 'group labels'
available = self.scene_width - lab_width - 10
scale_x = (available - right_offset) / max_box
max_right = max(sum(dist) * scale_x + 10 +
lbl.boundingRect().width()
for dist, lbl in rows)
right_offset = max(0, max_right - max_box * scale_x)
self.scale_x = scale_x = (self.scene_width - lab_width - 10 - right_offset) / max_box
self.box_scene.addLine(0, 0, max_box * scale_x, 0, self._pen_axis)
for val in range(0, step * steps + 1, step):
l = self.box_scene.addLine(val * scale_x, -1, val * scale_x, 1,
self._pen_axis_tick)
l.setZValue(100)
t = self.box_scene.addSimpleText(str(val), self._axis_font)
t.setPos(val * scale_x - t.boundingRect().width() / 2, 8)
if self.stretched:
self.scale_x *= 100
def label_group(self, stat, attr, mean_lab):
def centered_text(val, pos):
t = QtGui.QGraphicsSimpleTextItem(
"%.*f" % (attr.number_of_decimals + 1, val), labels)
t.setFont(self._label_font)
bbox = t.boundingRect()
t.setPos(pos - bbox.width() / 2, 22)
return t
def line(x, down=1):
QtGui.QGraphicsLineItem(x, 12 * down, x, 20 * down, labels)
def move_label(label, frm, to):
label.setX(to)
to += t_box.width() / 2
path = QtGui.QPainterPath()
path.lineTo(0, 4)
path.lineTo(to - frm, 4)
path.lineTo(to - frm, 8)
p = QtGui.QGraphicsPathItem(path)
p.setPos(frm, 12)
labels.addToGroup(p)
labels = QtGui.QGraphicsItemGroup()
labels.addToGroup(mean_lab)
m = stat.mean * self.scale_x
mean_lab.setPos(m, -22)
line(m, -1)
msc = stat.median * self.scale_x
med_t = centered_text(stat.median, msc)
med_box_width2 = med_t.boundingRect().width()
line(msc)
x = stat.q25 * self.scale_x
t = centered_text(stat.q25, x)
t_box = t.boundingRect()
med_left = msc - med_box_width2
if x + t_box.width() / 2 >= med_left - 5:
move_label(t, x, med_left - t_box.width() - 5)
else:
line(x)
x = stat.q75 * self.scale_x
t = centered_text(stat.q75, x)
t_box = t.boundingRect()
med_right = msc + med_box_width2
if x - t_box.width() / 2 <= med_right + 5:
move_label(t, x, med_right + 5)
else:
line(x)
return labels
def box_group(self, stat, height=20):
def line(x0, y0, x1, y1, *args):
return QtGui.QGraphicsLineItem(x0 * scale_x, y0, x1 * scale_x, y1,
*args)
scale_x = self.scale_x
box = QtGui.QGraphicsItemGroup()
whisker1 = line(stat.a_min, -1.5, stat.a_min, 1.5, box)
whisker2 = line(stat.a_max, -1.5, stat.a_max, 1.5, box)
vert_line = line(stat.a_min, 0, stat.a_max, 0, box)
mean_line = line(stat.mean, -height / 3, stat.mean, height / 3, box)
for it in (whisker1, whisker2, mean_line):
it.setPen(self._pen_paramet)
vert_line.setPen(self._pen_dotted)
var_line = line(stat.mean - stat.dev, 0, stat.mean + stat.dev, 0, box)
var_line.setPen(self._pen_paramet)
mbox = QtGui.QGraphicsRectItem(stat.q25 * scale_x, -height / 2,
(stat.q75 - stat.q25) * scale_x, height,
box)
mbox.setBrush(self._box_brush)
mbox.setPen(QtGui.QPen(QtCore.Qt.NoPen))
mbox.setZValue(-200)
median_line = line(stat.median, -height / 2,
stat.median, height / 2, box)
median_line.setPen(self._pen_median)
median_line.setZValue(-150)
return box
def strudel(self, dist):
attr = self.attributes[self.attributes_select[0]][0]
attr = self.dataset.domain[attr]
ss = np.sum(dist)
box = QtGui.QGraphicsItemGroup()
if ss < 1e-6:
QtGui.QGraphicsRectItem(0, -10, 1, 10, box)
cum = 0
for i, v in enumerate(dist):
if v < 1e-6:
continue
if self.stretched:
v /= ss
v *= self.scale_x
rect = QtGui.QGraphicsRectItem(cum + 1, -6, v - 2, 12, box)
rect.setBrush(QtGui.QBrush(QtGui.QColor(*attr.colors[i])))
rect.setPen(QtGui.QPen(QtCore.Qt.NoPen))
if self.stretched:
tooltip = "{}: {:.2f}%".format(attr.values[i],
100 * dist[i] / sum(dist))
else:
tooltip = "{}: {}".format(attr.values[i], int(dist[i]))
rect.setToolTip(tooltip)
cum += v
return box
def show_posthoc(self):
def line(y0, y1):
it = self.box_scene.addLine(x, y0, x, y1, self._post_line_pen)
it.setZValue(-100)
self.posthoc_lines.append(it)
while self.posthoc_lines:
self.box_scene.removeItem(self.posthoc_lines.pop())
if self.compare == OWBoxPlot.CompareNone or len(self.stats) < 2:
return
if self.compare == OWBoxPlot.CompareMedians:
crit_line = "median"
elif self.compare == OWBoxPlot.CompareMeans:
crit_line = "mean"
else:
assert False
xs = []
height = 90 if self.show_annotations else 60
y_up = -len(self.stats) * height + 10
for pos, box_index in enumerate(self.order):
stat = self.stats[box_index]
x = getattr(stat, crit_line) * self.scale_x
xs.append(x)
by = y_up + pos * height
line(by + 12, 3)
line(by - 12, by - 25)
used_to = []
last_to = 0
for frm, frm_x in enumerate(xs[:-1]):
for to in range(frm + 1, len(xs)):
if xs[to] - frm_x > 1.5:
to -= 1
break
if last_to == to or frm == to:
continue
for rowi, used in enumerate(used_to):
if used < frm:
used_to[rowi] = to
break
else:
rowi = len(used_to)
used_to.append(to)
y = - 6 - rowi * 6
it = self.box_scene.addLine(frm_x - 2, y, xs[to] + 2, y,
self._post_grp_pen)
self.posthoc_lines.append(it)
last_to = to
def get_widget_name_extension(self):
if self.attributes_select and len(self.attributes):
return self.attributes[self.attributes_select[0]][0]
def send_report(self):
self.report_plot()
text = ""
if self.attributes_select and len(self.attributes):
text += "Box plot for attribute '{}' ".format(
self.attributes[self.attributes_select[0]][0])
if self.grouping_select and len(self.grouping):
text += "grouped by '{}'".format(
self.grouping[self.grouping_select[0]][0])
if text:
self.report_caption(text)
def main(argv=None):
if argv is None:
argv = sys.argv
argv = list(argv)
app = QtGui.QApplication(argv)
if len(argv) > 1:
filename = argv[1]
else:
filename = "brown-selected"
data = Orange.data.Table(filename)
w = OWBoxPlot()
w.show()
w.raise_()
w.set_data(data)
w.handleNewSignals()
rval = app.exec_()
w.set_data(None)
w.handleNewSignals()
w.saveSettings()
return rval
if __name__ == "__main__":
sys.exit(main())
| bsd-2-clause | 7,862,687,610,765,192,000 | 36.807082 | 93 | 0.536171 | false |
beewizzard/discord-dicebot | diceroll_bot.py | 1 | 5786 | import discord
import asyncio
from discord.errors import HTTPException
import os
import random
import re
# Use these variable to limit overloading of the dice roller
MAXIMUM_DICE_ARGS = 10
MAXIMUM_DICE_COUNT = 1000
MAXIMUM_DICE_SIDES = 1000
MAX_MESSAGE_LENGTH = 2000
class Error(Exception):
"""Base class for exceptions"""
pass
class DiceFormatError(Error):
"""Exception raised for errors in dice string format.
Attributes:
invalid_dice_str -- the invalid dice string which caused this exception to be raised
"""
error_format = "__Error__: {0.mention} provided invalid dice [{1}].\n" \
"Valid format is <x>d<y>[(+|-)<z>].\n" \
"All values must be positive integers."
def __init__(self, invalid_dice_str):
self.invalid_dice_str = invalid_dice_str
def get_error_string(self, author):
return self.error_format.format(author, self.invalid_dice_str)
class DiceValueError(Error):
"""Exception raised for errors in dice values
Attributes:
invalid_dice_str -- the invalid dice string which caused this exception to be raised
"""
error_format = "__Error__: {0.mention} gave a bad value for [{1}].\n" \
"Dice count maximum: {2}\n" \
"Dice sides maximum: {3}"
def __init__(self, invalid_dice_str):
self.invalid_dice_str = invalid_dice_str
def get_error_string(self, author):
return self.error_format.format(
author,
self.invalid_dice_str,
MAXIMUM_DICE_COUNT,
MAXIMUM_DICE_SIDES,
)
def get_roll(dice_str, rng=random.SystemRandom()):
"""
Simulates the effect of rolling one or more dice.
    :param dice_str: A dice string with the following format (an invalid format raises a DiceFormatError):
        <x>d<y>[(+|-)<z>]
        where x, y, and z are all positive integers.
        x and y may be no greater than 1000, else a DiceValueError is raised.
:param rng: A random number generator. Defaults to random.SystemRandom()
:return: An int list of all dice rolled
"""
match = re.match(r'^(\d+)d(\d+)([+-]\d+)?$', dice_str)
if match:
result = []
add = 0
num_dice = int(match.group(1))
num_sides = int(match.group(2))
if match.group(3):
add = int(match.group(3))
# Check for valid dice count and sides
if num_dice > MAXIMUM_DICE_COUNT or num_sides > MAXIMUM_DICE_SIDES:
raise DiceValueError(dice_str)
for x in range(0, num_dice):
roll = rng.randint(1, num_sides) + add
result.append(roll)
return result
else:
raise DiceFormatError(dice_str)
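# --- Illustrative sketch (added for exposition, not part of the original bot):
# get_roll() parses strings such as "3d6" or "2d20+1" and returns one result per
# die. A hypothetical offline check, independent of Discord:
def _example_get_roll():
    rolls = get_roll("3d6+2")          # three d6 rolls, each with +2 added
    assert len(rolls) == 3
    assert all(3 <= r <= 8 for r in rolls)
    return rolls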
client = discord.Client()
@client.event
async def on_ready():
print("Logged in as")
print(client.user.name)
print(client.user.id)
print("------")
@client.event
async def on_message(message):
try:
command, *args = message.content.split()
if command == '!roll-help':
#
            # !roll-help
#
pass
elif command == '!roll':
#
# !roll
#
rng = random.SystemRandom()
if len(args) > MAXIMUM_DICE_ARGS:
# Let the author know that only the first MAXIMUM_DICE_ARGS dice were considered
output = "Warning {0.mention}: maximum dice arguments is {1}. Proceeding with first {1} " \
"arguments...".format(message.author, MAXIMUM_DICE_ARGS)
await client.send_message(message.channel, output)
dice_list = args[:MAXIMUM_DICE_ARGS]
response_format = "{0.mention} rolled:\n{1}"
roll_format = "**{0}**: {1}"
if len(dice_list) == 0:
output = roll_format.format("1d20", str(rng.randint(1, 20)))
await client.send_message(message.channel, response_format.format(message.author, output))
else:
try:
rolls = [roll_format.format(dice_str, " ".join([str(x) for x in get_roll(dice_str, rng)]))
for dice_str in dice_list]
output = "\n".join(rolls)
# Check to make sure the message isn't too long
if len(output) > MAX_MESSAGE_LENGTH:
# TODO: split up the message and deliver in pieces
await client.send_message(message.channel, "__Error__: {0.mention} The response was too long "
"for the server to handle. Try fewer/smaller dice.".
format(message.author))
else:
await client.send_message(message.channel, response_format.format(message.author, output))
except DiceFormatError as e:
await client.send_message(message.channel, e.get_error_string(message.author))
except DiceValueError as e:
await client.send_message(message.channel, e.get_error_string(message.author))
except HTTPException:
await client.send_message(message.channel, "__Error__: {0.mention} An error occurred while "
"attempting to communicate with the server.".
format(message.author))
# TODO: catch all other exceptions and log to file
# TODO: Add "try !roll-help" to end of every error message
except ValueError:
# Empty message. Do nothing
pass
client.run(os.environ['DISCORD_DICEROLL_TOKEN'])
| gpl-3.0 | 5,443,215,532,587,629,000 | 34.496933 | 119 | 0.55859 | false |
sachingupta006/Mezzanine | mezzanine/generic/forms.py | 1 | 5182 |
from django import forms
from django.contrib.comments.forms import CommentSecurityForm, CommentForm
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.forms import Html5Mixin
from mezzanine.generic.models import Keyword, ThreadedComment, RATING_RANGE
class KeywordsWidget(forms.MultiWidget):
"""
Form field for the ``KeywordsField`` generic relation field. Since
the admin with model forms has no form field for generic
relations, this form field provides a single field for managing
the keywords. It contains two actual widgets, a text input for
entering keywords, and a hidden input that stores the ID of each
``Keyword`` instance.
The attached JavaScript adds behaviour so that when the form is
submitted, an AJAX post is made that passes the list of keywords
in the text input, and returns a list of keyword IDs which are
then entered into the hidden input before the form submits. The
list of IDs in the hidden input is what is used when retrieving
an actual value from the field for the form.
"""
class Media:
js = ("mezzanine/js/%s" % settings.JQUERY_FILENAME,
"mezzanine/js/admin/keywords_field.js",)
def __init__(self, attrs=None):
"""
Setup the text and hidden form field widgets.
"""
widgets = (forms.HiddenInput,
forms.TextInput(attrs={"class": "vTextField"}))
super(KeywordsWidget, self).__init__(widgets, attrs)
self._ids = []
def decompress(self, value):
"""
Takes the sequence of ``AssignedKeyword`` instances and splits
them into lists of keyword IDs and titles each mapping to one
of the form field widgets.
"""
if hasattr(value, "select_related"):
keywords = [a.keyword for a in value.select_related("keyword")]
if keywords:
keywords = [(str(k.id), k.title) for k in keywords]
self._ids, words = zip(*keywords)
return (",".join(self._ids), ", ".join(words))
return ("", "")
def format_output(self, rendered_widgets):
"""
Wraps the output HTML with a list of all available ``Keyword``
instances that can be clicked on to toggle a keyword.
"""
rendered = super(KeywordsWidget, self).format_output(rendered_widgets)
links = ""
for keyword in Keyword.objects.all().order_by("title"):
prefix = "+" if str(keyword.id) not in self._ids else "-"
links += ("<a href='#'>%s%s</a>" % (prefix, unicode(keyword)))
rendered += mark_safe("<p class='keywords-field'>%s</p>" % links)
return rendered
def value_from_datadict(self, data, files, name):
"""
Return the comma separated list of keyword IDs for use in
``KeywordsField.save_form_data()``.
"""
return data.get("%s_0" % name, "")
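# Illustrative example of KeywordsWidget.decompress() (hypothetical data): for two assigned
# keywords titled "django" (id 1) and "python" (id 2) it returns ("1,2", "django, python"),
# i.e. the hidden widget receives the comma-separated IDs and the text widget the titles.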
class ThreadedCommentForm(CommentForm, Html5Mixin):
name = forms.CharField(label=_("Name"), help_text=_("required"),
max_length=50)
email = forms.EmailField(label=_("Email"),
help_text=_("required (not published)"))
url = forms.URLField(label=_("Website"), help_text=_("optional"),
required=False)
# These are used to get/set prepopulated fields via cookies.
cookie_fields = ("name", "email", "url")
cookie_prefix = "mezzanine-comment-"
def __init__(self, request, *args, **kwargs):
"""
Set some initial field values from cookies or the logged in
user, and apply some HTML5 attributes to the fields if the
``FORMS_USE_HTML5`` setting is ``True``.
        The default values filled into the CommentForm have been changed
        so that values from the logged-in user take precedence over
        cookie values.
"""
kwargs.setdefault("initial", {})
user = request.user
for field in ThreadedCommentForm.cookie_fields:
cookie_name = ThreadedCommentForm.cookie_prefix + field
if user.is_authenticated():
if field == "name":
value = user.get_full_name()
if not value:
value = user.username
elif field == "email":
value = user.email
else:
                value = ""
else:
value = request.COOKIES.get(cookie_name, "")
kwargs["initial"][field] = value
super(ThreadedCommentForm, self).__init__(*args, **kwargs)
def get_comment_model(self):
"""
Use the custom comment model instead of the built-in one.
"""
return ThreadedComment
class RatingForm(CommentSecurityForm):
"""
Form for a rating. Subclasses ``CommentSecurityForm`` to make use
of its easy setup for generic relations.
"""
value = forms.ChoiceField(label="", widget=forms.RadioSelect,
choices=zip(RATING_RANGE, RATING_RANGE))
| bsd-2-clause | -7,536,275,535,220,474,000 | 38.557252 | 78 | 0.606716 | false |
Sabayon/entropy | client/solo/commands/help.py | 1 | 3023 | # -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <[email protected]>
@contact: [email protected]
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Command Line Client}.
"""
import argparse
from entropy.i18n import _
from entropy.output import teal, purple, darkgreen
from _entropy.solo.colorful import ColorfulFormatter
from _entropy.solo.commands.descriptor import SoloCommandDescriptor
from _entropy.solo.commands.command import SoloCommand
class SoloHelp(SoloCommand):
"""
Main Solo help command.
"""
NAME = "help"
ALIASES = ["-h", "--help"]
CATCH_ALL = True
def parse(self):
"""
Parse help command
"""
return self._show_help, []
def bashcomp(self, last_arg):
"""
Overridden from SoloCommand
"""
import sys
descriptors = SoloCommandDescriptor.obtain()
descriptors.sort(key = lambda x: x.get_name())
outcome = []
for descriptor in descriptors:
name = descriptor.get_name()
if name == SoloHelp.NAME:
# do not add self
continue
outcome.append(name)
aliases = descriptor.get_class().ALIASES
outcome.extend(aliases)
def _startswith(string):
if last_arg is not None:
return string.startswith(last_arg)
return True
outcome = sorted(filter(_startswith, outcome))
sys.stdout.write(" ".join(outcome) + "\n")
sys.stdout.flush()
def _show_help(self, *args):
# equo help <foo> <bar>
if len(self._args) > 1:
# syntax error
return -10
parser = argparse.ArgumentParser(
description=_("Entropy Command Line Client, Equo"),
epilog="http://www.sabayon.org",
formatter_class=ColorfulFormatter)
# filtered out in solo.main. Will never get here
parser.add_argument(
"--color", action="store_true",
default=None, help=_("force colored output"))
descriptors = SoloCommandDescriptor.obtain()
descriptors.sort(key = lambda x: x.get_name())
group = parser.add_argument_group("command", "available commands")
for descriptor in descriptors:
if descriptor.get_class().HIDDEN:
continue
aliases = descriptor.get_class().ALIASES
aliases_str = ", ".join([teal(x) for x in aliases])
if aliases_str:
aliases_str = " [%s]" % (aliases_str,)
name = "%s%s" % (purple(descriptor.get_name()),
aliases_str)
desc = descriptor.get_description()
group.add_argument(name, help=darkgreen(desc), action="store_true")
parser.print_help()
if not self._args:
return 1
return 0
SoloCommandDescriptor.register(
SoloCommandDescriptor(
SoloHelp,
SoloHelp.NAME,
_("this help"))
)
| gpl-2.0 | -6,783,595,746,927,479,000 | 28.349515 | 79 | 0.571948 | false |
Diti24/python-ivi | ivi/ics/__init__.py | 1 | 1184 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Jeff Wurzbach
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Ethernet to Modbus bridge
from .ics8099 import ics8099
| mit | 5,884,640,324,053,623,000 | 36 | 77 | 0.799831 | false |
jriehl/numba | numba/typing/collections.py | 1 | 4091 | from __future__ import print_function, division, absolute_import
from .. import types, utils, errors
import operator
from .templates import (AttributeTemplate, ConcreteTemplate, AbstractTemplate,
infer_global, infer, infer_getattr,
signature, bound_function, make_callable_template)
from .builtins import normalize_1d_index
@infer_global(operator.contains)
class InContainer(AbstractTemplate):
key = operator.contains
def generic(self, args, kws):
cont, item = args
if isinstance(cont, types.Container):
return signature(types.boolean, cont, cont.dtype)
@infer_global(len)
class ContainerLen(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Container)):
return signature(types.intp, val)
@infer_global(operator.truth)
class SequenceBool(AbstractTemplate):
key = operator.truth
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Sequence)):
return signature(types.boolean, val)
@infer_global(operator.getitem)
class GetItemSequence(AbstractTemplate):
key = operator.getitem
def generic(self, args, kws):
seq, idx = args
if isinstance(seq, types.Sequence):
idx = normalize_1d_index(idx)
if isinstance(idx, types.SliceType):
# Slicing a tuple only supported with static_getitem
if not isinstance(seq, types.BaseTuple):
return signature(seq, seq, idx)
elif isinstance(idx, types.Integer):
return signature(seq.dtype, seq, idx)
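# Illustrative example (assumed types): indexing a reflected list of int64 with an intp index
# resolves through GetItemSequence to signature(int64, list(int64), intp), i.e. the element
# dtype is returned; slicing a non-tuple sequence instead resolves to the same sequence type.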
@infer_global(operator.setitem)
class SetItemSequence(AbstractTemplate):
def generic(self, args, kws):
seq, idx, value = args
if isinstance(seq, types.MutableSequence):
idx = normalize_1d_index(idx)
if isinstance(idx, types.SliceType):
return signature(types.none, seq, idx, seq)
elif isinstance(idx, types.Integer):
if not self.context.can_convert(value, seq.dtype):
msg = "invalid setitem with value of {} to element of {}"
raise errors.TypingError(msg.format(types.unliteral(value), seq.dtype))
return signature(types.none, seq, idx, seq.dtype)
@infer_global(operator.delitem)
class DelItemSequence(AbstractTemplate):
def generic(self, args, kws):
seq, idx = args
if isinstance(seq, types.MutableSequence):
idx = normalize_1d_index(idx)
return signature(types.none, seq, idx)
# --------------------------------------------------------------------------
# named tuples
@infer_getattr
class NamedTupleAttribute(AttributeTemplate):
key = types.BaseNamedTuple
def resolve___class__(self, tup):
return types.NamedTupleClass(tup.instance_class)
def generic_resolve(self, tup, attr):
# Resolution of other attributes
try:
index = tup.fields.index(attr)
except ValueError:
return
return tup[index]
@infer_getattr
class NamedTupleClassAttribute(AttributeTemplate):
key = types.NamedTupleClass
def resolve___call__(self, classty):
"""
Resolve the named tuple constructor, aka the class's __call__ method.
"""
instance_class = classty.instance_class
pysig = utils.pysignature(instance_class)
def typer(*args, **kws):
# Fold keyword args
try:
bound = pysig.bind(*args, **kws)
except TypeError as e:
msg = "In '%s': %s" % (instance_class, e)
e.args = (msg,)
raise
assert not bound.kwargs
return types.BaseTuple.from_types(bound.args, instance_class)
# Override the typer's pysig to match the namedtuple constructor's
typer.pysig = pysig
return types.Function(make_callable_template(self.key, typer))
| bsd-2-clause | -9,103,186,875,090,370,000 | 32.260163 | 91 | 0.609875 | false |
enixdark/im-r-e-d-i-s | flask-cook/migrations/versions/399106d8a6ad_.py | 1 | 1071 | """empty message
Revision ID: 399106d8a6ad
Revises: None
Create Date: 2015-03-06 03:55:19.157958
"""
# revision identifiers, used by Alembic.
revision = '399106d8a6ad'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('price', sa.Float(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('product')
op.drop_table('category')
### end Alembic commands ###
| mit | -4,522,302,353,539,535,000 | 26.461538 | 64 | 0.661998 | false |
stonebig/bokeh | bokeh/model.py | 1 | 29925 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a base class for all objects (called Bokeh Models) that can go in
a Bokeh |Document|.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from json import loads
from operator import itemgetter
# External imports
from six import iteritems, string_types
# Bokeh imports
from .core.json_encoder import serialize_json
from .core.properties import Any, Dict, Instance, List, String
from .core.has_props import HasProps, MetaHasProps
from .core.query import find
from .events import Event
from .themes import default as default_theme
from .util.callback_manager import PropertyCallbackManager, EventCallbackManager
from .util.future import with_metaclass
from .util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'collect_models',
'get_class',
'Model',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def collect_filtered_models(discard, *input_values):
''' Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
        discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
        list[Model] : all models reachable from the inputs that were not discarded
'''
ids = set([])
collected = []
queued = []
def queue_one(obj):
if obj.id not in ids and not (callable(discard) and discard(obj)):
queued.append(obj)
for value in input_values:
_visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if obj.id not in ids:
ids.add(obj.id)
collected.append(obj)
_visit_immediate_value_references(obj, queue_one)
return collected
def collect_models(*input_values):
''' Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go. The resulting list is
duplicate-free based on objects' identifiers.
Args:
*input_values (Model)
Bokeh models to collect other models from
Returns:
list[Model] : all models reachable from this one.
'''
return collect_filtered_models(None, *input_values)
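# Illustrative example: collect_models(plot) returns the plot itself plus every model it
# transitively references (ranges, axes, glyph renderers, data sources, tools, ...), with
# duplicates removed.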
def get_class(view_model_name):
''' Look up a Bokeh model class, given its view model name.
Args:
view_model_name (str) :
A view model name for a Bokeh model to look up
Returns:
Model: the model class corresponding to ``view_model_name``
Raises:
KeyError, if the model cannot be found
Example:
.. code-block:: python
>>> from bokeh.model import get_class
>>> get_class("Range1d")
<class 'bokeh.models.ranges.Range1d'>
'''
    # In order to look up entries in the model catalog that MetaModel maintains, the
    # catalog has to be populated first. These imports ensure that all built-in Bokeh
    # models are represented in the catalog.
from . import models; models
from .plotting import Figure; Figure
d = MetaModel.model_class_reverse_map
if view_model_name in d:
return d[view_model_name]
else:
raise KeyError("View model name '%s' not found" % view_model_name)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class MetaModel(MetaHasProps):
''' Specialize the construction of |Model| classes.
This class is a `metaclass`_ for |Model| that is responsible for
automatically cataloging all Bokeh models that get defined, so that the
serialization machinery between Bokeh and BokehJS can function properly.
.. note::
It is worth pointing out explicitly that this relies on the rules
for Metaclass inheritance in Python.
Bokeh works by replicating Python model objects (e.g. plots, ranges,
data sources, which are all |HasProps| subclasses) into BokehJS. In the
case of using a Bokeh server, the Bokeh model objects can also be
synchronized bidirectionally. This is accomplished by serializing the
models to and from a JSON format, that includes the name of the model type
as part of the payload, as well as a unique ID, and all the attributes:
.. code-block:: javascript
{
type: "Plot",
id: 100032,
attributes: { ... }
}
Typically the type name is inferred automatically from the Python class
name, and is set as the ``__view_model__`` class attribute on the Model
    class that is created. But it is also possible to override this value
explicitly:
.. code-block:: python
class Foo(Model): pass
class Bar(Model):
            __view_model__ = "Quux"
This metaclass will raise an error if two Bokeh models are created that
attempt to have the same view model name. The only exception made is if
one of the models has a custom ``__implementation__`` in its class
definition.
This metaclass also handles subtype relationships between Bokeh models.
Occasionally it may be necessary for multiple class types on the Python
side to resolve to the same type on the BokehJS side. This is called
subtyping, and is expressed through a ``__subtype__`` class attribute on
a model:
.. code-block:: python
class Foo(Model): pass
class Bar(Foo):
__view_model__ = "Foo"
__subtype__ = "Bar"
In this case, python instances of ``Foo`` and ``Bar`` will both resolve to
``Foo`` models in BokehJS. In the context of a Bokeh server application,
the original python types will be faithfully round-tripped. (Without the
``__subtype__`` specified, the above code would raise an error due to
duplicate view model names.)
.. _metaclass: https://docs.python.org/3/reference/datamodel.html#metaclasses
'''
model_class_reverse_map = {}
def __new__(meta_cls, class_name, bases, class_dict):
'''
Raises:
Warning
'''
# use an explicitly provided view model name if there is one
if "__view_model__" not in class_dict:
class_dict["__view_model__"] = class_name
# call the parent metaclass to create the new model type
newcls = super(MetaModel, meta_cls).__new__(meta_cls, class_name, bases, class_dict)
# update the mapping of view model names to classes, checking for any duplicates
# and handling any subtype relationships or custom implementations
entry = class_dict.get("__subtype__", class_dict["__view_model__"])
if entry in MetaModel.model_class_reverse_map and not hasattr(newcls, "__implementation__"):
raise Warning("Duplicate __view_model__ or __subtype__ declaration of '%s' for " \
"class %s. Previous definition: %s" % \
(entry, class_name,
MetaModel.model_class_reverse_map[entry]))
MetaModel.model_class_reverse_map[entry] = newcls
return newcls
_HTML_REPR = """
<script>
(function() {
var expanded = false;
var ellipsis = document.getElementById("%(ellipsis_id)s");
ellipsis.addEventListener("click", function() {
var rows = document.getElementsByClassName("%(cls_name)s");
for (var i = 0; i < rows.length; i++) {
var el = rows[i];
el.style.display = expanded ? "none" : "table-row";
}
ellipsis.innerHTML = expanded ? "…)" : "‹‹‹";
expanded = !expanded;
});
})();
</script>
"""
class Model(with_metaclass(MetaModel, HasProps, PropertyCallbackManager, EventCallbackManager)):
''' Base class for all objects stored in Bokeh |Document| instances.
'''
def __new__(cls, *args, **kwargs):
obj = super(Model, cls).__new__(cls)
obj._id = kwargs.pop("id", make_id())
obj._document = None
obj._temp_document = None
return obj
def __init__(self, **kwargs):
# "id" is popped from **kw in __new__, so in an ideal world I don't
# think it should be here too. But Python does this, so it is:
#
# class Foo(object):
# def __new__(cls, *args, **kw):
# obj = super(Foo, cls).__new__(cls)
# obj.bar = kw.pop("bar", 111)
# print("__new__ :", id(kw), kw)
# return obj
# def __init__(self, **kw):
# print("__init__ :", id(kw), kw)
#
# >>> f = Foo(bar=10)
# __new__ : 4405522296 {}
# __init__ : 4405522296 {'bar': 10}
kwargs.pop("id", None)
super(Model, self).__init__(**kwargs)
default_theme.apply_to_model(self)
def __str__(self):
return "%s(id=%r, ...)" % (self.__class__.__name__, getattr(self, "id", None))
__repr__ = __str__
@property
def id(self):
return self._id
name = String(help="""
An arbitrary, user-supplied name for this model.
This name can be useful when querying the document to retrieve specific
Bokeh models.
.. code:: python
>>> plot.circle([1,2,3], [4,5,6], name="temp")
>>> plot.select(name="temp")
[GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)]
.. note::
No uniqueness guarantees or other conditions are enforced on any names
that are provided, nor is the name used directly by Bokeh for any
reason.
""")
tags = List(Any, help="""
An optional list of arbitrary, user-supplied values to attach to this
model.
This data can be useful when querying the document to retrieve specific
Bokeh models:
.. code:: python
>>> r = plot.circle([1,2,3], [4,5,6])
>>> r.tags = ["foo", 10]
>>> plot.select(tags=['foo', 10])
[GlyphRenderer(id='1de4c3df-a83d-480a-899b-fb263d3d5dd9', ...)]
Or simply a convenient way to attach any necessary metadata to a model
that can be accessed by ``CustomJS`` callbacks, etc.
.. note::
No uniqueness guarantees or other conditions are enforced on any tags
that are provided, nor are the tags used directly by Bokeh for any
reason.
""")
js_event_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.CustomJS")),
help="""
A mapping of event names to lists of ``CustomJS`` callbacks.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_event`` method:
.. code:: python
callback = CustomJS(code="console.log('tap event occurred')")
plot.js_on_event('tap', callback)
""")
subscribed_events = List(String, help="""
List of events that are subscribed to by Python callbacks. This is
the set of events that will be communicated from BokehJS back to
Python for this model.
""")
js_property_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.CustomJS")), help="""
A mapping of attribute names to lists of ``CustomJS`` callbacks, to be set up on
BokehJS side when the document is created.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_change`` method:
.. code:: python
callback = CustomJS(code="console.log('stuff')")
plot.x_range.js_on_change('start', callback)
""")
# Properties --------------------------------------------------------------
@property
def document(self):
''' The |Document| this model is attached to (can be ``None``)
'''
if self._temp_document is not None:
return self._temp_document
return self._document
@property
def ref(self):
''' A Bokeh protocol "reference" to this model, i.e. a dict of the
form:
.. code-block:: python
{
'type' : << view model name >>
'id' : << unique model id >>
}
Additionally there may be a `subtype` field if this model is a subtype.
'''
if "__subtype__" in self.__class__.__dict__:
return {
'type' : self.__view_model__,
'subtype' : self.__subtype__,
'id' : self.id,
}
else:
return {
'type' : self.__view_model__,
'id' : self.id,
}
# Public methods ----------------------------------------------------------
def js_on_event(self, event, *callbacks):
if not isinstance(event, string_types) and issubclass(event, Event):
event = event.event_name
if event not in self.js_event_callbacks:
self.js_event_callbacks[event] = []
for callback in callbacks:
if callback in self.js_event_callbacks[event]:
continue
self.js_event_callbacks[event].append(callback)
def js_link(self, attr, other, other_attr):
''' Link two Bokeh model properties using JavaScript.
This is a convenience method that simplifies adding a CustomJS callback
to update one Bokeh model property whenever another changes value.
Args:
attr (str) :
The name of a Bokeh property on this model
other (Model):
A Bokeh model to link to self.attr
other_attr (str) :
The property on ``other`` to link together
Added in version 1.1
Raises:
ValueError
Examples:
This code with ``js_link``:
.. code :: python
select.js_link('value', plot, 'sizing_mode')
is equivalent to the following:
.. code:: python
from bokeh.models import CustomJS
select.js_on_change('value',
CustomJS(args=dict(other=plot),
code="other.sizing_mode = this.value"
)
)
'''
if attr not in self.properties():
raise ValueError("%r is not a property of self (%r)" % (attr, self))
if not isinstance(other, Model):
raise ValueError("'other' is not a Bokeh model: %r" % other)
if other_attr not in other.properties():
raise ValueError("%r is not a property of other (%r)" % (other_attr, other))
from bokeh.models.callbacks import CustomJS
cb = CustomJS(args=dict(other=other), code="other.%s = this.%s" % (other_attr, attr))
self.js_on_change(attr, cb)
def js_on_change(self, event, *callbacks):
''' Attach a ``CustomJS`` callback to an arbitrary BokehJS model event.
On the BokehJS side, change events for model properties have the
form ``"change:property_name"``. As a convenience, if the event name
passed to this method is also the name of a property on the model,
then it will be prefixed with ``"change:"`` automatically:
.. code:: python
# these two are equivalent
source.js_on_change('data', callback)
source.js_on_change('change:data', callback)
However, there are other kinds of events that can be useful to respond
to, in addition to property change events. For example to run a
callback whenever data is streamed to a ``ColumnDataSource``, use the
``"stream"`` event on the source:
.. code:: python
source.js_on_change('streaming', callback)
'''
if len(callbacks) == 0:
raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")
# handle any CustomJS callbacks here
from bokeh.models.callbacks import CustomJS
if not all(isinstance(x, CustomJS) for x in callbacks):
raise ValueError("not all callback values are CustomJS instances")
if event in self.properties():
event = "change:%s" % event
if event not in self.js_property_callbacks:
self.js_property_callbacks[event] = []
for callback in callbacks:
if callback in self.js_property_callbacks[event]:
continue
self.js_property_callbacks[event].append(callback)
def layout(self, side, plot):
'''
'''
try:
return self in getattr(plot, side)
        except Exception:
return []
def on_change(self, attr, *callbacks):
''' Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
*callbacks (callable) : callback functions to register
Returns:
None
Example:
.. code-block:: python
widget.on_change('value', callback1, callback2, ..., callback_n)
'''
if attr not in self.properties():
raise ValueError("attempted to add a callback on nonexistent %s.%s property" % (self.__class__.__name__, attr))
super(Model, self).on_change(attr, *callbacks)
def references(self):
''' Returns all ``Models`` that this object has references to.
'''
return set(collect_models(self))
def select(self, selector):
''' Query this object and all of its references for objects that
match the given selector.
Args:
selector (JSON-like) :
Returns:
seq[Model]
'''
return find(self.references(), selector)
def select_one(self, selector):
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
Model
'''
result = list(self.select(selector))
if len(result) > 1:
raise ValueError("Found more than one object matching %s: %r" % (selector, result))
if len(result) == 0:
return None
return result[0]
def set_select(self, selector, updates):
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
def to_json(self, include_defaults):
''' Returns a dictionary of the attributes of this object,
containing only "JSON types" (string, number, boolean,
none, dict, list).
References to other objects are serialized as "refs" (just
the object ID and type info), so the deserializer will
need to separately have the full attributes of those
other objects.
There's no corresponding ``from_json()`` because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
'''
return loads(self.to_json_string(include_defaults=include_defaults))
def to_json_string(self, include_defaults):
''' Returns a JSON string encoding the attributes of this object.
References to other objects are serialized as references
(just the object ID and type info), so the deserializer
will need to separately have the full attributes of those
other objects.
There's no corresponding ``from_json_string()`` because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
'''
json_like = self._to_json_like(include_defaults=include_defaults)
json_like['id'] = self.id
# serialize_json "fixes" the JSON from _to_json_like by converting
        # all types into plain JSON types (it converts Model into refs,
# for example).
return serialize_json(json_like)
def trigger(self, attr, old, new, hint=None, setter=None):
'''
'''
# The explicit assumption here is that hinted events do not need to
# go through all the same invalidation steps. Currently this is the
# case for ColumnsStreamedEvent and ColumnsPatchedEvent. However,
# this may need to be further refined in the future, if the
# assumption does not hold for future hinted events (e.g. the hint
# could specify explicitly whether to do normal invalidation or not)
if hint is None:
dirty = { 'count' : 0 }
def mark_dirty(obj):
dirty['count'] += 1
if self._document is not None:
_visit_value_and_its_immediate_references(new, mark_dirty)
_visit_value_and_its_immediate_references(old, mark_dirty)
if dirty['count'] > 0:
self._document._invalidate_all_models()
# chain up to invoke callbacks
super(Model, self).trigger(attr, old, new, hint=hint, setter=setter)
def _attach_document(self, doc):
''' Attach a model to a Bokeh |Document|.
This private interface should only ever called by the Document
implementation to set the private ._document field properly
'''
if self._document is not None and self._document is not doc:
raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))
doc.theme.apply_to_model(self)
self._document = doc
self._update_event_callbacks()
@staticmethod
def _clear_extensions():
MetaModel.model_class_reverse_map = {
k:v for k,v in MetaModel.model_class_reverse_map.items()
if getattr(v, "__implementation__", None) is None
}
def _detach_document(self):
''' Detach a model from a Bokeh |Document|.
This private interface should only ever called by the Document
implementation to unset the private ._document field properly
'''
self._document = None
default_theme.apply_to_model(self)
def _to_json_like(self, include_defaults):
''' Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
This method does not convert "Bokeh types" into "plain JSON types,"
for example each child Model will still be a Model, rather
than turning into a reference, numpy isn't handled, etc.
That's what "json like" means.
This method should be considered "private" or "protected",
for use internal to Bokeh; use ``to_json()`` instead because
it gives you only plain JSON-compatible types.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default.
'''
all_attrs = self.properties_with_values(include_defaults=include_defaults)
# If __subtype__ is defined, then this model may introduce properties
# that don't exist on __view_model__ in bokehjs. Don't serialize such
# properties.
subtype = getattr(self.__class__, "__subtype__", None)
if subtype is not None and subtype != self.__class__.__view_model__:
attrs = {}
for attr, value in all_attrs.items():
if attr in self.__class__.__dict__:
continue
else:
attrs[attr] = value
else:
attrs = all_attrs
for (k, v) in attrs.items():
# we can't serialize Infinity, we send it as None and
# the other side has to fix it up. This transformation
# can't be in our json_encoder because the json
# module checks for inf before it calls the custom
# encoder.
if isinstance(v, float) and v == float('inf'):
attrs[k] = None
return attrs
def _repr_html_(self):
'''
'''
module = self.__class__.__module__
name = self.__class__.__name__
_id = getattr(self, "_id", None)
cls_name = make_id()
def row(c):
return '<div style="display: table-row;">' + c + '</div>'
def hidden_row(c):
return '<div class="%s" style="display: none;">%s</div>' % (cls_name, c)
def cell(c):
return '<div style="display: table-cell;">' + c + '</div>'
html = ''
html += '<div style="display: table;">'
ellipsis_id = make_id()
ellipsis = '<span id="%s" style="cursor: pointer;">…)</span>' % ellipsis_id
prefix = cell('<b title="%s.%s">%s</b>(' % (module, name, name))
html += row(prefix + cell('id' + ' = ' + repr(_id) + ', ' + ellipsis))
props = self.properties_with_values().items()
sorted_props = sorted(props, key=itemgetter(0))
all_props = sorted_props
for i, (prop, value) in enumerate(all_props):
end = ')' if i == len(all_props)-1 else ','
html += hidden_row(cell("") + cell(prop + ' = ' + repr(value) + end))
html += '</div>'
html += _HTML_REPR % dict(ellipsis_id=ellipsis_id, cls_name=cls_name)
return html
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _visit_immediate_value_references(value, visitor):
''' Visit all references to another Model without recursing into any
of the child Model; may visit the same Model more than once if
it's referenced more than once. Does not visit the passed-in value.
'''
if isinstance(value, HasProps):
for attr in value.properties_with_refs():
child = getattr(value, attr)
_visit_value_and_its_immediate_references(child, visitor)
else:
_visit_value_and_its_immediate_references(value, visitor)
_common_types = {int, float, str}
def _visit_value_and_its_immediate_references(obj, visitor):
''' Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
    most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow.
'''
typ = type(obj)
if typ in _common_types: # short circuit on common base types
return
if typ is list or issubclass(typ, (list, tuple)): # check common containers
for item in obj:
_visit_value_and_its_immediate_references(item, visitor)
elif issubclass(typ, dict):
for key, value in iteritems(obj):
_visit_value_and_its_immediate_references(key, visitor)
_visit_value_and_its_immediate_references(value, visitor)
elif issubclass(typ, HasProps):
if issubclass(typ, Model):
visitor(obj)
else:
# this isn't a Model, so recurse into it
_visit_immediate_value_references(obj, visitor)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 237,578,417,187,544,830 | 33.555427 | 123 | 0.574169 | false |
geolovic/TProfiler | test/01_heads_from_points.py | 1 | 4156 | # -*- coding: utf-8 -*-
"""
José Vicente Pérez
Granada University (Spain)
March, 2017
Testing suite for profiler.py
Last modified: 26 October 2017
"""
import time
import profiler as p
print("Test for profiler.heads_from_points()")
def test01():
"""
Test for profiler.heads_from_points() function
    Input points are in "data/in/main_heads.shp"
    Test with an id_field
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para profiler.heads_from_points() function")
print("Testing heads with an id_field")
print("Test in progress...")
# Test parameters
dem = "data/in/darro25.tif"
pointshp = "data/in/main_heads.shp"
out_txt = "data/out/01_cabeceras_puntos_01.txt"
id_field = "id"
cabeceras = p.heads_from_points(dem, pointshp, id_field)
outfile = open(out_txt, "w")
outfile.write("ROW;COL;X;Y;Z;id\n")
for cab in cabeceras:
cab = [str(value) for value in cab]
linea = ";".join(cab) + "\n"
outfile.write(linea)
outfile.close()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_txt)
print("=" * 40)
def test02():
"""
Test for profiler.heads_from_points() function
    Input points are in "data/in/main_heads.shp"
    Test without an id_field
"""
inicio = time.time()
print("=" * 40)
print("Test 02 para profiler.heads_from_points() function")
print("Testing without id_field")
print("Test in progress...")
# Test parameters
dem = "data/in/darro25.tif"
pointshp = "data/in/main_heads.shp"
out_txt = "data/out/01_cabeceras_puntos_02.txt"
cabeceras = p.heads_from_points(dem, pointshp)
outfile = open(out_txt, "w")
outfile.write("ROW;COL;X;Y;Z;id\n")
for cab in cabeceras:
cab = [str(value) for value in cab]
linea = ";".join(cab) + "\n"
outfile.write(linea)
outfile.close()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_txt)
print("=" * 40)
def test03():
"""
Test for profiler.heads_from_points() function
    Input points are in "data/in/main_heads.shp"
    Test with an id_field that does not exist in the point layer
"""
inicio = time.time()
print("=" * 40)
print("Test 03 para profiler.heads_from_points() function")
print("Testing a field that is not in head shapefile")
print("Test in progress...")
# Test parameters
dem = "data/in/darro25.tif"
pointshp = "data/in/main_heads.shp"
out_txt = "data/out/01_cabeceras_puntos_03.txt"
id_field = "id_rio"
cabeceras = p.heads_from_points(dem, pointshp, id_field)
outfile = open(out_txt, "w")
outfile.write("ROW;COL;X;Y;Z;id\n")
for cab in cabeceras:
cab = [str(value) for value in cab]
linea = ";".join(cab) + "\n"
outfile.write(linea)
outfile.close()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_txt)
print("=" * 40)
def test04():
"""
Test for profiler.heads_from_points() function
    Input points are in "data/in/main_heads.shp"
    Test with an id_field that is not an integer
"""
inicio = time.time()
print("=" * 40)
print("Test 04 para profiler.heads_from_points() function")
print("Testing wrong id_field (bad field type)")
print("Test in progress...")
# Test parameters
dem = "data/in/darro25.tif"
pointshp = "data/in/main_heads.shp"
out_txt = "data/out/01_cabeceras_puntos_04.txt"
id_field = "name"
cabeceras = p.heads_from_points(dem, pointshp, id_field)
outfile = open(out_txt, "w")
outfile.write("ROW;COL;X;Y;Z;id\n")
for cab in cabeceras:
cab = [str(value) for value in cab]
linea = ";".join(cab) + "\n"
outfile.write(linea)
outfile.close()
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_txt)
print("=" * 40)
test01()
test02()
test03()
test04()
| gpl-3.0 | -6,848,232,473,843,238,000 | 24.484663 | 66 | 0.603274 | false |
T-002/pycast | pycast/errors/meansignedpercentageerror.py | 1 | 2419 | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pycast.errors.meanabsolutepercentageerror import MeanAbsolutePercentageError
class MeanSignedPercentageError(MeanAbsolutePercentageError):
"""An over/under estimation aware percentage error."""
def local_error(self, originalValue, calculatedValue):
"""Calculates the error between the two given values.
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric
"""
return (float(calculatedValue[0] - originalValue[0])/originalValue[0])*100 if originalValue[0] else None
# if calculatedValue[0] - originalValue[0] > 0:
# # over estimation
# return super(MeanSignedPercentageError, self).local_error(originalValue, calculatedValue)
# else:
# # under estimation
# local = super(MeanSignedPercentageError, self).local_error(originalValue, calculatedValue)
# if local:
# return local * -1
# else:
# return None
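# Illustrative example: local_error([10.0], [12.0]) returns 20.0 (a 20% overestimation),
# local_error([10.0], [8.0]) returns -20.0 (an underestimation), and a zero original value
# yields None.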
MSPE = MeanSignedPercentageError
| mit | -5,777,831,087,039,715,000 | 42.196429 | 112 | 0.716825 | false |
gppezzi/easybuild-framework | test/framework/tweak.py | 1 | 17582 | ##
# Copyright 2014-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for framework/easyconfig/tweak.py
@author: Kenneth Hoste (Ghent University)
"""
import os
import sys
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
from unittest import TextTestRunner
from easybuild.framework.easyconfig.easyconfig import get_toolchain_hierarchy, process_easyconfig
from easybuild.framework.easyconfig.parser import EasyConfigParser
from easybuild.framework.easyconfig.tweak import find_matching_easyconfigs, obtain_ec_for, pick_version, tweak_one
from easybuild.framework.easyconfig.tweak import check_capability_mapping, match_minimum_tc_specs
from easybuild.framework.easyconfig.tweak import get_dep_tree_of_toolchain
from easybuild.framework.easyconfig.tweak import map_toolchain_hierarchies, map_easyconfig_to_target_tc_hierarchy
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import module_classes
from easybuild.tools.filetools import change_dir, write_file
class TweakTest(EnhancedTestCase):
"""Tests for tweak functionality."""
def test_pick_version(self):
"""Test pick_version function."""
# if required version is not available, the most recent version less than or equal should be returned
self.assertEqual(('1.4', '1.0'), pick_version('1.4', ['0.5', '1.0', '1.5']))
# if required version is available, that should be what's returned
self.assertEqual(('0.5', '0.5'), pick_version('0.5', ['0.5', '1.0', '1.5']))
self.assertEqual(('1.0', '1.0'), pick_version('1.0', ['0.5', '1.0', '1.5']))
self.assertEqual(('1.5', '1.5'), pick_version('1.5', ['0.5', '1.0', '1.5']))
# if no required version is specified, most recent version is picked
self.assertEqual(('1.5', '1.5'), pick_version(None, ['0.5', '1.0', '1.5']))
# if only a single version is available, there's nothing much to choose from
self.assertEqual(('1.4', '0.5'), pick_version('1.4', ['0.5']))
self.assertEqual(('0.5', '0.5'), pick_version(None, ['0.5']))
# check correct ordering of versions (not alphabetical ordering!)
self.assertEqual(('1.12', '1.10'), pick_version('1.12', ['1.5', '1.20', '1.1', '1.50', '1.10', '1.9', '1.8']))
# if no older versions are available, oldest available version is returned
self.assertEqual(('0.8', '1.1'), pick_version('0.8', ['1.5', '1.1', '1.10', '1.8']))
def test_find_matching_easyconfigs(self):
"""Test find_matching_easyconfigs function."""
test_easyconfigs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
for (name, installver) in [('GCC', '4.8.2'), ('gzip', '1.5-foss-2018a')]:
ecs = find_matching_easyconfigs(name, installver, [test_easyconfigs_path])
self.assertTrue(len(ecs) == 1 and ecs[0].endswith('/%s-%s.eb' % (name, installver)))
ecs = find_matching_easyconfigs('GCC', '*', [test_easyconfigs_path])
gccvers = ['4.6.3', '4.6.4', '4.8.2', '4.8.3', '4.9.2', '4.9.3-2.25', '4.9.3-2.26', '6.4.0-2.28', '7.3.0-2.30']
self.assertEqual(len(ecs), len(gccvers))
ecs_basename = [os.path.basename(ec) for ec in ecs]
for gccver in gccvers:
gcc_ec = 'GCC-%s.eb' % gccver
self.assertTrue(gcc_ec in ecs_basename, "%s is included in %s" % (gcc_ec, ecs_basename))
def test_obtain_ec_for(self):
"""Test obtain_ec_for function."""
init_config(build_options={'silent': True})
test_easyconfigs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
# find existing easyconfigs
specs = {
'name': 'GCC',
'version': '6.4.0',
'versionsuffix': '-2.28',
}
(generated, ec_file) = obtain_ec_for(specs, [test_easyconfigs_path])
self.assertFalse(generated)
self.assertEqual(os.path.basename(ec_file), 'GCC-6.4.0-2.28.eb')
specs = {
'name': 'ScaLAPACK',
'version': '2.0.2',
'toolchain_name': 'gompi',
'toolchain_version': '2018a',
'versionsuffix': '-OpenBLAS-0.2.20',
}
(generated, ec_file) = obtain_ec_for(specs, [test_easyconfigs_path])
self.assertFalse(generated)
self.assertEqual(os.path.basename(ec_file), 'ScaLAPACK-2.0.2-gompi-2018a-OpenBLAS-0.2.20.eb')
specs = {
'name': 'ifort',
'versionsuffix': '-GCC-4.9.3-2.25',
}
(generated, ec_file) = obtain_ec_for(specs, [test_easyconfigs_path])
self.assertFalse(generated)
self.assertEqual(os.path.basename(ec_file), 'ifort-2016.1.150-GCC-4.9.3-2.25.eb')
# latest version if not specified
specs = {
'name': 'GCC',
}
(generated, ec_file) = obtain_ec_for(specs, [test_easyconfigs_path])
self.assertFalse(generated)
self.assertEqual(os.path.basename(ec_file), 'GCC-7.3.0-2.30.eb')
# generate non-existing easyconfig
change_dir(self.test_prefix)
specs = {
'name': 'GCC',
'version': '4.9.0',
}
(generated, ec_file) = obtain_ec_for(specs, [test_easyconfigs_path])
self.assertTrue(generated)
self.assertEqual(os.path.basename(ec_file), 'GCC-4.9.0.eb')
def test_tweak_one_version(self):
"""Test tweak_one function"""
test_easyconfigs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
toy_ec = os.path.join(test_easyconfigs_path, 't', 'toy', 'toy-0.0.eb')
# test tweaking of software version (--try-software-version)
tweaked_toy_ec = os.path.join(self.test_prefix, 'toy-tweaked.eb')
tweak_one(toy_ec, tweaked_toy_ec, {'version': '1.2.3'})
toy_ec_parsed = EasyConfigParser(toy_ec).get_config_dict()
tweaked_toy_ec_parsed = EasyConfigParser(tweaked_toy_ec).get_config_dict()
# checksums should be reset to empty list, only version should be changed, nothing else
self.assertEqual(tweaked_toy_ec_parsed['checksums'], [])
self.assertEqual(tweaked_toy_ec_parsed['version'], '1.2.3')
for key in [k for k in toy_ec_parsed.keys() if k not in ['checksums', 'version']]:
val = toy_ec_parsed[key]
self.assertTrue(key in tweaked_toy_ec_parsed, "Parameter '%s' not defined in tweaked easyconfig file" % key)
tweaked_val = tweaked_toy_ec_parsed.get(key)
self.assertEqual(val, tweaked_val, "Different value for %s parameter: %s vs %s" % (key, val, tweaked_val))
# check behaviour if target file already exists
error_pattern = "File exists, not overwriting it without --force"
self.assertErrorRegex(EasyBuildError, error_pattern, tweak_one, toy_ec, tweaked_toy_ec, {'version': '1.2.3'})
# existing file does get overwritten when --force is used
init_config(build_options={'force': True, 'silent': True})
write_file(tweaked_toy_ec, '')
tweak_one(toy_ec, tweaked_toy_ec, {'version': '1.2.3'})
tweaked_toy_ec_parsed = EasyConfigParser(tweaked_toy_ec).get_config_dict()
self.assertEqual(tweaked_toy_ec_parsed['version'], '1.2.3')
def test_check_capability_mapping(self):
"""Test comparing the functionality of two toolchains"""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
init_config(build_options={
'valid_module_classes': module_classes(),
'robot_path': test_easyconfigs,
})
get_toolchain_hierarchy.clear()
foss_hierarchy = get_toolchain_hierarchy({'name': 'foss', 'version': '2018a'}, incl_capabilities=True)
iimpi_hierarchy = get_toolchain_hierarchy({'name': 'iimpi', 'version': '2016.01'},
incl_capabilities=True)
# Hierarchies are returned with top-level toolchain last, foss has 4 elements here, intel has 2
self.assertEqual(foss_hierarchy[0]['name'], 'GCC')
self.assertEqual(foss_hierarchy[1]['name'], 'golf')
self.assertEqual(foss_hierarchy[2]['name'], 'gompi')
self.assertEqual(foss_hierarchy[3]['name'], 'foss')
self.assertEqual(iimpi_hierarchy[0]['name'], 'GCCcore')
self.assertEqual(iimpi_hierarchy[1]['name'], 'iccifort')
self.assertEqual(iimpi_hierarchy[2]['name'], 'iimpi')
# golf <-> iimpi (should return False)
self.assertFalse(check_capability_mapping(foss_hierarchy[1], iimpi_hierarchy[1]), "golf requires math libs")
# gompi <-> iimpi
self.assertTrue(check_capability_mapping(foss_hierarchy[2], iimpi_hierarchy[2]))
# GCC <-> iimpi
self.assertTrue(check_capability_mapping(foss_hierarchy[0], iimpi_hierarchy[2]))
# GCC <-> iccifort
self.assertTrue(check_capability_mapping(foss_hierarchy[0], iimpi_hierarchy[1]))
# GCC <-> GCCcore
self.assertTrue(check_capability_mapping(foss_hierarchy[0], iimpi_hierarchy[0]))
def test_match_minimum_tc_specs(self):
"""Test matching a toolchain to lowest possible in a hierarchy"""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
init_config(build_options={
'robot_path': test_easyconfigs,
'silent': True,
'valid_module_classes': module_classes(),
})
get_toolchain_hierarchy.clear()
foss_hierarchy = get_toolchain_hierarchy({'name': 'foss', 'version': '2018a'}, incl_capabilities=True)
iimpi_hierarchy = get_toolchain_hierarchy({'name': 'iimpi', 'version': '2016.01'},
incl_capabilities=True)
# Hierarchies are returned with top-level toolchain last, foss has 4 elements here, intel has 2
self.assertEqual(foss_hierarchy[0]['name'], 'GCC')
self.assertEqual(foss_hierarchy[1]['name'], 'golf')
self.assertEqual(foss_hierarchy[2]['name'], 'gompi')
self.assertEqual(foss_hierarchy[3]['name'], 'foss')
self.assertEqual(iimpi_hierarchy[0]['name'], 'GCCcore')
self.assertEqual(iimpi_hierarchy[1]['name'], 'iccifort')
self.assertEqual(iimpi_hierarchy[2]['name'], 'iimpi')
# base compiler first (GCCcore which maps to GCC/6.4.0-2.28)
self.assertEqual(match_minimum_tc_specs(iimpi_hierarchy[0], foss_hierarchy),
{'name': 'GCC', 'version': '6.4.0-2.28'})
# then iccifort (which also maps to GCC/6.4.0-2.28)
self.assertEqual(match_minimum_tc_specs(iimpi_hierarchy[1], foss_hierarchy),
{'name': 'GCC', 'version': '6.4.0-2.28'})
# Then MPI
self.assertEqual(match_minimum_tc_specs(iimpi_hierarchy[2], foss_hierarchy),
{'name': 'gompi', 'version': '2018a'})
# Check against own math only subtoolchain for math
self.assertEqual(match_minimum_tc_specs(foss_hierarchy[1], foss_hierarchy),
{'name': 'golf', 'version': '2018a'})
# Make sure there's an error when we can't do the mapping
error_msg = "No possible mapping from source toolchain spec .*"
self.assertErrorRegex(EasyBuildError, error_msg, match_minimum_tc_specs,
foss_hierarchy[3], iimpi_hierarchy)
def test_dep_tree_of_toolchain(self):
"""Test getting list of dependencies of a toolchain (as EasyConfig objects)"""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
init_config(build_options={
'valid_module_classes': module_classes(),
'robot_path': test_easyconfigs,
'check_osdeps': False,
})
toolchain_spec = {'name': 'foss', 'version': '2018a'}
list_of_deps = get_dep_tree_of_toolchain(toolchain_spec, self.modtool)
expected_deps = [
['GCC', '6.4.0'],
['OpenBLAS', '0.2.20'],
['hwloc', '1.11.8'],
['OpenMPI', '2.1.2'],
['gompi', '2018a'],
['FFTW', '3.3.7'],
['ScaLAPACK', '2.0.2'],
['foss', '2018a']
]
actual_deps = [[dep['name'], dep['version']] for dep in list_of_deps]
self.assertEqual(expected_deps, actual_deps)
def test_map_toolchain_hierarchies(self):
"""Test mapping between two toolchain hierarchies"""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
init_config(build_options={
'robot_path': test_easyconfigs,
'silent': True,
'valid_module_classes': module_classes(),
})
get_toolchain_hierarchy.clear()
foss_tc = {'name': 'foss', 'version': '2018a'}
gompi_tc = {'name': 'gompi', 'version': '2018a'}
iimpi_tc = {'name': 'iimpi', 'version': '2016.01'}
# GCCcore is mapped to GCC, iccifort is mapped to GCC, iimpi is mapped to gompi
expected = {
'GCCcore': {'name': 'GCC', 'version': '6.4.0-2.28'},
'iccifort': {'name': 'GCC', 'version': '6.4.0-2.28'},
'iimpi': {'name': 'gompi', 'version': '2018a'},
}
self.assertEqual(map_toolchain_hierarchies(iimpi_tc, foss_tc, self.modtool), expected)
# GCC is mapped to iccifort, gompi is mapped to iimpi
expected = {
'GCC': {'name': 'iccifort', 'version': '2016.1.150-GCC-4.9.3-2.25'},
'gompi': {'name': 'iimpi', 'version': '2016.01'}
}
self.assertEqual(map_toolchain_hierarchies(gompi_tc, iimpi_tc, self.modtool), expected)
# Expect an error when there is no possible mapping
error_msg = "No possible mapping from source toolchain spec .*"
self.assertErrorRegex(EasyBuildError, error_msg, map_toolchain_hierarchies,
foss_tc, iimpi_tc, self.modtool)
# Test that we correctly include GCCcore binutils when it is there
gcc_binutils_tc = {'name': 'GCC', 'version': '4.9.3-2.26'}
iccifort_binutils_tc = {'name': 'iccifort', 'version': '2016.1.150-GCC-4.9.3-2.25'}
# Should see a binutils in the mapping (2.26 will get mapped to 2.25)
expected = {
'GCC': {'name': 'iccifort', 'version': '2016.1.150-GCC-4.9.3-2.25'},
'GCCcore': {'name': 'GCCcore', 'version': '4.9.3'},
'binutils': {'version': '2.25', 'versionsuffix': ''}
}
self.assertEqual(map_toolchain_hierarchies(gcc_binutils_tc, iccifort_binutils_tc, self.modtool), expected)
def test_map_easyconfig_to_target_tc_hierarchy(self):
"""Test mapping of easyconfig to target hierarchy"""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
init_config(build_options={
'robot_path': test_easyconfigs,
'silent': True,
'valid_module_classes': module_classes(),
})
get_toolchain_hierarchy.clear()
gcc_binutils_tc = {'name': 'GCC', 'version': '4.9.3-2.26'}
iccifort_binutils_tc = {'name': 'iccifort', 'version': '2016.1.150-GCC-4.9.3-2.25'}
# The below mapping includes a binutils mapping (2.26 to 2.25)
tc_mapping = map_toolchain_hierarchies(gcc_binutils_tc, iccifort_binutils_tc, self.modtool)
ec_spec = os.path.join(test_easyconfigs, 'h', 'hwloc', 'hwloc-1.6.2-GCC-4.9.3-2.26.eb')
tweaked_spec = map_easyconfig_to_target_tc_hierarchy(ec_spec, tc_mapping)
tweaked_ec = process_easyconfig(tweaked_spec)[0]
tweaked_dict = tweaked_ec['ec'].asdict()
# First check the mapped toolchain
key, value = 'toolchain', iccifort_binutils_tc
self.assertTrue(key in tweaked_dict and value == tweaked_dict[key])
# Also check that binutils has been mapped
for key, value in {'name': 'binutils', 'version': '2.25', 'versionsuffix': ''}.items():
self.assertTrue(key in tweaked_dict['builddependencies'][0] and
value == tweaked_dict['builddependencies'][0][key])
def suite():
""" return all the tests in this file """
return TestLoaderFiltered().loadTestsFromTestCase(TweakTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
| gpl-2.0 | -2,243,934,843,653,057,300 | 50.409357 | 120 | 0.61489 | false |
mjamesruggiero/lily | lily/apriori.py | 1 | 4157 | from collections import defaultdict
import logging
logging.basicConfig(level=logging.INFO, format="%(funcName)s\t%(message)s")
def createC1(dataset):
C1 = []
for transaction in dataset:
for item in transaction:
if not [item] in C1:
C1.append([item])
C1.sort()
return map(frozenset, C1)
def scan_d(dataset, candidate_sets, minimum_support):
    ss_count = count_candidates(candidate_sets, dataset)
num_items = float(len(dataset))
return_list = []
support_data = {}
for key in ss_count:
support = ss_count[key] / num_items
if support >= minimum_support:
return_list.insert(0, key)
support_data[key] = support
return return_list, support_data
def count_candidates(candidate_sets, dataset):
counts = defaultdict(int)
for element in dataset:
for candidate in candidate_sets:
if candidate.issubset(element):
counts[candidate] += 1
return counts
def apriori_generate(Lk, k):
"""
Takes a list of frequent itemsets, Lk
and the size of the sets, to produce
candidate itemsets.
"""
return_list = []
len_Lk = len(Lk)
for i in range(len_Lk):
for j in range(i + 1, len_Lk):
L1 = list(Lk[i])[:k - 2]
L2 = list(Lk[j])[:k - 2]
L1.sort()
L2.sort()
if L1 == L2:
return_list.append(Lk[i] | Lk[j]) # set union
return return_list
def apriori(dataset, minimum_support=0.5):
C1 = createC1(dataset)
D = map(set, dataset)
L1, support_data = scan_d(D, C1, minimum_support)
L = [L1]
k = 2
while(len(L[k - 2]) > 0):
Ck = apriori_generate(L[k - 2], k)
Lk, support_k = scan_d(D, Ck, minimum_support)
support_data.update(support_k)
L.append(Lk)
k += 1
return L, support_data
def generate_rules(L, support_data, minimum_confidence=0.7):
big_rule_list = []
for i in range(1, len(L)):
for freq_set in L[i]:
h1 = [frozenset([item]) for item in freq_set]
if i > 1:
rules_from_consequent(freq_set,
h1,
support_data,
big_rule_list,
minimum_confidence)
else:
calculate_confidence(freq_set,
h1,
support_data,
big_rule_list,
minimum_confidence)
return big_rule_list
def calculate_confidence(freq_set,
h,
support_data,
big_rule_list,
minimum_confidence):
pruned_h = []
for conseq in h:
conf = support_data[freq_set] / support_data[freq_set - conseq]
if conf >= minimum_confidence:
big_rule_list.append((freq_set - conseq, conseq, conf))
pruned_h.append(conseq)
return pruned_h
def rules_from_consequent(freq_set,
h,
support_data,
big_rule_list,
minimum_confidence=0.7):
"""
TODO: instead of moving large param list around,
use an object
"""
m = len(h[0])
if len(freq_set) > (m + 1): # merge it more
new_candidates = apriori_generate(h, m + 1)
new_candidates = calculate_confidence(freq_set,
new_candidates,
support_data,
big_rule_list,
minimum_confidence)
if len(new_candidates) > 1: # need at least 2 sets to merge
rules_from_consequent(freq_set,
new_candidates,
support_data,
big_rule_list,
minimum_confidence)
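# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example of how the functions above fit together, using a classic
# four-transaction toy dataset; the item numbers are hypothetical, not data
# shipped with this module. It assumes Python 2 semantics, since createC1()
# and apriori() rely on map() returning a list.
if __name__ == '__main__':
    transactions = [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
    # mine frequent itemsets with 50% minimum support
    frequent, support_data = apriori(transactions, minimum_support=0.5)
    # derive association rules with 70% minimum confidence
    rules = generate_rules(frequent, support_data, minimum_confidence=0.7)
    for antecedent, consequent, confidence in rules:
        logging.info("%s -> %s (confidence %.2f)",
                     sorted(antecedent), sorted(consequent), confidence)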
| apache-2.0 | 2,322,242,299,370,424 | 30.976923 | 75 | 0.479192 | false |
synsun/robotframework | src/robot/testdoc.py | 1 | 9849 | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# https://github.com/robotframework/robotframework/issues/1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RobotSettings
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robot.parsing import disable_curdir_processing
from robot.running import TestSuiteBuilder
from robot.utils import (abspath, Application, file_writer, format_time,
get_link_path, html_escape, html_format, is_string,
long, secs_to_timestr, seq2str2, timestr_to_secs,
unescape)
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates high-level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly same semantics as same options have
when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
For more information about Testdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
class TestDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with file_writer(outfile) as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if is_string(datasources):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': format_time(generated_time, gmtsep=' '),
'generatedMillis': long(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
            'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return html_escape(item)
def _html(self, item):
return html_format(unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == kw.SETUP_TYPE:
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == kw.TEARDOWN_TYPE:
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.type == kw.FOR_LOOP_TYPE:
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' %s ' % kw.flavor
return ', '.join(kw.variables) + joiner + seq2str2(kw.values)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = secs_to_timestr(timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
Arguments and options have same semantics, and options have same names,
as arguments and options to Testdoc.
Example::
from robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
| apache-2.0 | 3,419,999,885,271,458,000 | 33.557895 | 86 | 0.636816 | false |
mementum/backtrader | backtrader/utils/flushfile.py | 1 | 1588 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
class flushfile(object):
def __init__(self, f):
self.f = f
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
if sys.platform == 'win32':
sys.stdout = flushfile(sys.stdout)
sys.stderr = flushfile(sys.stderr)
class StdOutDevNull(object):
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def write(self, x):
pass
def flush(self):
pass
def stop(self):
sys.stdout = self.stdout
| gpl-3.0 | 289,876,395,016,867,500 | 26.859649 | 79 | 0.588161 | false |
exildev/webpage | exile/migrations/0007_auto_20170511_2256.py | 1 | 2816 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-05-11 22:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exile', '0006_seccion_posicion'),
]
operations = [
migrations.CreateModel(
name='Contacto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=300)),
('email', models.EmailField(max_length=254)),
('asunto', models.CharField(max_length=300)),
('mensaje', models.TextField()),
],
),
migrations.CreateModel(
name='Footer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='FooterPrincipal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fecha', models.DateField(auto_now_add=True)),
('footer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='exile.Footer')),
],
options={
'verbose_name': 'Footer Principal',
'verbose_name_plural': 'Footer Principal',
},
),
migrations.CreateModel(
name='OrdenFooter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField()),
('footer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Footer')),
('pagina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.Page')),
],
),
migrations.CreateModel(
name='SeccionFooter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=400)),
],
),
migrations.AddField(
model_name='ordenfooter',
name='seccion',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exile.SeccionFooter'),
),
migrations.AddField(
model_name='footer',
name='paginas',
field=models.ManyToManyField(through='exile.OrdenFooter', to='exile.Page'),
),
]
| mit | -656,454,561,851,177,300 | 38.661972 | 114 | 0.550426 | false |
tanghaibao/goatools | tests/test_get_lower_select.py | 1 | 2985 | #!/usr/bin/env python
"""Test get_all_parents vs """
from __future__ import print_function
__copyright__ = "Copyright (C) 2010-2019, DV Klopfenstein, H Tang et al. All rights reserved."
import os
import sys
import timeit
from goatools.base import get_godag
from goatools.godag.go_tasks import get_id2lowerselect
from goatools.godag.prttime import prt_hms
from goatools.test_data.checks import CheckGOs
from goatools.godag.relationship_combos import RelationshipCombos
def test_get_lowerselect(prt=sys.stdout):
"""Test getting parents and user-specfied ancestor relationships"""
# Load GO-DAG
repo = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
godag = get_godag(os.path.join(repo, 'go-basic.obo'), optional_attrs='relationship')
run = RelationshipCombos(godag)
run.chk_relationships_all()
rels_combo = run.get_relationship_combos()
print('{N} COMBINATIONS OF RELATIONSHIPS'.format(N=len(rels_combo)))
for relidx, rels_set in enumerate(rels_combo, 1):
print('{I}) RELATIONSHIPS[{N}]: {Rs}'.format(
I=relidx, N=len(rels_set), Rs=' '.join(sorted(rels_set))))
# ------------------------------------------------------------------------
# Get all parents for all GO IDs using get_all_parents in GOTerm class
tic = timeit.default_timer()
# pylint: disable=line-too-long
go2lowerselect_orig = {o.item_id:get_all_lowerselect(o, rels_set) for o in run.go2obj.values()}
tic = prt_hms(tic, "Get all goobj's parents using get_all_lowerselect(GOTerm)", prt)
# ------------------------------------------------------------------------
# Get all parents for all GO IDs using GOTerm get_all_parents
go2lowerselect_fast = get_id2lowerselect(run.go2obj.values(), rels_set)
tic = prt_hms(tic, "Get all goobj's parents using go_tasks::get_id2lowerselect", prt)
# ------------------------------------------------------------------------
# Compare parent lists
chkr = CheckGOs('test_get_lower_select', godag)
chkr.chk_a2bset(go2lowerselect_orig, go2lowerselect_fast) # EXPECTED, ACTUAL
print("PASSED: get_lowerselect RELATIONSHIPS[{N}]: {Rs}".format(
N=len(rels_set), Rs=' '.join(sorted(rels_set))))
# ------------------------------------------------------------------------------------
def get_all_lowerselect(goterm, relationship_set):
"""Return all parent GO IDs through both 'is_a' and all relationships."""
# SLOW WHEN RUNNING MORE THAN ONE GO TERM: GOTerm::get_all_lowerselect
all_lower = set()
for lower in goterm.get_goterms_lower_rels(relationship_set):
all_lower.add(lower.item_id)
all_lower |= get_all_lowerselect(lower, relationship_set)
return all_lower
if __name__ == '__main__':
PRT = None if len(sys.argv) != 1 else sys.stdout
test_get_lowerselect(PRT)
# Copyright (C) 2010-2019, DV Klopfenstein, H Tang et al. All rights reserved.
| bsd-2-clause | -2,906,338,326,993,548,300 | 46.380952 | 103 | 0.60804 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/_pytest/reports.py | 1 | 13737 | from pprint import pprint
import py
import six
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprEntry
from _pytest._code.code import ReprEntryNative
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import ReprFuncArgs
from _pytest._code.code import ReprLocals
from _pytest._code.code import ReprTraceback
from _pytest._code.code import TerminalRepr
from _pytest.outcomes import skip
from _pytest.pathlib import Path
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d["version_info"][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d["id"],
d["sysplatform"],
ver,
d["executable"],
)
return s
class BaseReport(object):
when = None
location = None
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, "node"):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def caplog(self):
"""Return captured log lines, if log capturing is enabled
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
@property
def count_towards_summary(self):
"""
**Experimental**
Returns True if this report should be counted towards the totals shown at the end of the
test session: "1 passed, 1 failure, etc".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
return True
@property
def head_line(self):
"""
**Experimental**
Returns the head line shown with longrepr output for this report, more commonly during
traceback representation during failures::
________ Test.foo ________
In the example above, the head_line is "Test.foo".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
if self.location is not None:
fspath, lineno, domain = self.location
return domain
def _to_json(self):
"""
This was originally the serialize_report() function from xdist (ca03269).
Returns the contents of this report as a dict of builtin entries, suitable for
serialization.
Experimental method.
"""
def disassembled_report(rep):
reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()
reprcrash = rep.longrepr.reprcrash.__dict__.copy()
new_entries = []
for entry in reprtraceback["reprentries"]:
entry_data = {
"type": type(entry).__name__,
"data": entry.__dict__.copy(),
}
for key, value in entry_data["data"].items():
if hasattr(value, "__dict__"):
entry_data["data"][key] = value.__dict__.copy()
new_entries.append(entry_data)
reprtraceback["reprentries"] = new_entries
return {
"reprcrash": reprcrash,
"reprtraceback": reprtraceback,
"sections": rep.longrepr.sections,
}
d = self.__dict__.copy()
if hasattr(self.longrepr, "toterminal"):
if hasattr(self.longrepr, "reprtraceback") and hasattr(
self.longrepr, "reprcrash"
):
d["longrepr"] = disassembled_report(self)
else:
d["longrepr"] = six.text_type(self.longrepr)
else:
d["longrepr"] = self.longrepr
for name in d:
if isinstance(d[name], (py.path.local, Path)):
d[name] = str(d[name])
elif name == "result":
d[name] = None # for now
return d
@classmethod
def _from_json(cls, reportdict):
"""
This was originally the serialize_report() function from xdist (ca03269).
Factory method that returns either a TestReport or CollectReport, depending on the calling
class. It's the callers responsibility to know which class to pass here.
Experimental method.
"""
if reportdict["longrepr"]:
if (
"reprcrash" in reportdict["longrepr"]
and "reprtraceback" in reportdict["longrepr"]
):
reprtraceback = reportdict["longrepr"]["reprtraceback"]
reprcrash = reportdict["longrepr"]["reprcrash"]
unserialized_entries = []
reprentry = None
for entry_data in reprtraceback["reprentries"]:
data = entry_data["data"]
entry_type = entry_data["type"]
if entry_type == "ReprEntry":
reprfuncargs = None
reprfileloc = None
reprlocals = None
if data["reprfuncargs"]:
reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
if data["reprfileloc"]:
reprfileloc = ReprFileLocation(**data["reprfileloc"])
if data["reprlocals"]:
reprlocals = ReprLocals(data["reprlocals"]["lines"])
reprentry = ReprEntry(
lines=data["lines"],
reprfuncargs=reprfuncargs,
reprlocals=reprlocals,
filelocrepr=reprfileloc,
style=data["style"],
)
elif entry_type == "ReprEntryNative":
reprentry = ReprEntryNative(data["lines"])
else:
_report_unserialization_failure(entry_type, cls, reportdict)
unserialized_entries.append(reprentry)
reprtraceback["reprentries"] = unserialized_entries
exception_info = ReprExceptionInfo(
reprtraceback=ReprTraceback(**reprtraceback),
reprcrash=ReprFileLocation(**reprcrash),
)
for section in reportdict["longrepr"]["sections"]:
exception_info.addsection(*section)
reportdict["longrepr"] = exception_info
return cls(**reportdict)
def _report_unserialization_failure(type_name, report_class, reportdict):
url = "https://github.com/pytest-dev/pytest/issues"
stream = py.io.TextIO()
pprint("-" * 100, stream=stream)
pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
pprint("report_name: %s" % report_class, stream=stream)
pprint(reportdict, stream=stream)
pprint("Please report this bug at %s" % url, stream=stream)
pprint("-" * 100, stream=stream)
raise RuntimeError(stream.getvalue())
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
__test__ = False
def __init__(
self,
nodeid,
location,
keywords,
outcome,
longrepr,
when,
sections=(),
duration=0,
user_properties=None,
**extra
):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: user properties is a list of tuples (name, value) that holds user
#: defined properties of the test
self.user_properties = list(user_properties or [])
#: list of pairs ``(str, str)`` of extra information which needs to
#: marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid,
self.when,
self.outcome,
)
@classmethod
def from_item_and_call(cls, item, call):
"""
Factory method to create and fill a TestReport with standard item and call info.
"""
when = call.when
duration = call.stop - call.start
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(
excinfo, style=item.config.option.tbstyle
)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" % (key, rwhen), content))
return cls(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
user_properties=item.user_properties,
)
class CollectReport(BaseReport):
when = "collect"
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid,
len(self.result),
self.outcome,
)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
def pytest_report_to_serializable(report):
if isinstance(report, (TestReport, CollectReport)):
data = report._to_json()
data["_report_type"] = report.__class__.__name__
return data
def pytest_report_from_serializable(data):
if "_report_type" in data:
if data["_report_type"] == "TestReport":
return TestReport._from_json(data)
elif data["_report_type"] == "CollectReport":
return CollectReport._from_json(data)
assert False, "Unknown report_type unserialize data: {}".format(
data["_report_type"]
)
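# --- Illustrative round-trip (added sketch, not part of the original module) ---
# Demonstrates the two hooks above: a report is flattened to builtin types and
# rebuilt from them. The nodeid/location values are made up for the example.
if __name__ == "__main__":
    example = TestReport(
        nodeid="test_sample.py::test_ok",
        location=("test_sample.py", 0, "test_ok"),
        keywords={"test_ok": 1},
        outcome="passed",
        longrepr=None,
        when="call",
    )
    data = pytest_report_to_serializable(example)
    rebuilt = pytest_report_from_serializable(data)
    assert rebuilt.nodeid == example.nodeid and rebuilt.passed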
| mit | 2,341,126,185,610,534,400 | 31.17096 | 98 | 0.549538 | false |
Jonahss/magnificent | neilk/monitor.py | 1 | 2086 | #
# Monitor a URL continuously, providing
# reports on its availability in a log file
# and as a web page.
#
import json
import logging
import sys
from twisted.internet import task
from twisted.internet import reactor
from twisted.web import server, resource
import urllib2
config = {}
log = logging.getLogger(__name__)
checks = 0
successes = 0
failures = 0
def log_to_stderr(log):
""" set up logging on standard error """
format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(stream=sys.stderr,
format=format_str,
level=logging.DEBUG)
def health_check():
""" perform the health check for a URL """
global config, log, checks, successes, failures
request = urllib2.Request(config["url"])
checks += 1
try:
response = urllib2.urlopen(request)
log.info("%s is okay! (%s)", config["url"], response.getcode())
successes += 1
except urllib2.URLError, e:
log.info("%s is ERROR! (%s)", config["url"], e)
failures += 1
def generate_report():
""" format a string with current report """
report = "%i checks, %i failures, %.2f%% success rate"
return report % (checks,
failures,
100 * float(successes)/checks)
def log_health_report():
""" log the report """
log.info("REPORT: " + generate_report())
class MonitorSite(resource.Resource):
""" simple twisted site, gives the report out on the web """
isLeaf = True
def render_GET(self, request):
return generate_report()
if __name__ == "__main__":
log_to_stderr(log)
config = json.loads(open("monitor_config.json", "rb").read())
site = server.Site(MonitorSite())
reactor.listenTCP(config["port"], site)
log.info("Started site on port %i", config["port"])
check_loop = task.LoopingCall(health_check)
check_loop.start(config["url_frequency"])
report_loop = task.LoopingCall(log_health_report)
report_loop.start(config["report_frequency"])
reactor.run()
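# --- Illustrative config (added sketch, not part of the original script) ---
# The script expects a monitor_config.json next to it; the values below are
# placeholders showing the keys read above (intervals are in seconds, as
# passed to twisted's LoopingCall.start), not the project's real settings:
#
# {
#     "url": "http://localhost:8080/health",
#     "port": 9111,
#     "url_frequency": 5,
#     "report_frequency": 30
# }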
| mit | -6,028,109,550,642,355,000 | 25.075 | 71 | 0.624161 | false |
dalf/searx | searx/engines/framalibre.py | 1 | 2104 | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
FramaLibre (It)
"""
from html import escape
from urllib.parse import urljoin, urlencode
from lxml import html
from searx.utils import extract_text
# about
about = {
"website": 'https://framalibre.org/',
"wikidata_id": 'Q30213882',
"official_api_documentation": None,
"use_official_api": False,
"require_api_key": False,
"results": 'HTML',
}
# engine dependent config
categories = ['it']
paging = True
# search-url
base_url = 'https://framalibre.org/'
search_url = base_url + 'recherche-par-crit-res?{query}&page={offset}'
# specific xpath variables
results_xpath = '//div[@class="nodes-list-row"]/div[contains(@typeof,"sioc:Item")]'
link_xpath = './/h3[@class="node-title"]/a[@href]'
thumbnail_xpath = './/img[@class="media-object img-responsive"]/@src'
content_xpath = './/div[@class="content"]//p'
# do search-request
def request(query, params):
offset = (params['pageno'] - 1)
params['url'] = search_url.format(query=urlencode({'keys': query}),
offset=offset)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
# parse results
for result in dom.xpath(results_xpath):
link = result.xpath(link_xpath)[0]
href = urljoin(base_url, link.attrib.get('href'))
# there's also a span (class="rdf-meta element-hidden" property="dc:title")'s content property for this...
title = escape(extract_text(link))
thumbnail_tags = result.xpath(thumbnail_xpath)
thumbnail = None
if len(thumbnail_tags) > 0:
thumbnail = extract_text(thumbnail_tags[0])
if thumbnail[0] == '/':
thumbnail = base_url + thumbnail
content = escape(extract_text(result.xpath(content_xpath)))
# append result
results.append({'url': href,
'title': title,
'img_src': thumbnail,
'content': content})
# return results
return results
| agpl-3.0 | 5,341,742,455,289,489,000 | 28.222222 | 114 | 0.612167 | false |
aj00200/binaryslaveserver | src/libs/server/greeter.py | 1 | 1146 | '''
Handle connections from a client greeter. Provide information such as
sub-servers and news.
'''
import asyncore
import socket
import struct
class Handler(asyncore.dispatcher_with_send):
def handle_read(self):
data = self.recv(1)
if data == b'\x00':
# TODO: load alternate server names from the config
self.send(struct.pack('16s16s16s16s', b'localhost', b'localhost',
b'localhost', b'localhost'))
elif data == b'\x01':
self.send(b'Welcome to the new world')
class Server(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accepted(self, sock, addr):
print('[*] Got a new connection')
handler = Handler(sock)
def handle_accept(self):
# Required for Python 3.1
print('[*] Got a new connection')
pair = self.accept()
if pair is not None:
handler = Handler(pair[0])
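# --- Illustrative client (added sketch, not part of the original module) ---
# Shows how a client could speak this small protocol: b'\x00' requests four
# 16-byte sub-server names, b'\x01' requests the news line. The host and port
# are placeholders, and the single recv() calls assume each short reply
# arrives in one piece.
def example_client(host='localhost', port=4000):
    sock = socket.create_connection((host, port))
    try:
        sock.sendall(b'\x00')
        names = struct.unpack('16s16s16s16s', sock.recv(64))
        servers = [name.rstrip(b'\x00').decode() for name in names]
        sock.sendall(b'\x01')
        news = sock.recv(1024).decode()
        return servers, news
    finally:
        sock.close()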
| gpl-3.0 | -4,989,029,583,675,852,000 | 29.157895 | 77 | 0.595113 | false |
matthias-k/pysaliency | pysaliency/quilt.py | 1 | 5072 | """
Code to apply quilt patches to files
This module enables pysaliency to use quilt patches
to patch code from external saliency models. While
in Linux, quilt itself could be used to apply the patches,
in Windows and Mac quilt might not be available and
nontrivial to install for users.
It does not support all possible patch files but only
the subset of functionality needed by pysaliency.
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import os.path
from .utils import full_split
class Hunk(object):
def __init__(self, lines):
meta_data = lines.pop(0)
a, src_data, target_data, b = meta_data.split()
assert a == '@@'
assert b == '@@'
start, length = self.parse_position(src_data)
assert start < 0
self.source_start = -start
self.source_length = length
start, length = self.parse_position(target_data)
assert start > 0
self.target_start = start
self.target_length = length
self.lines = lines
def parse_position(self, position):
start, length = position.split(',')
start = int(start)
length = int(length)
return start, length
def apply(self, source, target):
src_pos = self.source_start - 1
assert len(target) == self.target_start - 1
for line in self.lines:
type, data = line[0], line[1:]
if type == ' ':
assert source[src_pos] == data
target.append(data)
src_pos += 1
elif type == '-':
assert source[src_pos] == data
src_pos += 1
elif type == '+':
target.append(data)
elif type == '\\':
# Newline stuff, ignore
pass
else:
raise ValueError(line)
assert src_pos == self.source_start + self.source_length - 1
assert len(target) == self.target_start + self.target_length - 1
return src_pos
class Diff(object):
def __init__(self, lines):
source = lines.pop(0)
assert source.startswith('--- ')
_, source = source.split('--- ', 1)
source, _ = source.split('\t', 1)
source = os.path.join(*full_split(source)[1:])
target = lines.pop(0)
assert target.startswith('+++ ')
_, target = target.split('+++ ', 1)
target, _ = target.split('\t', 1)
target = os.path.join(*full_split(target)[1:])
self.source_filename = source
self.target_filename = target
self.hunks = []
while lines:
assert lines[0].startswith('@@ ')
hunk_lines = [lines.pop(0)]
while lines and not lines[0].startswith('@@ '):
line = lines.pop(0)
if line:
hunk_lines.append(line)
self.hunks.append(Hunk(hunk_lines))
def apply(self, location):
hunks = list(self.hunks)
source = open(os.path.join(location, self.source_filename)).read()
source = source.split('\n')
target = []
src_pos = 0
while src_pos < len(source):
if hunks:
if hunks[0].source_start == src_pos+1:
hunk = hunks.pop(0)
src_pos = hunk.apply(source, target)
continue
target.append(source[src_pos])
src_pos += 1
open(os.path.join(location, self.target_filename), 'w').write('\n'.join(target))
class PatchFile(object):
def __init__(self, patch):
self.diffs = []
lines = patch.split('\n')
while lines:
index1 = lines.pop(0)
assert index1.startswith('Index: ')
index2 = lines.pop(0)
assert index2.startswith('==============')
diff = []
diff.append(lines.pop(0))
while lines and not lines[0].startswith('Index: '):
diff.append(lines.pop(0))
diff_obj = Diff(diff)
self.diffs.append(diff_obj)
def apply(self, location, verbose=True):
for diff in self.diffs:
if verbose:
print("Patching {}".format(diff.source_filename))
diff.apply(location)
class QuiltSeries(object):
def __init__(self, patches_location):
self.patches_location = patches_location
series = open(os.path.join(self.patches_location, 'series')).read()
self.patches = []
self.patch_names = []
for line in series.split('\n'):
if not line:
continue
patch_content = open(os.path.join(self.patches_location, line)).read()
self.patches.append(PatchFile(patch_content))
self.patch_names.append(line)
def apply(self, location, verbose=True):
for patch, name in zip(self.patches, self.patch_names):
if verbose:
print("Applying {}".format(name))
patch.apply(location, verbose=verbose)
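# --- Illustrative usage (added sketch, not part of the original module) ---
# Applies a patch series to an unpacked source tree, e.g. run as
# `python -m pysaliency.quilt <patches_dir> <source_dir>`; both paths are
# supplied by the caller, nothing here ships with pysaliency.
if __name__ == '__main__':
    import sys
    patches_dir, source_dir = sys.argv[1:3]
    series = QuiltSeries(patches_dir)
    series.apply(source_dir)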
| mit | 6,860,629,781,753,440,000 | 32.813333 | 88 | 0.545347 | false |