# ---
# repo: jamesiter/JimV-C
# path: jimvc/api/snapshot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import ceil
import requests
from flask import Blueprint, url_for
from flask import request
import json
import jimit as ji
import os
from jimvc.api.base import Base
from jimvc.models import Guest, Config, Disk
from jimvc.models import Snapshot, SnapshotDiskMapping
from jimvc.models import OSTemplateImage
from jimvc.models import Utils
from jimvc.models import Rules
from jimvc.models import OSTemplateImageKind
__author__ = 'James Iter'
__date__ = '2018/4/10'
__contact__ = '[email protected]'
__copyright__ = '(c) 2018 by James Iter.'
blueprint = Blueprint(
'api_snapshot',
__name__,
url_prefix='/api/snapshot'
)
blueprints = Blueprint(
'api_snapshots',
__name__,
url_prefix='/api/snapshots'
)
snapshot_base = Base(the_class=Snapshot, the_blueprint=blueprint, the_blueprints=blueprints)
@Utils.dumps2response
def r_create():
args_rules = [
Rules.GUEST_UUID.value
]
if 'label' in request.json:
args_rules.append(
Rules.LABEL.value,
)
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ji.Check.previewing(args_rules, request.json)
snapshot = Snapshot()
guest = Guest()
guest.uuid = request.json.get('guest_uuid')
guest.get_by('uuid')
snapshot.label = request.json.get('label', '')
snapshot.status = guest.status
snapshot.guest_uuid = guest.uuid
snapshot.snapshot_id = '_'.join(['tmp', ji.Common.generate_random_code(length=8)])
snapshot.parent_id = '-'
snapshot.progress = 0
snapshot.create()
snapshot.get_by('snapshot_id')
message = {
'_object': 'snapshot',
'action': 'create',
'uuid': guest.uuid,
'node_id': guest.node_id,
'passback_parameters': {'id': snapshot.id}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
ret['data'] = snapshot.__dict__
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
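# A sketch of the instruction flow used throughout this module (payload keys
# taken from the code above; the concrete values are illustrative only):
#
#   {"_object": "snapshot", "action": "create",
#    "uuid": "<guest uuid>", "node_id": 1,
#    "passback_parameters": {"id": 42}}
#
# r_create() only persists the Snapshot row (progress=0, temporary
# snapshot_id) and emits this message; the compute node does the real work
# and reports progress back asynchronously.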
@Utils.dumps2response
def r_update(snapshot_id):
snapshot = Snapshot()
args_rules = [
Rules.SNAPSHOT_ID.value
]
if 'label' in request.json:
args_rules.append(
Rules.LABEL.value,
)
    if len(args_rules) < 2:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
request.json['snapshot_id'] = snapshot_id
try:
ji.Check.previewing(args_rules, request.json)
snapshot.snapshot_id = request.json.get('snapshot_id')
snapshot.get_by('snapshot_id')
snapshot.label = request.json.get('label', snapshot.label)
snapshot.update()
snapshot.get()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = snapshot.__dict__
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_get(snapshots_id):
return snapshot_base.get(ids=snapshots_id, ids_rule=Rules.SNAPSHOTS_ID.value, by_field='snapshot_id')
@Utils.dumps2response
def r_get_by_filter():
return snapshot_base.get_by_filter()
@Utils.dumps2response
def r_content_search():
return snapshot_base.content_search()
@Utils.dumps2response
def r_delete(snapshots_id):
args_rules = [
Rules.SNAPSHOTS_ID.value
]
try:
ji.Check.previewing(args_rules, {'snapshots_id': snapshots_id})
snapshot = Snapshot()
guest = Guest()
        # Verify that every specified snapshot exists
for snapshot_id in snapshots_id.split(','):
snapshot.snapshot_id = snapshot_id
snapshot.get_by('snapshot_id')
guest.uuid = snapshot.guest_uuid
guest.get_by('uuid')
        # Perform the delete operation
for snapshot_id in snapshots_id.split(','):
snapshot.snapshot_id = snapshot_id
snapshot.get_by('snapshot_id')
guest.uuid = snapshot.guest_uuid
guest.get_by('uuid')
message = {
'_object': 'snapshot',
'action': 'delete',
'uuid': snapshot.guest_uuid,
'snapshot_id': snapshot.snapshot_id,
'node_id': guest.node_id,
'passback_parameters': {'id': snapshot.id}
}
Utils.emit_instruction(message=json.dumps(message))
            # Remove snapshots whose creation had failed
if snapshot.progress == 255:
SnapshotDiskMapping.delete_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot.snapshot_id]))
snapshot.delete()
else:
snapshot.progress = 254
snapshot.update()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_revert(snapshot_id):
args_rules = [
Rules.SNAPSHOT_ID.value
]
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id})
snapshot = Snapshot()
guest = Guest()
snapshot.snapshot_id = snapshot_id
snapshot.get_by('snapshot_id')
snapshot.progress = 253
snapshot.update()
snapshot.get()
guest.uuid = snapshot.guest_uuid
guest.get_by('uuid')
message = {
'_object': 'snapshot',
'action': 'revert',
'uuid': guest.uuid,
'snapshot_id': snapshot.snapshot_id,
'node_id': guest.node_id,
'passback_parameters': {'id': snapshot.id}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
ret['data'] = snapshot.__dict__
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_get_disks(snapshot_id):
args_rules = [
Rules.SNAPSHOT_ID.value
]
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id})
rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot_id]))
for row in rows:
ret['data'].append(row['disk_uuid'])
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_get_snapshots_by_disks_uuid(disks_uuid):
args_rules = [
Rules.UUIDS.value
]
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
ji.Check.previewing(args_rules, {'uuids': disks_uuid})
rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['disk_uuid', 'in', disks_uuid]))
ret['data'] = rows
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_convert_to_os_template_image(snapshot_id, disk_uuid):
args_rules = [
Rules.SNAPSHOT_ID.value,
Rules.DISK_UUID.value,
Rules.LABEL.value
]
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id, 'disk_uuid': disk_uuid,
'label': request.json.get('label')})
rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot_id]))
disks_uuid = list()
for row in rows:
disks_uuid.append(row['disk_uuid'])
if disk_uuid not in disks_uuid:
ret['state'] = ji.Common.exchange_state(40401)
ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], u': 未在快照: ',
snapshot_id, u' 中找到磁盘:', disk_uuid])
return ret
config = Config()
config.id = 1
config.get()
snapshot = Snapshot()
os_template_image = OSTemplateImage()
guest = Guest()
disk = Disk()
snapshot.snapshot_id = snapshot_id
snapshot.get_by('snapshot_id')
snapshot.progress = 252
guest.uuid = snapshot.guest_uuid
guest.get_by('uuid')
disk.uuid = disk_uuid
disk.get_by('uuid')
os_template_image.id = guest.os_template_image_id
os_template_image.get()
image_name = '_'.join([snapshot.snapshot_id, disk.uuid]) + '.' + disk.format
os_template_image.id = 0
os_template_image.label = request.json.get('label')
os_template_image.path = '/'.join([os.path.dirname(os_template_image.path), image_name])
os_template_image.kind = OSTemplateImageKind.custom.value
os_template_image.progress = 0
os_template_image.create_time = ji.Common.tus()
if os_template_image.exist_by('path'):
ret['state'] = ji.Common.exchange_state(40901)
ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], ': ', os_template_image.path])
return ret
os_template_image.create()
os_template_image.get_by('path')
message = {
'_object': 'snapshot',
'action': 'convert',
'uuid': disk.guest_uuid,
'snapshot_id': snapshot.snapshot_id,
'storage_mode': config.storage_mode,
'dfs_volume': config.dfs_volume,
'node_id': disk.node_id,
'snapshot_path': disk.path,
'template_path': os_template_image.path,
'os_template_image_id': os_template_image.id,
'passback_parameters': {'id': snapshot.snapshot_id, 'os_template_image_id': os_template_image.id}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
snapshot.update()
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_show():
args = list()
page = int(request.args.get('page', 1))
page_size = int(request.args.get('page_size', 10))
keyword = request.args.get('keyword', None)
order_by = request.args.get('order_by', None)
order = request.args.get('order', 'desc')
    if page is not None:
        args.append('page=' + str(page))
    if page_size is not None:
        args.append('page_size=' + str(page_size))
    if keyword is not None:
        args.append('keyword=' + str(keyword))
    if order_by is not None:
        args.append('order_by=' + order_by)
    if order is not None:
        args.append('order=' + order)
snapshots_url = url_for('api_snapshots.r_get_by_filter', _external=True)
if keyword is not None:
snapshots_url = url_for('api_snapshots.r_content_search', _external=True)
    if args:
snapshots_url = snapshots_url + '?' + '&'.join(args)
snapshots_ret = requests.get(url=snapshots_url, cookies=request.cookies)
snapshots_ret = json.loads(snapshots_ret.content)
guests_uuid = list()
for snapshot in snapshots_ret['data']:
guests_uuid.append(snapshot['guest_uuid'])
guests, _ = Guest.get_by_filter(filter_str='uuid:in:' + ','.join(guests_uuid))
    # Mapping from Guest uuid to Guest
guests_mapping_by_uuid = dict()
for guest in guests:
guests_mapping_by_uuid[guest['uuid']] = guest
for i, snapshot in enumerate(snapshots_ret['data']):
        if len(snapshot['guest_uuid']) == 36:
snapshots_ret['data'][i]['guest'] = guests_mapping_by_uuid[snapshot['guest_uuid']]
    # Total number of pages. The original `1 / float(page_size)` always
    # produced a single page; the total record count from the paging payload
    # (assumed to be exposed as paging['total']) is the intended numerator.
    last_page = int(ceil(snapshots_ret['paging']['total'] / float(page_size)))
page_length = 5
pages = list()
if page < int(ceil(page_length / 2.0)):
for i in range(1, page_length + 1):
pages.append(i)
if i == last_page or last_page == 0:
break
elif last_page - page < page_length / 2:
for i in range(last_page - page_length + 1, last_page + 1):
if i < 1:
continue
pages.append(i)
else:
for i in range(page - page_length / 2, page + int(ceil(page_length / 2.0))):
pages.append(i)
if i == last_page or last_page == 0:
break
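    # Worked example of the 5-wide window above: with last_page = 9,
    # page = 1 yields pages [1..5], page = 8 yields [5..9], and a middle
    # page such as 5 yields [3..7].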
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = {
'snapshots': snapshots_ret['data'],
'paging': snapshots_ret['paging'],
'guests_mapping_by_uuid': guests_mapping_by_uuid,
'page': page,
'page_size': page_size,
'keyword': keyword,
'pages': pages,
'last_page': last_page,
'order_by': order_by,
'order': order
}
return ret
# license: gpl-3.0

# ---
# repo: AlienCowEatCake/ImageViewer
# path: src/ThirdParty/Exiv2/exiv2-0.27.3-Source/tests/bugfixes/redmine/test_issue_540.py
# -*- coding: utf-8 -*-
import system_tests
class PrettyPrintXmp(metaclass=system_tests.CaseMeta):
url = "http://dev.exiv2.org/issues/540"
filename = "$data_path/exiv2-bug540.jpg"
commands = ["$exiv2 -u -px $filename"]
stdout = ["""Xmp.dc.creator XmpSeq 1 Ian Britton
Xmp.dc.description LangAlt 1 lang="x-default" Communications
Xmp.dc.rights LangAlt 1 lang="x-default" ian Britton - FreeFoto.com
Xmp.dc.source XmpText 12 FreeFoto.com
Xmp.dc.subject XmpBag 1 Communications
Xmp.dc.title LangAlt 1 lang="x-default" Communications
Xmp.exif.ApertureValue XmpText 3 F16
Xmp.exif.BrightnessValue XmpText 8 0.260156
Xmp.exif.ColorSpace XmpText 1 sRGB
Xmp.exif.DateTimeOriginal XmpText 20 2002:07:13 15:58:28
Xmp.exif.ExifVersion XmpText 4 2.00
Xmp.exif.ExposureBiasValue XmpText 6 -13/20 EV
Xmp.exif.ExposureProgram XmpText 1 Shutter priority
Xmp.exif.FNumber XmpText 3 F0.6
Xmp.exif.FileSource XmpText 1 (0)
Xmp.exif.FlashpixVersion XmpText 4 1.00
Xmp.exif.FocalLength XmpText 3 0.0 mm
Xmp.exif.FocalPlaneResolutionUnit XmpText 1 inch
Xmp.exif.FocalPlaneXResolution XmpText 8 12.0508
Xmp.exif.FocalPlaneYResolution XmpText 8 12.0508
Xmp.exif.GPSLatitude XmpText 13 54,59.380000N
Xmp.exif.GPSLongitude XmpText 12 1,54.850000W
Xmp.exif.GPSMapDatum XmpText 5 WGS84
Xmp.exif.GPSTimeStamp XmpText 20 2002:07:13 14:58:24
Xmp.exif.GPSVersionID XmpText 7 2.0.0.0
Xmp.exif.ISOSpeedRatings XmpSeq 1 0
Xmp.exif.MeteringMode XmpText 1 Multi-segment
Xmp.exif.PixelXDimension XmpText 4 2400
Xmp.exif.PixelYDimension XmpText 4 1600
Xmp.exif.SceneType XmpText 1 (0)
Xmp.exif.SensingMethod XmpText 1 One-chip color area
Xmp.exif.ShutterSpeedValue XmpText 10 1/724 s
Xmp.pdf.Keywords XmpText 14 Communications
Xmp.photoshop.AuthorsPosition XmpText 12 Photographer
Xmp.photoshop.CaptionWriter XmpText 11 Ian Britton
Xmp.photoshop.Category XmpText 3 BUS
Xmp.photoshop.City XmpText 1
Xmp.photoshop.Country XmpText 14 Ubited Kingdom
Xmp.photoshop.Credit XmpText 11 Ian Britton
Xmp.photoshop.DateCreated XmpText 10 2002-06-20
Xmp.photoshop.Headline XmpText 14 Communications
Xmp.photoshop.State XmpText 1
Xmp.photoshop.SupplementalCategories XmpBag 1 Communications
Xmp.photoshop.Urgency XmpText 1 5
Xmp.tiff.Artist XmpText 11 Ian Britton
Xmp.tiff.BitsPerSample XmpSeq 1 8
Xmp.tiff.Compression XmpText 1 6
Xmp.tiff.Copyright LangAlt 1 lang="x-default" ian Britton - FreeFoto.com
Xmp.tiff.ImageDescription LangAlt 1 lang="x-default" Communications
Xmp.tiff.ImageLength XmpText 3 400
Xmp.tiff.ImageWidth XmpText 3 600
Xmp.tiff.Make XmpText 8 FUJIFILM
Xmp.tiff.Model XmpText 12 FinePixS1Pro
Xmp.tiff.Orientation XmpText 1 top, left
Xmp.tiff.ResolutionUnit XmpText 1 inch
Xmp.tiff.Software XmpText 19 Adobe Photoshop 7.0
Xmp.tiff.XResolution XmpText 5 300
Xmp.tiff.YCbCrPositioning XmpText 1 Co-sited
Xmp.tiff.YResolution XmpText 5 300
Xmp.xmp.CreateDate XmpText 20 2002-07-13T15:58:28Z
Xmp.xmp.ModifyDate XmpText 20 2002-07-19T13:28:10Z
Xmp.xmpBJ.JobRef XmpText 0 type="Bag"
Xmp.xmpBJ.JobRef[1] XmpText 0 type="Struct"
Xmp.xmpBJ.JobRef[1]/stJob:name XmpText 12 Photographer
Xmp.xmpMM.DocumentID XmpText 58 adobe:docid:photoshop:84d4dba8-9b11-11d6-895d-c4d063a70fb0
Xmp.xmpRights.Marked XmpText 4 True
Xmp.xmpRights.WebStatement XmpText 16 www.freefoto.com
"""]
stderr = [""]
retval = [0]
# license: gpl-3.0

# ---
# repo: fabiking/plugin.video.Mfabiking
# path: resources/tools/livesoccertv.py
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# LiveSoccerTV parser for PalcoTV
# Version 0.1 (05.05.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
from __main__ import *
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
playlists = xbmc.translatePath(os.path.join('special://userdata/playlists', ''))
temp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', ''))
def lstv0(params):
plugintools.log("[%s %s] LiveSoccerTV " % (addonName, addonVersion))
thumbnail = params.get("thumbnail")
fanart = params.get("fanart")
url = params.get("url")
data = gethttp_referer_headers(url,url)
today0 = plugintools.find_single_match(data, '<a class="open-calendar">(.*?)</a>')
today1 = plugintools.find_single_match(data, '<a class="open-calendar navbar_cal_current-data">(.*?)</a>')
today0 = diasem(today0)
plugintools.add_item(action="", title='[COLOR lightyellow][B]LiveSoccerTV[/B] / [COLOR lightgreen][I]'+today0+' '+today1+'[/I][/COLOR]', url = "", thumbnail = thumbnail , fanart = fanart, folder = False, isPlayable = False)
ligas = plugintools.find_multiple_matches(data, '<div class="clearfix b_trim">(.*?)<div class="b_league -low -blue-bg -accordion -white-border-bottom">')
liga_logo = plugintools.find_multiple_matches(data, 'class="fll b_league_logo"><img src="([^"]+)')
print 'liga_logo',liga_logo
i=0
for entry in ligas:
cabecera = plugintools.find_single_match(entry, '<span class="fll b_league_name b_trim_inner">(.*?)</span>')
try: ligalogo = liga_logo[i]
except: ligalogo = thumbnail
#plugintools.log("cabecera= "+cabecera)
cabecera=cabecera.replace("'", "'")
plugintools.add_item(action="", title='[COLOR orange][B]'+cabecera+'[/B][/COLOR]', fanart=fanart, thumbnail=ligalogo, url="", folder=False, isPlayable=False)
matches = plugintools.find_multiple_matches(entry, '<div class="b_match_info-elem-wrapper">(.*?)class="b_match_all-link"></a></div>')
i = i + 1
for entry in matches:
url = 'http'+plugintools.find_single_match(entry, 'href="http([^"]+)')
teams = plugintools.find_multiple_matches(entry, '<span>(.*?)</span>')
goals = plugintools.find_multiple_matches(entry, '<div class="b_match_count">(.*?)</div>')
chs = plugintools.find_single_match(entry, '<div class="b_match_channel_links">(.*?)</div>').strip()
chs = chs.split(",")
bcasters = ""
for item in chs:
if bcasters == "": bcasters = item
else: bcasters = bcasters + ", " + item
if chs[0] == "":
bcasters = 'Sin emisión en España'
print bcasters
bcasters = bcasters.replace("\t", "")
if len(goals) == 2:
match_title = '[COLOR white]'+teams[0] + '[COLOR lightyellow][B] '+goals[0]+'[/COLOR][/B][COLOR white] vs ' + teams[1]+' [/COLOR][COLOR lightyellow][B]'+goals[1]+'[/COLOR][/B]'
else:
match_title = '[COLOR white]'+teams[0] + ' vs ' + teams[1]+'[/COLOR]'
match_title=match_title.replace("'", "'")
plugintools.add_item(action="lstv1", title=match_title, url=url, thumbnail=ligalogo, extra=bcasters, fanart=fanart, folder=False, isPlayable=False)
def lstv1(params):
menu_selec = ['[COLOR cyan]'+params.get("extra")+'[/COLOR]', "Ver cobertura internacional", "Estadísticas en vivo"]
dia_lstv = plugintools.selector(menu_selec, params.get("title"))
if dia_lstv == 1: lstv2()
if dia_lstv == 2: lstv3()
def lstv2():
params = plugintools.get_params()
url = params.get("url")
data = gethttp_referer_headers(url,url)
match_coverage = plugintools.find_single_match(data, 'International Coverage(.*?)<div id="match-lineups" class="match-info hidden">')
country_match = plugintools.find_multiple_matches(match_coverage, '<div class="row">(.*?)<div class="b_channel col-xs-12 -low b_trim -international">')
for entry in country_match:
plugintools.log("entry= "+entry)
country = plugintools.find_single_match(entry, '<div class="fll b_channel_name -broadcast -country b_trim_inner">(.*?)</div>').replace(" ", "").strip()
if country != "":
channels = ""
channel = plugintools.find_multiple_matches(entry, '<div class="fll b_channel_name -broadcast b_trim_inner">(.*?)</div>')
for item in channel:
if channels == "":
channels = item
else:
channels = channels + ', '+item
lstv_file = open(temp + "lstv.tmp", "a")
lstv_file.write('[COLOR gold][B]'+country+'[/B][/COLOR][COLOR white]: '+channels+'[/COLOR]\n')
lstv_file.close()
params["url"] = temp + 'lstv.tmp'
txt_reader(params)
def lstv3():
params=plugintools.get_params()
title = params.get("title").replace("[COLOR white]", "[COLOR lightgreen]")
team_a = title.split(" vs ")[0]
team_b = title.split(" vs ")[1]
url = 'http://m.livesoccertv.com/match/1709586/olympiakos-piraeus-vs-bayern-m-nchen/'
data = gethttp_referer_headers(url,url)
lstv_file = open(temp + "lstv_stats.tmp", "wb")
lstv_file.write("\n[COLOR red]"+title+"[/COLOR]\n")
lstv_file.write("\n[COLOR gold]TITULARES[/COLOR]\n")
stats = plugintools.find_single_match(data, '<span>Stats</span>(.*?)Substitutes</h3>')
players_a = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -right">(.*?)</div>')
players_b = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -left">(.*?)</div>')
i = 0
while i < len(players_a):
players_a[i]=players_a[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip()
players_b[i]=players_b[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip()
spaces = 80 - len(players_b[i])
plugintools.log("longitud_texto= "+str(len(players_a[i])))
plugintools.log("espacios que faltan= "+str(spaces))
tabulador = ""
j = spaces
k = 0
while k <= j:
tabulador = tabulador + "..."
k = k + 1
line_player = players_b[i]+tabulador+players_a[i]+'\n'
lstv_file.write(line_player)
print line_player
i = i + 1
lstv_file.write("\n\n[COLOR gold]SUPLENTES[/COLOR]\n")
stats = plugintools.find_single_match(data, 'Substitutes</h3>(.*?)<div id="match-stats"')
players_a = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -right">(.*?)</div>')
players_b = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -left">(.*?)</div>')
i = 0
while i < len(players_a):
players_a[i]=players_a[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip()
players_b[i]=players_b[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip()
spaces = 80 - len(players_b[i])
tabulador = ""
j = spaces
k = 0
while k <= j:
tabulador = tabulador + "..."
k = k + 1
line_player = players_b[i]+tabulador+players_a[i]+'\n'
lstv_file.write(line_player)
print line_player
i = i + 1
lstv_file.close()
params["url"] = temp + 'lstv_stats.tmp'
txt_reader(params)
def gethttp_referer_headers(url, referer):
    # getjad() arrives via the `from __main__ import *` at the top of the file.
    plugintools.modo_vista("tvshows")
    request_headers = []
    request_headers.append(["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
    request_headers.append(["Referer", referer])
    body, response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    # Initialise both up front so a failed match can't leave them undefined.
    jar = None
    loc = None
    try:
        r = '\'set-cookie\',\s\'([^;]+.)'
        jar = plugintools.find_single_match(str(response_headers), r)
        jar = getjad(jar)
    except:
        pass
    try:
        r = '\'location\',\s\'([^\']+)'
        loc = plugintools.find_single_match(str(response_headers), r)
    except:
        pass
    if loc:
        # Follow the redirect once, carrying the referer and any cookie jar.
        request_headers.append(["Referer", url])
        if jar:
            request_headers.append(["Cookie", jar])
        body, response_headers = plugintools.read_body_and_headers(loc, headers=request_headers)
        try:
            r = '\'set-cookie\',\s\'([^;]+.)'
            jar = plugintools.find_single_match(str(response_headers), r)
            jar = getjad(jar)
        except:
            pass
    plugintools.modo_vista("tvshows")
    return body
def diasem(dia):
if dia == "Monday":
dia = "Lun"
elif dia == "Tuesday":
dia = "Mar"
elif dia == "Wednesday":
dia = "Mié"
elif dia == "Thursday":
dia = "Jue"
elif dia == "Friday":
dia = "Vie"
elif dia == "Saturday":
dia = "Sáb"
elif dia == "Sunday":
dia = "Dom"
return dia
# license: gpl-2.0

# ---
# repo: mar29th/ring
# path: ring/connection.py
# Copyright 2016 Douban Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import os
import threading
import cPickle
from ring.connection_impl import Again
from ring.constants import (
TYPE_ACTIVATE_SEND, TYPE_ACTIVATE_RECV, BACKLOG, TYPE_ERROR, TYPE_CLOSED, TYPE_FINALIZE,
TYPE_CONNECT_SUCCESS, ERR_CONNRESET
)
from ring.events import Mailbox
from ring.poller import READ
from ring.puller import PullerConnectionImpl
from ring.pusher import PusherConnectionImpl
from ring.replier import ReplierConnectionImpl
from ring.requester import RequesterConnectionImpl
from ring.utils import RingError, raise_exc_info
_idle = 1
_open = 1 << 1
_closing = 1 << 2
_closed = 1 << 3
REPLIER = 1
REQUESTER = 2
PULLER = 3
PUSHER = 5
NONBLOCK = 1
POLLIN = 1
POLLOUT = 1 << 1
class ConnectionError(RingError):
def __init__(self, errno):
        # RingError takes just the message; the stray `self` argument was a bug
        # (compare ConnectionInUse below).
        super(ConnectionError, self).__init__(os.strerror(errno))
self.errno = errno
class ConnectionInUse(RingError):
def __init__(self):
super(ConnectionInUse, self).__init__('Connection in use')
class ConnectionClosedError(RingError):
def __init__(self):
super(ConnectionClosedError, self).__init__('Socket closed')
class Connection(object):
def __init__(self, type, ctx):
self._socket = None
self._type = type
self._state = _idle
self._context = ctx
self._target_addr = None
self._target_port = None
self._bound_addr = None
self._bound_port = None
self._mailbox = Mailbox()
self._lock = threading.RLock()
def bind(self, target):
if self._state & (_closing | _closed):
raise ConnectionClosedError
if self._state != _idle:
raise ConnectionInUse
if not self._type & (REPLIER | PULLER):
raise NotImplementedError('Bind is not applicable to such type of socket')
self._bound_addr = target[0]
self._bound_port = target[1]
self._initialize_socket()
self._state = _open
self._socket.bind(target)
self._socket.listen(BACKLOG)
self._initialize_impl()
def connect(self, target):
if self._state & (_closing | _closed):
raise ConnectionClosedError
if self._state != _idle:
raise ConnectionInUse
if not self._type & (REQUESTER | PUSHER):
raise NotImplementedError('Connect is not applicable to such type of socket')
self._target_addr = target[0]
self._target_port = target[1]
self._state = _open
self._initialize_socket()
self._initialize_impl()
self._impl.connect(target)
self._process_commands(None)
def close(self):
if self._state != _open:
raise ConnectionClosedError
self._impl.close()
self._context.reaper.register(
self._mailbox.waker_fd, READ, lambda fd, events: self._process_commands(0))
self._state = _closing
def _initialize_socket(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setblocking(0)
if self._type & (REPLIER | PULLER):
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def _initialize_impl(self):
if self._type == REPLIER:
self._impl = ReplierConnectionImpl(self._socket, self._context, self._mailbox)
elif self._type == REQUESTER:
self._impl = RequesterConnectionImpl(self._socket, self._context, self._mailbox)
elif self._type == PULLER:
self._impl = PullerConnectionImpl(self._socket, self._context, self._mailbox)
elif self._type == PUSHER:
self._impl = PusherConnectionImpl(self._socket, self._context, self._mailbox)
else:
raise RuntimeError('Type not implemented')
def _process_commands(self, timeout):
while 1:
try:
result = self._mailbox.recv(timeout)
except Again:
return
else:
cmd = result.command
if cmd == TYPE_ACTIVATE_SEND:
self._impl.activate_send(*result.args)
elif cmd == TYPE_ACTIVATE_RECV:
self._impl.activate_recv(*result.args)
elif cmd == TYPE_CONNECT_SUCCESS:
# Nothing to be done. We're just attempting to block here.
pass
elif cmd == TYPE_ERROR:
self._impl.connection_close(*result.args)
                    if getattr(result.args[1][1], 'errno', -1) not in ERR_CONNRESET:
# Only raise the exception when the error is not connection reset
raise_exc_info(result.args[1])
elif cmd == TYPE_CLOSED:
self._impl.connection_close(*result.args)
elif cmd == TYPE_FINALIZE:
self._impl.connection_finalize()
self._connection_finalize()
# Finalize event should break immediately as everything is closed.
break
else:
raise RuntimeError('Received undefined command %s' % (cmd,))
# Rerun. Set timeout to 0.
timeout = 0
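    # Sketch of the mailbox protocol dispatched above (shapes assumed from
    # the handlers): the I/O thread posts commands such as
    #   TYPE_ACTIVATE_RECV -> wake a recv() blocked in the user thread
    #   TYPE_ERROR         -> tear the connection down and re-raise
    #   TYPE_FINALIZE      -> close the socket and unregister the waker fd
    # so every state change happens on the user thread inside this loop.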
def _connection_finalize(self):
self._socket.close()
self._context.reaper.unregister(self._mailbox.waker_fd)
self._mailbox.close()
self._state = _closed
def getsockname(self):
if self._state != _open:
raise ConnectionClosedError
return self._socket.getsockname()
    def poll(self, events):
        # recv_available()/send_available() are assumed to return 0/1 flags;
        # masking POLLOUT (bit 1) directly against such a flag always gave 0,
        # so shift the requested bit down before masking, then back up.
        return ((POLLIN & events & self._impl.recv_available()) |
                (((events & POLLOUT) >> 1) & self._impl.send_available()) << 1)
def recv(self, flags=0):
if self._state != _open:
raise ConnectionClosedError
# Process once
self._process_commands(0)
# Receive once
try:
return self._impl.recv()
except Again:
if not flags & NONBLOCK:
# If the connection should block, wait until recv is activated
pass
else:
# If user wants nonblocking send, just raise Again to user
raise
# Let's wait
while 1:
self._process_commands(None)
try:
return self._impl.recv()
except Again:
continue
def send(self, data, flags=0):
if self._state != _open:
raise ConnectionClosedError
# Process once
self._process_commands(0)
# Send once
try:
self._impl.send(data)
except Again:
if not flags & NONBLOCK:
# If the connection should block, wait until send is activated
pass
else:
# If user wants nonblocking send, just raise Again to user
raise
# Let's wait
while 1:
self._process_commands(None)
try:
self._impl.send(data)
break
except Again:
continue
def recv_pyobj(self, flags=0):
return cPickle.loads(self.recv(flags=flags))
def send_pyobj(self, data, flags=0):
self.send(cPickle.dumps(data), flags=flags)
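# Minimal usage sketch (hypothetical addresses; the Context/reaper wiring is
# provided elsewhere in the package):
#
#   rep = Connection(REPLIER, ctx)
#   rep.bind(('127.0.0.1', 7000))
#   req = Connection(REQUESTER, ctx)
#   req.connect(('127.0.0.1', 7000))
#   req.send_pyobj({'ping': 1})   # blocks until deliverable
#   print rep.recv_pyobj()        # -> {'ping': 1}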
__all__ = ['Connection', 'REPLIER', 'REQUESTER', 'PULLER', 'PUSHER', 'NONBLOCK']
# license: apache-2.0

# ---
# repo: gumblex/textimgbot
# path: textimgbot.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Telegram Text Image Render Bot
'''
import os
import re
import sys
import time
import json
import queue
import base64
import logging
import hashlib
import requests
import tempfile
import functools
import threading
import subprocess
import collections
import concurrent.futures
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
logger_botapi = logging.getLogger('botapi')
logger_inkscape = logging.getLogger('inkscape')
executor = concurrent.futures.ThreadPoolExecutor(5)
HSession = requests.Session()
re_formatstring = re.compile(r'\{\d*\}')
template_cache = None
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
hashstr = lambda s: base64.urlsafe_b64encode(
hashlib.sha256(s.encode('utf-8')).digest()).decode('utf-8').rstrip('=')
def hashfile(filename):
hash_obj = hashlib.new('sha256')
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_obj.update(chunk)
return base64.urlsafe_b64encode(hash_obj.digest()).decode('utf-8').rstrip('=')
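# Both helpers yield the 43-character URL-safe id used for template and image
# filenames: a SHA-256 digest is 32 bytes, which base64-encodes to 44
# characters with a single '=' pad that is then stripped.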
# Bot API
class BotAPIFailed(Exception):
pass
def async_func(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
def func_noerr(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
logger_botapi.exception('Async function failed.')
executor.submit(func_noerr, *args, **kwargs)
return wrapped
def bot_api(method, **params):
for att in range(3):
try:
req = HSession.post(('https://api.telegram.org/bot%s/' %
CFG.apitoken) + method, data=params, timeout=45)
retjson = req.content
if not retjson:
continue
ret = json.loads(retjson.decode('utf-8'))
break
except Exception as ex:
if att < 1:
time.sleep((att + 1) * 2)
else:
raise ex
if not ret['ok']:
raise BotAPIFailed(repr(ret))
return ret['result']
def sendmsg_sync(text, chat_id, reply_to_message_id=None, **kwargs):
text = text.strip()
if not text:
logger_botapi.warning('Empty message ignored: %s, %s' % (chat_id, reply_to_message_id))
return
logger_botapi.debug('sendMessage(%s): %s' % (len(text), text[:20]))
if len(text) > 2000:
text = text[:1999] + '…'
reply_id = reply_to_message_id
if reply_to_message_id and reply_to_message_id < 0:
reply_id = None
return bot_api('sendMessage', chat_id=chat_id, text=text,
reply_to_message_id=reply_id, **kwargs)
sendmsg = async_func(sendmsg_sync)
@async_func
def answer(inline_query_id, results, **kwargs):
return bot_api('answerInlineQuery', inline_query_id=inline_query_id,
results=json.dumps(results), **kwargs)
def getupdates():
global CFG, STATE
while 1:
try:
updates = bot_api('getUpdates', offset=CFG.get('offset', 0), timeout=10)
except Exception:
logger_botapi.exception('Get updates failed.')
continue
if updates:
CFG['offset'] = updates[-1]["update_id"] + 1
for upd in updates:
MSG_Q.put(upd)
time.sleep(.2)
def retrieve(url, filename, raisestatus=True):
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
if raisestatus:
r.raise_for_status()
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return r.status_code
def parse_cmd(text: str):
t = text.strip().replace('\xa0', ' ').split(' ', 1)
if not t:
return (None, None)
cmd = t[0].rsplit('@', 1)
if len(cmd[0]) < 2 or cmd[0][0] != "/":
return (None, None)
if len(cmd) > 1 and 'username' in CFG and cmd[-1] != CFG.username:
return (None, None)
expr = t[1] if len(t) > 1 else ''
return (cmd[0][1:], expr.strip())
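# e.g. parse_cmd('/delsvg@BotName some-id') -> ('delsvg', 'some-id') when
# CFG.username == 'BotName'; anything that isn't a command gives (None, None).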
# Processing
def update_templates():
global template_cache
template_cache = collections.OrderedDict()
for i in os.listdir(CFG['templates']):
name, ext = os.path.splitext(i)
if ext == '.svg':
template_cache[name] = os.path.join(CFG['templates'], i)
def generate_image(templatefile, output, *args, **kwargs):
with open(templatefile, 'r', encoding='utf-8') as f:
template = f.read().format(*args, **kwargs)
    with tempfile.NamedTemporaryFile('w', suffix='.svg') as f:
        f.write(template)
        # Flush and keep the file open while Inkscape reads it; the temp file
        # is deleted the moment the `with` block exits.
        f.flush()
        proc = subprocess.Popen(
            ('inkscape', '-z', '--export-background=white', '-e',
             output + '.png', f.name),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        try:
            outs, errs = proc.communicate(timeout=10)
        except subprocess.TimeoutExpired:
            proc.kill()
            outs, errs = proc.communicate()
    if proc.returncode != 0:
        logger_inkscape.error('Inkscape returns %s', proc.returncode)
        logger_inkscape.info(outs.decode())
        return False
proc = subprocess.Popen(
('convert', output + '.png', output),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
try:
outs, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
try:
os.unlink(output + '.png')
except FileNotFoundError:
pass
if proc.returncode != 0:
logger_inkscape.error('Convert returns %s', proc.returncode)
logger_inkscape.info(outs.decode())
return False
return True
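# Hypothetical call, for illustration: render template.svg so that
# {0} = 'hello/world', {1} = 'hello' and {2} = 'world' (matching how
# render_images() splits the query text below):
#
#   generate_image('template.svg', '/tmp/out.jpg', 'hello/world',
#                  'hello', 'world')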
def render_images(text):
args = [text] + text.split('/')
ret = []
futures = []
for template, templatefile in template_cache.items():
fileid = hashstr('%s|%s' % (template, text))
filepath = os.path.join(CFG['images'], fileid + '.jpg')
if os.path.isfile(filepath):
ret.append(fileid)
else:
futures.append(
(fileid, executor.submit(generate_image, templatefile, filepath, *args)))
for fileid, future in futures:
if future.result():
ret.append(fileid)
return ret
# Query handling
START = 'This is the Text Image Render Bot. Send /help, or directly use its inline mode.'
HELP = (
    'You can type text for images in its inline mode; separate parameters by "/".\n'
'You can add your SVG template by sending SVG files, delete your template by '
'/delsvg [id]. The SVG must have Python str.format code ({0} is the full text, '
'{1} and so on are the parameters), and must be compatible with Inkscape.'
)
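# A minimal template sketch of the str.format contract described in HELP
# (illustrative only -- any Inkscape-compatible SVG with {n} slots and the
# <?xml/<svg markers checked in on_document() will do):
#
#   <?xml version="1.0"?>
#   <svg xmlns="http://www.w3.org/2000/svg" width="320" height="80">
#     <text x="10" y="40" font-size="24">{0}</text>
#   </svg>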
def handle_api_update(d: dict):
logger_botapi.debug('Update: %r' % d)
try:
if 'inline_query' in d:
query = d['inline_query']
text = query['query'].strip()
if text:
images = render_images(text)
logging.info('Rendered: %s', text)
r = answer(query['id'], inline_result(images))
logger_botapi.debug(r)
elif 'message' in d:
msg = d['message']
text = msg.get('text', '')
document = msg.get('document')
ret = None
if document:
on_document(document, msg['chat'], msg)
elif text:
cmd, expr = parse_cmd(text)
if msg['chat']['type'] == 'private':
if cmd == 'start':
ret = START
elif cmd == 'delsvg':
ret = cmd_delsvg(expr, msg['chat'], msg['message_id'], msg)
else:
ret = HELP
if ret:
sendmsg(ret, msg['chat']['id'], msg['message_id'])
except Exception:
logger_botapi.exception('Failed to process a message.')
def inline_result(images):
ret = []
for d in images:
ret.append({
'type': 'photo',
'id': d,
'photo_url': CFG['urlroot'] + d + '.jpg',
'thumb_url': CFG['urlroot'] + d + '.jpg',
})
return ret
def cmd_delsvg(expr, chat, replyid, msg):
if chat['type'] != 'private':
return
# the length of hashstr/hashfile.
if len(expr) == 43 and expr in template_cache:
try:
os.unlink(os.path.join(CFG['templates'], expr + '.svg'))
except FileNotFoundError:
pass
del template_cache[expr]
return "Template deleted."
else:
return "Invalid template id."
@async_func
def on_document(document, chat, msg):
if chat['type'] != 'private':
return
file_id = document['file_id']
fp = bot_api('getFile', file_id=file_id)
file_size = fp.get('file_size') or document.get('file_size')
if file_size and file_size > 300*1024:
sendmsg_sync('File too big. Must be <300 KiB.', chat['id'], msg['message_id'])
return
file_path = fp.get('file_path')
if not file_path:
raise BotAPIFailed("can't get file_path for " + file_id)
file_ext = os.path.splitext(file_path)[1]
if file_ext != '.svg':
sendmsg_sync('Template must be a SVG file.', chat['id'], msg['message_id'])
return
cachename = file_id + file_ext
url_file = 'https://api.telegram.org/file/bot%s/' % CFG['apitoken']
with tempfile.TemporaryDirectory() as tmpdir:
fpath = os.path.join(tmpdir, cachename)
retrieve(url_file + file_path, fpath)
try:
with open(fpath, 'r', encoding='utf-8') as f:
template = f.read()
template.index('<?xml')
template.index('<svg')
assert re_formatstring.search(template)
assert generate_image(fpath, os.path.join(tmpdir, file_id + '.jpg'), "test")
except Exception:
sendmsg_sync('Invaild SVG file.', chat['id'], msg['message_id'])
return
filehash = hashfile(fpath)
os.rename(fpath, os.path.join(CFG['templates'], filehash + '.svg'))
sendmsg_sync('Template uploaded. ID: ' + filehash, chat['id'], msg['message_id'])
update_templates()
def load_config():
return AttrDict(json.load(open('config.json', encoding='utf-8')))
def save_config(config):
json.dump(config, open('config.json', 'w'), sort_keys=True, indent=1)
if __name__ == '__main__':
CFG = load_config()
MSG_Q = queue.Queue()
update_templates()
apithr = threading.Thread(target=getupdates)
apithr.daemon = True
apithr.start()
logging.info('Satellite launched')
while 1:
handle_api_update(MSG_Q.get())
# license: mit

# ---
# repo: flgiordano/netcash
# path: +/google-cloud-sdk/lib/googlecloudsdk/api_lib/sdktool/info_holder.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains utilities for holding and formatting install information.
This is useful for the output of 'gcloud info', which in turn is extremely
useful for debugging issues related to weird installations, out-of-date
installations, and so on.
"""
import datetime
import os
import re
import StringIO
import sys
import textwrap
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import named_configs
from googlecloudsdk.core import properties
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
class InfoHolder(object):
"""Base object to hold all the configuration info."""
def __init__(self):
self.basic = BasicInfo()
self.installation = InstallationInfo()
self.config = ConfigInfo()
self.logs = LogsInfo()
def __str__(self):
out = StringIO.StringIO()
out.write(str(self.basic) + '\n')
out.write(str(self.installation) + '\n')
out.write(str(self.config) + '\n')
out.write(str(self.logs) + '\n')
return out.getvalue()
class BasicInfo(object):
"""Holds basic information about your system setup."""
def __init__(self):
platform = platforms.Platform.Current()
self.version = config.CLOUD_SDK_VERSION
self.operating_system = platform.operating_system
self.architecture = platform.architecture
self.python_version = sys.version
self.site_packages = 'site' in sys.modules
def __str__(self):
return textwrap.dedent("""\
Google Cloud SDK [{version}]
Platform: [{os}, {arch}]
Python Version: [{python_version}]
Python Location: [{python_location}]
Site Packages: [{site_packages}]
""".format(
version=self.version,
os=self.operating_system.name,
arch=self.architecture.name,
python_location=sys.executable,
python_version=self.python_version.replace('\n', ' '),
site_packages='Enabled' if self.site_packages else 'Disabled'))
class InstallationInfo(object):
"""Holds information about your Cloud SDK installation."""
def __init__(self):
self.sdk_root = config.Paths().sdk_root
self.release_channel = config.INSTALLATION_CONFIG.release_channel
self.repo_url = config.INSTALLATION_CONFIG.snapshot_url
repos = properties.VALUES.component_manager.additional_repositories.Get(
validate=False)
self.additional_repos = repos.split(',') if repos else []
self.path = os.environ.get('PATH', '')
if self.sdk_root:
manager = update_manager.UpdateManager()
self.components = manager.GetCurrentVersionsInformation()
self.old_tool_paths = manager.FindAllOldToolsOnPath()
paths = [os.path.realpath(p) for p in self.path.split(os.pathsep)]
this_path = os.path.realpath(
os.path.join(self.sdk_root,
update_manager.UpdateManager.BIN_DIR_NAME))
# TODO(user): Validate symlinks in /usr/local/bin when we start
# creating them.
self.on_path = this_path in paths
else:
self.components = {}
self.old_tool_paths = []
self.on_path = False
def __str__(self):
out = StringIO.StringIO()
out.write('Installation Root: [{0}]\n'.format(
self.sdk_root if self.sdk_root else 'N/A'))
if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
out.write('Release Channel: [{0}]\n'.format(self.release_channel))
out.write('Repository URL: [{0}]\n'.format(self.repo_url))
if self.additional_repos:
out.write('Additional Repositories:\n {0}\n'.format(
'\n '.join(self.additional_repos)))
if self.components:
components = ['{0}: [{1}]'.format(name, value) for name, value in
self.components.iteritems()]
out.write('Installed Components:\n {0}\n'.format(
'\n '.join(components)))
out.write('System PATH: [{0}]\n'.format(self.path))
out.write('Cloud SDK on PATH: [{0}]\n'.format(self.on_path))
if self.old_tool_paths:
out.write('\nWARNING: There are old versions of the Google Cloud '
'Platform tools on your system PATH.\n {0}\n'
.format('\n '.join(self.old_tool_paths)))
return out.getvalue()
class ConfigInfo(object):
"""Holds information about where config is stored and what values are set."""
def __init__(self):
self.paths = config.Paths()
self.active_config_name = named_configs.GetNameOfActiveNamedConfig()
if self.active_config_name is not None:
self.active_config_path = named_configs.GetFileForActiveNamedConfig()
self.account = properties.VALUES.core.account.Get(validate=False)
self.project = properties.VALUES.core.project.Get(validate=False)
self.properties = properties.VALUES.AllValues()
def __str__(self):
out = StringIO.StringIO()
out.write('Installation Properties: [{0}]\n'
.format(self.paths.installation_properties_path))
out.write('User Config Directory: [{0}]\n'
.format(self.paths.global_config_dir))
if self.active_config_name is not None:
out.write('Active Configuration Name: [{0}]\n'
.format(self.active_config_name))
out.write('Active Configuration Path: [{0}]\n\n'
.format(self.active_config_path))
else:
out.write('User Properties: [{0}]\n\n'
.format(self.paths.user_properties_path))
out.write('Account: [{0}]\n'.format(self.account))
out.write('Project: [{0}]\n\n'.format(self.project))
out.write('Current Properties:\n')
for section, props in self.properties.iteritems():
out.write(' [{section}]\n'.format(section=section))
for name, value in props.iteritems():
out.write(' {name}: [{value}]\n'.format(
name=name, value=value))
return out.getvalue()
def RecentLogFiles(logs_dir, num=1):
"""Finds the most recent (not current) gcloud log files.
Args:
logs_dir: str, The path to the logs directory being used.
num: the number of log files to find
Returns:
A list of full paths to the latest num log files, excluding the current
log file. If there are fewer than num log files, include all of
them. They will be in chronological order.
"""
date_dirs = FilesSortedByName(logs_dir)
if not date_dirs:
return []
found_files = []
for date_dir in reversed(date_dirs):
log_files = reversed(FilesSortedByName(date_dir) or [])
found_files.extend(log_files)
if len(found_files) >= num + 1:
return found_files[1:num+1]
return found_files[1:]
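# Layout assumed above: logs are grouped into per-day directories, e.g.
# logs/2016.01.01/12.34.56.000000.log, so sorting names lexicographically is
# the same as sorting them chronologically.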
def LastLogFile(logs_dir):
"""Finds the last (not current) gcloud log file.
Args:
logs_dir: str, The path to the logs directory being used.
Returns:
str, The full path to the last (but not the currently in use) log file
if it exists, or None.
"""
files = RecentLogFiles(logs_dir)
if files:
return files[0]
return None
def FilesSortedByName(directory):
"""Gets the list of files in the given directory, sorted by name.
Args:
directory: str, The path to the directory to list.
Returns:
[str], The full paths of the files, sorted by file name, or None.
"""
if not os.path.isdir(directory):
return None
dates = os.listdir(directory)
if not dates:
return None
return [os.path.join(directory, date) for date in sorted(dates)]
class LogData(object):
"""Representation of a log file.
Stores information such as the name of the log file, its contents, and the
command run.
"""
# This precedes the traceback in the log file.
TRACEBACK_MARKER = 'BEGIN CRASH STACKTRACE\n'
# This shows the command run in the log file
COMMAND_REGEXP = r'Running (gcloud\.[a-z.]+)'
def __init__(self, filename, command, contents, traceback):
self.filename = filename
self.command = command
self.contents = contents
self.traceback = traceback
def __str__(self):
crash_detected = ' (crash detected)' if self.traceback else ''
return '[{0}]: [{1}]{2}'.format(self.relative_path, self.command,
crash_detected)
@property
def relative_path(self):
logs_dir = config.Paths().logs_dir
if not self.filename.startswith(logs_dir):
return self.filename
return self.filename[len(logs_dir + os.path.sep):]
@property
def date(self):
"""Return the date that this log file was created, based on its filename.
Returns:
datetime.datetime that the log file was created or None, if the filename
pattern was not recognized.
"""
datetime_string = ':'.join(os.path.split(self.relative_path))
datetime_format = (log.DAY_DIR_FORMAT + ':' + log.FILENAME_FORMAT +
log.LOG_FILE_EXTENSION)
try:
return datetime.datetime.strptime(datetime_string, datetime_format)
except ValueError:
# This shouldn't happen, but it's better not to crash because of it.
return None
@classmethod
def FromFile(cls, log_file):
"""Parse the file at the given path into a LogData.
Args:
log_file: str, the path to the log file to read
Returns:
LogData, representation of the log file
"""
with open(log_file) as log_fp:
contents = log_fp.read()
traceback = None
command = None
match = re.search(cls.COMMAND_REGEXP, contents)
if match:
# ex. gcloud.group.subgroup.command
dotted_cmd_string, = match.groups()
command = ' '.join(dotted_cmd_string.split('.'))
if cls.TRACEBACK_MARKER in contents:
traceback = (contents.split(cls.TRACEBACK_MARKER)[-1])
# Trim any log lines that follow the traceback
traceback = re.split(log.LOG_PREFIX_PATTERN, traceback)[0]
traceback = traceback.strip()
return cls(log_file, command, contents, traceback)
class LogsInfo(object):
"""Holds information about where logs are located."""
NUM_RECENT_LOG_FILES = 5
def __init__(self):
paths = config.Paths()
self.logs_dir = paths.logs_dir
self.last_log = LastLogFile(self.logs_dir)
self.last_logs = RecentLogFiles(self.logs_dir, self.NUM_RECENT_LOG_FILES)
def __str__(self):
return textwrap.dedent("""\
Logs Directory: [{logs_dir}]
Last Log File: [{log_file}]
""".format(logs_dir=self.logs_dir, log_file=self.last_log))
def LastLogContents(self):
if not self.last_log:
return ''
with open(self.last_log) as fp:
return fp.read()
def GetRecentRuns(self):
"""Return the most recent runs, as reported by info_holder.LogsInfo.
Returns:
A list of LogData
"""
return [LogData.FromFile(log_file) for log_file in self.last_logs]
# license: bsd-3-clause

# ---
# repo: liquidinstruments/pymoku
# path: pymoku/_waveform_generator.py
import math
import logging
import warnings
from pymoku._instrument import to_reg_unsigned
from pymoku._instrument import from_reg_unsigned
from pymoku._instrument import to_reg_signed
from pymoku._instrument import from_reg_signed
from pymoku._instrument import deprecated
from pymoku._instrument import MokuInstrument
from pymoku._instrument import needs_commit
from pymoku._instrument import ValueOutOfRangeException
from pymoku._instrument import DAC_SMP_RATE
from pymoku import _utils
from pymoku._trigger import Trigger
from pymoku._sweep_generator import SweepGenerator
warnings.simplefilter('always', DeprecationWarning)
log = logging.getLogger(__name__)
REG_BASE_MOD_0 = 43
REG_BASE_MOD_1 = 60
REG_BASE_WAV_0 = 80
REG_BASE_WAV_1 = 104
REG_GATETHRESH_L_CH1 = 76
REG_GATETHRESH_H_CH1 = 77
REG_GATETHRESH_L_CH2 = 78
REG_GATETHRESH_H_CH2 = 79
_WG_WAVE_SINE = 0
_WG_WAVE_SQUARE = 1
_WG_MOD_NONE = 0
_WG_MOD_AMPL = 1
_WG_MOD_FREQ = 2
_WG_MOD_PHASE = 4
_WG_MODSOURCE_INT = 0
_WG_MODSOURCE_ADC = 1
_WG_MODSOURCE_DAC = 2
_WG_FREQSCALE = 1.0e9 / 2**64
_WG_FREQSCALE_SQR = 1.0e9 / 2**48
_WG_PERIODSCALE_SQR = 2**48 - 1
_WG_RISESCALE = 2**24
_WG_MAX_RISE = 1.0 / (2 ** 39 - 1)
_WG_TIMESCALE = 1.0 / (2**32 - 1) # Doesn't wrap
_WG_MOD_FREQ_MAX = 62.5e6
_WG_MOD_DEPTH_MAX = 2.0 ** 31 - 1 # 100% modulation depth in bits
_WG_TRIG_ADC1 = 0
_WG_TRIG_ADC2 = 1
_WG_TRIG_DAC1 = 2
_WG_TRIG_DAC2 = 3
_WG_TRIG_EXT = 4
_WG_TRIG_INTER = 5
_WG_MOD_ADC1 = 0
_WG_MOD_ADC2 = 1
_WG_MOD_DAC1 = 2
_WG_MOD_DAC2 = 3
_WG_MOD_INTER = 4
_WG_MOD_GATE = 5
_WG_GATE_ADC = 0
_WG_GATE_DAC = 1
_WG_GATE_SWEEP = 2
_WG_GATE_EXT = 3
_WG_TRIG_MODE_OFF = 0
_WG_TRIG_MODE_GATE = 1
_WG_TRIG_MODE_START = 2
_WG_TRIG_MODE_NCYCLE = 3
_WG_TRIG_MODE_SWEEP = 4
_WG_TRIGLVL_ADC_MAX = 5.0
_WG_TRIGLVL_ADC_MIN = -5.0
_WG_TRIGLVL_DAC_MAX = 1.0
_WG_TRIGLVL_DAC_MIN = -1.0
class BasicWaveformGenerator(MokuInstrument):
"""
.. automethod:: pymoku.instruments.WaveformGenerator.__init__
"""
def __init__(self):
""" Create a new WaveformGenerator instance, ready to be attached to a
Moku."""
super(BasicWaveformGenerator, self).__init__()
self._register_accessors(_wavegen_reg_handlers)
self.id = 4
self.type = "signal_generator"
self._sweep1 = SweepGenerator(self, REG_BASE_WAV_0 + 3)
self._sweep2 = SweepGenerator(self, REG_BASE_WAV_1 + 3)
self.enable_reset_ch1 = False
self.enable_reset_ch2 = False
@needs_commit
def set_defaults(self):
super(BasicWaveformGenerator, self).set_defaults()
self.enable_ch1 = True
self.enable_ch2 = True
self.out1_amplitude = 0
self.out2_amplitude = 0
self.adc1_statuslight = False
self.adc2_statuslight = False
# Init channel sweep gens:
self._set_sweepgenerator(self._sweep1, 0, 0, 0, 0, 0, 0, 0)
self._set_sweepgenerator(self._sweep2, 0, 0, 0, 0, 0, 0, 0)
# Disable inputs on hardware that supports it
self.en_in_ch1 = True
self.en_in_ch2 = True
# Configure front end:
self._set_frontend(channel=1, fiftyr=True, atten=False, ac=False)
self._set_frontend(channel=2, fiftyr=True, atten=False, ac=False)
def _set_sweepgenerator(self, sweepgen, waveform=None, waitfortrig=None,
frequency=None, offset=None, logsweep=None,
duration=None, holdlast=None):
sweepgen.waveform = 2
sweepgen.stop = (2**64 - 1)
sweepgen.direction = 0
if waitfortrig is not None:
sweepgen.waitfortrig = waitfortrig
if offset is not None:
sweepgen.start = offset / 360.0 * (2**64 - 1)
if frequency is not None:
sweepgen.step = frequency / _WG_FREQSCALE
if duration is not None:
sweepgen.duration = duration * 125.0e6
if logsweep is not None:
sweepgen.logsweep = logsweep
if holdlast is not None:
sweepgen.holdlast = holdlast
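    # Register scaling worked example: a 1 MHz output with a 90 degree phase
    # offset maps to step = 1e6 / (1e9 / 2**64) accumulator units per second
    # and start = 90 / 360.0 * (2**64 - 1), i.e. one quarter of the 64-bit
    # phase accumulator range.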
@needs_commit
def gen_sinewave(self, ch, amplitude, frequency, offset=0, phase=0.0):
""" Generate a Sine Wave with the given parameters on the given
channel.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, [0.0,2.0] Vpp
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, [0,250e6] Hz
:param frequency: Frequency of the wave
:type offset: float, [-1.0,1.0] Volts
:param offset: DC offset applied to the waveform
:type phase: float, [0-360] degrees
:param phase: Phase offset of the wave
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
_utils.check_parameter_valid(
'range', amplitude, [0.0, 2.0], 'sinewave amplitude', 'Volts')
_utils.check_parameter_valid(
'range', frequency, [0, 250e6], 'sinewave frequency', 'Hz')
_utils.check_parameter_valid(
'range', offset, [-1.0, 1.0], 'sinewave offset', 'Volts')
_utils.check_parameter_valid(
'range', phase, [0, 360], 'sinewave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude / 2.0)
lower_voltage = offset - (amplitude / 2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException(
"Sinewave offset limited by amplitude (max output "
"range 2.0Vpp).")
if ch == 1:
self.enable_ch1 = True
self._set_sweepgenerator(
sweepgen=self._sweep1, frequency=frequency, offset=phase)
self.amplitude_ch1 = amplitude
self.offset_ch1 = offset
self.waveform_type_ch1 = _WG_WAVE_SINE
self.phase_dly_ch1 = (11 * frequency / 125e6) % 1 * 2**32
elif ch == 2:
self.enable_ch2 = True
self._set_sweepgenerator(
sweepgen=self._sweep2, frequency=frequency, offset=phase)
self.amplitude_ch2 = amplitude
self.offset_ch2 = offset
self.waveform_type_ch2 = _WG_WAVE_SINE
self.phase_dly_ch2 = (11 * frequency / 125e6) % 1 * 2**32
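    # Usage sketch (assumes `i` is a deployed WaveformGenerator):
    #   i.gen_sinewave(1, amplitude=1.0, frequency=1e6, offset=0.1, phase=90)
    # gives a 1 MHz, 1.0 Vpp sine on channel 1 centred on +0.1 V.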
@needs_commit
def gen_squarewave(self, ch, amplitude, frequency, offset=0.0,
duty=0.5, risetime=0.0, falltime=0.0, phase=0.0):
""" Generate a Square Wave with given parameters on the given channel.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, [0, 2.0] volts
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, [0, 100e6] hertz
:param frequency: Frequency of the wave
:type offset: float, [-1.0, 1.0] volts
:param offset: DC offset applied to the waveform
:type duty: float, [0, 1.0]
:param duty: Fractional duty cycle
:type risetime: float, [0, 1.0]
:param risetime: Fraction of a cycle taken for the waveform to rise
:type falltime: float [0, 1.0]
:param falltime: Fraction of a cycle taken for the waveform to fall
:type phase: float, degrees 0-360
:param phase: Phase offset of the wave
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
_utils.check_parameter_valid(
'range', amplitude, [0.0, 2.0], 'squarewave amplitude', 'Volts')
_utils.check_parameter_valid(
'range', frequency, [0, 100e6], 'squarewave frequency', 'Hz')
_utils.check_parameter_valid(
'range', offset, [-1.0, 1.0], 'squarewave offset', 'Volts')
_utils.check_parameter_valid(
'range', duty, [0, 1.0], 'squarewave duty', 'cycles')
_utils.check_parameter_valid(
'range', risetime, [0, 1.0], 'squarewave risetime', 'cycles')
_utils.check_parameter_valid(
'range', falltime, [0, 1.0], 'squarewave falltime', 'cycles')
_utils.check_parameter_valid(
'range', phase, [0, 360], 'squarewave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude / 2.0)
lower_voltage = offset - (amplitude / 2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException(
"Squarewave offset limited by amplitude (max output "
"range 2.0Vpp).")
frequency = float(frequency)
if duty < risetime:
raise ValueOutOfRangeException(
"Squarewave duty too small for given rise time.")
elif duty + falltime > 1:
raise ValueOutOfRangeException(
"Squarewave duty and fall time too big.")
# ensure duty cycle and fall/rise time combinations don't overflow
if frequency != 0:
minedgetime = 4.0e-9 * frequency
if risetime < minedgetime:
risetime = minedgetime
log.warning(
"WARNING: Risetime restricted to minimum value of 4 ns.")
if falltime < minedgetime:
falltime = minedgetime
log.warning(
"WARNING: Falltime restricted to minimum value of 4 ns.")
if duty < minedgetime:
duty = minedgetime
log.warning("WARNING: Duty cycle restricted to %s" % duty)
if duty > 1 - minedgetime:
duty = 1 - minedgetime
log.warning("WARNING: Duty cycle restricted to %s" % duty)
if risetime > 1 - minedgetime:
risetime = 1 - minedgetime
log.warning("WARNING: Risetime restricted to maximum value.")
if falltime > 1 - minedgetime:
falltime = 1 - minedgetime
log.warning("WARNING: Falltime restricted to maximum value.")
else:
falltime = _WG_MAX_RISE
risetime = _WG_MAX_RISE
# Set rise/fall rate and t0, t1 and t2
t0 = risetime
t1 = duty
t2 = duty + falltime
phase_dly = 0
if ch == 1:
self.waveform_type_ch1 = _WG_WAVE_SQUARE
self.enable_ch1 = True
self._set_sweepgenerator(sweepgen=self._sweep1,
frequency=frequency,
offset=phase,
holdlast=0)
self.amplitude_ch1 = amplitude
self.offset_ch1 = offset
# This is overdefined, but saves the FPGA doing a tricky division
self.t0_ch1 = t0
self.t1_ch1 = t1
self.t2_ch1 = t2
self.riserate_ch1 = risetime
self.fallrate_ch1 = -falltime
self.phase_dly_ch1 = phase_dly
elif ch == 2:
self.waveform_type_ch2 = _WG_WAVE_SQUARE
self.enable_ch2 = True
self._set_sweepgenerator(sweepgen=self._sweep2,
frequency=frequency,
offset=phase,
holdlast=0)
self.amplitude_ch2 = amplitude
self.offset_ch2 = offset
self.t0_ch2 = t0
self.t1_ch2 = t1
self.t2_ch2 = t2
self.riserate_ch2 = risetime
self.fallrate_ch2 = -falltime
self.phase_dly_ch2 = phase_dly
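    # Hypothetical usage sketch (assumes `gen` is a deployed
    # WaveformGenerator instance; the Moku connection is outside this file):
    #   gen.gen_squarewave(1, amplitude=1.0, frequency=1e3, duty=0.3,
    #                      risetime=0.01, falltime=0.01)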
@needs_commit
def gen_rampwave(
self, ch, amplitude, frequency, offset=0, symmetry=0.5, phase=0.0):
""" Generate a Ramp with the given parameters on the given channel.
This is a wrapper around the Square Wave generator,
using the *riserate* and *fallrate* parameters to form the ramp.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, [0, 2.0] volts
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, [0, 100e6] hertz
:param frequency: Frequency of the wave
:type offset: float, [-1.0, 1.0] volts
:param offset: DC offset applied to the waveform
:type symmetry: float, [0, 1.0]
:param symmetry: Fraction of the cycle rising.
:type phase: float, degrees [0, 360]
:param phase: Phase offset of the wave
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
_utils.check_parameter_valid(
'range', amplitude, [0.0, 2.0], 'rampwave amplitude', 'Volts')
_utils.check_parameter_valid(
'range', frequency, [0, 100e6], 'rampwave frequency', 'Hz')
_utils.check_parameter_valid(
            'range', offset, [-1.0, 1.0], 'rampwave offset', 'Volts')
_utils.check_parameter_valid(
'range', symmetry, [0, 1.0], 'rampwave symmetry', 'fraction')
_utils.check_parameter_valid(
'range', phase, [0, 360], 'rampwave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude / 2.0)
lower_voltage = offset - (amplitude / 2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException(
"Rampwave offset limited by amplitude "
"(max output range 2.0Vpp).")
self.gen_squarewave(ch, amplitude, frequency,
offset=offset, duty=symmetry,
risetime=symmetry,
falltime=1 - symmetry,
phase=phase)
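    # Hypothetical usage sketch: symmetry=0.25 gives a ramp that rises for a
    # quarter of each cycle and falls for the remainder:
    #   gen.gen_rampwave(2, amplitude=0.5, frequency=10.0, symmetry=0.25)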
@needs_commit
def sync_phase(self):
""" Synchronize the phase of both output channels.
        The phase of both channels is reset to their respective phase offset
values.
"""
self.enable_reset_ch1 = True
self.enable_reset_ch2 = True
@needs_commit
def gen_off(self, ch=None):
""" Turn Waveform Generator output(s) off.
The channel will be turned on when configuring the waveform type but
can be turned off using this function. If *ch* is None (the default),
both channels will be turned off, otherwise just the one specified by
the argument.
:type ch: int; {1,2} or None
:param ch: Channel to turn off, or both.
"""
_utils.check_parameter_valid(
'set', ch, [1, 2], 'output channel', allow_none=True)
if ch is None or ch == 1:
self.enable_ch1 = False
if ch is None or ch == 2:
self.enable_ch2 = False
class WaveformGenerator(BasicWaveformGenerator):
""" Waveform Generator instrument object.
    To run a new Waveform Generator instrument, this should be instantiated
    and deployed via a connected :any:`Moku` object using
    :any:`deploy_instrument`. Alternatively, a pre-configured instrument
    object can be obtained by discovering an already running Waveform
    Generator instrument on a Moku:Lab device via :any:`discover_instrument`.
.. automethod:: pymoku.instruments.WaveformGenerator.__init__
.. attribute:: type
:annotation: = "signal_generator"
Name of this instrument.
"""
def __init__(self):
""" Create a new WaveformGenerator instance, ready to be attached to a
Moku."""
super(WaveformGenerator, self).__init__()
self._register_accessors(_wavegen_mod_reg_handlers)
# Define any (non-register-mapped) properties that are used when
# committing as a commit is called when the instrument is set running
self.trig_volts_ch1 = 0.0
self.trig_volts_ch2 = 0.0
self._trigger1 = Trigger(self, 28)
self._trigger2 = Trigger(self, 45)
self._sweepmod1 = SweepGenerator(self, 34)
self._sweepmod2 = SweepGenerator(self, 51)
@needs_commit
def set_defaults(self):
super(WaveformGenerator, self).set_defaults()
self._init_trig_modulation(1)
self._init_trig_modulation(2)
self.phasedly_en_ch1 = 1
self.phasedly_en_ch2 = 1
self.sine_trigdly_ch1 = 0
self.sine_trigdly_ch2 = 0
def _init_trig_modulation(self, ch):
# initialise the state of all modules used in modulation/trigger/sweep
# modes
if ch == 1:
# Set AM/FM/PM and sweep enable to zero:
self.amod_enable_ch1 = False
self.fmod_enable_ch1 = False
self.pmod_enable_ch1 = False
self.sweep_enable_ch1 = False
# Default trigger module values:
self._trigger1.trigtype = 0
self._trigger1.edge = 0
self._trigger1.pulsetype = 0
self._trigger1.hysteresis = 0
self._trigger1.timer = 0
self._trigger1.holdoff = 0
self._trigger1.auto_holdoff = 0
self._trigger1.ntrigger = 0
self._trigger1.ntrigger_mode = 0
self._trigger1.level = 0
self._trigger1.duration = 0
# Default modulating sweep generator values:
self._sweepmod1.waveform = 0
self._sweepmod1.waitfortrig = 0
self._sweepmod1.holdlast = 0
self._sweepmod1.direction = 0
self._sweepmod1.logsweep = 0
self._sweepmod1.start = 0
self._sweepmod1.stop = 0
self._sweepmod1.step = 0
self._sweepmod1.duration = 0
# Trigger/modulation/gate source/threshold default values:
self.trig_source_ch1 = _WG_TRIG_ADC1
self.mod_source_ch1 = _WG_MOD_ADC1
self.gate_thresh_ch1 = 0
self.mod_depth_ch1 = 0
# Default waveform sweep generator values that are touched in
# modulation/trigger/sweep modes:
self._sweep1.waitfortrig = 0
self._sweep1.duration = 0
self._sweep1.holdlast = 0
# Gated mode flag used to toggle amplitude division by 2 on
# the FPGA
self.gate_mode_ch1 = 0
# Trigger mode flag to enable calibration calculations in
# _update_dependent_regs function
self.trig_sweep_mode_ch1 = 0
# Phase delay flag, trig delay flag
self.phasedly_en_ch1 = 1
self.sine_trigdly_ch1 = 0
else:
# Set AM/FM/PM and sweep enable to zero:
self.amod_enable_ch2 = False
self.fmod_enable_ch2 = False
self.pmod_enable_ch2 = False
self.sweep_enable_ch2 = False
# Default trigger module values:
self._trigger2.trigtype = 0
self._trigger2.edge = 0
self._trigger2.pulsetype = 0
self._trigger2.hysteresis = 0
self._trigger2.timer = 0
self._trigger2.holdoff = 0
self._trigger2.auto_holdoff = 0
self._trigger2.ntrigger = 0
self._trigger2.ntrigger_mode = 0
self._trigger2.level = 0
self._trigger2.duration = 0
# Default modulating sweep generator values:
self._sweepmod2.waveform = 0
self._sweepmod2.waitfortrig = 0
self._sweepmod2.holdlast = 0
self._sweepmod2.direction = 0
self._sweepmod2.logsweep = 0
self._sweepmod2.start = 0
self._sweepmod2.stop = 0
self._sweepmod2.step = 0
self._sweepmod2.duration = 0
# Trigger/modulation/gate source/threshold default values:
self.trig_source_ch2 = _WG_TRIG_ADC2
self.mod_source_ch2 = _WG_MOD_ADC2
self.gate_thresh_ch2 = 0
self.mod_depth_ch2 = 0
# Default waveform sweep generator values that are touched in
# modulation/trigger/sweep modes:
self._sweep2.waitfortrig = 0
self._sweep2.duration = 0
self._sweep2.holdlast = 0
# Gated mode flag used to toggle amplitude division by 2 on
# the FPGA
self.gate_mode_ch2 = 0
# Trigger mode flag to enable calibration calculations in
# _update_dependent_regs function
self.trig_sweep_mode_ch2 = 0
# Phase delay flag, trig delay flag
self.phasedly_en_ch2 = 1
self.sine_trigdly_ch2 = 0
@needs_commit
@deprecated(category='param',
message="'in' and 'out' trigger sources have been deprecated."
" Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.")
def set_trigger(self, ch, mode, ncycles=1, sweep_start_freq=None,
sweep_end_freq=0, sweep_duration=1.0e-3,
trigger_source='adc1', trigger_threshold=0.0,
internal_trig_period=1.0, internal_trig_high=0.5):
""" Configure gated, start, ncycle or sweep trigger mode on target
channel.
The trigger event can come from an ADC input channel, the opposite
generated waveform, the external trigger input (for hardware that
        supports that) or an internally-generated clock of configurable
period.
The trigger event can be used in several different ways:
- *gated*: The output waveform is only generated while the trigger is
asserted
- *start*: The output waveform is enabled once the trigger event fires
- *ncycle*: The output waveform starts at a trigger event and
completes the given number of cycles, before turning off and
re-arming
- *sweep*: The trigger event starts the waveform generation at the
*sweep_start_freq*, before automatically sweeping the
frequency to *sweep_end_freq* over the course of *sweep_duration*
seconds.
:type ch: int
:param ch: target channel.
        :type mode: string, {'gated', 'start', 'ncycle', 'sweep'}
:param mode: Select the mode in which the trigger is operated.
:type ncycles: int, [1, 1e6]
:param ncycles: integer number of signal repetitions in ncycle mode.
:type sweep_start_freq: float, [0.0,250.0e6], hertz
:param sweep_start_freq: starting sweep frequency, set to current
waveform frequency if not specified. Value range may vary for
different waveforms.
:type sweep_end_freq: float, [0.0,250.0e6], hertz
:param sweep_end_freq: finishing sweep frequency. Value range may vary
for different waveforms.
:type sweep_duration: float, [1.0e-3,1000.0], seconds
:param sweep_duration: sweep duration in seconds.
:type trigger_source: string {'adc1','adc2', 'dac1', 'dac2',
'external', 'internal', 'in', 'out'}
        :param trigger_source: defines which source should be used as the
            triggering signal. The 'in' and 'out' sources are deprecated.
:type trigger_threshold: float, [-5, 5], volts
        :param trigger_threshold: The threshold value range depends on the
            source and the attenuation used. Value ranges may be smaller for
            different settings.
        :type internal_trig_period: float, [100e-9, 1000.0], seconds
:param internal_trig_period: period of the internal trigger clock,
if used.
        :type internal_trig_high: float, [10e-9, 1000.0], seconds
:param internal_trig_high: High time of the internal trigger clock,
if used. Must be less than the internal trigger period.
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
_utils.check_parameter_valid(
'set', mode, ['gated', 'start', 'ncycle', 'sweep'], 'trigger mode')
_utils.check_parameter_valid(
'set', trigger_source, ['adc1',
'adc2',
'dac1',
'dac2',
'external',
'internal',
'in',
'out'], 'trigger source')
_utils.check_parameter_valid('range', ncycles, [1, 1e6], 'ncycles')
_utils.check_parameter_valid(
'range', sweep_duration, [0.001, 1000.0],
'sweep duration', 'seconds')
_utils.check_parameter_valid(
'range', internal_trig_period, [100.0e-9, 1000.0],
'internal trigger period', 'seconds')
_utils.check_parameter_valid(
'range', internal_trig_high, [10.0e-9, 1000.0],
'internal trigger high time', 'seconds')
if trigger_source in ['in', 'out']:
warnings.warn(
message="'in' and 'out' trigger sources have been deprecated. "
"Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.",
category=DeprecationWarning,
stacklevel=1
)
# 'in' and 'out' trigger sources are deprecated sources.
# Convert to adc/dac source type:
if ch == 1:
if trigger_source == 'in':
trigger_source = 'adc1'
elif trigger_source == 'out':
trigger_source = 'dac2'
if ch == 2:
if trigger_source == 'in':
trigger_source = 'adc2'
elif trigger_source == 'out':
trigger_source = 'dac1'
# Can't use current channel as trigger mode source:
if ch == 1 and trigger_source == 'dac1':
raise ValueOutOfRangeException(
"dac1 cannot be used as the trigger source for trigger "
"mode on channel 1.")
elif ch == 2 and trigger_source == 'dac2':
raise ValueOutOfRangeException(
"dac2 cannot be used as the trigger source for trigger "
"mode on channel 2.")
# Can't use modulation with trigger/sweep modes
self.set_modulate_trig_off(ch)
# Configure trigger and source settings:
if ch == 1:
_WG_TRIG_ADC = _WG_TRIG_ADC2
_WG_TRIG_DAC = _WG_TRIG_DAC1
else:
_WG_TRIG_ADC = _WG_TRIG_ADC1
_WG_TRIG_DAC = _WG_TRIG_DAC2
_str_to_trigger_source = {
'adc1': _WG_TRIG_ADC1,
'adc2': _WG_TRIG_ADC2,
'dac1': _WG_TRIG_DAC1,
'dac2': _WG_TRIG_DAC2,
'external': _WG_TRIG_EXT,
'internal': _WG_TRIG_INTER
}
trigger_source = _utils.str_to_val(_str_to_trigger_source,
trigger_source,
'trigger source')
if trigger_source is _WG_TRIG_ADC:
_utils.check_parameter_valid('range', trigger_threshold,
[_WG_TRIGLVL_ADC_MIN,
_WG_TRIGLVL_ADC_MAX],
'trigger threshold', 'Volts')
elif trigger_source is _WG_TRIG_DAC:
_utils.check_parameter_valid('range', trigger_threshold,
[_WG_TRIGLVL_DAC_MIN,
_WG_TRIGLVL_DAC_MAX],
'trigger threshold', 'Volts')
# The internal trigger's duty cycle is only used in gated burst mode.
# Duty cycle is limited such that the duty period is not
# less than 8 ns and not greater than the trigger period minus 8 ns.
if internal_trig_high > internal_trig_period:
raise ValueOutOfRangeException(
"Internal trigger high must be less"
" than or equal to the internal trigger period.")
if (internal_trig_period - internal_trig_high) <= 8.0e-9:
internal_trig_high = internal_trig_period - 10.0e-9
if ch == 1:
self._trigger1.trigtype = 0
self._trigger1.edge = 0
self.trig_sweep_mode_ch1 = 1
elif ch == 2:
            self._trigger2.trigtype = 0
            self._trigger2.edge = 0
self.trig_sweep_mode_ch2 = 1
# Configure trigger mode settings:
_str_to_trigger_mode = {
'gated': _WG_TRIG_MODE_GATE,
'start': _WG_TRIG_MODE_START,
'ncycle': _WG_TRIG_MODE_NCYCLE,
'sweep': _WG_TRIG_MODE_SWEEP
}
mode = _utils.str_to_val(_str_to_trigger_mode, mode, 'trigger mode')
# set status light register
if ch == 1:
self.adc1_statuslight = True if (
trigger_source == _WG_TRIG_ADC1) else False
else:
self.adc2_statuslight = True if (
trigger_source == _WG_TRIG_ADC2) else False
if sweep_start_freq is None or mode != _WG_TRIG_MODE_SWEEP:
channel_frequency = (self._sweep1.step * _WG_FREQSCALE) \
if ch == 1 else (self._sweep2.step * _WG_FREQSCALE)
else:
channel_frequency = sweep_start_freq
waveform = self.waveform_type_ch1 if ch == 1 else \
self.waveform_type_ch2
# if waveform is a sinewave certain ranges do change
if waveform == _WG_WAVE_SINE:
_utils.check_parameter_valid('range',
sweep_end_freq,
[0.0, 250.0e6],
'sweep finishing frequency',
'frequency')
_utils.check_parameter_valid('range',
channel_frequency,
[0.0, 250.0e6],
'sweep starting frequency',
'frequency')
else:
_utils.check_parameter_valid('range',
sweep_end_freq,
[0.0, 100.0e6],
'sweep finishing frequency',
'frequency')
_utils.check_parameter_valid('range',
channel_frequency,
[0.0, 100.0e6],
'sweep starting frequency',
'frequency')
# minimum frequency deviation in sweep mode is 1 mHz
if abs(channel_frequency - sweep_end_freq) < 1.0e-3:
raise ValueOutOfRangeException(
"Frequency deviation in sweep mode is restricted to values "
"greater than 1 mHz.")
if mode == _WG_TRIG_MODE_GATE:
self._set_trigger_gated(ch, waveform, trigger_source,
trigger_threshold, internal_trig_period,
internal_trig_high)
elif mode == _WG_TRIG_MODE_START:
self._set_trigger_start(ch, trigger_source, trigger_threshold)
elif mode == _WG_TRIG_MODE_NCYCLE:
self._set_trigger_ncycle(ch, channel_frequency, ncycles,
trigger_threshold, trigger_source,
internal_trig_period)
elif mode == _WG_TRIG_MODE_SWEEP:
self._set_trigger_sweep(ch, waveform, trigger_source,
sweep_end_freq, channel_frequency,
sweep_duration, trigger_threshold)
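    # Hypothetical usage sketch (illustrative values only): sweep channel 1
    # up to 20 MHz over 10 ms, started by the external trigger input:
    #   gen.set_trigger(1, 'sweep', sweep_end_freq=20e6,
    #                   sweep_duration=10e-3, trigger_source='external')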
def _set_trigger_gated(self, ch, waveform, trigger_source,
trigger_threshold, internal_trig_period,
internal_trig_high):
# Threshold calculations. Calibration is applied in
# _update_dependent_regs
if trigger_source == _WG_TRIG_EXT:
trigger_threshold = 0
elif trigger_source == _WG_TRIG_INTER:
trigger_threshold = -2 ** 47 + (
1.0 - internal_trig_high / internal_trig_period) * (
2 ** 48 - 1)
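        # The expression above appears to map the internal clock's duty
        # fraction onto the signed 48-bit comparator range
        # [-2**47, 2**47 - 1]: a longer high time lowers the threshold, so
        # the gate stays asserted for the requested fraction of each period.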
if ch == 1:
self._sweepmod1.step = 1 / internal_trig_period / _WG_FREQSCALE
self._sweepmod1.waveform = 2
self._sweepmod1.direction = 1
else:
self._sweepmod2.step = 1 / internal_trig_period / _WG_FREQSCALE
self._sweepmod2.waveform = 2
self._sweepmod2.direction = 1
if ch == 1:
self.amod_enable_ch1 = True
self.mod_source_ch1 = _WG_MOD_GATE
self.mod_depth_uncalibrated_ch1 = 1.0
self._sweep1.waitfortrig = 0
self.trig_source_ch1 = trigger_source
self.gate_thresh_uncalibrated_ch1 = trigger_threshold
self.gate_mode_ch1 = 1
elif ch == 2:
self.amod_enable_ch2 = True
self.mod_source_ch2 = _WG_MOD_GATE
self.mod_depth_uncalibrated_ch2 = 1.0
self._sweep2.waitfortrig = 0
self.trig_source_ch2 = trigger_source
self.gate_thresh_uncalibrated_ch2 = trigger_threshold
self.gate_mode_ch2 = 1
def _set_trigger_start(self, ch, trigger_source, trigger_threshold):
# Internal trigger source cannot be used for burst start mode:
if trigger_source == _WG_TRIG_INTER:
raise ValueOutOfRangeException("The internal trigger source cannot"
" be used in start burst mode.")
# Calculate threshold level and configure modulating sweep generator.
        # Calibration is added to threshold in _update_dependent_regs.
if trigger_source == _WG_TRIG_EXT:
trigger_threshold = 0
if ch == 1:
self._sweepmod1.direction = 1
elif ch == 2:
self._sweepmod2.direction = 1
if ch == 1:
self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
self.trig_source_ch1 = trigger_source
self._sweep1.waitfortrig = 1
self._sweep1.duration = 0
self.enable_reset_ch1 = True
self.phasedly_en_ch1 = 0
self.sine_trigdly_ch1 = 1 if self.waveform_type_ch1 == \
_WG_WAVE_SINE else 0
elif ch == 2:
self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
self.trig_source_ch2 = trigger_source
self._sweep2.waitfortrig = 1
self._sweep2.duration = 0
self.enable_reset_ch2 = True
self.phasedly_en_ch2 = 0
self.sine_trigdly_ch2 = 1 if self.waveform_type_ch2 == \
_WG_WAVE_SINE else 0
def _set_trigger_ncycle(self, ch, channel_frequency, ncycles,
trigger_threshold, trigger_source,
internal_trig_period):
# Waveform frequencies are restricted to <= 10 MHz in Ncycle burst
# mode:
if channel_frequency > 10.0e6:
raise ValueOutOfRangeException(
"Waveform frequencies are restricted to 10 MHz or less in"
" Ncycle burst mode.")
# Calculate threshold level and configure modulating sweep generator.
        # Calibration is added to threshold in _update_dependent_regs.
if trigger_source == _WG_TRIG_EXT:
trigger_threshold = 0
elif trigger_source == _WG_TRIG_INTER:
trigger_threshold = 0
if ch == 1:
self._set_sweepgenerator(sweepgen=self._sweepmod1, waveform=2,
waitfortrig=0,
frequency=1.0 / internal_trig_period,
offset=0, logsweep=0, duration=0,
holdlast=0)
self._sweepmod1.direction = 1
elif ch == 2:
self._set_sweepgenerator(sweepgen=self._sweepmod2, waveform=2,
waitfortrig=0,
frequency=1.0 / internal_trig_period,
offset=0,
logsweep=0,
duration=0,
holdlast=0)
self._sweepmod2.direction = 1
# ensure combination of signal frequency and Ncycles doesn't cause
# 64 bit register overflow:
FPGA_cycles = (math.floor(
125e6 / channel_frequency * ncycles) - 1) if \
channel_frequency != 0.0 else 0
if FPGA_cycles > 2**63 - 1:
raise ValueOutOfRangeException("NCycle Register Overflow")
if ch == 1:
self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
self.trig_source_ch1 = trigger_source
self._sweep1.waitfortrig = 1
self._sweep1.duration = FPGA_cycles
self._sweep1.holdlast = 0
self.enable_reset_ch1 = True
self.phasedly_en_ch1 = 0
self.sine_trigdly_ch1 = 1 if \
self.waveform_type_ch1 == _WG_WAVE_SINE else 0
elif ch == 2:
self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
self.trig_source_ch2 = trigger_source
self._sweep2.waitfortrig = 1
self._sweep2.duration = FPGA_cycles
self._sweep2.holdlast = 0
self.enable_reset_ch2 = True
self.phasedly_en_ch2 = 0
self.sine_trigdly_ch2 = 1 if \
self.waveform_type_ch2 == _WG_WAVE_SINE else 0
def _set_trigger_sweep(self, ch, waveform, trigger_source, sweep_end_freq,
channel_frequency, sweep_duration,
trigger_threshold):
# Calculate threshold level and enable/disable continuous sweep.
        # Calibration is added to threshold in _update_dependent_regs.
if trigger_source == _WG_TRIG_EXT:
trigger_threshold = 0
mod_continuous_sweep = 1
elif trigger_source == _WG_TRIG_INTER:
trigger_threshold = 1
mod_continuous_sweep = 0
else:
mod_continuous_sweep = 1
# calculate sweep parameters:
mod_start_freq = 0
range_shift = 0
deltafreq_persecond = (sweep_end_freq - channel_frequency) / (
sweep_duration)
mod_step = abs(2.0**64 / 1e18 * deltafreq_persecond)
mod_duration_FPGAcycles = math.floor(sweep_duration * 125e6)
mod_stop_freq = mod_step * 1e9 * sweep_duration
range_shift = min(
math.floor(abs(math.log(max(mod_step / 2.0**64,
mod_stop_freq / 2.0**64), 2))), 63)
mod_step *= 2**range_shift
mod_stop_freq *= 2**range_shift
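        # Worked example (hypothetical numbers): sweeping a 1 MHz span over
        # 1 ms gives deltafreq_persecond = 1e9 Hz/s, so
        # mod_step = 2.0**64 / 1e18 * 1e9 ~= 1.8e10 accumulator counts
        # before range_shift rescales step and stop towards the top of the
        # 64-bit range to preserve precision.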
# check if reverse sweep:
if (sweep_end_freq - channel_frequency) < 0:
mod_direction = 1
else:
mod_direction = 0
if ch == 1:
self._set_sweepgenerator(sweepgen=self._sweep1,
frequency=channel_frequency,
waitfortrig=0)
self._sweepmod1.waitfortrig = mod_continuous_sweep
self._sweepmod1.start = mod_start_freq
self._sweepmod1.stop = mod_stop_freq
self._sweepmod1.step = mod_step
self._sweepmod1.duration = mod_duration_FPGAcycles
self._sweepmod1.direction = 0
self.reverse_sweep_ch1 = mod_direction
self._sweepmod1.waveform = 2
self._sweepmod1.holdlast = 0
self.amod_enable_ch1 = False
self.pmod_enable_ch1 = False
self.fmod_enable_ch1 = False
self.sweep_enable_ch1 = True
self.trig_source_ch1 = trigger_source
self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
self.range_shift_ch1 = range_shift
else:
self._set_sweepgenerator(sweepgen=self._sweep2,
frequency=channel_frequency,
waitfortrig=0)
self._sweepmod2.waitfortrig = mod_continuous_sweep
self._sweepmod2.start = mod_start_freq
self._sweepmod2.stop = mod_stop_freq
self._sweepmod2.step = mod_step
self._sweepmod2.duration = mod_duration_FPGAcycles
self._sweepmod2.direction = 0
self.reverse_sweep_ch2 = mod_direction
self._sweepmod2.waveform = 2
self._sweepmod2.holdlast = 0
self.amod_enable_ch2 = False
self.pmod_enable_ch2 = False
self.fmod_enable_ch2 = False
self.sweep_enable_ch2 = True
self.trig_source_ch2 = trigger_source
self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
self.range_shift_ch2 = range_shift
@needs_commit
@deprecated(category='method', message="'gen_modulate_off' has been "
"deprecated. Use set_modulate_trig_off instead.")
def gen_modulate_off(self, ch=None):
"""
'gen_modulate_off' has been deprecated. Use set_modulate_trig_off
instead.
Turn off modulation for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn modulation off.
"""
self.set_modulate_trig_off(ch)
@needs_commit
@deprecated(category='method', message="'gen_trigger_off' has been "
"deprecated. Use set_modulate_trig_off instead.")
def gen_trigger_off(self, ch=None):
"""
'gen_trigger_off' has been deprecated. Use set_modulate_trig_off
instead."
Turn off trigger/sweep mode for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn trigger/sweep mode off
"""
self.set_modulate_trig_off(ch)
@needs_commit
def set_modulate_trig_off(self, ch=None):
"""
Turn off modulation and trigger modes for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn modulation off.
"""
_utils.check_parameter_valid('set', ch, [1, 2],
'output channel', allow_none=True)
self._init_trig_modulation(ch)
@needs_commit
@deprecated(category='param',
message="'in' and 'out' modulation sources have been "
"deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.")
def gen_modulate(self, ch, mtype, source, depth, frequency=0.0):
"""
Set up modulation on an output channel.
:type ch: int; {1,2}
:param ch: Channel to modulate
:type mtype: string, {'amplitude', 'frequency', 'phase'}
        :param mtype: Modulation type: amplitude, frequency or phase
            modulation.
:type source: string,
{'adc1', 'adc2', 'dac1', 'dac2', 'internal', 'in', 'out'}
        :param source: Modulation source: the internal sine wave generator,
            an input (ADC) channel or an output (DAC) channel. The 'in' and
            'out' sources are deprecated.
        :type depth: float; 0-1, 0-125 MHz or 0-360 deg
:param depth: Modulation depth (depends on modulation type):
Fractional modulation depth, Frequency Deviation/Volt or +/-
phase shift/Volt
:type frequency: float
:param frequency: Frequency of internally-generated sine wave
modulation. This parameter is ignored if the source is set to
ADC or DAC.
:raises ValueOutOfRangeException: if the channel number is invalid or
modulation parameters can't be achieved
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'modulation channel')
_utils.check_parameter_valid(
'range', frequency, [0, 250e6], 'internal modulation frequency')
_utils.check_parameter_valid(
'set', mtype, ['amplitude',
'frequency',
'phase'], 'modulation type')
_utils.check_parameter_valid(
'set', source, ['adc1',
'adc2',
'dac1',
'dac2',
'internal',
'in',
'out'], 'modulation source')
if source in ['in', 'out']:
warnings.warn(
message="'in' and 'out' modulation sources have been "
"deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.",
category=DeprecationWarning,
stacklevel=1
)
# 'in' and 'out' sources are deprecated sources. Convert to adc/dac
# source type:
if ch == 1:
if source == 'in':
source = 'adc1'
elif source == 'out':
source = 'dac2'
if ch == 2:
if source == 'in':
source = 'adc2'
elif source == 'out':
source = 'dac1'
# Can't use current channel as trigger mode source:
if ch == 1 and source == 'dac1':
raise ValueOutOfRangeException(
"dac1 cannot be used as the modulation source for channel 1.")
elif ch == 2 and source == 'dac2':
raise ValueOutOfRangeException(
"dac2 cannot be used as the modulation source for channel 2.")
_str_to_modsource = {
'adc1': _WG_MOD_ADC1,
'adc2': _WG_MOD_ADC2,
'dac1': _WG_MOD_DAC1,
'dac2': _WG_MOD_DAC2,
'internal': _WG_MOD_INTER
}
_str_to_modtype = {
'amplitude': _WG_MOD_AMPL,
'frequency': _WG_MOD_FREQ,
'phase': _WG_MOD_PHASE
}
source = _utils.str_to_val(
_str_to_modsource, source, 'modulation source')
mtype = _utils.str_to_val(
            _str_to_modtype, mtype, 'modulation type')
# Maximum achievable modulation depth is limited when frontend
# attenuation is not enabled
if self.atten_compensate_ch1 == 0:
            log.warning("+/- 0.5 V voltage range is selected on input "
"channel 1. Maximum achievable modulation depth "
"may be limited.")
if self.atten_compensate_ch2 == 0:
            log.warning("+/- 0.5 V voltage range is selected on input "
"channel 2. Maximum achievable modulation depth "
"may be limited.")
# Calculate the depth value depending on modulation source and type.
# Calibration calculations for frontend variations done in
# _update_dependent_regs.
depth_parameter = 0.0
if mtype == _WG_MOD_AMPL:
_utils.check_parameter_valid('range', depth, [0.0, 1.0],
'amplitude modulation depth',
'fraction')
depth_parameter = depth
elif mtype == _WG_MOD_FREQ:
_utils.check_parameter_valid(
'range', depth, [0.0, _WG_MOD_FREQ_MAX],
'frequency modulation depth', 'Hz/V')
depth_parameter = depth / (DAC_SMP_RATE / 8.0)
elif mtype == _WG_MOD_PHASE:
_utils.check_parameter_valid(
'range', depth, [0.0, 360.0],
'phase modulation depth', 'degrees/V')
depth_parameter = depth / 360.0
# Can't use trigger/sweep modes at the same time as modulation
self.set_modulate_trig_off(ch)
if ch == 1:
self.mod_depth_uncalibrated_ch1 = depth_parameter
self.mod_source_ch1 = source
self.amod_enable_ch1 = True if mtype == _WG_MOD_AMPL else False
self.fmod_enable_ch1 = True if mtype == _WG_MOD_FREQ else False
self.pmod_enable_ch1 = True if mtype == _WG_MOD_PHASE else False
self.sweep_enable_ch1 = False
if source == _WG_MOD_INTER:
self._set_sweepgenerator(sweepgen=self._sweepmod1,
waveform=2,
waitfortrig=0,
frequency=frequency,
offset=0,
logsweep=0,
duration=0)
            self.adc1_statuslight = True if \
                source == _WG_MOD_ADC1 else False
elif ch == 2:
self.mod_depth_uncalibrated_ch2 = depth_parameter
self.mod_source_ch2 = source
self.amod_enable_ch2 = True if mtype == _WG_MOD_AMPL else False
self.fmod_enable_ch2 = True if mtype == _WG_MOD_FREQ else False
self.pmod_enable_ch2 = True if mtype == _WG_MOD_PHASE else False
self.sweep_enable_ch2 = False
if source == _WG_MOD_INTER:
self._set_sweepgenerator(sweepgen=self._sweepmod2,
waveform=2,
waitfortrig=0,
frequency=frequency,
offset=0,
logsweep=0,
duration=0)
            self.adc2_statuslight = True if \
                source == _WG_MOD_ADC2 else False
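    # Hypothetical usage sketch: 50% amplitude modulation from the internal
    # 10 Hz sine wave (illustrative values only):
    #   gen.gen_modulate(1, 'amplitude', 'internal', depth=0.5,
    #                    frequency=10.0)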
def _get_mod_depth_uncalibrated(self, ch):
# Calculate mod depth based on instrument state. Used when connecting
# to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1
else 1.0) * adc1,
2.0**11 / (8.0 if self.atten_compensate_ch2
else 1.0) * adc2,
2.0**14 * dac1,
2.0**14 * dac2,
1.0,
1.0]
if ch == 1:
mod_depth_uncalibrated = self.mod_depth_ch1 / \
mod_source_scalers[self.mod_source_ch1] / _WG_MOD_DEPTH_MAX
else:
mod_depth_uncalibrated = self.mod_depth_ch2 / \
mod_source_scalers[self.mod_source_ch2] / _WG_MOD_DEPTH_MAX
return mod_depth_uncalibrated
def _get_gate_thresh_uncalibrated(self, ch):
# Calculate gate threshold based on instrument state. Used when
# connecting to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
if ch == 1:
gate_thresh_uncalibrated = self.gate_thresh_ch1 * \
gate_source_scalers[self.trig_source_ch1]
else:
gate_thresh_uncalibrated = self.gate_thresh_ch2 * \
gate_source_scalers[self.trig_source_ch2]
return gate_thresh_uncalibrated
def _get_trig_thresh_uncalibrated(self, ch):
# Calculate trig threshold based on instrument state. Used when
# connecting to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
if ch == 1:
trig_threshold_uncalibrated = self._trigger1.level * \
trig_source_scalers[self.trig_source_ch1]
else:
trig_threshold_uncalibrated = self._trigger2.level * \
trig_source_scalers[self.trig_source_ch2]
return trig_threshold_uncalibrated
def _update_dependent_regs(self):
# Get the calibration coefficients of the front end
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
# Frontend attenuation flag for modulation
self.atten_compensate_ch1 = 1 if self._get_frontend(1)[1] else 0
self.atten_compensate_ch2 = 1 if self._get_frontend(2)[1] else 0
# Scaling source parameter arrays for each trigger/modulation mode.
mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1
else 1.0) * adc1,
2.0**11 / (8.0 if self.atten_compensate_ch2
else 1.0) * adc2,
2.0**14 * dac1,
2.0**14 * dac2,
1.0,
1.0]
gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
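        # The index order of these scaler lists appears to follow the
        # source constants (ADC1, ADC2, DAC1, DAC2, external, internal), so
        # they can be indexed directly by the mod/trig source register
        # value (inferred from usage below).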
# Channel 1 modulation depth
if (self.amod_enable_ch1 is True or self.pmod_enable_ch1 is True or (
self.fmod_enable_ch1 is True)):
try:
self.mod_depth_uncalibrated_ch1
except AttributeError:
self.mod_depth_uncalibrated_ch1 = \
self._get_mod_depth_uncalibrated(1)
self.mod_depth_ch1 = self.mod_depth_uncalibrated_ch1 * \
mod_source_scalers[self.mod_source_ch1] * _WG_MOD_DEPTH_MAX
# Channel 2 modulation depth
if (self.amod_enable_ch2 is True or self.pmod_enable_ch2 is True or (
self.fmod_enable_ch2 is True)):
try:
self.mod_depth_uncalibrated_ch2
except AttributeError:
self.mod_depth_uncalibrated_ch2 = \
self._get_mod_depth_uncalibrated(2)
self.mod_depth_ch2 = self.mod_depth_uncalibrated_ch2 * \
mod_source_scalers[self.mod_source_ch2] * _WG_MOD_DEPTH_MAX
# Channel 1 gate threshold
if self.gate_mode_ch1 == 1:
try:
self.gate_thresh_uncalibrated_ch1
except AttributeError:
self.gate_thresh_uncalibrated_ch1 = \
self._get_gate_thresh_uncalibrated(1)
self.gate_thresh_ch1 = self.gate_thresh_uncalibrated_ch1 / \
gate_source_scalers[self.trig_source_ch1]
# Channel 2 gate threshold
if self.gate_mode_ch2 == 1:
try:
self.gate_thresh_uncalibrated_ch2
except AttributeError:
self.gate_thresh_uncalibrated_ch2 = \
self._get_gate_thresh_uncalibrated(2)
self.gate_thresh_ch2 = self.gate_thresh_uncalibrated_ch2 / \
gate_source_scalers[self.trig_source_ch2]
# Channel 1 N cycle/start/sweep mode trigger threshold
if (self.trig_sweep_mode_ch1 == 1 and self.gate_mode_ch1 != 1):
try:
self.trigger_threshold_uncalibrated_ch1
except AttributeError:
self.trigger_threshold_uncalibrated_ch1 = \
self._get_trig_thresh_uncalibrated(1)
self._trigger1.level = self.trigger_threshold_uncalibrated_ch1 / \
trig_source_scalers[self.trig_source_ch1]
# Channel 2 N cycle/start/sweep mode trigger threshold
if (self.trig_sweep_mode_ch2 == 1 and self.gate_mode_ch2 != 1):
try:
self.trigger_threshold_uncalibrated_ch2
except AttributeError:
self.trigger_threshold_uncalibrated_ch2 = \
self._get_trig_thresh_uncalibrated(2)
self._trigger2.level = self.trigger_threshold_uncalibrated_ch2 / \
trig_source_scalers[self.trig_source_ch2]
def commit(self):
self._update_dependent_regs()
# Commit the register values to the device
super(WaveformGenerator, self).commit()
# Bring in the docstring from the superclass for our docco.
commit.__doc__ = MokuInstrument.commit.__doc__
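# Each entry below appears to map an instrument attribute to a tuple of
# (register address or addresses, pack handler, unpack handler); the
# to_reg_*/from_reg_* helpers build bit-field accessors at the given offset
# and width, with an optional xform applied on the way in or out (inferred
# from usage, not documented here).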
_wavegen_reg_handlers = {
# channel 1 control:
# modulation controls
'adc1_statuslight':
(REG_BASE_MOD_0,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'amod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'fmod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(2, 1),
from_reg_unsigned(2, 1)),
'pmod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(3, 1),
from_reg_unsigned(3, 1)),
'sweep_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(4, 1),
from_reg_unsigned(4, 1)),
'reverse_sweep_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(5, 1),
from_reg_unsigned(5, 1)),
'mod_source_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(6, 3),
from_reg_unsigned(6, 3)),
'atten_compensate_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(9, 1),
from_reg_unsigned(9, 1)),
'trig_source_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(10, 3),
from_reg_unsigned(10, 3)),
'range_shift_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(13, 6),
from_reg_unsigned(13, 6)),
'sine_trigdly_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(19, 1),
from_reg_unsigned(19, 1)),
'phasedly_en_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(20, 1),
from_reg_unsigned(20, 1)),
'trig_sweep_mode_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(29, 1),
from_reg_unsigned(29, 1)),
'gate_mode_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(30, 1),
from_reg_unsigned(30, 1)),
'mod_depth_ch1':
(REG_BASE_MOD_0 + 1,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'gate_thresh_ch1':
((REG_GATETHRESH_H_CH1, REG_GATETHRESH_L_CH1),
to_reg_signed(16, 48),
from_reg_signed(16, 48)),
# waveform controls
'enable_ch1':
(REG_BASE_WAV_0,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'waveform_type_ch1':
(REG_BASE_WAV_0,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'amplitude_ch1':
(REG_BASE_WAV_0 + 1,
to_reg_signed(0, 18,
xform=lambda obj, a: 2 * a / obj._dac_gains()[0]),
from_reg_signed(0, 18,
xform=lambda obj, a: 2 * a * obj._dac_gains()[0])),
'offset_ch1':
(REG_BASE_WAV_0 + 2,
to_reg_signed(0, 16,
xform=lambda obj, a: a / obj._dac_gains()[0]),
from_reg_signed(0, 16,
xform=lambda obj, a: a * obj._dac_gains()[0])),
't0_ch1':
((REG_BASE_WAV_0 + 13, REG_BASE_WAV_0 + 12),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't1_ch1':
((REG_BASE_WAV_0 + 15, REG_BASE_WAV_0 + 14),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't2_ch1':
((REG_BASE_WAV_0 + 17, REG_BASE_WAV_0 + 16),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
'riserate_ch1':
((REG_BASE_WAV_0 + 19, REG_BASE_WAV_0 + 18),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'fallrate_ch1':
((REG_BASE_WAV_0 + 21, REG_BASE_WAV_0 + 20),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'enable_reset_ch1':
(REG_BASE_WAV_0 + 22,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'phase_dly_ch1':
(REG_BASE_WAV_0 + 23,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
# channel 2 control:
# modulation controls
'adc2_statuslight':
(REG_BASE_MOD_1,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'amod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'fmod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(2, 1),
from_reg_unsigned(2, 1)),
'pmod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(3, 1),
from_reg_unsigned(3, 1)),
'sweep_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(4, 1),
from_reg_unsigned(4, 1)),
'reverse_sweep_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(5, 1),
from_reg_unsigned(5, 1)),
'mod_source_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(6, 3),
from_reg_unsigned(6, 3)),
'atten_compensate_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(9, 1),
from_reg_unsigned(9, 1)),
'trig_source_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(10, 3),
from_reg_unsigned(10, 3)),
'range_shift_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(13, 6),
from_reg_unsigned(13, 6)),
'sine_trigdly_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(19, 1),
from_reg_unsigned(19, 1)),
'phasedly_en_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(20, 1),
from_reg_unsigned(20, 1)),
'trig_sweep_mode_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(29, 1),
from_reg_unsigned(29, 1)),
'gate_mode_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(30, 1),
from_reg_unsigned(30, 1)),
'mod_depth_ch2':
((REG_BASE_MOD_1 + 1),
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'gate_thresh_ch2':
((REG_GATETHRESH_H_CH2, REG_GATETHRESH_L_CH2),
to_reg_signed(16, 48),
from_reg_signed(16, 48)),
# waveform controls
'enable_ch2':
(REG_BASE_WAV_1,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'waveform_type_ch2':
(REG_BASE_WAV_1,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'amplitude_ch2':
((REG_BASE_WAV_1 + 1),
to_reg_signed(0, 18,
xform=lambda obj, a: 2 * a / obj._dac_gains()[1]),
from_reg_signed(0, 18,
xform=lambda obj, a: 2 * a * obj._dac_gains()[1])),
'offset_ch2':
((REG_BASE_WAV_1 + 2),
to_reg_signed(0, 16,
xform=lambda obj, a: a / obj._dac_gains()[1]),
from_reg_signed(0, 16,
xform=lambda obj, a: a * obj._dac_gains()[1])),
't0_ch2':
(((REG_BASE_WAV_1 + 13), (REG_BASE_WAV_1 + 12)),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't1_ch2':
((REG_BASE_WAV_1 + 15, REG_BASE_WAV_1 + 14),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't2_ch2':
((REG_BASE_WAV_1 + 17, REG_BASE_WAV_1 + 16),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
'riserate_ch2':
((REG_BASE_WAV_1 + 19, REG_BASE_WAV_1 + 18),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'fallrate_ch2':
((REG_BASE_WAV_1 + 21, REG_BASE_WAV_1 + 20),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'enable_reset_ch2':
(REG_BASE_WAV_1 + 22,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'phase_dly_ch2':
(REG_BASE_WAV_1 + 23,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32))
}
_wavegen_mod_reg_handlers = {}
| mit | -7,956,985,272,048,114,000 | 38.26213 | 79 | 0.532666 | false |
DataONEorg/d1_python | test_utilities/src/d1_test/mock_api/tests/test_get_log_records.py | 1 | 1793 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import responses
import d1_common.types.exceptions
import d1_test.d1_test_case
import d1_test.mock_api.get_log_records
class TestMockLogRecords(d1_test.d1_test_case.D1TestCase):
@responses.activate
def test_1000(self, mn_client_v1_v2):
"""mock_api.getLogRecords() returns a DataONE Log PyXB object."""
d1_test.mock_api.get_log_records.add_callback(
d1_test.d1_test_case.MOCK_MN_BASE_URL
)
assert isinstance(
mn_client_v1_v2.getLogRecords(), mn_client_v1_v2.pyxb_binding.Log
)
@responses.activate
def test_1010(self, mn_client_v1_v2):
"""mock_api.getLogRecords(): Passing a trigger header triggers a
DataONEException."""
d1_test.mock_api.get_log_records.add_callback(
d1_test.d1_test_case.MOCK_MN_BASE_URL
)
with pytest.raises(d1_common.types.exceptions.NotFound):
mn_client_v1_v2.getLogRecords("test_pid", vendorSpecific={"trigger": "404"})
| apache-2.0 | 2,127,669,503,232,610,300 | 37.148936 | 88 | 0.707752 | false |
targueriano/neuroIFC | neuro-ifc_1.0.16_amd64/usr/local/neuro-ifc/src/Treinamento.py | 1 | 4811 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# author: Taylan Branco Meurer
import neurolab
class Treinamento(object):
def __init__(self, net, inputs, targets, epocas, show, goal, lr, lr_inc, lr_dec, mc, rr):
self.net = net
self.inputs = inputs
self.targets = targets
self.epocas = epocas
self.show = show
self.objetivo = goal
self.taxaAprendizado = lr
self.taxaIncremento = lr_inc
self.taxaDecremento = lr_dec
self.taxaImpulso = mc
self.taxaRegularizacao = rr
self.errors = list()
def treinar(self, regra):
if regra == "delta":
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
lr=self.taxaAprendizado.get_value()
)
return self.errors
elif regra == "gd":
self.net.trainf = neurolab.train.train_gd
print self.net.trainf
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
lr=self.taxaAprendizado.get_value()
)
return self.errors
elif regra == "gdm":
self.net.trainf = neurolab.train.train_gdm
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
lr=self.taxaAprendizado.get_value(),
mc=self.taxaImpulso.get_value(),
rr=self.taxaRegularizacao.get_value()
)
return self.errors
elif regra == "gda":
self.net.trainf = neurolab.train.train_gda
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
lr=self.taxaAprendizado.get_value(),
lr_inc=self.taxaIncremento.get_value(),
lr_dec=self.taxaDecremento.get_value(),
rr=self.taxaRegularizacao.get_value()
)
return self.errors
elif regra == "gdx":
self.net.trainf = neurolab.train.train_gdx
print self.net.trainf
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
lr=self.taxaAprendizado.get_value(),
lr_inc=self.taxaIncremento.get_value(),
lr_dec=self.taxaDecremento.get_value(),
mc=self.taxaImpulso.get_value(),
rr=self.taxaRegularizacao.get_value()
)
return self.errors
elif regra == "rprop":
self.net.trainf = neurolab.train.train_rprop
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
lr=self.taxaAprendizado.get_value(),
)
return self.errors
elif regra == "bfgs":
self.net.trainf = neurolab.train.train_bfgs
self.errors = self.net.train(self.inputs, self.targets,
epochs=self.epocas.get_value_as_int(),
show=self.show.get_value_as_int(),
goal=self.objetivo.get_value(),
rr=self.taxaRegularizacao.get_value()
)
return self.errors
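# Minimal usage sketch (hypothetical, not part of the original file): the
# constructor expects widget-like objects exposing get_value() and
# get_value_as_int(), e.g. GTK SpinButtons.
#
#   import neurolab
#   net = neurolab.net.newff([[0, 1], [0, 1]], [4, 1])
#   treino = Treinamento(net, inputs, targets, epocas, show, goal,
#                        lr, lr_inc, lr_dec, mc, rr)
#   erros = treino.treinar("gdx")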
| gpl-3.0 | -5,539,195,246,021,007,000 | 49.114583 | 93 | 0.437331 | false |
OscarPDR/projects_morelab | projects/views.py | 1 | 21846 | # coding: utf-8
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.contrib.auth.decorators import login_required
from django.conf import settings
from email.mime.image import MIMEImage
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from .models import Project, FundingAmount, AssignedEmployee, ConsortiumMember
from .forms import ProjectForm, ProjectSearchForm, FundingAmountFormSet, AssignedEmployeeFormSet, ConsortiumMemberFormSet
from employees.models import Employee
from organizations.models import Organization
from funding_programs.models import FundingProgram
# Create your views here.
PAGINATION_NUMBER = settings.PROJECTS_PAGINATION
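# PROJECTS_PAGINATION comes from the Django settings module; a hypothetical
# configuration would be, e.g.:
#   PROJECTS_PAGINATION = 10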
#########################
# View: project_index
#########################
def project_index(request):
projects = Project.objects.all().order_by('title')
if request.method == 'POST':
form = ProjectSearchForm(request.POST)
if form.is_valid():
query = form.cleaned_data['text']
query = slugify(query)
projs = []
for project in projects:
if query in slugify(project.title):
projs.append(project)
projects = projs
else:
form = ProjectSearchForm()
paginator = Paginator(projects, PAGINATION_NUMBER)
page = request.GET.get('page')
try:
projects = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
projects = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
projects = paginator.page(paginator.num_pages)
return render_to_response("projects/index.html", {
"projects": projects,
'form': form,
},
context_instance = RequestContext(request))
#########################
# View: add_project
#########################
@login_required
def add_project(request):
error_badges = []
project = None
current_year = None
end_year = None
project_form = ProjectForm(prefix='project_form')
funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset')
assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset')
consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset')
if request.POST:
project_form = ProjectForm(request.POST, prefix='project_form')
# start project_form validation
if project_form.is_valid():
project = project_form.save(commit=False)
funding_amount_formset = FundingAmountFormSet(request.POST, instance=project, prefix='funding_amount_formset')
assigned_employee_formset = AssignedEmployeeFormSet(request.POST, instance=project, prefix='assigned_employee_formset')
consortium_member_formset = ConsortiumMemberFormSet(request.POST, instance=project, prefix='consortium_member_formset')
cd_p = project_form.cleaned_data
current_year = cd_p['start_year']
end_year = cd_p['end_year']
project.project_type = cd_p['project_type'].encode('utf-8')
project.title = cd_p['title'].encode('utf-8')
project.description = cd_p['description'].encode('utf-8')
project.homepage = cd_p['homepage']
project.start_month = cd_p['start_month']
project.start_year = cd_p['start_year']
project.end_month = cd_p['end_month']
project.end_year = cd_p['end_year']
project.status = cd_p['status'].encode('utf-8')
project.project_code = cd_p['project_code'].encode('utf-8')
project.total_funds = cd_p['total_funds']
project.total_funds_deusto = cd_p['total_funds_deusto']
project.observations = cd_p['observations'].encode('utf-8')
project.funding_program = cd_p['funding_program']
project.project_leader = cd_p['project_leader']
try:
project.logo = request.FILES['project_form-logo']
except:
pass
project.save()
if 'project' in error_badges:
error_badges.remove('project')
if 'funding_program' in error_badges:
error_badges.remove('funding_program')
else:
            if request.POST.get('project_form-funding_program') is None:
                error_badges.append('funding_program')
            elif 'funding_program' in error_badges:
                error_badges.remove('funding_program')
error_badges.append('project')
# end project_form validation
# start funding_amount_formset validation
if funding_amount_formset.is_valid():
total_funding = 0
for funding_amount_form in funding_amount_formset:
if (len(funding_amount_form.cleaned_data) > 0) and (current_year <= end_year):
cd_fa = funding_amount_form.cleaned_data
funding_amount = FundingAmount(
project=project,
amount=cd_fa['amount'],
year=current_year,
)
total_funding += funding_amount.amount
funding_amount.save()
current_year += 1
else:
print "No fundings amounts to save"
project.total_funds_deusto = total_funding
project.save()
# end funding_amount_formset validation
# start assigned_employee_formset validation
if assigned_employee_formset.is_valid():
for assigned_employee_form in assigned_employee_formset:
if (len(assigned_employee_form.cleaned_data) > 0):
cd_ae = assigned_employee_form.cleaned_data
assigned_employee_form.project = project
assigned_employee_form.employee = cd_ae['employee']
assigned_employee_form.role = cd_ae['role']
assigned_employee_form.save()
else:
print "No assigned employees to save"
assigned_employee_formset.save()
if 'assigned_employees' in error_badges:
error_badges.remove('assigned_employees')
else:
error_badges.append('assigned_employees')
try:
project.delete()
except:
pass
# end assigned_employee_formset validation
# start consortium_member_formset validation
if consortium_member_formset.is_valid():
for consortium_member_form in consortium_member_formset:
if (len(consortium_member_form.cleaned_data) > 0):
cd_cm = consortium_member_form.cleaned_data
consortium_member = ConsortiumMember(
project=project,
organization=cd_cm['organization'],
)
consortium_member.save()
else:
print "No consortium members to save"
if 'consortium_members' in error_badges:
error_badges.remove('consortium_members')
else:
error_badges.append('consortium_members')
try:
project.delete()
except:
pass
        # end consortium_member_formset validation
try:
return HttpResponseRedirect(reverse('email_project', args = (project.slug,)))
except:
pass
else:
project_form = ProjectForm(prefix='project_form')
funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset')
assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset')
consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset')
return render_to_response("projects/add.html", {
'error_badges': error_badges,
'project_form': project_form,
'funding_amount_formset': funding_amount_formset,
'assigned_employee_formset': assigned_employee_formset,
'consortium_member_formset': consortium_member_formset,
},
context_instance = RequestContext(request))
#########################
# View: project_info
#########################
def project_info(request, slug):
project = get_object_or_404(Project, slug=slug)
funding_program = FundingProgram.objects.get(id=project.funding_program_id)
lprs = AssignedEmployee.objects.filter(project_id=project.id, role='Principal researcher').values('employee_id')
principal_researchers = Employee.objects.filter(id__in=lprs).order_by('name', 'first_surname', 'second_surname')
lpms = AssignedEmployee.objects.filter(project_id=project.id, role='Project manager').values('employee_id')
project_managers = Employee.objects.filter(id__in=lpms).order_by('name', 'first_surname', 'second_surname')
rs = AssignedEmployee.objects.filter(project_id=project.id, role='Researcher').values('employee_id')
researchers = Employee.objects.filter(id__in=rs).order_by('name', 'first_surname', 'second_surname')
funding_amounts = FundingAmount.objects.filter(project_id=project.id)
consortium_members = ConsortiumMember.objects.filter(project_id=project.id)
return render_to_response("projects/info.html", {
'project': project,
'funding_program': funding_program,
'principal_researchers': principal_researchers,
'project_managers': project_managers,
'researchers': researchers,
'funding_amounts': funding_amounts,
'consortium_members': consortium_members,
},
context_instance = RequestContext(request))
#########################
# View: edit_project
#########################
@login_required
def edit_project(request, slug):
error_badges = []
project = get_object_or_404(Project, slug=slug)
assigned_employees = AssignedEmployee.objects.filter(project_id=project.id)
consortium_members = ConsortiumMember.objects.filter(project_id=project.id)
funding_amounts = FundingAmount.objects.filter(project_id=project.id).order_by('year')
current_year = 3000
end_year = 2999
project_form = ProjectForm(prefix='project_form')
funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset')
assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset')
consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset')
if request.POST:
project_form = ProjectForm(request.POST, prefix='project_form')
# start project_form validation
if project_form.is_valid():
funding_amount_formset = FundingAmountFormSet(request.POST, instance=project, prefix='funding_amount_formset')
assigned_employee_formset = AssignedEmployeeFormSet(request.POST, instance=project, prefix='assigned_employee_formset')
consortium_member_formset = ConsortiumMemberFormSet(request.POST, instance=project, prefix='consortium_member_formset')
cd_p = project_form.cleaned_data
current_year = cd_p['start_year']
end_year = cd_p['end_year']
project.project_type = cd_p['project_type'].encode('utf-8')
project.title = cd_p['title'].encode('utf-8')
project.description = cd_p['description'].encode('utf-8')
project.homepage = cd_p['homepage']
project.start_month = cd_p['start_month']
project.start_year = cd_p['start_year']
project.end_month = cd_p['end_month']
project.end_year = cd_p['end_year']
project.status = cd_p['status'].encode('utf-8')
project.project_code = cd_p['project_code'].encode('utf-8')
project.total_funds = cd_p['total_funds']
project.total_funds_deusto = cd_p['total_funds_deusto']
project.observations = cd_p['observations'].encode('utf-8')
project.funding_program = cd_p['funding_program']
project.project_leader = cd_p['project_leader']
try:
project.logo = request.FILES['project_form-logo']
except:
pass
project.save()
if 'project' in error_badges:
error_badges.remove('project')
else:
error_badges.append('project')
# end project_form validation
# start funding_amount_formset validation
if funding_amount_formset.is_valid():
total_funding = 0
for funding_amount_form in funding_amount_formset:
if (len(funding_amount_form.cleaned_data) > 0) and (current_year <= end_year):
cd_fa = funding_amount_form.cleaned_data
funding_amount = FundingAmount.objects.get(project_id=project.id, year=current_year)
funding_amount.amount = cd_fa['amount']
total_funding += cd_fa['amount']
funding_amount.save()
current_year += 1
else:
print "No fundings amounts to save"
if 'funding_amount' in error_badges:
error_badges.remove('funding_amount')
project.total_funds_deusto = total_funding
project.save()
else:
error_badges.append('funding_amount')
# end funding_amount_formset validation
# start assigned_employee_formset validation
if assigned_employee_formset.is_valid():
for assigned_employee_form in assigned_employee_formset:
if (len(assigned_employee_form.cleaned_data) > 0):
cd_ae = assigned_employee_form.cleaned_data
assigned_employee_form.project = project
assigned_employee_form.employee = cd_ae['employee']
assigned_employee_form.role = cd_ae['role']
assigned_employee_form.save()
else:
print "No assigned employees to save"
assigned_employee_formset.save()
if 'assigned_employees' in error_badges:
error_badges.remove('assigned_employees')
else:
error_badges.append('assigned_employees')
# end assigned_employee_formset validation
# start consortium_member_formset validation
if consortium_member_formset.is_valid():
for consortium_member_form in consortium_member_formset:
if (len(consortium_member_form.cleaned_data) > 0):
                    cd_cm = consortium_member_form.cleaned_data
                    consortium_member = consortium_member_form.save(commit=False)
                    consortium_member.project = project
                    consortium_member.organization = cd_cm['organization']
                    consortium_member.save()
else:
print "No consortium members to save"
if 'consortium_members' in error_badges:
error_badges.remove('consortium_members')
else:
error_badges.append('consortium_members')
        # end consortium_member_formset validation
try:
return HttpResponseRedirect(reverse('email_project', args=(project.slug,)))
        except Exception:
            pass
else:
project_data = {
'project_type': project.project_type,
'title': project.title,
'slug': project.slug,
'description': project.description,
'homepage': project.homepage,
'start_month': project.start_month,
'start_year': project.start_year,
'end_month': project.end_month,
'end_year': project.end_year,
'status': project.status,
'project_code': project.project_code,
'total_funds': project.total_funds,
'total_funds_deusto': project.total_funds_deusto,
'observations': project.observations,
'funding_program': project.funding_program,
'project_leader': project.project_leader,
}
# FORMS
project_form = ProjectForm(
prefix='project_form',
initial=project_data,
)
funding_amount_formset = FundingAmountFormSet(
instance=Project(),
prefix='funding_amount_formset'
)
assigned_employee_formset = AssignedEmployeeFormSet(
instance=Project(),
prefix='assigned_employee_formset'
)
consortium_member_formset = ConsortiumMemberFormSet(
instance=Project(),
prefix='consortium_member_formset'
)
return render_to_response("projects/edit.html", {
'project': project,
'project_form': project_form,
'funding_amounts': funding_amounts,
'funding_amount_formset': funding_amount_formset,
'assigned_employees': assigned_employees,
'assigned_employee_formset': assigned_employee_formset,
'consortium_members': consortium_members,
'consortium_member_formset': consortium_member_formset,
},
context_instance = RequestContext(request))
#########################
# View: email_project
#########################
@login_required
def email_project(request, slug):
project = get_object_or_404(Project, slug=slug)
funding_program = FundingProgram.objects.get(id=project.funding_program_id)
lpms = AssignedEmployee.objects.filter(project_id=project.id, role='Project manager').values('employee_id')
project_managers = Employee.objects.filter(id__in=lpms).order_by('name', 'first_surname', 'second_surname')
lprs = AssignedEmployee.objects.filter(project_id=project.id, role='Principal researcher').values('employee_id')
principal_researchers = Employee.objects.filter(id__in=lprs).order_by('name', 'first_surname', 'second_surname')
project_leader = Organization.objects.get(id=project.project_leader_id)
consortium_members = []
for consortium_member in ConsortiumMember.objects.all().filter(project_id=project.id):
org = Organization.objects.get(id=consortium_member.organization.id)
consortium_members.append(org.name)
html_content = render_to_string('projects/project_email_template.html', {
'project': project,
'funding_program': funding_program,
'project_managers': project_managers,
'principal_researchers': principal_researchers,
'project_leader': project_leader,
'consortium_members': consortium_members,
})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(
'[NEW PROJECT]: ' + project.title, # subject
text_content, # message
settings.PROJECTS_SENDER_EMAIL, # from
settings.PROJECTS_RECEPTOR_EMAILS, # to
)
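    # The logos below are attached inline via a Content-ID header, so the HTML
    # body can reference them as <img src="cid:image">. Note that both
    # attachments reuse the same '<image>' ID, so most clients will render
    # only one of them.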
try:
image_file = open(project.logo.path, 'rb')
msg_image = MIMEImage(image_file.read())
image_file.close()
msg_image.add_header('Content-ID', '<image>', filename=project.logo.path)
msg.attach(msg_image)
    except (IOError, ValueError):
        pass
try:
image_file = open(funding_program.logo.path, 'rb')
msg_image = MIMEImage(image_file.read())
image_file.close()
        msg_image.add_header('Content-ID', '<image>', filename=funding_program.logo.path)
msg.attach(msg_image)
    except (IOError, ValueError):
        pass
msg.attach_alternative(html_content, "text/html")
msg.send()
return HttpResponseRedirect(reverse('project_index'))
#########################
# View: delete_project
#########################
@login_required
def delete_project(request, slug):
project = get_object_or_404(Project, slug=slug)
project.delete()
return HttpResponseRedirect(reverse('project_index'))
#########################
# View: delete_employee_from_project
#########################
@login_required
def delete_employee_from_project(request, employee_slug, project_slug):
project = get_object_or_404(Project, slug=project_slug)
employee = get_object_or_404(Employee, slug=employee_slug)
assigned_employee = get_object_or_404(AssignedEmployee, project_id=project.id, employee_id=employee.id)
assigned_employee.delete()
return HttpResponseRedirect(reverse('edit_project', args=(project.slug,)))
#########################
# View: delete_organization_from_project
#########################
@login_required
def delete_organization_from_project(request, organization_slug, project_slug):
project = get_object_or_404(Project, slug=project_slug)
organization = get_object_or_404(Organization, slug=organization_slug)
consortium_member = get_object_or_404(ConsortiumMember, project_id=project.id, organization_id=organization.id)
consortium_member.delete()
return HttpResponseRedirect(reverse('edit_project', args=(project.slug,)))
| gpl-3.0 | -7,591,338,815,435,761,000 | 34.637847 | 131 | 0.61018 | false |
chrisdjscott/Atoman | atoman/system/lattice.py | 1 | 15979 |
"""
Lattice module, with Lattice object and utilities
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import logging
import copy
import numpy as np
from .atoms import elements
from ..algebra import vectors
from . import _lattice
from . import _output
from six.moves import range
class Lattice(object):
"""
The Lattice object.
"""
def __init__(self):
self.NAtoms = 0
self.cellDims = np.array([100, 100, 100], np.float64)
self.specieList = []
self.specieCount = np.empty(0, np.int32)
self.specieMass = np.empty(0, np.float64)
self.specieCovalentRadius = np.empty(0, np.float64)
self.specieRGB = np.empty((0, 3), np.float64)
self.specieAtomicNumber = np.empty(0, np.int32)
self.minPos = np.zeros(3, np.float64)
self.maxPos = np.zeros(3, np.float64)
self.atomID = np.empty(0, np.int32)
self.specie = np.empty(0, np.int32)
self.pos = np.empty(0, np.float64)
self.charge = np.empty(0, np.float64)
self.scalarsDict = {}
self.scalarsFiles = {}
self.vectorsDict = {}
self.vectorsFiles = {}
self.attributes = {}
self.PBC = np.ones(3, np.int32)
def wrapAtoms(self):
"""
Wrap atoms that have left the periodic cell.
"""
return _lattice.wrapAtoms(self.NAtoms, self.pos, self.cellDims, self.PBC)
def atomSeparation(self, index1, index2, pbc):
"""
Calculate the separation between two atoms.
Parameters
----------
index1, index2 : integer
Indexes of the atoms you want to calculate the separation
between.
Returns
-------
atomSeparation : float
The separation between the two atoms. This function will
return 'None' if the indexes are out of range.
Raises
------
IndexError
If the specified indexes are too large.
"""
if index1 < self.NAtoms and index2 < self.NAtoms:
atomSeparation = vectors.separation(self.atomPos(index1), self.atomPos(index2), self.cellDims, pbc)
else:
raise IndexError("Atom index(es) out of range: (%d or %d) >= %d" % (index1, index2, self.NAtoms))
return atomSeparation
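    # Usage sketch (index values are illustrative): with periodic boundaries
    # enabled in all directions, lat.atomSeparation(0, 1, lat.PBC) gives the
    # minimum-image distance between atoms 0 and 1.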
def reset(self, NAtoms):
"""
Reinitialise arrays and counters
"""
self.NAtoms = NAtoms
self.atomID = np.empty(NAtoms, np.int32)
self.specie = np.empty(NAtoms, np.int32)
self.pos = np.empty(3 * NAtoms, np.float64)
self.charge = np.zeros(NAtoms, np.float64)
self.specieList = []
self.specieCount = np.empty(0, np.int32)
self.specieMass = np.empty(0, np.float64)
self.specieCovalentRadius = np.empty(0, np.float64)
self.specieRGB = np.empty((0, 3), np.float64)
self.specieAtomicNumber = np.empty(0, np.int32)
self.minPos = np.zeros(3, np.float64)
self.maxPos = np.zeros(3, np.float64)
self.cellDims = np.zeros(3, np.float64)
self.scalarsDict = {}
self.scalarsFiles = {}
self.vectorsDict = {}
self.vectorsFiles = {}
self.attributes = {}
self.PBC = np.ones(3, np.int32)
def calcTemperature(self, NMoving=None):
"""
Calculate temperature in K
"""
logger = logging.getLogger(__name__)
logger.debug("Calculating temperature of Lattice")
if "Kinetic energy" in self.scalarsDict:
logger.debug("Got 'Kinetic energy' array from scalarsDict")
ke = self.scalarsDict["Kinetic energy"]
elif "KE" in self.scalarsDict:
logger.debug("Got 'KE' array from scalarsDict")
ke = self.scalarsDict["KE"]
else:
logger.debug("No kinetic energy information stored on Lattice")
return None
if NMoving is None:
NMoving = self.NAtoms
keSum = np.sum(ke)
if keSum == 0:
temperature = 0.0
else:
boltzmann = 8.6173324e-5
temperature = 2.0 * keSum / (3.0 * boltzmann * NMoving)
return temperature
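    # Equipartition: T = 2 * sum(KE) / (3 * k_B * N), with k_B in eV/K. As an
    # illustrative example, sum(KE) = 0.0129 eV over N = 1 moving atom gives
    # T = 2 * 0.0129 / (3 * 8.6173324e-5) ~ 99.8 K.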
def density(self):
"""
Return density of lattice
"""
vol = self.volume()
if vol == 0:
return
return self.NAtoms / vol
def volume(self):
"""
Return volume of lattice
"""
return self.cellDims[0] * self.cellDims[1] * self.cellDims[2]
def addSpecie(self, sym, count=None):
"""
Add specie to specie list
"""
if sym in self.specieList:
if count is not None:
specInd = self.specieIndex(sym)
self.specieCount[specInd] = count
return
if count is None:
count = 0
self.specieList.append(sym)
self.specieCount = np.append(self.specieCount, np.int32(count))
self.specieMass = np.append(self.specieMass, elements.atomicMass(sym))
self.specieCovalentRadius = np.append(self.specieCovalentRadius, elements.covalentRadius(sym))
rgbtemp = elements.RGB(sym)
rgbnew = np.empty((1, 3), np.float64)
rgbnew[0][0] = rgbtemp[0]
rgbnew[0][1] = rgbtemp[1]
rgbnew[0][2] = rgbtemp[2]
self.specieRGB = np.append(self.specieRGB, rgbnew, axis=0)
def addAtom(self, sym, pos, charge, atomID=None, scalarVals={}, vectorVals={}):
"""
Add an atom to the lattice
"""
if sym not in self.specieList:
self.addSpecie(sym)
# atom ID
if atomID is None:
atomID = self.NAtoms
specInd = self.getSpecieIndex(sym)
self.specieCount[specInd] += 1
pos = np.asarray(pos, dtype=np.float64)
self.atomID = np.append(self.atomID, np.int32(atomID))
self.specie = np.append(self.specie, np.int32(specInd))
self.pos = np.append(self.pos, pos)
self.charge = np.append(self.charge, np.float64(charge))
        # track min/max position bounds (positions are not wrapped here)
for i in range(3):
self.minPos[i] = min(self.minPos[i], pos[i])
self.maxPos[i] = max(self.maxPos[i], pos[i])
self.NAtoms += 1
logger = logging.getLogger(__name__)
for scalarName in list(self.scalarsDict.keys()):
if scalarName in scalarVals:
newval = scalarVals[scalarName]
self.scalarsDict[scalarName] = np.append(self.scalarsDict[scalarName], np.float64(newval))
else:
self.scalarsDict.pop(scalarName)
logger.warning("Removing '%s' scalars from Lattice (addAtom)", scalarName)
for vectorName in list(self.vectorsDict.keys()):
newval = []
if vectorName in vectorVals:
newval = vectorVals[vectorName]
if len(newval) == 3:
self.vectorsDict[vectorName] = np.append(self.vectorsDict[vectorName], np.asarray(newval,
dtype=np.float64))
else:
self.vectorsDict.pop(vectorName)
logger.warning("Removing '%s' vectors from Lattice (addAtom)", vectorName)
def removeAtom(self, index):
"""
Remove an atom
"""
specInd = self.specie[index]
self.atomID = np.delete(self.atomID, index)
self.specie = np.delete(self.specie, index)
self.pos = np.delete(self.pos, [3 * index, 3 * index + 1, 3 * index + 2])
self.charge = np.delete(self.charge, index)
self.NAtoms -= 1
# modify specie list / counter if required
self.specieCount[specInd] -= 1
if self.specieCount[specInd] == 0:
self.removeSpecie(specInd)
for scalarName in list(self.scalarsDict.keys()):
self.scalarsDict[scalarName] = np.delete(self.scalarsDict[scalarName], index)
for vectorName in list(self.vectorsDict.keys()):
self.vectorsDict[vectorName] = np.delete(self.vectorsDict[vectorName],
[3 * index, 3 * index + 1, 3 * index + 2])
def removeSpecie(self, index):
"""
Remove a specie from the specie list.
"""
self.specieCount = np.delete(self.specieCount, index)
self.specieList.pop(index)
self.specieCovalentRadius = np.delete(self.specieCovalentRadius, index)
self.specieMass = np.delete(self.specieMass, index)
# self.specieMassAMU = np.delete(self.specieMassAMU, index)
self.specieRGB = np.delete(self.specieRGB, index, axis=0)
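        # re-index atoms: species after the removed one shift down by one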
for i in range(self.NAtoms):
if self.specie[i] > index:
self.specie[i] -= 1
def calcForce(self, forceConfig):
"""
Calculate force on lattice.
"""
pass
# if type(forceConfig) is not forces.ForceConfig:
# print "FORCE CONFIG WRONG TYPE"
# return 113
#
# return forces.calc_force(self, forceConfig)
def atomPos(self, index):
"""
        Return a view of the atom position within the pos array: [xpos, ypos, zpos].
"""
atomPos = None
if index < self.NAtoms:
atomPos = self.pos[3 * index:3 * index + 3]
return atomPos
def atomSym(self, index):
"""
Returns symbol of given atom.
"""
atomSym = None
if index < self.NAtoms:
atomSym = self.specieList[self.specie[index]]
return atomSym
def getSpecieIndex(self, sym):
"""
Return index of specie in specie list.
"""
if sym not in self.specieList:
raise ValueError("Species '%s' is not in the species list" % sym)
index = None
for i in range(len(self.specieList)):
if self.specieList[i] == sym:
index = i
break
return index
def setDims(self, dimsarray):
self.cellDims[0] = float(dimsarray[0])
self.cellDims[1] = float(dimsarray[1])
self.cellDims[2] = float(dimsarray[2])
def refreshElementProperties(self):
"""
Refresh element properties.
"""
for i, sym in enumerate(self.specieList):
self.specieMass[i] = elements.atomicMass(sym)
self.specieCovalentRadius[i] = elements.covalentRadius(sym)
self.specieAtomicNumber[i] = elements.atomicNumber(sym)
rgbtemp = elements.RGB(sym)
self.specieRGB[i][0] = rgbtemp[0]
self.specieRGB[i][1] = rgbtemp[1]
self.specieRGB[i][2] = rgbtemp[2]
def toLKMC(self, storeEnergies=False):
"""
Convert the Lattice to LKMC.Lattice.
Returns None if cannot load LKMC.
"""
# try to load LKMC
try:
from LKMC import Lattice
from LKMC import Atoms
except ImportError:
lkmcLattice = None
else:
lkmcLattice = Lattice.Lattice(0, storeEnergies=storeEnergies)
lkmcLattice.NAtoms = self.NAtoms
lkmcLattice.pos = self.pos
lkmcLattice.specie = self.specie
lkmcLattice.specieList = self.specieList
lkmcLattice.specieCount = self.specieCount
lkmcLattice.charge = self.charge
lkmcLattice.minPos = self.minPos
lkmcLattice.maxPos = self.maxPos
lkmcLattice.cellDims[0] = self.cellDims[0]
lkmcLattice.cellDims[4] = self.cellDims[1]
lkmcLattice.cellDims[8] = self.cellDims[2]
lkmcLattice.force = np.empty(3 * self.NAtoms, np.float64)
lkmcLattice.specieCovalentRadius = self.specieCovalentRadius
lkmcLattice.specieRGB = self.specieRGB
lkmcLattice.specieMass = np.empty(len(self.specieList), np.float64)
lkmcLattice.specieMassAMU = np.empty(len(self.specieList), np.float64)
for i, sym in enumerate(self.specieList):
lkmcLattice.specieMass[i] = Atoms.atomicMass(sym)
                lkmcLattice.specieMassAMU[i] = Atoms.atomicMassAMU(sym)
        return lkmcLattice
def writeLattice(self, filename, visibleAtoms=None):
"""
Write the Lattice to the given file. If visibleAtoms is passed only write those atoms.
"""
# full lattice or just visible atoms
if visibleAtoms is None:
writeFullLattice = 1
visibleAtoms = np.empty(0, np.int32)
else:
writeFullLattice = 0
# call C function to write Lattice
_output.writeLattice(filename, visibleAtoms, self.cellDims, self.specieList, self.specie, self.pos, self.charge,
writeFullLattice)
def clone(self, lattice):
"""
Copy given lattice into this instance
"""
if lattice.NAtoms != self.NAtoms:
self.reset(lattice.NAtoms)
NAtoms = lattice.NAtoms
# copy dims
self.cellDims[0] = lattice.cellDims[0]
self.cellDims[1] = lattice.cellDims[1]
self.cellDims[2] = lattice.cellDims[2]
# specie stuff
NSpecies = len(lattice.specieList)
self.specieList = []
self.specieCount = np.zeros(NSpecies, np.int32)
self.specieMass = np.empty(NSpecies, np.float64)
self.specieCovalentRadius = np.empty(NSpecies, np.float64)
self.specieAtomicNumber = np.zeros(NSpecies, np.int32)
self.specieRGB = np.empty((NSpecies, 3), np.float64)
for i in range(NSpecies):
self.specieList.append(lattice.specieList[i])
self.specieCount[i] = lattice.specieCount[i]
self.specieMass[i] = lattice.specieMass[i]
self.specieCovalentRadius[i] = lattice.specieCovalentRadius[i]
self.specieAtomicNumber[i] = lattice.specieAtomicNumber[i]
for j in range(3):
self.specieRGB[i][j] = lattice.specieRGB[i][j]
# atom data
self.atomID = np.empty(NAtoms, np.int32)
self.specie = np.empty(NAtoms, np.int32)
self.pos = np.empty(3 * NAtoms, np.float64)
self.charge = np.empty(NAtoms, np.float64)
for i in range(NAtoms):
self.atomID[i] = lattice.atomID[i]
self.specie[i] = lattice.specie[i]
self.charge[i] = lattice.charge[i]
for j in range(3):
self.pos[3 * i + j] = lattice.pos[3 * i + j]
self.minPos[0] = lattice.minPos[0]
self.minPos[1] = lattice.minPos[1]
self.minPos[2] = lattice.minPos[2]
self.maxPos[0] = lattice.maxPos[0]
self.maxPos[1] = lattice.maxPos[1]
self.maxPos[2] = lattice.maxPos[2]
self.scalarsDict = copy.deepcopy(lattice.scalarsDict)
self.vectorsDict = copy.deepcopy(lattice.vectorsDict)
self.scalarsFiles = copy.deepcopy(lattice.scalarsFiles)
self.vectorsFiles = copy.deepcopy(lattice.vectorsFiles)
self.attributes = copy.deepcopy(lattice.attributes)
self.PBC = copy.deepcopy(lattice.PBC)
| mit | -2,791,847,265,881,904,600 | 32.569328 | 120 | 0.553476 | false |
OpenNetworkingFoundation/PIF-Open-Intermediate-Representation | pif_ir/bir/tests/test_parser.py | 1 | 2151 | #!/usr/bin/env python
import logging
import struct
from pif_ir.bir.objects.bir_struct import BIRStruct
from pif_ir.bir.objects.packet_instance import PacketInstance
from pif_ir.bir.utils.bir_parser import BIRParser
from test_common import yaml_eth_struct_dict
def fail(case):
logging.error("Test Case {}: Failed".format(case))
exit(1)
logging.basicConfig(level=logging.DEBUG)
logging.info("RUNNING TEST: %s" % __file__)
eth_data = struct.pack("BBBBBB", 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA)
eth_data += struct.pack("BBBBBB", 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB)
eth_data += struct.pack("BB", 0x08, 0x00)
ipv4_data = struct.pack("BBBB", 0x40, 0xFF, 0x00, 0x05)
ipv4_data += struct.pack("BBBB", 0x11, 0x11, 0x11, 0x11)
ipv4_data += struct.pack("BBBB", 0xFE, 0x11, 0x00, 0x00)
ipv4_data += struct.pack("BBBB", 0xFF, 0xFF, 0xFF, 0xFF)
ipv4_data += struct.pack("BBBB", 0xEE, 0xEE, 0xEE, 0xEE)
udp_data = struct.pack("BB", 0x22, 0x22)
udp_data += struct.pack("BB", 0x33, 0x33)
udp_data += struct.pack("BB", 0x44, 0x44)
udp_data += struct.pack("BB", 0x55, 0x55)
pkt = bytearray(eth_data + ipv4_data + udp_data)
packet = PacketInstance(pkt, {}, None)
header = BIRStruct('eth', yaml_eth_struct_dict)
parser = BIRParser()
if parser.eval_cond("0x800 == 0x8800", header, packet) != False: fail(0)
if parser.eval_cond("0x0800 == 0x800", header, packet) != True: fail(1)
if parser.eval_cond("0 == 0x0", header, packet) != True: fail(2)
if parser.eval_cond("1 == 0x1", header, packet) != True: fail(3)
if parser.eval_cond("(10 > 11--)", header, packet) != False: fail(4)
if parser.eval_cond("10 >= 11--", header, packet) != True: fail(5)
if parser.eval_inst("(~(0xA + 10) & 0xFF)", header, packet) != 235: fail(6)
if parser.eval_inst("10++ + 11", header, packet) != 22: fail(7)
if parser.eval_cond("(type_ == 0x0800)", header, packet) != True: fail(8)
if parser.eval_cond("type_ != 0x0800", header, packet) != False: fail(9)
if parser.eval_inst("(type_ + 1) & 0xFF00", header, packet) != 0x0800:
fail(10)
if parser.eval_inst("type_++ & 0xFF00", header, packet) != 0x0800: fail(11)
| apache-2.0 | -7,633,070,394,601,520,000 | 38.833333 | 76 | 0.65086 | false |
dragondjf/PyQt5 | python2.7/PyQt5/uic/port_v2/load_plugin.py | 1 | 1519 | #############################################################################
##
## Copyright (c) 2014 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from ..exceptions import WidgetPluginError
def load_plugin(plugin, plugin_globals, plugin_locals):
""" Load the given plugin (which is an open file). Return True if the
plugin was loaded, or False if it wanted to be ignored. Raise an exception
if there was an error.
"""
try:
exec(plugin.read(), plugin_globals, plugin_locals)
except ImportError:
return False
except Exception, e:
raise WidgetPluginError("%s: %s" % (e.__class__, str(e)))
return True
| gpl-2.0 | 2,753,135,764,658,247,000 | 37.948718 | 79 | 0.642528 | false |
FedoraScientific/salome-smesh | doc/salome/examples/creating_meshes_ex03.py | 1 | 2218 | # Change priority of submeshes in Mesh
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
Box_1 = geompy.MakeBoxDXDYDZ(200, 200, 200)
[Face_1,Face_2,Face_3,Face_4,Face_5,Face_6] = geompy.SubShapeAllSorted(Box_1, geompy.ShapeType["FACE"])
# create Mesh object on Box shape
Mesh_1 = smesh.Mesh(Box_1)
# assign mesh algorithms
Regular_1D = Mesh_1.Segment()
Nb_Segments_1 = Regular_1D.NumberOfSegments(20)
Nb_Segments_1.SetDistrType( 0 )
MEFISTO_2D = Mesh_1.Triangle()
Max_Element_Area_1 = MEFISTO_2D.MaxElementArea(1200)
Tetrahedron = Mesh_1.Tetrahedron()
Max_Element_Volume_1 = Tetrahedron.MaxElementVolume(40000)
# create submesh and assign algorithms on Face_1
Regular_1D_1 = Mesh_1.Segment(geom=Face_1)
Nb_Segments_2 = Regular_1D_1.NumberOfSegments(4)
Nb_Segments_2.SetDistrType( 0 )
MEFISTO_2D_1 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_1)
Length_From_Edges_2D = MEFISTO_2D_1.LengthFromEdges()
SubMesh_1 = MEFISTO_2D_1.GetSubMesh()
# create submesh and assign algorithms on Face_2
Regular_1D_2 = Mesh_1.Segment(geom=Face_2)
Nb_Segments_3 = Regular_1D_2.NumberOfSegments(8)
Nb_Segments_3.SetDistrType( 0 )
MEFISTO_2D_2 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_2)
Length_From_Edges_2D_1 = MEFISTO_2D_2.LengthFromEdges()
SubMesh_2 = MEFISTO_2D_2.GetSubMesh()
# create submesh and assign algorithms on Face_3
Regular_1D_3 = Mesh_1.Segment(geom=Face_3)
Nb_Segments_4 = Regular_1D_3.NumberOfSegments(12)
Nb_Segments_4.SetDistrType( 0 )
MEFISTO_2D_3 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_3)
Length_From_Edges_2D_2 = MEFISTO_2D_3.LengthFromEdges()
SubMesh_3 = MEFISTO_2D_3.GetSubMesh()
# check existing submesh priority order
[ [ SubMesh_1, SubMesh_3, SubMesh_2 ] ] = Mesh_1.GetMeshOrder()
# set new submesh order
isDone = Mesh_1.SetMeshOrder( [ [ SubMesh_1, SubMesh_2, SubMesh_3 ] ])
# compute mesh
isDone = Mesh_1.Compute()
# clear mesh result and compute with other submesh order
Mesh_1.Clear()
isDone = Mesh_1.SetMeshOrder( [ [ SubMesh_2, SubMesh_1, SubMesh_3 ] ])
isDone = Mesh_1.Compute()
| lgpl-2.1 | -7,202,010,096,023,731,000 | 34.774194 | 103 | 0.756087 | false |
Scriptkiddi/Ankipubsub-Client | pubsub/models/Template.py | 1 | 2259 | __author__ = 'fritz'
import json
from pubsub.database.models import db, AnkiPubSubTemplate
from copy import deepcopy
class Template():
def __init__(self,
name,
answer_format,
question_format,
deck,
ord,
back_answer_format,
back_question_format):
try:
db.connect()
expression = (AnkiPubSubTemplate.answer_format == answer_format) & \
(AnkiPubSubTemplate.name == name) & \
(AnkiPubSubTemplate.question_format == question_format) & \
(AnkiPubSubTemplate.ord == ord) & \
(AnkiPubSubTemplate.back_answer_format == back_answer_format) & \
(AnkiPubSubTemplate.back_question_format == back_question_format)
template = AnkiPubSubTemplate.select().where(expression).get()
self.remote_id = template.remote_id
except AnkiPubSubTemplate.DoesNotExist:
self.remote_id = None
finally:
db.close()
self.answer_format = answer_format
self.name = name
self.question_format = question_format
# self.deck = deck
self.ord = int(ord)
self.back_answer_format = back_answer_format
self.back_question_format = back_question_format
def json(self):
dic = deepcopy(self.__dict__)
dic.update({"remote_id": str(self.remote_id)})
return json.dumps(dic)
def save(self):
db.connect()
template, created = AnkiPubSubTemplate.get_or_create(remote_id=self.remote_id,
answer_format=self.answer_format,
name=self.name,
question_format=self.question_format,
ord=self.ord,
back_answer_format=self.back_answer_format,
back_question_format=self.back_question_format)
db.close()
| gpl-3.0 | -4,366,288,978,875,604,000 | 42.442308 | 108 | 0.486056 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_load_balancer_backend_address_pools_operations.py | 1 | 8800 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
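    # Usage sketch (client and resource names are illustrative, not part of
    # this module):
    #   async for pool in network_client.load_balancer_backend_address_pools.list(
    #           'my-resource-group', 'my-load-balancer'):
    #       print(pool.name)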
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
| mit | 8,278,161,777,444,019,000 | 48.438202 | 218 | 0.655114 | false |
google/grr | grr/server/grr_response_server/flow_utils_test.py | 1 | 2306 | #!/usr/bin/env python
"""Tests for flow utils classes."""
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_server import flow_utils
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class TestInterpolatePath(flow_test_lib.FlowTestsBaseclass):
"""Tests for path interpolation."""
def _MakeKnowledgeBase(self):
kb = rdf_client.KnowledgeBase()
kb.users.Append(
rdf_client.User(
username="test",
userdomain="TESTDOMAIN",
full_name="test user",
homedir="c:\\Users\\test",
last_logon=rdfvalue.RDFDatetime.FromHumanReadable("2012-11-10")))
kb.users.Append(
rdf_client.User(
username="test2",
userdomain="TESTDOMAIN",
full_name="test user 2",
homedir="c:\\Users\\test2",
last_logon=100))
return kb
def testBasicInterpolation(self):
"""Test Basic."""
kb = self._MakeKnowledgeBase()
path = "{systemroot}\\test"
new_path = flow_utils.InterpolatePath(path, kb, users=None)
self.assertEqual(new_path.lower(), "c:\\windows\\test")
new_path = flow_utils.InterpolatePath("{does_not_exist}", kb)
self.assertEqual(new_path, "")
def testUserInterpolation(self):
"""User interpolation returns a list of paths."""
kb = self._MakeKnowledgeBase()
path = "{homedir}\\dir"
new_path = flow_utils.InterpolatePath(path, kb, users=["test"])
self.assertEqual(new_path[0].lower(), "c:\\users\\test\\dir")
path = "{systemroot}\\{last_logon}\\dir"
new_path = flow_utils.InterpolatePath(path, kb, users=["test"])
self.assertEqual(new_path[0].lower(),
"c:\\windows\\2012-11-10 00:00:00\\dir")
path = "{homedir}\\a"
new_path = flow_utils.InterpolatePath(path, kb, users=["test", "test2"])
self.assertLen(new_path, 2)
self.assertEqual(new_path[0].lower(), "c:\\users\\test\\a")
self.assertEqual(new_path[1].lower(), "c:\\users\\test2\\a")
new_path = flow_utils.InterpolatePath(
"{does_not_exist}", kb, users=["test"])
self.assertEqual(new_path, [])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 2,136,674,623,070,125,000 | 31.027778 | 77 | 0.630095 | false |
texastribune/tt_disposal_wells | example/example/settings.py | 1 | 5210 | # Django settings for example project.
# Setup a ``project_dir`` function
import os
from dj_settings_helpers import create_project_dir
project_dir = create_project_dir(os.path.join(os.path.dirname(__file__),
'..', '..'))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASE_URL = os.environ.get('DATABASE_URL',
'sqlite:///%s' % project_dir('project.db'))
DATABASES = {'default': dj_database_url.parse(DATABASE_URL), }
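# The URL form follows dj-database-url conventions, e.g. (illustrative)
# DATABASE_URL=postgres://user:password@localhost:5432/dbname; without it the
# SQLite file project.db inside the project directory is used.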
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'drlfrd0yo@x^b7z(bnae5q=3bo(od!#5nsm0%l@-^9y=3l@9cu'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
project_dir('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# Custom application being tested
'tt_disposal_wells',
# The app with all of the tests and any example customizations
'example_usage',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| apache-2.0 | -4,731,053,268,762,224,000 | 31.974684 | 88 | 0.702495 | false |
frhumanes/consulting | web/deploy/wtdeploy/wtdeploy/modules/fab_apache.py | 1 | 1204 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# author: javi santana
from fabric.api import *
from fabric.contrib.files import upload_template
from fabric.contrib.files import exists
def install(conf_folder):
sudo("apt-get -y install apache2 libapache2-mod-wsgi")
#sudo("rm -rf /etc/apache2/site-enabled/*")
def copy_conf_files(conf_folder, deploy_folder):
with cd(deploy_folder):
run('mkdir -p apache2')
#put('%s/apache/virtualhost' % conf_folder, 'apache2')
if env.is_mobile:
print "mobile template"
upload_template('%s/apache/virtualhost_mobile' % conf_folder, 'apache2/virtualhost', context=env)
else:
upload_template('%s/apache/virtualhost' % conf_folder, 'apache2', context=env)
sudo('cp apache2/virtualhost /etc/apache2/sites-available/%(host)s' % env)
sudo('chmod a+r /etc/apache2/sites-available/%(host)s' % env)
if not exists('/etc/apache2/sites-enabled/00-%(host)s' % env):
sudo('ln -s /etc/apache2/sites-available/%(host)s /etc/apache2/sites-enabled/00-%(host)s' % env)
def start():
sudo("/etc/init.d/apache2 start")
def stop():
sudo("/etc/init.d/apache2 stop")
def restart():
sudo("/etc/init.d/apache2 restart")
| apache-2.0 | 1,911,970,735,717,133,000 | 32.444444 | 105 | 0.677741 | false |
eyaler/tensorpack | tensorpack/utils/palette.py | 1 | 1914 | # -*- coding: utf-8 -*-
# File: palette.py
import numpy as np
__all__ = ['PALETTE_RGB']
# copied from https://stackoverflow.com/questions/2328339/how-to-generate-n-different-colors-for-any-natural-number-n
PALETTE_HEX = [
"#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
"#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
"#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
"#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
"#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C",
"#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
"#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51",
"#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94",
"#7ED379", "#012C58"]
def _parse_hex_color(s):
r = int(s[1:3], 16)
g = int(s[3:5], 16)
b = int(s[5:7], 16)
return (r, g, b)
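# Worked example: _parse_hex_color("#FF34FF") == (255, 52, 255)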
PALETTE_RGB = np.asarray(
list(map(_parse_hex_color, PALETTE_HEX)),
dtype='int32')
| apache-2.0 | -6,845,373,950,139,188,000 | 49.368421 | 117 | 0.541797 | false |
google-research/google-research | tf3d/instance_segmentation/model_utils.py | 1 | 4801 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instance segmentation model utility functions."""
import tensorflow as tf
from tf3d import standard_fields
from tf3d.instance_segmentation import postprocessor
from tf3d.utils import mask_utils
from tf3d.utils import voxel_utils
def mask_valid_voxels(inputs, outputs):
"""Mask the voxels that are valid and in image view."""
valid_mask = mask_utils.num_voxels_mask(inputs=inputs)
mask_utils.apply_mask_to_output_voxel_tensors(
outputs=outputs, valid_mask=valid_mask)
def mask_valid_points(inputs, outputs):
"""Mask the voxels that are valid and in image view."""
valid_mask = mask_utils.num_points_mask(inputs=inputs)
mask_utils.apply_mask_to_output_point_tensors(
outputs=outputs, valid_mask=valid_mask)
def postprocess(inputs, outputs, is_training, num_furthest_voxel_samples,
sampler_score_vs_distance_coef, embedding_similarity_strategy,
embedding_similarity_threshold, score_threshold, apply_nms,
nms_iou_threshold):
"""Post-processor function.
Args:
inputs: A dictionary containing input tensors.
outputs: A dictionary containing predicted tensors.
is_training: If during training stage or not.
num_furthest_voxel_samples: Number of voxels to be sampled using furthest
voxel sampling in the postprocessor.
sampler_score_vs_distance_coef: The coefficient that balances the weight
between furthest voxel sampling and highest score sampling in the
postprocessor.
embedding_similarity_strategy: Embedding similarity strategy.
embedding_similarity_threshold: Similarity threshold used to decide if two
point embedding vectors belong to the same instance.
score_threshold: Instance score threshold used throughout postprocessing.
apply_nms: If True, it will apply non-maximum suppression to the final
predictions.
nms_iou_threshold: Intersection over union threshold used in non-maximum
suppression.
"""
if not is_training:
# Squeeze output voxel properties.
for key in standard_fields.get_output_voxel_fields():
if key in outputs and outputs[key] is not None:
outputs[key] = tf.squeeze(outputs[key], axis=0)
# Squeeze output point properties.
for key in standard_fields.get_output_point_fields():
if key in outputs and outputs[key] is not None:
outputs[key] = tf.squeeze(outputs[key], axis=0)
# Squeeze output object properties.
for key in standard_fields.get_output_object_fields():
if key in outputs and outputs[key] is not None:
outputs[key] = tf.squeeze(outputs[key], axis=0)
# Mask the valid voxels
mask_valid_voxels(inputs=inputs, outputs=outputs)
# Mask the valid points
mask_valid_points(inputs=inputs, outputs=outputs)
# NMS
postprocessor.postprocess(
outputs=outputs,
num_furthest_voxel_samples=num_furthest_voxel_samples,
sampler_score_vs_distance_coef=sampler_score_vs_distance_coef,
embedding_similarity_strategy=embedding_similarity_strategy,
embedding_similarity_threshold=embedding_similarity_threshold,
apply_nms=apply_nms,
nms_score_threshold=score_threshold,
nms_iou_threshold=nms_iou_threshold)
# Add instance segment point masks at eval time
if standard_fields.InputDataFields.points_to_voxel_mapping in inputs:
instance_segments_point_mask = (
voxel_utils.sparse_voxel_grid_to_pointcloud(
voxel_features=tf.expand_dims(
tf.transpose(outputs[standard_fields.DetectionResultFields
.instance_segments_voxel_mask]),
axis=0),
segment_ids=inputs[
standard_fields.InputDataFields.points_to_voxel_mapping],
num_valid_voxels=inputs[
standard_fields.InputDataFields.num_valid_voxels],
num_valid_points=inputs[
standard_fields.InputDataFields.num_valid_points]))
outputs[standard_fields.DetectionResultFields
.instance_segments_point_mask] = tf.transpose(
tf.squeeze(instance_segments_point_mask, axis=0))
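# Usage sketch (flag and threshold values are illustrative, not defaults taken
# from this codebase):
#   postprocess(inputs=inputs, outputs=outputs, is_training=False,
#               num_furthest_voxel_samples=200,
#               sampler_score_vs_distance_coef=0.5,
#               embedding_similarity_strategy='distance',
#               embedding_similarity_threshold=0.5,
#               score_threshold=0.1, apply_nms=True, nms_iou_threshold=0.3)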
| apache-2.0 | 5,561,826,398,023,108,000 | 41.486726 | 78 | 0.705478 | false |
simbtrix/mxnix | project/crossSectionView/rectangleView.py | 1 | 4807 | '''
Created on 14.03.2016
@author: mkennert
'''
from kivy.properties import NumericProperty
from kivy.uix.gridlayout import GridLayout
from crossSectionView.aview import AView
from ownComponents.design import Design
from ownComponents.ownGraph import OwnGraph
from plot.dashedLine import DashedLine
from plot.line import LinePlot
class CSRectangleView(GridLayout, AView):
'''
the class CSRectangleView was developed to show the rectangle-shape of
the cross-section
'''
# height of the cross-section
ch = NumericProperty(0.5)
# width of the cross-section
cw = NumericProperty(0.25)
'''
constructor
'''
def __init__(self, **kwargs):
super(CSRectangleView, self).__init__(**kwargs)
self.cols = 1
'''
    the method create_graph creates the graph where you can add
the layers. the method should be called only once at the beginning
'''
def create_graph(self):
self.epsX = self.cw / 2e1
self.graph = OwnGraph(xlabel=self.xlabelStr, ylabel=self.ylabelStr,
x_ticks_major=0.05, y_ticks_major=0.05,
y_grid_label=True, x_grid_label=True, padding=5,
xmin=0, xmax=self.cw + 2 * self.epsX, ymin=0, ymax=1.04 * self.ch)
self.add_widget(self.graph)
self.p = LinePlot(color=[0, 0, 0])
self.p.points = self.draw_rectangle()
self.graph.add_plot(self.p)
'''
    the method add_layer was developed to add a new layer to the cross-section
'''
def add_layer(self, y, csArea, material):
#if the y-coordinate is out of range
if y >= self.ch or y <= 0:
self.csShape.show_error_message()
else:
line = DashedLine(color=[1, 0, 0, 1], points=[(self.epsX, y), (self.cw + self.epsX, y)])
self.create_layer(y, csArea, self.cw, material, line)
'''
edit a layer which is already exist
'''
def edit_layer(self, y, material, csArea):
#if the y-coordinate is out of range
if y >= self.ch or y <= 0:
self.csShape.show_error_message()
else:
self.focusLayer.line.points = [(self.epsX, y), (self.cw + self.epsX, y)]
self.update_layer_properties(y, material, csArea)
'''
add a bar to the cross-section
'''
def add_bar(self, x, y, csArea, material):
epsY = self.ch / Design.barProcent
epsX = self.cw / Design.barProcent
#if the coordinates are out of range
if y + epsY > self.ch or y - epsY < 0 or x + epsX > self.cw + self.epsX or x - epsX < self.epsX:
self.csShape.show_error_message()
else:
self.create_bar(x, y, csArea, material, epsX, epsY)
'''
edit a bar which is already exist
'''
def edit_bar(self, x, y, csArea, material):
epsY = self.ch / Design.barProcent
epsX = self.cw / Design.barProcent
#if the coordinates are out of range
if y + epsY > self.ch or y - epsY < 0 or x + epsX > self.cw + self.epsX or x - epsX < self.epsX:
self.csShape.show_error_message()
else:
self.update_bar_properties(x, y, csArea, material, epsX, epsY)
'''
the method update_height change the height of the cross section shape
and update the layers
'''
def update_height(self, value):
self.ch = value
self.graph.y_ticks_major = value / 5.
self.graph.ymax = self.ch * 1.04
self.p.points = self.draw_rectangle()
self.delete_reinforcement()
'''
the method update_width change the width of the cross section shape
and update the layers
'''
def update_width(self, value):
self.cw = value
self.epsX = self.cw / 2e1
self.graph.x_ticks_major = value / 5.
self.graph.xmax = self.cw + 2 * self.epsX
self.p.points = self.draw_rectangle()
self.delete_reinforcement()
'''
give the user the possibility to focus a layer or a bar
'''
def on_touch_down(self, touch):
x0, y0 = self.graph._plot_area.pos # position of the lowerleft
gw, gh = self.graph._plot_area.size # graph size
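        # map the touch position from pixel space into graph data coordinates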
x = (touch.x - x0) / gw * (self.cw + 2 * self.epsX)
y = (touch.y - y0) / gh * self.graph.ymax
self.touch_reaction(x, y, self.cw, self.ch)
'''
draw the rectangle
'''
def draw_rectangle(self):
return [(self.epsX, 0), (self.epsX, self.ch), (self.cw + self.epsX, self.ch), (self.cw + self.epsX, 0), (self.epsX, 0)]
| gpl-3.0 | 8,672,656,484,019,362,000 | 31.158621 | 127 | 0.564801 | false |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/phonenumbers/shortnumberinfo.py | 1 | 15445 | """Methods for getting information about short phone numbers,
such as short codes and emergency numbers.
Note most commercial short numbers are not handled here, but by phonenumberutil.py
"""
# Based on original Java code:
# java/src/com/google/i18n/phonenumbers/ShortNumberInfo.java
# Copyright (C) 2013 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .re_util import fullmatch
from .util import U_EMPTY_STRING
from .phonemetadata import PhoneMetadata
from .phonenumberutil import _extract_possible_number, _PLUS_CHARS_PATTERN
from .phonenumberutil import normalize_digits_only, region_codes_for_country_code
from .phonenumberutil import national_significant_number
from .phonenumberutil import _is_number_possible_for_desc, _is_number_matching_desc
# In these countries, if extra digits are added to an emergency number, it no longer connects
# to the emergency service.
_REGIONS_WHERE_EMERGENCY_NUMBERS_MUST_BE_EXACT = set(["BR", "CL", "NI"])
class ShortNumberCost(object):
"""Cost categories of short numbers."""
TOLL_FREE = 0
STANDARD_RATE = 1
PREMIUM_RATE = 2
UNKNOWN_COST = 3
def is_possible_short_number_for_region(short_number, region_dialing_from):
"""Check whether a short number is a possible number when dialled from a
region, given the number in the form of a string, and the region where the
number is dialed from. This provides a more lenient check than
is_valid_short_number_for_region.
Arguments:
short_number -- the short number to check as a string
region_dialing_from -- the region from which the number is dialed
Return whether the number is a possible short number.
"""
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
if metadata is None:
return False
general_desc = metadata.general_desc
return _is_number_possible_for_desc(short_number, general_desc)
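# Usage sketch (number/region values are illustrative):
#   is_possible_short_number_for_region("123456", "FR")
# returns a bool; only the region's general short-number pattern is checked,
# not the stricter patterns used by is_valid_short_number_for_region.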
def is_possible_short_number(numobj):
"""Check whether a short number is a possible number.
If a country calling code is shared by multiple regions, this returns True
if it's possible in any of them. This provides a more lenient check than
is_valid_short_number.
Arguments:
numobj -- the short number to check
Return whether the number is a possible short number.
"""
region_codes = region_codes_for_country_code(numobj.country_code)
short_number = national_significant_number(numobj)
for region in region_codes:
metadata = PhoneMetadata.short_metadata_for_region(region)
if _is_number_possible_for_desc(short_number, metadata.general_desc):
return True
return False
def is_valid_short_number_for_region(short_number, region_dialing_from):
"""Tests whether a short number matches a valid pattern in a region.
Note that this doesn't verify the number is actually in use, which is
impossible to tell by just looking at the number itself.
Arguments:
short_number -- the short number to check as a string
region_dialing_from -- the region from which the number is dialed
Return whether the short number matches a valid pattern
"""
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
if metadata is None:
return False
general_desc = metadata.general_desc
if (general_desc.national_number_pattern is None or
not _is_number_matching_desc(short_number, general_desc)):
return False
short_number_desc = metadata.short_code
if short_number_desc.national_number_pattern is None: # pragma no cover
return False
return _is_number_matching_desc(short_number, short_number_desc)
def is_valid_short_number(numobj):
"""Tests whether a short number matches a valid pattern.
If a country calling code is shared by multiple regions, this returns True
if it's valid in any of them. Note that this doesn't verify the number is
actually in use, which is impossible to tell by just looking at the number
itself. See is_valid_short_number_for_region for details.
Arguments:
numobj - the short number for which we want to test the validity
Return whether the short number matches a valid pattern
"""
region_codes = region_codes_for_country_code(numobj.country_code)
short_number = national_significant_number(numobj)
region_code = _region_code_for_short_number_from_region_list(numobj, region_codes)
if len(region_codes) > 1 and region_code is not None:
# If a matching region had been found for the phone number from among two or more regions,
# then we have already implicitly verified its validity for that region.
return True
return is_valid_short_number_for_region(short_number, region_code)
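# A minimal usage sketch (illustrative values only; assumes a PhoneNumber
# object parsed elsewhere with phonenumbers.parse):
#
#   numobj = phonenumbers.parse("123456", "FR")
#   is_possible_short_number(numobj)   # lenient, length-based check
#   is_valid_short_number(numobj)      # stricter, pattern-based check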
def expected_cost_for_region(short_number, region_dialing_from):
"""Gets the expected cost category of a short number when dialled from a
region (however, nothing is implied about its validity). If it is
important that the number is valid, then its validity must first be
checked using is_valid_short_number_for_region. Note that emergency
numbers are always considered toll-free.
Example usage:
short_number = "110"
region_code = "FR"
if phonenumbers.is_valid_short_number_for_region(short_number, region_code):
    cost = phonenumbers.expected_cost_for_region(short_number, region_code)  # ShortNumberCost
# Do something with the cost information here.
Arguments:
short_number -- the short number for which we want to know the expected cost category
region_dialing_from -- the region from which the number is dialed
Return the expected cost category for that region of the short
number. Returns UNKNOWN_COST if the number does not match a cost
category. Note that an invalid number may match any cost category.
"""
# Note that region_dialing_from may be None, in which case metadata will also be None.
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
if metadata is None:
return ShortNumberCost.UNKNOWN_COST
# The cost categories are tested in order of decreasing expense, since if
# for some reason the patterns overlap the most expensive matching cost
# category should be returned.
if _is_number_matching_desc(short_number, metadata.premium_rate):
return ShortNumberCost.PREMIUM_RATE
if _is_number_matching_desc(short_number, metadata.standard_rate):
return ShortNumberCost.STANDARD_RATE
if _is_number_matching_desc(short_number, metadata.toll_free):
return ShortNumberCost.TOLL_FREE
if is_emergency_number(short_number, region_dialing_from):
# Emergency numbers are implicitly toll-free.
return ShortNumberCost.TOLL_FREE
return ShortNumberCost.UNKNOWN_COST
def expected_cost(numobj):
"""Gets the expected cost category of a short number (however, nothing is
implied about its validity). If the country calling code is unique to a
region, this method behaves exactly the same as
get_expected_cost_for_region. However, if the country calling code is
shared by multiple regions, then it returns the highest cost in the
sequence PREMIUM_RATE, UNKNOWN_COST, STANDARD_RATE, TOLL_FREE. The reason
for the position of UNKNOWN_COST in this order is that if a number is
UNKNOWN_COST in one region but STANDARD_RATE or TOLL_FREE in another, its
expected cost cannot be estimated as one of the latter since it might be a
PREMIUM_RATE number.
For example, if a number is STANDARD_RATE in the US, but TOLL_FREE in
Canada, the expected cost returned by this method will be STANDARD_RATE,
since the NANPA countries share the same country calling code.
Note: If the region from which the number is dialed is known, it is highly preferable to call
expected_cost_for_region instead.
Arguments:
numobj -- the short number for which we want to know the expected cost category
Return the highest expected cost category of the short number in the
region(s) with the given country calling code
"""
region_codes = region_codes_for_country_code(numobj.country_code)
if len(region_codes) == 0:
return ShortNumberCost.UNKNOWN_COST
short_number = national_significant_number(numobj)
if len(region_codes) == 1:
return expected_cost_for_region(short_number, region_codes[0])
cost = ShortNumberCost.TOLL_FREE
for region_code in region_codes:
cost_for_region = expected_cost_for_region(short_number, region_code)
if cost_for_region == ShortNumberCost.PREMIUM_RATE:
return ShortNumberCost.PREMIUM_RATE
elif cost_for_region == ShortNumberCost.UNKNOWN_COST:
return ShortNumberCost.UNKNOWN_COST
elif cost_for_region == ShortNumberCost.STANDARD_RATE:
if cost != ShortNumberCost.UNKNOWN_COST:
cost = ShortNumberCost.STANDARD_RATE
elif cost_for_region == ShortNumberCost.TOLL_FREE:
# Do nothing
pass
else: # pragma no cover
            raise Exception("Unrecognized cost for region: %s" % cost_for_region)
return cost
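# Sketch of the intended call pattern (the region/number pair is illustrative
# only):
#
#   numobj = phonenumbers.parse("3010", "US")
#   if is_valid_short_number(numobj):
#       cost = expected_cost(numobj)  # one of the ShortNumberCost constants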
def _region_code_for_short_number_from_region_list(numobj, region_codes):
"""Helper method to get the region code for a given phone number, from a list of possible region
codes. If the list contains more than one region, the first region for which the number is
valid is returned.
"""
if len(region_codes) == 0:
return None
elif len(region_codes) == 1:
return region_codes[0]
national_number = national_significant_number(numobj)
for region_code in region_codes:
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is not None and _is_number_matching_desc(national_number, metadata.short_code):
# The number is valid for this region.
return region_code
return None
def _example_short_number(region_code):
"""Gets a valid short number for the specified region.
Arguments:
region_code -- the region for which an example short number is needed.
Returns a valid short number for the specified region. Returns an empty
string when the metadata does not contain such information.
"""
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is None:
return U_EMPTY_STRING
desc = metadata.short_code
if desc.example_number is not None:
return desc.example_number
return U_EMPTY_STRING
def _example_short_number_for_cost(region_code, cost):
"""Gets a valid short number for the specified cost category.
Arguments:
region_code -- the region for which an example short number is needed.
cost -- the cost category of number that is needed.
Returns a valid short number for the specified region and cost
category. Returns an empty string when the metadata does not contain such
information, or the cost is UNKNOWN_COST.
"""
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is None:
return U_EMPTY_STRING
desc = None
if cost == ShortNumberCost.TOLL_FREE:
desc = metadata.toll_free
elif cost == ShortNumberCost.STANDARD_RATE:
desc = metadata.standard_rate
elif cost == ShortNumberCost.PREMIUM_RATE:
desc = metadata.premium_rate
else:
# ShortNumberCost.UNKNOWN_COST numbers are computed by the process of
        # elimination from the other cost categories.
pass
if desc is not None and desc.example_number is not None:
return desc.example_number
return U_EMPTY_STRING
def connects_to_emergency_number(number, region_code):
"""Returns whether the number might be used to connect to an emergency
service in the given region.
This function takes into account cases where the number might contain
formatting, or might have additional digits appended (when it is okay to
do that in the region specified).
Arguments:
number -- The phone number to test.
region_code -- The region where the phone number is being dialed.
Returns whether the number might be used to connect to an emergency
service in the given region.
"""
return _matches_emergency_number_helper(number, region_code, True) # Allows prefix match
def is_emergency_number(number, region_code):
"""Returns true if the number exactly matches an emergency service number
in the given region.
This method takes into account cases where the number might contain
formatting, but doesn't allow additional digits to be appended.
Arguments:
number -- The phone number to test.
region_code -- The region where the phone number is being dialed.
Returns if the number exactly matches an emergency services number in the
given region.
"""
return _matches_emergency_number_helper(number, region_code, False) # Doesn't allow prefix match
def _matches_emergency_number_helper(number, region_code, allow_prefix_match):
number = _extract_possible_number(number)
if _PLUS_CHARS_PATTERN.match(number):
# Returns False if the number starts with a plus sign. We don't
# believe dialing the country code before emergency numbers
# (e.g. +1911) works, but later, if that proves to work, we can add
# additional logic here to handle it.
return False
metadata = PhoneMetadata.short_metadata_for_region(region_code.upper(), None)
if metadata is None or metadata.emergency is None:
return False
emergency_number_pattern = re.compile(metadata.emergency.national_number_pattern)
normalized_number = normalize_digits_only(number)
if not allow_prefix_match or region_code in _REGIONS_WHERE_EMERGENCY_NUMBERS_MUST_BE_EXACT:
return fullmatch(emergency_number_pattern, normalized_number) is not None
else:
return emergency_number_pattern.match(normalized_number) is not None
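# Illustrative contrast between the two public emergency checks (values are
# examples, not exhaustive):
#
#   connects_to_emergency_number("9116666666", "US")  # True: prefix match allowed
#   is_emergency_number("9116666666", "US")           # False: exact match required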
def is_carrier_specific(numobj):
"""Given a valid short number, determines whether it is carrier-specific
(however, nothing is implied about its validity). If it is important that
the number is valid, then its validity must first be checked using
is_valid_short_number or is_valid_short_number_for_region.
Arguments:
numobj -- the valid short number to check
Returns whether the short number is carrier-specific (assuming the input
was a valid short number).
"""
region_codes = region_codes_for_country_code(numobj.country_code)
region_code = _region_code_for_short_number_from_region_list(numobj, region_codes)
national_number = national_significant_number(numobj)
metadata = PhoneMetadata.short_metadata_for_region(region_code)
return (metadata is not None and _is_number_matching_desc(national_number, metadata.carrier_specific))
| bsd-3-clause | 3,237,084,498,279,031,300 | 41.665746 | 106 | 0.722888 | false |
tomdyson/wagtail-modeltranslation | wagtail_modeltranslation/fields.py | 1 | 16968 | # -*- coding: utf-8 -*-
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db.models import fields
from wagtail.wagtailcore.fields import StreamField
from django.utils import six
from wagtail_modeltranslation import settings as mt_settings
from wagtail_modeltranslation.utils import (
get_language, build_localized_fieldname, build_localized_verbose_name, resolution_order)
from wagtail_modeltranslation.widgets import ClearableWidgetWrapper
SUPPORTED_FIELDS = (
fields.CharField,
# Above implies also CommaSeparatedIntegerField, EmailField, FilePathField, SlugField
# and URLField as they are subclasses of CharField.
fields.TextField,
fields.IntegerField,
# Above implies also BigIntegerField, SmallIntegerField, PositiveIntegerField and
# PositiveSmallIntegerField, as they are subclasses of IntegerField.
fields.BooleanField,
fields.NullBooleanField,
fields.FloatField,
fields.DecimalField,
fields.IPAddressField,
fields.GenericIPAddressField,
fields.DateField,
fields.DateTimeField,
fields.TimeField,
fields.files.FileField,
fields.files.ImageField,
fields.related.ForeignKey,
# Above implies also OneToOneField
# Wagtail StreamField
StreamField
)
class NONE:
"""
Used for fallback options when they are not provided (``None`` can be
given as a fallback or undefined value) or to mark that a nullable value
is not yet known and needs to be computed (e.g. field default).
"""
pass
def create_translation_field(model, field_name, lang, empty_value):
"""
Translation field factory. Returns a ``TranslationField`` based on a
fieldname and a language.
The list of supported fields can be extended by defining a tuple of field
    names in the project's settings.py like this::
MODELTRANSLATION_CUSTOM_FIELDS = ('MyField', 'MyOtherField',)
If the class is neither a subclass of fields in ``SUPPORTED_FIELDS``, nor
in ``CUSTOM_FIELDS`` an ``ImproperlyConfigured`` exception will be raised.
"""
if empty_value not in ('', 'both', None, NONE):
raise ImproperlyConfigured('%s is not a valid empty_value.' % empty_value)
field = model._meta.get_field(field_name)
cls_name = field.__class__.__name__
if not (isinstance(field, SUPPORTED_FIELDS) or cls_name in mt_settings.CUSTOM_FIELDS):
raise ImproperlyConfigured(
'%s is not supported by modeltranslation.' % cls_name)
translation_class = field_factory(field.__class__)
return translation_class(translated_field=field, language=lang, empty_value=empty_value)
def field_factory(baseclass):
class TranslationFieldSpecific(TranslationField, baseclass):
pass
# Reflect baseclass name of returned subclass
TranslationFieldSpecific.__name__ = 'Translation%s' % baseclass.__name__
return TranslationFieldSpecific
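# Sketch of what the factory produces (names are illustrative): for a model
# field ``title`` registered for translation with LANGUAGES containing 'de',
# create_translation_field(News, 'title', 'de', NONE) returns an instance of
# a dynamically built ``TranslationCharField`` that gets contributed to the
# model as ``title_de``.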
class TranslationField(object):
"""
The translation field functions as a proxy to the original field which is
wrapped.
For every field defined in the model's ``TranslationOptions`` localized
versions of that field are added to the model depending on the languages
given in ``settings.LANGUAGES``.
If for example there is a model ``News`` with a field ``title`` which is
registered for translation and the ``settings.LANGUAGES`` contains the
``de`` and ``en`` languages, the fields ``title_de`` and ``title_en`` will
be added to the model class. These fields are realized using this
descriptor.
    The translation field needs to know which language it contains; therefore
that needs to be specified when the field is created.
"""
def __init__(self, translated_field, language, empty_value, *args, **kwargs):
from wagtail_modeltranslation.translator import translator
# Update the dict of this field with the content of the original one
# This might be a bit radical?! Seems to work though...
self.__dict__.update(translated_field.__dict__)
# Store the originally wrapped field for later
self.translated_field = translated_field
self.language = language
self.empty_value = empty_value
if empty_value is NONE:
self.empty_value = None if translated_field.null else ''
# Default behaviour is that all translations are optional
if not isinstance(self, fields.BooleanField):
# TODO: Do we really want to enforce null *at all*? Shouldn't this
# better honour the null setting of the translated field?
self.null = True
self.blank = True
# Take required_languages translation option into account
trans_opts = translator.get_options_for_model(self.model)
if trans_opts.required_languages:
required_languages = trans_opts.required_languages
if isinstance(trans_opts.required_languages, (tuple, list)):
# All fields
if self.language in required_languages:
# self.null = False
self.blank = False
else:
# Certain fields only
# Try current language - if not present, try 'default' key
try:
req_fields = required_languages[self.language]
except KeyError:
req_fields = required_languages.get('default', ())
if self.name in req_fields:
# TODO: We might have to handle the whole thing through the
# FieldsAggregationMetaClass, as fields can be inherited.
# self.null = False
self.blank = False
# Adjust the name of this field to reflect the language
self.attname = build_localized_fieldname(self.translated_field.name, language)
self.name = self.attname
if self.translated_field.db_column:
self.db_column = build_localized_fieldname(self.translated_field.db_column, language)
self.column = self.db_column
# Copy the verbose name and append a language suffix
# (will show up e.g. in the admin).
self.verbose_name = build_localized_verbose_name(translated_field.verbose_name, language)
# ForeignKey support - rewrite related_name
if self.rel and self.related and not self.rel.is_hidden():
import copy
current = self.related.get_accessor_name()
self.rel = copy.copy(self.rel) # Since fields cannot share the same rel object.
# self.related doesn't need to be copied, as it will be recreated in
# ``RelatedField.do_related_class``
if self.rel.related_name is None:
# For implicit related_name use different query field name
loc_related_query_name = build_localized_fieldname(
self.related_query_name(), self.language)
self.related_query_name = lambda: loc_related_query_name
self.rel.related_name = build_localized_fieldname(current, self.language)
self.rel.field = self # Django 1.6
if hasattr(self.rel.to._meta, '_related_objects_cache'):
del self.rel.to._meta._related_objects_cache
# Django 1.5 changed definition of __hash__ for fields to be fine with hash requirements.
# It spoiled our machinery, since TranslationField has the same creation_counter as its
# original field and fields didn't get added to sets.
    # So here we override __eq__ and __hash__ to fix the issue while staying consistent with
# http://docs.python.org/2.7/reference/datamodel.html#object.__hash__
def __eq__(self, other):
if isinstance(other, fields.Field):
return (self.creation_counter == other.creation_counter and
self.language == getattr(other, 'language', None))
return super(TranslationField, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.creation_counter, self.language))
def formfield(self, *args, **kwargs):
"""
Returns proper formfield, according to empty_values setting
(only for ``forms.CharField`` subclasses).
There are 3 different formfields:
- CharField that stores all empty values as empty strings;
- NullCharField that stores all empty values as None (Null);
- NullableField that can store both None and empty string.
By default, if no empty_values was specified in model's translation options,
NullCharField would be used if the original field is nullable, CharField otherwise.
This can be overridden by setting empty_values to '' or None.
Setting 'both' will result in NullableField being used.
Textual widgets (subclassing ``TextInput`` or ``Textarea``) used for
nullable fields are enriched with a clear checkbox, allowing ``None``
values to be preserved rather than saved as empty strings.
        The somewhat surprising behaviour of ``forms.CharField`` is documented as a
"won't fix": https://code.djangoproject.com/ticket/9590.
"""
formfield = super(TranslationField, self).formfield(*args, **kwargs)
if isinstance(formfield, forms.CharField):
if self.empty_value is None:
from wagtail_modeltranslation.forms import NullCharField
form_class = formfield.__class__
kwargs['form_class'] = type(
'Null%s' % form_class.__name__, (NullCharField, form_class), {})
formfield = super(TranslationField, self).formfield(*args, **kwargs)
elif self.empty_value == 'both':
from wagtail_modeltranslation.forms import NullableField
form_class = formfield.__class__
kwargs['form_class'] = type(
'Nullable%s' % form_class.__name__, (NullableField, form_class), {})
formfield = super(TranslationField, self).formfield(*args, **kwargs)
if isinstance(formfield.widget, (forms.TextInput, forms.Textarea)):
formfield.widget = ClearableWidgetWrapper(formfield.widget)
return formfield
def save_form_data(self, instance, data, check=True):
# Allow 3rd-party apps forms to be saved using only translated field name.
# When translated field (e.g. 'name') is specified and translation field (e.g. 'name_en')
# not, we assume that form was saved without knowledge of modeltranslation and we make
# things right:
# Translated field is saved first, settings respective translation field value. Then
# translation field is being saved without value - and we handle this here (only for
# active language).
# Questionable fields are stored in special variable, which is later handled by clean_fields
# method on the model.
if check and self.language == get_language() and getattr(instance, self.name) and not data:
if not hasattr(instance, '_mt_form_pending_clear'):
instance._mt_form_pending_clear = {}
instance._mt_form_pending_clear[self.name] = data
else:
super(TranslationField, self).save_form_data(instance, data)
def deconstruct(self):
name, path, args, kwargs = self.translated_field.deconstruct()
if self.null is True:
kwargs.update({'null': True})
if 'db_column' in kwargs:
kwargs['db_column'] = self.db_column
return six.text_type(self.name), path, args, kwargs
def south_field_triple(self):
"""
Returns a suitable description of this field for South.
"""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
try:
# Check if the field provides its own 'field_class':
field_class = self.translated_field.south_field_triple()[0]
except AttributeError:
field_class = '%s.%s' % (self.translated_field.__class__.__module__,
self.translated_field.__class__.__name__)
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
class TranslationFieldDescriptor(object):
"""
A descriptor used for the original translated field.
"""
def __init__(self, field, fallback_languages=None, fallback_value=NONE,
fallback_undefined=NONE):
"""
Stores fallback options and the original field, so we know it's name
and default.
"""
self.field = field
self.fallback_languages = fallback_languages
self.fallback_value = fallback_value
self.fallback_undefined = fallback_undefined
def __set__(self, instance, value):
"""
Updates the translation field for the current language.
"""
if getattr(instance, '_mt_init', False):
# When assignment takes place in model instance constructor, don't set value.
# This is essential for only/defer to work, but I think it's sensible anyway.
return
loc_field_name = build_localized_fieldname(self.field.name, get_language())
setattr(instance, loc_field_name, value)
def meaningful_value(self, val, undefined):
"""
Check if val is considered non-empty.
"""
if isinstance(val, fields.files.FieldFile):
return val.name and not (
isinstance(undefined, fields.files.FieldFile) and val == undefined)
return val is not None and val != undefined
def __get__(self, instance, owner):
"""
Returns value from the translation field for the current language, or
value for some another language according to fallback languages, or the
custom fallback value, or field's default value.
"""
if instance is None:
return self
default = NONE
undefined = self.fallback_undefined
if undefined is NONE:
default = self.field.get_default()
undefined = default
langs = resolution_order(get_language(), self.fallback_languages)
for lang in langs:
loc_field_name = build_localized_fieldname(self.field.name, lang)
val = getattr(instance, loc_field_name, None)
if self.meaningful_value(val, undefined):
return val
if mt_settings.ENABLE_FALLBACKS and self.fallback_value is not NONE:
return self.fallback_value
else:
if default is NONE:
default = self.field.get_default()
            # Some fields like FileField behave strangely, as their get_default() doesn't
            # return an instance of attr_class, but rather None or ''.
# Normally this case is handled in the descriptor, but since we have overridden it, we
# must mock it up.
if (isinstance(self.field, fields.files.FileField) and
not isinstance(default, self.field.attr_class)):
return self.field.attr_class(instance, self.field, default)
return default
class TranslatedRelationIdDescriptor(object):
"""
A descriptor used for the original '_id' attribute of a translated
ForeignKey field.
"""
def __init__(self, field_name, fallback_languages):
self.field_name = field_name # The name of the original field (excluding '_id')
self.fallback_languages = fallback_languages
def __set__(self, instance, value):
lang = get_language()
loc_field_name = build_localized_fieldname(self.field_name, lang)
# Localized field name with '_id'
loc_attname = instance._meta.get_field(loc_field_name).get_attname()
setattr(instance, loc_attname, value)
def __get__(self, instance, owner):
if instance is None:
return self
langs = resolution_order(get_language(), self.fallback_languages)
for lang in langs:
loc_field_name = build_localized_fieldname(self.field_name, lang)
# Localized field name with '_id'
loc_attname = instance._meta.get_field(loc_field_name).get_attname()
val = getattr(instance, loc_attname, None)
if val is not None:
return val
return None
class LanguageCacheSingleObjectDescriptor(object):
"""
    A mixin for RelatedObjectDescriptors which use the current language in cache lookups.
"""
accessor = None # needs to be set on instance
@property
def cache_name(self):
lang = get_language()
cache = build_localized_fieldname(self.accessor, lang)
return "_%s_cache" % cache
| bsd-3-clause | 1,340,835,948,762,362,000 | 43.1875 | 100 | 0.643388 | false |
burrsettles/ml-talks-duolingo | 03_clustering/voting_em.py | 1 | 3646 | """
Burr Settles
Duolingo ML Dev Talk #3: Clustering
EM-GMM (expectaction maximization with Gaussian mixture models) clustering example using
scikit-learn.
"""
import argparse
import math
import json
import numpy as np
from bs4 import BeautifulSoup
from sklearn.mixture import GaussianMixture
# cluster colors (for map visualizations, up to 8)
COLORS = '#56A9F6 #73BE49 #F4D23E #F18E2E #EA5E5B #B26EDF #DDDEE0 #53585F'.split()
def hex_to_rgb(value):
"""Return (red, green, blue) for the color given as #rrggbb."""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_hex(red, green, blue):
"""Return color as #rrggbb for the given color values."""
return '#%02x%02x%02x' % (red, green, blue)
def read_vote_data(votingfile):
features = None
states = []
abbr = []
matrix = []
with open(votingfile, 'rU') as ins:
for line in ins:
bits = line.strip().split(',')
if features is None:
features = bits[2:]
else:
states.append(bits[0])
abbr.append(bits[1])
matrix.append([float(x) for x in bits[2:]])
return features, states, abbr, np.array(matrix)
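# Assumed CSV layout (inferred from the parsing above and the usage in
# __main__): row 1 is a header whose columns from the third onward are feature
# names such as '1980_dem'; every following row is "State,AB,<float values...>".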
def make_map_file(mapfile, state_cluster_map, num_clusters=None):
num_clusters = num_clusters or max(state_cluster_map.values())+1
svg = open(mapfile, 'r').read()
soup = BeautifulSoup(svg, "html5lib")
paths = soup.findAll('path')
for p in paths:
if p['id'] in state_cluster_map.keys():
dist = list(state_cluster_map[p['id']])
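            # Take the 16th root of each probability (sqrt applied four times)
            # to flatten the distribution, so minority clusters still tint the map.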
dist = [math.sqrt(math.sqrt(math.sqrt(math.sqrt(x)))) for x in dist]
dist = [x / sum(dist) for x in dist]
(r, g, b) = (0., 0., 0.)
for i, prob in enumerate(dist):
(r_, g_, b_) = hex_to_rgb(COLORS[i])
r += prob * r_
g += prob * g_
b += prob * b_
color = str(rgb_to_hex(r, g, b))
p['style'] = 'fill:%s;display:inline' % color
f = open('figs/gmm_%d.svg' % num_clusters,"w")
f.write(soup.prettify())
f.close()
parser = argparse.ArgumentParser(description='Cluster US state voting data with a Gaussian mixture model.')
parser.add_argument('-n', action='store', dest='num_clusters', type=int, default=4, help='number of clusters')
if __name__ == '__main__':
args = parser.parse_args()
features, states, abbr, X = read_vote_data('data/state_vote_data.csv')
# cluster the data
gmm = GaussianMixture(n_components=args.num_clusters, covariance_type='spherical', max_iter=5, init_params='random', random_state=0).fit(X)
# print cluster assignment distributions for each state
preds = gmm.predict_proba(X)
entropy = 0.
for i, st in enumerate(states):
print '%s\t%s\t%s' % (abbr[i], '{:<30}'.format(st), str(preds[i]))
for x in preds[i]:
try:
entropy -= x * math.log(x, 2)
except:
pass
entropy /= len(states)
print 'entropy:', entropy
# print mean values for each cluster
for k, c in enumerate(gmm.means_):
vector = dict(zip(features, c))
print '\nCLUSTER %d' % k
print '\t'.join(['']+[str(x) for x in range(1980,2017,4)])
for party in 'dem rep 3rd'.split():
dat = ['%.2f' % vector['%d_%s' % (year, party)] for year in range(1980,2017,4)]
print '\t'.join([party]+dat)
# visualize clusters in a map
make_map_file('figs/Blank_US_Map_with_borders.svg', dict(zip(abbr, preds)), args.num_clusters)
| gpl-3.0 | -6,921,857,075,692,596,000 | 33.396226 | 143 | 0.582008 | false |
firestrand/pybrain-gpu | pybraingpu/datasets/supervised.py | 1 | 4176 | __author__ = 'Thomas Rueckstiess, [email protected]'
from random import sample
from scipy import isscalar
from dataset import DataSet
from pybraingpu.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
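    # Minimal usage sketch (XOR-style targets; shapes match indim=2, outdim=1):
    #
    #   ds = SupervisedDataSet(2, 1)
    #   ds.addSample((0, 0), (0,))
    #   ds.addSample((0, 1), (1,))
    #   train, test = ds.splitWithProportion(0.75)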
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter(map(lambda x: [x], iter(self)))
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError / ponderation
def _evaluateSequence(self, f, seq, verbose=False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target - res).flatten() ** 2)
totalError += e
ponderation += len(target)
if verbose:
print 'out: ', fListToString(list(res))
print 'correct:', fListToString(target)
print 'error: % .8f' % e
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver=1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res / averageOver
def splitWithProportion(self, proportion=0.5):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples."""
leftIndices = set(sample(range(len(self)), int(len(self) * proportion)))
leftDs = self.copy()
leftDs.clear()
rightDs = leftDs.copy()
index = 0
for sp in self:
if index in leftIndices:
leftDs.addSample(*sp)
else:
rightDs.addSample(*sp)
index += 1
return leftDs, rightDs
| bsd-3-clause | 1,078,351,008,453,465,500 | 34.692308 | 80 | 0.58501 | false |
AdamISZ/CoinSwapCS | test/conftest.py | 1 | 3554 | import pytest
import os
import time
import subprocess
# 'OS' is referenced in local_command() below but was never defined; derive it
# here from os.name so the redirect branches can actually run.
OS = 'Windows' if os.name == 'nt' else 'Linux'
bitcoin_path = None
bitcoin_conf = None
bitcoin_rpcpassword = None
bitcoin_rpcusername = None
miniircd_procs = []
def local_command(command, bg=False, redirect=''):
if redirect == 'NULL':
if OS == 'Windows':
command.append(' > NUL 2>&1')
elif OS == 'Linux':
command.extend(['>', '/dev/null', '2>&1'])
else:
print "OS not recognised, quitting."
elif redirect:
command.extend(['>', redirect])
if bg:
#using subprocess.PIPE seems to cause problems
FNULL = open(os.devnull, 'w')
return subprocess.Popen(command,
stdout=FNULL,
stderr=subprocess.STDOUT,
close_fds=True)
else:
#in case of foreground execution, we can use the output; if not
#it doesn't matter
return subprocess.check_output(command)
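# Illustrative calls (not from the original test suite):
#   local_command(['ls', '-l'])            # foreground, returns the output
#   local_command(['bitcoind'], bg=True)   # background, returns a Popen handle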
def pytest_addoption(parser):
parser.addoption("--btcroot", action="store", default='',
help="the fully qualified path to the directory containing "+\
"the bitcoin binaries, e.g. /home/user/bitcoin/bin/")
parser.addoption("--btcconf", action="store",
help="the fully qualified path to the location of the "+\
"bitcoin configuration file you use for testing, e.g. "+\
"/home/user/.bitcoin/bitcoin.conf")
parser.addoption("--btcpwd",
action="store",
help="the RPC password for your test bitcoin instance")
parser.addoption("--btcuser",
action="store",
default='bitcoinrpc',
help="the RPC username for your test bitcoin instance (default=bitcoinrpc)")
parser.addoption("--runtype",
action="store",
default="",
help="Mode of test, can be one of: cooperative,")
def pytest_generate_tests(metafunc):
option_value = metafunc.config.option.runtype
if "runtype" in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("runtype", [option_value])
def teardown():
#shut down bitcoin and remove the regtest dir
local_command([bitcoin_path + "bitcoin-cli", "-regtest",
"-rpcuser=" + bitcoin_rpcusername,
"-rpcpassword=" + bitcoin_rpcpassword,
"-conf=" + bitcoin_conf, "stop"])
#note, it is better to clean out ~/.bitcoin/regtest but too
#dangerous to automate it here perhaps
@pytest.fixture(scope="session", autouse=True)
def setup(request):
print 'starting'
request.addfinalizer(teardown)
global bitcoin_conf, bitcoin_path, bitcoin_rpcpassword, bitcoin_rpcusername
bitcoin_path = request.config.getoption("--btcroot")
bitcoin_conf = request.config.getoption("--btcconf")
bitcoin_rpcpassword = request.config.getoption("--btcpwd")
bitcoin_rpcusername = request.config.getoption("--btcuser")
#start up regtest blockchain
btc_proc = subprocess.call([bitcoin_path + "bitcoind", "-regtest",
"-daemon", "-conf=" + bitcoin_conf])
time.sleep(3)
#generate blocks
local_command([bitcoin_path + "bitcoin-cli", "-regtest",
"-rpcuser=" + bitcoin_rpcusername,
"-rpcpassword=" + bitcoin_rpcpassword,
"-conf=" + bitcoin_conf, "generate", "601"])
| gpl-3.0 | -8,149,794,628,993,577,000 | 38.932584 | 97 | 0.580754 | false |
GraveRaven/hivemind | hivemindsrc/ants.py | 1 | 15536 | #!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from multiprocessing import Pool
import os
import re
import socket
import time
import sys
IS_PY2 = sys.version_info.major == 2
if IS_PY2:
from urllib2 import urlopen, Request
from StringIO import StringIO
else:
from urllib.request import urlopen, Request
from io import StringIO
import base64
import csv
import random
import ssl
from contextlib import contextmanager
import traceback
import boto.ec2
import boto.exception
import paramiko
STATE_FILENAME = os.path.expanduser('~/.ants')
# Utilities
@contextmanager
def _redirect_stdout(outfile=None):
save_stdout = sys.stdout
sys.stdout = outfile or StringIO()
yield
sys.stdout = save_stdout
def _read_server_list():
instance_ids = []
if not os.path.isfile(STATE_FILENAME):
return (None, None, None, None)
with open(STATE_FILENAME, 'r') as f:
username = f.readline().strip()
key_name = f.readline().strip()
zone = f.readline().strip()
text = f.read()
instance_ids = [i for i in text.split('\n') if i != '']
print('Read %i bees from the roster.' % len(instance_ids))
return (username, key_name, zone, instance_ids)
def _write_server_list(username, key_name, zone, instances):
with open(STATE_FILENAME, 'w') as f:
f.write('%s\n' % username)
f.write('%s\n' % key_name)
f.write('%s\n' % zone)
f.write('\n'.join([instance.id for instance in instances]))
def _delete_server_list():
os.remove(STATE_FILENAME)
def _get_pem_path(key):
return os.path.expanduser('~/.ssh/%s.pem' % key)
def _get_region(zone):
return zone if 'gov' in zone else zone[:-1] # chop off the "d" in the "us-east-1d" to get the "Region"
def _get_security_group_id(connection, security_group_name, subnet):
if not security_group_name:
print('The bees need a security group to run under. Need to open a port from where you are to the target subnet.')
return
security_groups = connection.get_all_security_groups(filters={'group-name': [security_group_name]})
if not security_groups:
print('The bees need a security group to run under. The one specified was not found.')
return
group = security_groups[0] if security_groups else None
return group.id
# Methods
def up(count, group, zone, image_id, instance_type, username, key_name, subnet, bid = None):
"""
Startup the load testing server.
"""
existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()
count = int(count)
if existing_username == username and existing_key_name == key_name and existing_zone == zone:
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
        # Use a list (not a lazy filter object) so len() below also works on Python 3.
        existing_instances = [r.instances[0] for r in existing_reservations
                              if r.instances[0].state == 'running']
# User, key and zone match existing values and instance ids are found on state file
if count <= len(existing_instances):
# Count is less than the amount of existing instances. No need to create new ones.
print('Ants are already assembled and awaiting orders.')
return
else:
# Count is greater than the amount of existing instances. Need to create the only the extra instances.
count -= len(existing_instances)
elif instance_ids:
# Instances found on state file but user, key and/or zone not matching existing value.
# State file only stores one user/key/zone config combination so instances are unusable.
print('Taking down {} unusable ants.'.format(len(instance_ids)))
# Redirect prints in down() to devnull to avoid duplicate messages
with _redirect_stdout():
down()
# down() deletes existing state file so _read_server_list() returns a blank state
existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()
pem_path = _get_pem_path(key_name)
if not os.path.isfile(pem_path):
print('Warning. No key file found for %s. You will need to add this key to your SSH agent to connect.' % pem_path)
print('Connecting to the hive.')
try:
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
except boto.exception.NoAuthHandlerFound as e:
print("Authenciation config error, perhaps you do not have a ~/.boto file with correct permissions?")
print(e.message)
return e
except Exception as e:
print("Unknown error occured:")
print(e.message)
return e
if ec2_connection == None:
raise Exception("Invalid zone specified? Unable to connect to region using zone name")
groupId = group if subnet is None else _get_security_group_id(ec2_connection, group, subnet)
print("GroupId found: %s" % groupId)
placement = None if 'gov' in zone else zone
print("Placement: %s" % placement)
if bid:
print('Attempting to call up %i spot ants, this can take a while...' % count)
spot_requests = ec2_connection.request_spot_instances(
image_id=image_id,
price=bid,
count=count,
key_name=key_name,
security_group_ids=[groupId],
instance_type=instance_type,
placement=placement,
subnet_id=subnet)
# it can take a few seconds before the spot requests are fully processed
time.sleep(5)
instances = _wait_for_spot_request_fulfillment(ec2_connection, spot_requests)
else:
print('Attempting to call up %i ants.' % count)
try:
reservation = ec2_connection.run_instances(
image_id=image_id,
min_count=count,
max_count=count,
key_name=key_name,
security_group_ids=[groupId],
instance_type=instance_type,
placement=placement,
subnet_id=subnet)
except boto.exception.EC2ResponseError as e:
print("Unable to call ants:", e.message)
return e
instances = reservation.instances
if instance_ids:
existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
        # Materialize lists instead of lazy filter/map objects so this also
        # behaves correctly on Python 3; the logic is unchanged.
        existing_instances = [r.instances[0] for r in existing_reservations
                              if r.instances[0].state == 'running']
        instances.extend(existing_instances)
        existing_ids = [i.id for i in existing_instances]
        instance_ids = [i for i in instance_ids if i in existing_ids]
print('Waiting for ants to spawn...')
instance_ids = instance_ids or []
for instance in [i for i in instances if i.state == 'pending']:
instance.update()
while instance.state != 'running':
print('.')
time.sleep(5)
instance.update()
instance_ids.append(instance.id)
print('Ant %s is ready.' % instance.id)
ec2_connection.create_tags(instance_ids, { "Name": "an ant!" })
_write_server_list(username, key_name, zone, instances)
print('The hive has assembled %i ants.' % len(instances))
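# Illustrative invocation (every value below is a placeholder, not a real resource):
#
#   up(4, 'ants-sg', 'us-east-1d', 'ami-xxxxxxxx', 't2.micro',
#      'ec2-user', 'my-keypair', None)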
def report():
"""
Report the status of the load testing servers.
"""
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants have been mobilized.')
return
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
instances = []
for reservation in reservations:
instances.extend(reservation.instances)
for instance in instances:
print('Ant %s: %s @ %s' % (instance.id, instance.state, instance.ip_address))
def down():
"""
Shutdown the load testing server.
"""
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants have been mobilized.')
return
print('Connecting to the hive.')
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
print('Calling off the hive.')
terminated_instance_ids = ec2_connection.terminate_instances(
instance_ids=instance_ids)
print('Stood down %i ants.' % len(terminated_instance_ids))
_delete_server_list()
def _wait_for_spot_request_fulfillment(conn, requests, fulfilled_requests=None):
    """
    Wait until all spot requests are fulfilled.
    Once all spot requests are fulfilled, return a list of corresponding spot instances.
    """
    # Avoid a mutable default argument, which would leak state between calls.
    if fulfilled_requests is None:
        fulfilled_requests = []
    if len(requests) == 0:
reservations = conn.get_all_instances(instance_ids = [r.instance_id for r in fulfilled_requests])
return [r.instances[0] for r in reservations]
else:
time.sleep(10)
print('.')
requests = conn.get_all_spot_instance_requests(request_ids=[req.id for req in requests])
for req in requests:
if req.status.code == 'fulfilled':
fulfilled_requests.append(req)
print("spot ant `{}` joined the hive.".format(req.instance_id))
return _wait_for_spot_request_fulfillment(conn, [r for r in requests if r not in fulfilled_requests], fulfilled_requests)
def _execute_order(params):
print('Ant %i is joining the hive.' % params['i'])
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None
if not os.path.isfile(pem_path):
client.load_system_host_keys()
client.connect(params['instance_name'], username=params['username'])
else:
client.connect(
params['instance_name'],
username=params['username'],
key_filename=pem_path)
print('Ant %i is executing order' % params['i'])
stdin, stdout, stderr = client.exec_command(params['order'])
#response = {}
# paramiko's read() returns bytes which need to be converted back to a str
#ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8')
print(stdout.read().decode('utf-8'))
client.close()
except socket.error as e:
return e
except Exception as e:
traceback.print_exc()
print()
raise e
def _execute_order_file(params):
upload_path = "/tmp/"
print('Ant %i is joining the hive.' % params['i'])
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None
if not os.path.isfile(pem_path):
client.load_system_host_keys()
client.connect(params['instance_name'], username=params['username'])
else:
client.connect(
params['instance_name'],
username=params['username'],
key_filename=pem_path)
order_file = params['order_file']
filename = os.path.basename(order_file)
print('Ant %s uploading file %s to %s' % (params['i'], order_file, upload_path + filename))
command = 'scp -i %s -o StrictHostKeyChecking=no %s %s@%s:%s' % (_get_pem_path(params['key_name']), order_file, params['username'], params['instance_name'], upload_path)
os.system(command)
print('Ant %s executing file %s' % (params['i'], upload_path + filename))
stdin, stdout, stderr = client.exec_command('chmod +x %s'% upload_path + filename)
stdin, stdout, stderr = client.exec_command(upload_path + filename)
#response = {}
# paramiko's read() returns bytes which need to be converted back to a str
#ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8')
print(stdout.read().decode('utf-8'))
client.close()
except socket.error as e:
return e
except Exception as e:
traceback.print_exc()
print()
raise e
def order(orders, order_files):
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants are ready for orders.')
return
print('Connecting to the hive.')
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
print('Assembling ants.')
reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
instances = []
for reservation in reservations:
instances.extend(reservation.instances)
instance_count = len(instances)
params = []
#Start with executing order
if not orders == None:
for order in orders:
del params[:]
for i, instance in enumerate(instances):
params.append({
'i': i,
'instance_id': instance.id,
'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name,
'username': username,
'key_name': key_name,
'order': order
})
print('Organizing the hive.')
# Spin up processes for connecting to EC2 instances
pool = Pool(len(params))
results = pool.map(_execute_order, params)
#Now run order files
if not order_files == None:
for order_file in order_files:
print('Filename: %s' % order_file)
del params[:]
for i, instance in enumerate(instances):
params.append({
'i': i,
'instance_id': instance.id,
'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name,
'username': username,
'key_name': key_name,
'order_file': order_file
})
#print('Running order file %s' % order_file)
print('Organizing the hive.')
# Spin up processes for connecting to EC2 instances
pool = Pool(len(params))
results = pool.map(_execute_order_file, params)
print('The hive is awaiting new orders.')
sys.exit(0)
| mit | 2,145,380,531,466,934,800 | 33.678571 | 177 | 0.630021 | false |
kayhayen/Nuitka | nuitka/tools/quality/pylint/__main__.py | 1 | 4063 | #!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Main program for PyLint checker tool.
"""
from __future__ import print_function
import sys
from optparse import OptionParser
from nuitka.PythonVersions import python_version
from nuitka.tools.Basics import addPYTHONPATH, getHomePath, goHome, setupPATH
from nuitka.tools.quality.Git import getModifiedPaths
from nuitka.tools.quality.pylint import PyLint
from nuitka.tools.quality.ScanSources import isPythonFile, scanTargets
from nuitka.tools.testing.Common import hasModule, setup
from nuitka.utils.FileOperations import resolveShellPatternToFilenames
def main():
setup(go_main=False)
# So PyLint finds nuitka package.
addPYTHONPATH(getHomePath())
setupPATH()
parser = OptionParser()
parser.add_option(
"--diff",
action="store_true",
dest="diff",
default=False,
help="""\
Analyse the changed files in git. Default is %default.""",
)
parser.add_option(
"--show-todos",
"--todos",
action="store_true",
dest="todos",
default=False,
help="""\
Show TODO items. Default is %default.""",
)
parser.add_option(
"--verbose",
action="store_true",
dest="verbose",
default=False,
help="""\
Be verbose in output. Default is %default.""",
)
parser.add_option(
"--one-by-one",
action="store_true",
dest="one_by_one",
default=False,
help="""\
Check files one by one. Default is %default.""",
)
parser.add_option(
"--not-installed-is-no-error",
action="store_true",
dest="not_installed_is_no_error",
default=False,
help="""\
Insist on PyLint to be installed. Default is %default.""",
)
options, positional_args = parser.parse_args()
if options.not_installed_is_no_error and not hasModule("pylint"):
print("PyLint is not installed for this interpreter version: SKIPPED")
sys.exit(0)
if positional_args:
if options.diff:
sys.exit("Error, no filenames argument allowed in git diff mode.")
else:
goHome()
if options.diff:
positional_args = [
filename for filename in getModifiedPaths() if isPythonFile(filename)
]
else:
positional_args = ["bin", "nuitka", "setup.py", "tests/*/run_all.py"]
positional_args = sum(
(
resolveShellPatternToFilenames(positional_arg)
for positional_arg in positional_args
),
[],
)
if not positional_args:
sys.exit("No files found.")
print("Working on:", positional_args)
ignore_list = []
    # Avoid checking the Python2 runner along with the one for Python3, as it has name collisions.
if python_version >= 0x300:
ignore_list.append("nuitka")
filenames = list(
scanTargets(
positional_args, suffixes=(".py", ".scons"), ignore_list=ignore_list
)
)
PyLint.executePyLint(
filenames=filenames,
show_todos=options.todos,
verbose=options.verbose,
one_by_one=options.one_by_one,
)
if not filenames:
sys.exit("No files found.")
    sys.exit(PyLint.our_exit_code)
# Entry point when executed via "python -m nuitka.tools.quality.pylint";
# without this call the module defines main() but never runs it.
if __name__ == "__main__":
    main()
| apache-2.0 | -8,395,116,404,153,469,000 | 26.828767 | 95 | 0.630815 | false |
DedMemez/ODS-August-2017 | building/DistributedPaintShopInterior.py | 1 | 1302 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.building.DistributedPaintShopInterior
from direct.distributed.DistributedObject import DistributedObject
from direct.actor.Actor import Actor
from RandomBuilding import RandomBuilding
class DistributedPaintShopInterior(DistributedObject, RandomBuilding):
def announceGenerate(self):
DistributedObject.announceGenerate(self)
self.setup()
def setup(self):
randomGen = self.getRandomGen()
colors = self.getColors()
self.interior = loader.loadModel('phase_4/models/modules/PaintShopInterior')
self.interior.reparentTo(render)
self.mixer = Actor('phase_4/models/props/pos_PS_Mixer_zero', {'mix': 'phase_4/models/props/pos_PS_Mixer_mix'})
self.mixer.reparentTo(self.interior)
self.mixer.setPlayRate(2.1, 'mix')
self.mixer.loop('mix', fromFrame=20, toFrame=160)
if settings['smoothAnimations']:
self.mixer.setBlend(frameBlend=True)
self.setupDoor(randomGen, colors, self.interior, -0.25)
self.resetNPCs()
def disable(self):
self.mixer.removeNode()
del self.mixer
self.interior.removeNode()
del self.interior
DistributedObject.disable(self) | apache-2.0 | -2,224,954,339,351,331,600 | 40.064516 | 118 | 0.685868 | false |
Wolfterro/Criador-De-Postagens | src/old/v1.0/CriadorDePostagens.py | 1 | 13285 | # -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2017 Wolfgang Almeida <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#===================================
# Created by: Wolfterro
# Version: 1.0 - Python 2.x
# Date: 26/03/2017
#===================================
from PyQt4 import QtCore, QtGui
import sys
# Program imports
# ===================
from WindowHandler import WindowHandler
from GlobalVars import GlobalVars
# Setting the default encoding to UTF-8.
# =======================================
reload(sys)
sys.setdefaultencoding('utf-8')
# Program encoding.
# =================
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
# Main class of the program, generated by Qt Designer.
# ====================================================
class Ui_MainWindow(object):
def setupUi(self, MainWindow, Handler):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(700, 820)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Icon.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout_5 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_4 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.gridLayout_4.addWidget(self.lineEdit, 0, 0, 1, 1)
self.gridLayout_5.addWidget(self.groupBox, 0, 0, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.lineEdit_2 = QtGui.QLineEdit(self.groupBox_2)
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.gridLayout_3.addWidget(self.lineEdit_2, 0, 0, 1, 1)
self.gridLayout_5.addWidget(self.groupBox_2, 1, 0, 1, 1)
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.textEdit = QtGui.QTextEdit(self.groupBox_3)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.gridLayout_2.addWidget(self.textEdit, 0, 0, 1, 1)
self.gridLayout_5.addWidget(self.groupBox_3, 2, 0, 1, 1)
self.groupBox_4 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_4)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.pushButton = QtGui.QPushButton(self.groupBox_4)
self.pushButton.setStyleSheet(_fromUtf8("QPushButton {\n"
" font-weight: bold;\n"
"}"))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.gridLayout.addWidget(self.pushButton, 0, 0, 1, 1)
self.pushButton_2 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_2.setStyleSheet(_fromUtf8("QPushButton {\n"
" font-style: italic;\n"
"}"))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.gridLayout.addWidget(self.pushButton_2, 0, 1, 1, 1)
self.pushButton_3 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_3.setStyleSheet(_fromUtf8("QPushButton {\n"
" text-decoration: underline;\n"
"}"))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.gridLayout.addWidget(self.pushButton_3, 0, 2, 1, 2)
self.pushButton_4 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_4.setStyleSheet(_fromUtf8(""))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.gridLayout.addWidget(self.pushButton_4, 0, 4, 1, 2)
self.pushButton_5 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
self.gridLayout.addWidget(self.pushButton_5, 0, 6, 1, 1)
self.pushButton_6 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
self.gridLayout.addWidget(self.pushButton_6, 0, 7, 1, 2)
self.pushButton_7 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_7.setObjectName(_fromUtf8("pushButton_7"))
self.gridLayout.addWidget(self.pushButton_7, 0, 9, 1, 1)
self.pushButton_14 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_14.setObjectName(_fromUtf8("pushButton_14"))
self.gridLayout.addWidget(self.pushButton_14, 0, 10, 1, 1)
self.pushButton_8 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_8.setObjectName(_fromUtf8("pushButton_8"))
self.gridLayout.addWidget(self.pushButton_8, 1, 0, 1, 1)
self.pushButton_9 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_9.setObjectName(_fromUtf8("pushButton_9"))
self.gridLayout.addWidget(self.pushButton_9, 1, 1, 1, 2)
self.pushButton_10 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_10.setObjectName(_fromUtf8("pushButton_10"))
self.gridLayout.addWidget(self.pushButton_10, 1, 3, 1, 2)
self.pushButton_11 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_11.setObjectName(_fromUtf8("pushButton_11"))
self.gridLayout.addWidget(self.pushButton_11, 1, 5, 1, 2)
self.pushButton_12 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_12.setObjectName(_fromUtf8("pushButton_12"))
self.gridLayout.addWidget(self.pushButton_12, 1, 7, 1, 1)
self.pushButton_13 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_13.setObjectName(_fromUtf8("pushButton_13"))
self.gridLayout.addWidget(self.pushButton_13, 1, 8, 1, 2)
self.pushButton_15 = QtGui.QPushButton(self.groupBox_4)
self.pushButton_15.setObjectName(_fromUtf8("pushButton_15"))
self.gridLayout.addWidget(self.pushButton_15, 1, 10, 1, 1)
self.gridLayout_5.addWidget(self.groupBox_4, 3, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 691, 20))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuArquivo = QtGui.QMenu(self.menubar)
self.menuArquivo.setObjectName(_fromUtf8("menuArquivo"))
self.menuFormatar = QtGui.QMenu(self.menubar)
self.menuFormatar.setObjectName(_fromUtf8("menuArquivo"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionSalvar_Como = QtGui.QAction(MainWindow)
self.actionSalvar_Como.setObjectName(_fromUtf8("actionSalvar_Como"))
self.actionSair = QtGui.QAction(MainWindow)
self.actionSair.setObjectName(_fromUtf8("actionSair"))
self.actionFonte = QtGui.QAction(MainWindow)
self.actionFonte.setObjectName(_fromUtf8("actionFonte"))
self.menuArquivo.addAction(self.actionSalvar_Como)
self.menuArquivo.addAction(self.actionSair)
self.menuFormatar.addAction(self.actionFonte)
self.menubar.addAction(self.menuArquivo.menuAction())
self.menubar.addAction(self.menuFormatar.menuAction())
        # Hooking up 'clicked.connect' events for the window buttons
# =========================================================
self.pushButton.clicked.connect(lambda: Handler.InsertTag(u"<b></b>", True))
self.pushButton_2.clicked.connect(lambda: Handler.InsertTag(u"<i></i>", True))
self.pushButton_3.clicked.connect(lambda: Handler.InsertTag(u"<u></u>", True))
self.pushButton_4.clicked.connect(lambda: Handler.InsertTag(u"<del></del>", True))
self.pushButton_5.clicked.connect(lambda: Handler.InsertTag(u"<img class=\"img-responsive\" src=\"INSIRA O CAMINHO DA IMAGEM AQUI\" alt=\"NOME DA IMAGEM\"></img>", True))
self.pushButton_6.clicked.connect(lambda: Handler.InsertTag(u"<a href=\"INSIRA O LINK AQUI\" target=\"_blank\"></a>", True))
self.pushButton_7.clicked.connect(lambda: Handler.InsertTag(u"<p></p>", True))
self.pushButton_8.clicked.connect(lambda: Handler.InsertTag(u"<h1></h1>", True))
self.pushButton_9.clicked.connect(lambda: Handler.InsertTag(u"<h2></h2>", True))
self.pushButton_10.clicked.connect(lambda: Handler.InsertTag(u"<h3></h3>", True))
self.pushButton_11.clicked.connect(lambda: Handler.InsertTag(u"<center></center>", False))
self.pushButton_12.clicked.connect(lambda: Handler.InsertTag(u"<video><source src=\"INSIRA O CAMINHO DO VÍDEO AQUI\" type=\"video/mp4\"></video>", True))
self.pushButton_13.clicked.connect(lambda: Handler.InsertTag(u"<audio><source src=\"INSIRA O CAMINHO DO ÁUDIO AQUI\" type=\"audio/mpeg\"></audio>", True))
self.pushButton_14.clicked.connect(lambda: Handler.InsertTag(u"<br>", True))
self.pushButton_15.clicked.connect(lambda: Handler.InsertTag(u"<hr>", True))
        # Hooking up 'triggered.connect' events for the window menus
# ==========================================================
self.actionSair.triggered.connect(Handler.ExitProgram)
self.actionSalvar_Como.triggered.connect(Handler.GetValuesAndSaveAs)
self.actionFonte.triggered.connect(Handler.ChangeFont)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Criador de Postagens - v%s" % (GlobalVars.Version), None))
self.groupBox.setTitle(_translate("MainWindow", "Título", None))
self.groupBox_2.setTitle(_translate("MainWindow", "Subtítulo", None))
self.groupBox_3.setTitle(_translate("MainWindow", "Postagem", None))
self.groupBox_4.setTitle(_translate("MainWindow", "Ferramentas de Postagem", None))
self.pushButton.setText(_translate("MainWindow", "B", None))
self.pushButton_2.setText(_translate("MainWindow", "i", None))
self.pushButton_3.setText(_translate("MainWindow", "u", None))
self.pushButton_4.setText(_translate("MainWindow", "<del>", None))
self.pushButton_5.setText(_translate("MainWindow", "<img>", None))
self.pushButton_6.setText(_translate("MainWindow", "<a>", None))
self.pushButton_7.setText(_translate("MainWindow", "<p>", None))
self.pushButton_14.setText(_translate("MainWindow", "<br>", None))
self.pushButton_8.setText(_translate("MainWindow", "<h1>", None))
self.pushButton_9.setText(_translate("MainWindow", "<h2>", None))
self.pushButton_10.setText(_translate("MainWindow", "<h3>", None))
self.pushButton_11.setText(_translate("MainWindow", "<center>", None))
self.pushButton_12.setText(_translate("MainWindow", "<video>", None))
self.pushButton_13.setText(_translate("MainWindow", "<audio>", None))
self.pushButton_15.setText(_translate("MainWindow", "<hr>", None))
self.menuArquivo.setTitle(_translate("MainWindow", "Arquivo", None))
self.menuFormatar.setTitle(_translate("MainWindow", "Formatar", None))
self.actionSalvar_Como.setText(_translate("MainWindow", "Salvar Como...", None))
self.actionSair.setText(_translate("MainWindow", "Sair", None))
self.actionFonte.setText(_translate("MainWindow", "Fonte...", None))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
    # The program's methods will be defined by the Handler
# ---------------------------------------------------
Handler = WindowHandler(ui)
    # Setting the program's locale
# ----------------------------
translator = QtCore.QTranslator()
locale = QtCore.QLocale.system().name()
translator.load('qt_%s' % locale,
QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
app.installTranslator(translator)
ui.setupUi(MainWindow, Handler)
MainWindow.show()
sys.exit(app.exec_()) | mit | 7,671,738,107,317,394,000 | 49.851563 | 172 | 0.714361 | false |
davidfischer-ch/django-imagefit | imagefit/models.py | 1 | 3608 | from __future__ import division
from imagefit.conf import ext_to_format, settings
from PIL import Image as PilImage
import mimetypes
try:
import StringIO
except ImportError:
import io as StringIO
import re
import os
class Image(object):
"""
Represents an Image file on the system.
"""
def __init__(self, path, cache=None, cached_name=None, *args, **kwargs):
self.path = path
self.pil = PilImage.open(path)
self.cache = cache
self.cached_name = cached_name
# force RGB
if self.pil.mode not in ('L', 'RGB'):
self.pil = self.pil.convert('RGB')
@property
def mimetype(self):
return mimetypes.guess_type(self.path)[0]
@property
def modified(self):
return os.path.getmtime(self.path)
@property
def is_cached(self):
return self.cache and self.cached_name in self.cache
def resize(self, width=None, height=None):
return self.pil.thumbnail(
(int(width), int(height)),
PilImage.ANTIALIAS)
def crop(self, width=None, height=None):
img_w, img_h = self.pil.size
        # don't crop an image that is smaller than the requested size
if img_w < width and img_h < height:
return self.pil
elif img_w < width:
width = img_w
elif img_h < height:
height = img_h
delta_w = img_w / width
delta_h = img_h / height
delta = delta_w if delta_w < delta_h else delta_h
new_w = img_w / delta
new_h = img_h / delta
self.resize(new_w, new_h)
box_diff = ((new_w - width) / 2, (new_h - height) / 2)
box = (
int(box_diff[0]), int(box_diff[1]), int(new_w - box_diff[0]),
int(new_h - box_diff[1]))
self.pil = self.pil.crop(box)
return self.pil
def render(self):
"""
Renders the file content
"""
if self.is_cached:
return self.cache.get(self.cached_name)
else:
image_str = StringIO.StringIO()
self.pil.save(image_str, ext_to_format(self.cached_name))
return image_str.getvalue()
def save(self):
"""
Save the image to the cache if provided and not cached yet.
"""
if self.cache and not self.is_cached:
image_str = StringIO.StringIO()
self.pil.save(image_str, ext_to_format(self.cached_name))
self.cache.set(self.cached_name, image_str.getvalue())
image_str.close()
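# Hedged usage sketch (not part of the original module). The cache object and
# file path are assumptions, standing in for a Django cache backend and a real
# image on disk:
#
#   from django.core.cache import cache
#   img = Image('/tmp/photo.jpg', cache=cache, cached_name='photo_300x200.jpg')
#   img.crop(width=300, height=200)  # thumbnail to fit, then center-crop
#   img.save()                       # encode and store the bytes in the cache
#   payload = img.render()           # raw bytes, e.g. for an HTTP response body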
class Presets(object):
"""
Representation of an image format storage
"""
@classmethod
def get_all(cls):
"""
Reads presets from settings
"""
return getattr(settings, 'IMAGEFIT_PRESETS', {})
@classmethod
def get(cls, key, to_tuple=False):
"""
Retrieves a specific preset by its name
"""
preset = cls.get_all().get(key, None)
return preset
@classmethod
def has(cls, key):
"""
Checks if a preset exists
"""
return key in cls.get_all()
@classmethod
def from_string(cls, string):
"""
        Converts a '<width>x<height>[,c]' string into a
        {'width': <width>, 'height': <height>, 'crop': <bool>} dict.
        Returns the dict, or None if the string does not match.
"""
        if re.match(r'(\d+)x(\d+),?(\w*)', string):
            sizes = [x for x in re.match(
                r'(\d+)x(\d+)(,?[cC]?)', string).groups()]
return {
'width': int(sizes[0]), 'height': int(sizes[1]),
'crop': bool(sizes[2])}
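# Hedged worked examples of the parser above (values illustrative):
#   Presets.from_string('640x480')    -> {'width': 640, 'height': 480, 'crop': False}
#   Presets.from_string('640x480,c')  -> {'width': 640, 'height': 480, 'crop': True}
#   Presets.from_string('not-a-size') -> None (the regex check falls through)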
| bsd-3-clause | 6,212,976,185,050,799,000 | 26.968992 | 76 | 0.54296 | false |
berth64/modded_modded_1257ad | source/process_presentations.py | 1 | 1453 | import sys
sys.dont_write_bytecode = True
import string
from module_info import *
from module_presentations import *
from ID_meshes import *
from process_common import *
from process_operations import *
# Lav's export_dir tweak
export_dir = '%s/' % export_dir.replace('\\', '/').rstrip('/')
def save_presentations(variable_list,variable_uses,tag_uses,quick_strings):
ofile = open(export_dir + "presentations.txt","w")
ofile.write("presentationsfile version 1\n")
ofile.write(" %d\n"%(len(presentations)))
for presentation in presentations:
ofile.write("prsnt_%s %d %d "%(presentation[0], presentation[1], presentation[2]))
save_simple_triggers(ofile,presentation[3], variable_list,variable_uses,tag_uses,quick_strings)
ofile.write("\n")
ofile.close()
def save_python_header():
file = open("./ID_presentations.py","w")
for i_presentation in xrange(len(presentations)):
file.write("prsnt_%s = %d\n"%(presentations[i_presentation][0],i_presentation))
file.close()
print "Exporting presentations..."
save_python_header()
variable_uses = []
variables = load_variables(export_dir,variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
save_presentations(variables,variable_uses,tag_uses,quick_strings)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir,tag_uses)
save_quick_strings(export_dir,quick_strings)
| agpl-3.0 | 8,558,168,947,293,909,000 | 32.595238 | 99 | 0.714384 | false |
Dinnerbone/mcstatus | setup.py | 1 | 1718 | from setuptools import setup
with open("requirements.txt") as f:
install_requires = f.read().splitlines()
with open("test-requirements.txt") as f:
tests_require = f.read().splitlines()
tests_require.pop(0) # remove '-r requirements.txt' line
setup(
name="mcstatus",
version="6.4.0",
author="Nathan Adams",
author_email="[email protected]",
url="https://pypi.python.org/pypi/mcstatus",
packages=["mcstatus", "mcstatus.protocol", "mcstatus.scripts"],
description="A library to query Minecraft Servers for their status and capabilities.",
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
install_requires=install_requires,
extras_require={
"tests": tests_require,
},
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Games/Entertainment",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Monitoring",
],
entry_points="""
[console_scripts]
mcstatus=mcstatus.scripts.mcstatus:cli
""",
project_urls={
"Source": "https://github.com/Dinnerbone/mcstatus",
},
)
| apache-2.0 | -681,558,182,179,929,600 | 34.791667 | 90 | 0.623981 | false |
NEVERFEAR/Matrix-Arithmetic | tests/TestScale.py | 1 | 1881 | '''
@author: [email protected]
'''
from MatrixArithmetic import *
import unittest
class TestScale(unittest.TestCase):
def setUp(self):
self.A = [
[1,2],
[3,4]
]
self.mA = Matrix(self.A)
def tearDown(self):
del self.A
del self.mA
def testScaleFunctionNonMutate(self):
B = Scale(self.A, 2)
self.assertEqual([[2,4],[6,8]], B)
self.assertEqual([[1,2],[3,4]], self.A) # Check it's not mutated
mB = Scale(self.mA, 2)
self.assertEqual([[2,4],[6,8]], mB)
self.assertEqual([[1,2],[3,4]], self.mA) # Check it's not mutated
def testScaleFunctionMutate(self):
B = Scale(self.A, 2, Mutate = True)
self.assertEqual([[2,4],[6,8]], B)
self.assertEqual([[2,4],[6,8]], self.A) # Check it's mutated
self.assertEqual(id(self.A), id(B)) # Check it's the same instance
mB = Scale(self.mA, 2, Mutate = True)
self.assertEqual([[2,4],[6,8]], mB)
self.assertEqual([[2,4],[6,8]], self.mA) # Check it's mutated
        self.assertEqual(id(self.mA), id(mB))
def testScaleMethod(self):
mB = self.mA.scale(2)
self.assertEqual([[2,4],[6,8]], mB)
self.assertEqual([[1,2],[3,4]], self.mA) # Check it's not mutated
def testScaleOpFloat(self):
mB = self.mA * 2.0
self.assertEqual([[2,4],[6,8]], mB)
self.assertEqual([[1,2],[3,4]], self.mA) # Check it's not mutated
def testScaleOpInt(self):
mB = self.mA * 2
self.assertEqual([[2,4],[6,8]], mB)
self.assertEqual([[1,2],[3,4]], self.mA) # Check it's not mutated
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | -593,897,408,270,513,700 | 31 | 74 | 0.510367 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/email/MIMEAudio.py | 1 | 2545 | # Author: Anthony Baxter
"""Class representing audio/* type MIME documents.
"""
import sndhdr
from cStringIO import StringIO
import MIMEBase
import Errors
import Encoders
_sndhdr_MIMEmap = {'au' : 'basic',
'wav' :'x-wav',
'aiff':'x-aiff',
'aifc':'x-aiff',
}
# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
"""Try to identify a sound file type.
sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
we re-do it here. It would be easier to reverse engineer the Unix 'file'
command and use the standard 'magic' file, as shipped with a modern Unix.
"""
hdr = data[:512]
fakefile = StringIO(hdr)
for testfn in sndhdr.tests:
res = testfn(hdr, fakefile)
if res is not None:
return _sndhdr_MIMEmap.get(res[0])
return None
class MIMEAudio(MIMEBase.MIMEBase):
"""Class for generating audio/* MIME documents."""
def __init__(self, _audiodata, _subtype=None,
_encoder=Encoders.encode_base64, **_params):
"""Create an audio/* type MIME document.
_audiodata is a string containing the raw audio data. If this data
can be decoded by the standard Python `sndhdr' module, then the
subtype will be automatically included in the Content-Type: header.
Otherwise, you can specify the specific audio subtype via the
_subtype parameter. If _subtype is not given, and no subtype can be
guessed, a TypeError is raised.
_encoder is a function which will perform the actual encoding for
transport of the image data. It takes one argument, which is this
Image instance. It should use get_payload() and set_payload() to
change the payload to the encoded form. It should also add any
Content-Transfer-Encoding: or other headers to the message as
necessary. The default encoding is Base64.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type:
header.
"""
if _subtype is None:
_subtype = _whatsnd(_audiodata)
if _subtype is None:
raise TypeError, 'Could not find audio MIME subtype'
MIMEBase.MIMEBase.__init__(self, 'audio', _subtype, **_params)
self.set_payload(_audiodata)
_encoder(self)
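# Hedged usage sketch (Python 2.x era email API; the file name is an
# assumption):
#   fp = open('clip.wav', 'rb')
#   msg = MIMEAudio(fp.read())   # subtype 'x-wav' guessed via sndhdr
#   fp.close()
#   print msg['Content-Type']    # -> audio/x-wav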
| mit | 3,961,440,915,732,128,300 | 33.863014 | 77 | 0.639293 | false |
chehanr/moodlescrapr | moodlescrapr3.py | 1 | 9047 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A simple (improved + 1) ACBT scraper to download course files (by /u/chehanr)."""
import getpass
import os
import urllib
from argparse import ArgumentParser
import requests
from bs4 import BeautifulSoup, SoupStrainer
CWD = os.getcwd()
class Download:
"""Download resource files.
:param username: Username,
:param subject_name: Subject Name,
:param week: Week number
"""
def __init__(self, username, subject_name, week):
self.username = username
self.subject_name = subject_name
self.week = week
self.path = '%s/scrape2/%s/%s/Week %s/' % (
CWD, self.username.upper(), self.subject_name, self.week)
def resource(self, resource_uri, resource_title):
"""Downloading the resource files."""
resource_url = 'https://learning.acbt.lk/moodle/mod/resource/%s' % (
resource_uri)
if urllib.request.getproxies():
os.system('wget --load-cookies "%s/cookies.txt" --content-disposition --show-progress --progress=bar:force -N -c "%s" -P "%s" -e use_proxy=yes -e http_proxy="%s" -e https_proxy="%s"' %
(CWD, resource_url, self.path, urllib.request.getproxies().get('http'), urllib.request.getproxies().get('https')))
else:
os.system('wget --load-cookies "%s/cookies.txt" --content-disposition --show-progress --progress=bar:force -N -c "%s" -P "%s"' %
(CWD, resource_url, self.path))
class Scrape:
"""Initial scrape.
:param session: Current session
"""
def __init__(self, session):
self.session = session
def subjects(self):
"""Returns subject list."""
response = self.session.get('https://learning.acbt.lk/moodle')
strainer = SoupStrainer(
'div', attrs={'class': 'block_course_list sideblock'})
soup = BeautifulSoup(
response.content, 'lxml', parse_only=strainer)
subjects_list = []
for _ in soup.find_all('div', attrs={'class': 'content'}):
for _ in _.find_all('ul', attrs={'class': 'list'}):
for li_subject in _.find_all('li'):
for subject in li_subject.find_all('div', attrs={'class': 'column c1'}):
_subject_name = subject.text
_subject_code = subject.find('a')['title']
subject_url = subject.find('a')['href']
subject_id = subject_url.split('id=', 1)[1]
subject_name = '%s (%s)' % (
_subject_code.upper(), _subject_name)
subjects_list.append(
(subject_name, subject_url, subject_id))
return subjects_list
def resources(self, subject_id):
"""Returns resources list."""
resources_list = []
week = 0
params = {'id': subject_id}
response = self.session.get(
'https://learning.acbt.lk/moodle/mod/resource/index.php', params=params)
strainer = SoupStrainer(
'table', attrs={'class': 'generaltable boxaligncenter'})
soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
for row in soup.find_all('tr'):
week_td = row.find_all('td', attrs={'class': 'cell c0'})
resource_td = row.find_all('td', attrs={'class': 'cell c1'})
for _week in week_td:
try:
week = int(_week.get_text().strip())
                except ValueError:
                    # non-numeric week cell; keep the previous week number
                    pass
for resource in resource_td:
resource_uri = resource.find('a')['href']
resource_title = resource.get_text().strip()
if 'view.php?id=' in resource_uri:
resources_list.append(
(week, resource_uri, resource_title))
return resources_list
def subject_list_display(subjects):
"""Returns the list of subjects."""
_subjects = 'available subjects:\n'
for i, subject in enumerate(subjects):
subject_name, _, _ = subject
_subjects += '%s. %s\n' % (i + 1, subject_name)
return _subjects
def create_cookies_file(session):
"For wget."
moodle_id_expire = None
cookies = session.cookies
for cookie in cookies:
if cookie.name == 'MOODLEID_':
moodle_id_expire = cookie.expires
cookie_dict = cookies.get_dict()
cookie_text = 'learning.acbt.lk\tTRUE\t/\tFALSE\t%s\tMOODLEID_\t%s\nlearning.acbt.lk\tTRUE\t/\tFALSE\t0\tMoodleSessionTest\t%s\nlearning.acbt.lk\tTRUE\t/\tTRUE\t0\tNVT\t%s' % (
moodle_id_expire, cookie_dict.get('MOODLEID_'), cookie_dict.get('MoodleSessionTest'), cookie_dict.get('NVT'))
with open(CWD + '/cookies.txt', 'w') as f:
f.write(cookie_text)
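# Hedged sketch of one line of the Netscape cookie format written above
# (the real file is tab-separated; columns aligned here for readability,
# expiry and value illustrative):
#   domain            subdomains  path  secure  expiry      name       value
#   learning.acbt.lk  TRUE        /     FALSE   1516000000  MOODLEID_  abc123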
def main(username, password, specific_subject, specific_week, list_subjects):
"""Main work."""
if not username:
username = input('moodle username: ')
if not password:
password = getpass.getpass('moodle password (hidden): ')
try:
params = {'username': username, 'password': password}
session = requests.Session()
session.post('https://learning.acbt.lk/user/login',
data=params, proxies=urllib.request.getproxies())
except Exception as err:
print(err)
else:
scrape = Scrape(session)
subjects = scrape.subjects()
create_cookies_file(session)
week_list = []
if specific_week:
week_list = [int(item) for item in specific_week.split(',')]
subject_list = []
if specific_subject:
subject_list = [item.strip().upper()
for item in specific_subject.split(',')]
if list_subjects:
print(subject_list_display(subjects))
else:
def _download_resources(resources, subject_name, week_list=None):
for resource in resources:
week, resource_uri, resource_title = resource
download = Download(username, subject_name, week)
if specific_week is None:
download.resource(resource_uri, resource_title)
else:
if week in week_list:
download.resource(resource_uri, resource_title)
for subject in subjects:
subject_name, _, subject_id = subject
resources = scrape.resources(subject_id)
if subject_list and week_list:
for _subject in subject_list:
if _subject in subject_name.upper():
print('\ndownloading resources from %s in week %s' %
(subject_name, week_list))
_download_resources(
resources, subject_name, week_list)
elif subject_list or week_list:
if subject_list:
for _subject in subject_list:
if _subject in subject_name.upper():
print('\ndownloading all resources from %s' %
(subject_name))
_download_resources(resources, subject_name)
elif week_list:
print('\ndownloading resources from %s in week %s' %
(subject_name, week_list))
_download_resources(
resources, subject_name, week_list)
else:
print('\ndownloading all resources from %s' %
(subject_name))
_download_resources(resources, subject_name)
def arg_parse():
"""Argument parser."""
parser = ArgumentParser(prog='moodlescrapr',
description='ACBT moodle scraper (by chehanr)')
parser.add_argument('-u', '--username', action='store', dest='username',
help='moodle username', required=False)
parser.add_argument('-p', '--password', action='store', dest='password',
help='moodle password', required=False)
parser.add_argument('-s', '--subject', action='store', dest='subject',
help='scrape only specific subject (comma separated)', required=False)
parser.add_argument('-w', '--week', action='store', dest='week',
help='scrape only specific week number (comma separated)', required=False)
parser.add_argument('-l', '--list-subjects', action='store_true', dest='list_subjects',
help='list available subjects', required=False)
results = parser.parse_args()
return results
if __name__ == '__main__':
args = arg_parse()
main(args.username, args.password,
args.subject, args.week, args.list_subjects)
| gpl-3.0 | -6,424,722,299,303,595,000 | 37.828326 | 196 | 0.543053 | false |
jcelliott/catapult | catapult/tests/example_tests/example_test.py | 1 | 1369 | from unittest import TestCase, skip
import time
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class ExampleTest(TestCase):
def test_something(self):
""" A test to check something """
time.sleep(0.5)
self.assertEqual("something", "something")
def test_something_else(self):
time.sleep(0.5)
print("This is some text that shouldn't interfere with the TAP output")
self.assertEqual("something", "else")
def test_number_one(self):
""" Test the number of things is equal to one """
time.sleep(0.5)
log.info("This is a log message that shouldn't interfere with the TAP output")
self.assertTrue(1 == 1)
@skip("this test is pointless")
def test_skipping(self):
""" This is something that should be skipped """
self.assertTrue("the world is flat")
def test_example_1(self):
""" An example test """
time.sleep(0.5)
print("This is some more text that shouldn't interfere with the TAP output")
self.assertTrue(True)
def test_example_2(self):
""" Another example test """
time.sleep(0.5)
log.error("This is another log message that shouldn't interfere with the TAP output")
raise Exception("this test will error")
self.assertTrue(True)
| mit | 8,364,715,891,797,427,000 | 31.595238 | 93 | 0.63477 | false |
kave/deepscrub | core/models.py | 1 | 2346 | from django.db import models
class Price(models.Model):
cost = models.FloatField(blank=False)
promotion = models.CharField(max_length=100, blank=False)
def __unicode__(self):
return '${0}'.format(self.cost)
class Color(models.Model):
WHITE = 1
BLACK = 2
name = models.CharField(max_length=50, blank=False)
type = models.IntegerField(blank=False, default=0, unique=True)
COLORS = {
WHITE: 'White',
BLACK: 'Black',
}
def __unicode__(self):
return self.name
class SpongeType(models.Model):
STRONG = 1
GENTLE = 2
name = models.CharField(max_length=50, blank=False)
type = models.IntegerField(blank=False, unique=True)
TYPES = {
STRONG: 'Strong',
GENTLE: 'Gentle',
}
def __unicode__(self):
return self.name
class Sponge(models.Model):
color = models.ForeignKey(Color)
type = models.ForeignKey(SpongeType)
quantity = models.IntegerField(blank=False, default=0)
price = models.ForeignKey(Price)
img_url = models.URLField(blank=True)
class Meta:
unique_together = ('color', 'type',)
def __unicode__(self):
return '{0}:{1} #{2} {3}'.format(self.color, self.type, self.quantity, self.price)
class Receipt(models.Model):
sponges = models.ManyToManyField(Sponge, related_name='receipt_sponges_set')
name_shipping = models.CharField(max_length=100, blank=False)
street1_shipping = models.CharField(max_length=200, blank=False)
street2_shipping = models.CharField(max_length=200, blank=False)
city_shipping = models.CharField(max_length=50, blank=False)
state_shipping = models.CharField(max_length=50, blank=False)
zipcode_shipping = models.CharField(max_length=20, blank=False)
quantity = models.IntegerField(blank=False)
shipping_cost = models.FloatField(blank=False)
total_cost = models.FloatField(blank=False)
class Order(models.Model):
receipt = models.ForeignKey(Receipt)
class ContactUs(models.Model):
name = models.CharField(max_length=100, blank=False)
email = models.EmailField(blank=False)
message = models.TextField(max_length=500, blank=False)
def __unicode__(self):
return self.message
class Meta:
verbose_name = "Contact Us"
verbose_name_plural = "Contact Us"
| artistic-2.0 | 4,112,472,593,783,800,300 | 26.6 | 93 | 0.665814 | false |
mmromero/dwybss | dwybss/bss/bss.py | 1 | 12622 | '''
Created on 8 Sep 2017
Root class for all the bss methods (scalability)
@author: Miguel Molina Romero, Techical University of Munich
@contact: [email protected]
@license: LPGL
'''
import nibabel as nib
import numpy as np
import dwybss.bss.utils as ut
import os
from joblib import Parallel, delayed
class BSS:
"""Blind Source separation parent class
Functions:
factorize: for general purpose applications of BSS.
fwe: specifically designed for Free Water Elimination.
"""
def __check_data_consistency(self,data):
if data == None:
raise BssException('Missing data.')
if 'dwi' not in data:
raise BssException('Missing diffusion data.')
if 'te' not in data:
raise BssException('Missing TE data.')
if data['dwi'] is None:
raise BssException('Missing diffusion data.')
if data['te'] is None:
raise BssException('Missing TE data.')
if len(data['dwi']) != len(data['te']):
raise BssException('Number of TE values and diffusion files do not match')
def __check_mask_consistency(self, mask):
if mask is None:
raise BssException('Missing mask.')
if mask == []:
raise BssException('Missing mask.')
def __check_out_path_consistency(self, out_path):
if out_path is None:
raise BssException('Missing out_path.')
if not os.path.exists(out_path):
raise BssException('Non existing out_path.')
    def __check_params_consistency(self, params):
if params is None:
raise BssException('Missing parameters.')
if 'max_sources' not in params:
raise BssException('Missing max_sources parameter.')
if params['max_sources'] < 1:
raise BssException('The parameter max_sources must be >= 1.')
self._check_method_params_consistency(params)
def _check_method_params_consistency(self, params):
"""TO BE IMPLEMENTED BY THE SPECIFIC METHOD"""
def factorize(self, data, mask, params, out_path, s_prior = None, t2_bounds = None, run_parallel = True ):
"""Factorizes the data X, into the mixing matrix A and the sources S.
This is a general function that accepts any kind of prior knowledge and constraints.
Usage::
:param data: Dictionary containing the fields 'dwi' and 'TE'. 'dwi' contains a list of nifti files
with the diffusion data to build X. 'TE' is the echo time value at which each dwi was
acquired, the order and number of elements of 'dwi' and 'TE' must match.
:param mask: Path to the mask file.
:param params: Dictionary containing the parameters for the factorization method.
:param out_path: Path where the output files will be created.
:param s_prior: Dictionary with prior knowledge for one or more sources. 'source' is the index of
the column matrix of A associated with the source. 'data' is an array containing
                      the actual information. ``s_prior = {'1': [1, 0.2, 0.4, ...], '3': [1, 0.45, 0.90, ...]}``
        :param t2_bounds: Dictionary containing the bounds for the T2 value of the columns of A:
                      ``t2_bounds = {'1': [0, 0.04], '3': [2000, 2000]}``
:param run_parallel: True to use all the CPUs. False to do not parallelize execution.
        :raise BssException: When there is an error in the input parameters.
:rtype: Dictionary
:return: Path to the Nifti files containing the results: 'sources', 't2s', 'fs', 's0', 'nsources' and 'rel_error'.
"""
self.__check_data_consistency(data)
self.__check_mask_consistency(mask)
        self.__check_params_consistency(params)
self.__check_out_path_consistency(out_path)
results = self._volume_factorization(data, mask, params, out_path, s_prior, t2_bounds, run_parallel)
return results
def _volume_factorization(self, data, mask, params, out_path, s_prior, t2_bounds, run_parallel):
""" Iterates over all the voxels in the volume and performs BSS in each of them.
"""
# Load the mask
msk = nib.load(mask).get_data()
# Load the data
nii = nib.load(data['dwi'][0])
res = np.shape(nii.get_data())
ntes = len(data['te'])
X = np.zeros([res[0], res[1], res[2], ntes, res[3]])
for i in range(ntes):
nii = nib.load(data['dwi'][i])
niidata = nii.get_data()
X[:,:,:,i,:] = niidata
# Lunch BSS over the volume
if run_parallel:
num_cpus = -1
else:
num_cpus = 1
result = OutputResults(res,params['max_sources'], nii.header, out_path)
Parallel(n_jobs=num_cpus, backend='threading')(delayed(self._method_factorization)(X[r,c,s,:,:], data['te'], msk[r,c,s], params, result, r, c, s, s_prior, t2_bounds) for r in range(res[0]) for c in range(res[1]) for s in range(res[2]))
# Save results
return result.save()
def _method_factorization(self, data, tes, mask, params, results, r, c, s, s_prior = None, t2_bounds = None):
"""TO BE IMPLEMENTED BY THE SPECIFIC METHOD"""
def _compute_actual_A(self, X, A, tes, params):
"""Computes T2, f, proton density and sources from factorized A and the measurements X.
"""
max_sources = params['max_sources']
dims = np.shape(A)
t2 = list()
for c in range(dims[1]):
t2.append(ut.t2_from_slope(A[:,c], tes))
t2 = np.round(t2,3)
        # Any T2 < 5 ms is due to errors; we cannot detect it on clinical scanners
t2[t2 < 0.005] = 0
A = np.exp(-tes * (1./(t2 + 1e-8)))
nsources = np.linalg.matrix_rank(A)
if nsources == 0:
return {'t2': np.zeros(max_sources), 'f': np.zeros(max_sources), 's0': 0, 'nsources': nsources,
'sources': np.zeros(np.shape(X)), 'A': np.zeros(dims)}
else:
Xmax = np.max(X, 1)
f = np.linalg.lstsq(A,Xmax[:, None])[0]
f[f < 0] = 0
s0 = np.round(np.sum(f),3)
f = f / s0
f = f.transpose()[0]
f = np.round(f,2)
A = A * f
nsources = np.linalg.matrix_rank(A)
S = np.linalg.lstsq(A,X)[0] / s0
t2[f == 0] = 0
return {'t2': t2, 'f': f, 's0': s0, 'nsources': nsources, 'sources': S, 'A': A}
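    # Hedged note on the signal model implicit above: each column of A is a
    # mono-exponential T2 decay sampled at the echo times, so per voxel the
    # measurements factor as
    #   X[te, dir] ~ s0 * sum_i f_i * exp(-te / T2_i) * S_i[dir]
    # with f_i >= 0 and sum_i f_i = 1, which is what the two lstsq steps
    # recover (first f and s0 from the per-TE maximum signal, then the
    # sources S).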
def fwe(self, data, mask, b_values, out_path):
"""Free-water elimination method.
All the necessary assumptions on tissue relaxation and diffusivity are encoded in this function.
Notice that it always tries to run in parallel.
Usage::
:param data: Dictionary containing the fields 'dwi' and 'TE'. 'dwi' contains a list of nifti files
with the diffusion data to build X. 'TE' is the echo time value at which each dwi was
acquired, the order and number of elements of 'dwi' and 'TE' must match.
:param mask: Path to the mask file.
:param b_values: Path to the *.bval file. Units must be mm^2/s.
:param out_path: Path where the output files will be created.
        :raise BssException: When there is an error in the input parameters.
:rtype: Dictionary
:return: Path to the Nifti files containing the results: 'sources', 't2s', 'fs', 's0', 'nsources' and 'rel_error'.
"""
# Check b values consistency
if b_values == None:
raise BssException("b_values is a mandatory parameter")
if not os.path.exists(b_values):
raise BssException('Wrong path to b_values')
# Read bo
bval = np.fromfile(b_values, float, -1, sep=' ')
if 0 not in bval:
raise BssException('At least one b0 is required')
# Define priors on CSF
Dcsf = 3e-3; # mm^2/s
Scsf = np.exp(-bval * Dcsf)
t2_bounds = {'1': [0, 0.3], '2': [1.5, 2.5]}
s_prior = {'2': Scsf}
params = {'max_sources': 2,'max_iters': 100, 'tolx': 1e-12, 'tolfun': 1e-12}
return self.factorize(data, mask, params, out_path, s_prior, t2_bounds, True)
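    # Hedged usage sketch (paths are assumptions; `data` pairs one DWI volume
    # per echo time, and ConcreteBSS stands for a hypothetical subclass that
    # implements _method_factorization):
    #   solver = ConcreteBSS()
    #   data = {'dwi': ['dwi_te80.nii.gz', 'dwi_te120.nii.gz'],
    #           'te': [0.080, 0.120]}
    #   files = solver.fwe(data, 'mask.nii.gz', 'dwi.bval', '/tmp/out')
    #   # files['f'], files['T2'], ... hold paths to the result NIfTI files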
class BssException(Exception):
def __init__(self, message, errors=0):
# Call the base class constructor with the parameters it needs
super(BssException, self).__init__(message)
# Now for your custom code...
self.errors = errors
class OutputResults():
"""Storage object to keep and save the factorization results.
"""
def __init__(self, res, max_sources, nii_header, out_path):
"""An OutputResult object is defined by the image resolution to be stored and saved,
the maximum number of sources, the voxel dimensions included in the Nifti header,
and the output path for the factorization results that will be stored in Nifti format.
:param res: Four elements list containing the number of rows, columns, slices, and diffusion directions in this order.
:param max_sources: Maximum number of sources as defined in the `param` dictionary
:param nii_header: Nifti header of the one of the `dwi` files containing the voxel dimensions and affine.
:param out_path: Existing directory to save the output Nifti files.
:ivar T2: Matrix of shape (res[0], res[1], res[2], max_sources) that stores the resulting T2 values.
:ivar f: Matrix of shape (res[0], res[1], res[2], max_sources) that stores the resulting f values.
:ivar pd: Matrix of shape (res[0], res[1], res[2]) that stores the resulting proton density value.
:ivar nsources: Matrix of shape (res[0], res[1], res[2]) that stores the resulting number of sources per voxel.
:ivar sources: Matrix of shape (res[0], res[1], res[2], max_sources, diff_directions ) that stores the resulting sources.
:ivar rel_error: Matrix of shape (res[0], res[1], res[2]) that stores the resulting factorization relative error.
"""
self.__max_sources = max_sources
self.__nii_header = nii_header
self.__out_path = out_path
self.T2 = np.zeros([res[0], res[1], res[2], max_sources])
self.f = np.zeros([res[0], res[1], res[2], max_sources])
self.pd = np.zeros([res[0], res[1], res[2]])
self.nsources = np.zeros([res[0], res[1], res[2]])
self.sources = np.zeros([res[0], res[1], res[2], max_sources, res[3]])
self.rel_error = np.zeros([res[0], res[1], res[2]])
def __save_file(self, data, name):
data = np.squeeze(data)
# Check dimensions of the header
dims = np.shape(data)
self.__nii_header.set_data_shape(dims)
# Build NIFTI
nii = nib.Nifti1Image(data.astype(np.float32), self.__nii_header.get_best_affine(), self.__nii_header)
# Save it
fpath = os.path.join(self.__out_path, name + '.nii.gz')
nib.save(nii, fpath)
return fpath
def save(self):
"""Once the instance matrix variables have been filled up. Call save to create the Nifti files in the
`out_path` folder.
:return: Dictionary with the paths and names for the output Nifti files.
"""
T2 = list()
f = list()
sources = list()
S = self.pd[:,:,:, None, None] * (self.f[:,:,:,:, None] * self.sources)
for i in range(self.__max_sources):
T2.append(self.__save_file(self.T2[:,:,:,i],'T2_{}'.format(i)))
f.append(self.__save_file(self.f[:,:,:,i], 'f_{}'.format(i)))
sources.append(self.__save_file(S[:,:,:,i,:],'source_{}'.format(i)))
files = {'T2': T2, 'f': f, 'sources': sources}
files['pd'] = self.__save_file(self.pd, 'pd')
files['nsources'] = self.__save_file(self.nsources, 'nsources')
files['rel_error'] = self.__save_file(self.rel_error, 'rel_error')
return files | lgpl-3.0 | 3,995,142,767,246,093,000 | 41.19398 | 243 | 0.567261 | false |
Milias/ModellingSimulation | Week9/python/plots.py | 1 | 14528 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import sys
from numpy import *
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
color = {20:"r-", 40:"g-", 100:"b-", 30: "k--", 60:"m--", 50:"g--"}
def MagnetizationVsStep(savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return "Error loading: %s" % str(e)
M_l = {}
for i in range(len(filenames)):
M_l[data[i]["Size"]] = [
linspace(0, data[i]["TotalSteps"], data[i]["SavedSteps"]),
abs(array(data[i]["Magnetization"])/data[i]["TotalSize"])
]
for i in M_l:
plt.plot(M_l[i][0], M_l[i][1], "-", label="L = %d" % i, linewidth=1.2)
xlabel = ["Simulation steps", "Simulation steps per lattice point"]
plt.title("Magnetization vs Step\n"+r"$T = %1.2f$, $J = %1.2f$" % (1/data[0]["Beta"], data[0]["J"]))
try:
plt.xlabel(xlabel[data[0]["Metropolis"]])
    except KeyError:
plt.xlabel(xlabel[0])
plt.ylabel("Magnetization per lattice point")
plt.legend(loc=0)
plt.savefig(savefile)
plt.show()
return "Success."
def MagnetizationVsT(start, savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return str(e)
start = int(start)
M_l = {}
for i in range(len(filenames)):
y = average(abs(array(data[i]["Magnetization"])[start:]))/data[i]["TotalSize"]
if not data[i]["Size"] in M_l:
M_l[data[i]["Size"]] = [[],[],[],[]]
if not 1/data[i]["Beta"] in M_l[data[i]["Size"]][0]:
M_l[data[i]["Size"]][0].append(1/data[i]["Beta"])
M_l[data[i]["Size"]][1].append(y)
M_l[data[i]["Size"]][2].append(1)
M_l[data[i]["Size"]][3].append(y**2)
else:
ind = M_l[data[i]["Size"]][0].index(1/data[i]["Beta"])
M_l[data[i]["Size"]][1][ind] += y
M_l[data[i]["Size"]][2][ind] += 1
M_l[data[i]["Size"]][3][ind] += y**2
del y
for i in M_l:
x = array(M_l[i][0])
y = array(M_l[i][1])
N = array(M_l[i][2])
z = array(M_l[i][3])
y = y/N
z = z/N
ind = argsort(x)
x2 = x[ind]
y2 = y[ind]
z2 = z[ind]
M_l[i][0] = x2
M_l[i][1] = y2
M_l[i][3] = sqrt(z2-y2*y2)
for m in M_l:
plt.errorbar(M_l[m][0], M_l[m][1], fmt=color[m], yerr=M_l[m][3], label="L = %d" % m, linewidth=1.2)
#plt.fill_between(M_l[m][0], M_l[m][1]-M_l[m][3], M_l[m][1]+M_l[m][3], interpolate=True)
plt.title("Magnetization vs Temperature")
plt.xlabel(r"Temperature / $T^*$")
plt.ylabel("Magnetization per lattice point")
T_c = linspace(2.2,2.3,5)
beta = linspace(1.76,1.77,5)
nu = linspace(0.99,1.01,5)
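    # Hedged note on the (currently commented-out) scaling collapse below:
    # with the reduced temperature t = (T - T_c)/T_c, the finite-size scaling
    # ansatz
    #   m(t, L) = L^(-beta/nu) * F(t * L^(1/nu))
    # predicts that curves for different L collapse when m * L^(beta/nu) is
    # plotted against t * L^(1/nu). For reference, the exact 2D Ising values
    # are beta = 1/8, nu = 1, T_c ~ 2.269.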
"""
nf = 0
for i in beta:
for j in nu:
for k in T_c:
print(nf)
for m in M_l:
t = (M_l[m][0]-k)/k
#plt.plot(M_l[m][0], M_l[m][1], "-", label="L = %d" % m, linewidth=1.2)
plt.plot(t*m**(1/j), M_l[m][1]*m**(i/j), "-", label="L = %d" % m, linewidth=1.2)
plt.xlim([-10,20])
plt.title("Magnetic susceptibility vs Temperature\n"+r"$\gamma = %1.3f, \nu = %1.3f, T_c = %1.3f$" % (i, j, k))
plt.legend(loc=0, numpoints=1)
plt.savefig("report/graphs/fit-2/fit-%d.png" % nf)
plt.clf()
nf += 1
"""
plt.legend(loc=0,numpoints=1)
plt.savefig(savefile)
plt.show()
return "Success."
def EnergyVsStep(savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return "Error loading: %s" % str(e)
M_l = {}
for i in range(len(filenames)):
M_l[data[i]["Size"]] = [
linspace(0, data[i]["TotalSteps"], data[i]["SavedSteps"]),
array(data[i]["Energy"])/data[i]["TotalSize"]
]
for i in M_l:
plt.plot(M_l[i][0], M_l[i][1], "-", label="L = %d" % i, linewidth=1.2)
xlabel = ["Simulation steps", "Simulation steps per lattice point"]
plt.title("Energy vs Step\n"+r"$T = %1.2f$, $J = %1.2f$" % (1/data[0]["Beta"], data[0]["J"]))
try:
plt.xlabel(xlabel[data[0]["Metropolis"]])
    except KeyError:
plt.xlabel(xlabel[0])
plt.ylabel("Energy per lattice point")
plt.legend(loc=0,numpoints=1)
plt.savefig(savefile)
plt.show()
return "Success."
def EnergyVsT(start, savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return str(e)
start = int(start)
M_l = {}
for i in range(len(filenames)):
y = average(array(data[i]["Energy"])[start:]**2)#/data[i]["TotalSize"]
if not data[i]["Size"] in M_l:
M_l[data[i]["Size"]] = [[],[],[],[]]
if not 1/data[i]["Beta"] in M_l[data[i]["Size"]][0]:
M_l[data[i]["Size"]][0].append(1/data[i]["Beta"])
M_l[data[i]["Size"]][1].append(y)
M_l[data[i]["Size"]][2].append(1)
M_l[data[i]["Size"]][3].append(y**2)
else:
ind = M_l[data[i]["Size"]][0].index(1/data[i]["Beta"])
M_l[data[i]["Size"]][1][ind] += y
M_l[data[i]["Size"]][2][ind] += 1
M_l[data[i]["Size"]][3][ind] += y**2
del y
for i in M_l:
x = array(M_l[i][0])
y = array(M_l[i][1])
N = array(M_l[i][2])
z = array(M_l[i][3])
y = y/N
z = z/N
ind = argsort(x)
x2 = x[ind]
y2 = y[ind]
z2 = z[ind]
M_l[i][0] = x2
M_l[i][1] = y2
M_l[i][3] = sqrt(z2-y2*y2)
plt.xlim([1.5,3.0])
for m in M_l:
plt.errorbar(M_l[m][0], M_l[m][1], fmt=color[m], yerr=M_l[m][3], label="L = %d" % m, linewidth=1.2)
#plt.fill_between(M_l[m][0], M_l[m][1]-M_l[m][3], M_l[m][1]+M_l[m][3], interpolate=True)
plt.title("Energy vs Temperature")
plt.xlabel(r"Temperature / $T^*$")
plt.ylabel("Energy per lattice point")
plt.legend(loc=0, numpoints=1)
plt.savefig(savefile)
plt.show()
return "Success."
def HeatVsT(start, savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return str(e)
start = int(start)
dim = data[0]["Dimensions"]
M_l = {}
for i in range(len(filenames)):
E = array(data[i]["Energy"])[start:]
if not data[i]["Size"] in M_l:
M_l[data[i]["Size"]] = [[],[],[],[]]
y = data[i]["Beta"]**2/data[i]["TotalSize"]*(average(E*E)-average(E)**2)
if not 1/data[i]["Beta"] in M_l[data[i]["Size"]][0]:
M_l[data[i]["Size"]][0].append(1/data[i]["Beta"])
M_l[data[i]["Size"]][1].append(y)
M_l[data[i]["Size"]][2].append(1)
M_l[data[i]["Size"]][3].append(y**2)
else:
ind = M_l[data[i]["Size"]][0].index(1/data[i]["Beta"])
M_l[data[i]["Size"]][1][ind] += y
M_l[data[i]["Size"]][2][ind] += 1
M_l[data[i]["Size"]][3][ind] += y**2
del E
plt.title("Specific heat vs Temperature")
plt.xlabel(r"Temperature / $T^*$")
plt.ylabel("Specific heat / $c_s(T)$")
for i in M_l:
x = array(M_l[i][0])
y = array(M_l[i][1])
N = array(M_l[i][2])
z = array(M_l[i][3])
y = y/N
z = z/N
ind = argsort(x)
x2 = x[ind]
y2 = y[ind]
z2 = z[ind]
M_l[i][0] = x2
M_l[i][1] = y2
M_l[i][3] = sqrt(z2-y2*y2)
plt.xlim(1.5,3.0)
for m in M_l:
plt.errorbar(M_l[m][0], M_l[m][1], fmt=color[m], yerr=M_l[m][3], label="L = %d" % m, linewidth=1.2)
plt.legend(loc=0, numpoints=1)
plt.savefig(savefile)
plt.show()
if dim == 2:
T_c = linspace(2.268,2.3,1)
alpha = linspace(0.2,0.3,1)
nu = linspace(0.995,1.01,1)
else:
T_c = linspace(4.51,4.52,1)
alpha = linspace(0.113,0.115,1)
nu = linspace(0.628,1.01,1)
#"""
nf = 0
for i in alpha:
for j in nu:
for k in T_c:
print(nf)
for m in M_l:
t = (M_l[m][0]-k)/k
#plt.plot(M_l[m][0], M_l[m][1], "-", label="L = %d" % m, linewidth=1.2)
plt.errorbar(t*m**(1/j), M_l[m][1]*m**(-i/j), yerr=M_l[m][3]*m**(-i/j), fmt=color[m], label="L = %d" % m, linewidth=1.2)
plt.title("Specific heat vs Temperature\n"+r"$\alpha = %1.3f, \nu = %1.3f, T_c = %1.3f$" % (i, j, k))
plt.xlabel(r"Temperature / $t L^{\frac{1}{\nu}}$")
plt.ylabel(r"Specific heat / $c_s L^{-\frac{\alpha}{\nu}}$")
plt.legend(loc=0, numpoints=1)
plt.savefig("report/graphs/fit-spec-%d/fit-%d.pdf" % (dim,nf))
plt.show()
nf += 1
#"""
return "Success."
def SuscVsT(start, savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return str(e)
start = int(start)
dim = data[0]["Dimensions"]
M_l = {}
for i in range(len(filenames)):
E = array(data[i]["Magnetization"])[start:]
if not data[i]["Size"] in M_l:
M_l[data[i]["Size"]] = [[],[],[],[]]
y = data[i]["Beta"]/data[i]["TotalSize"]*(average(E*E)-average(abs(E))**2)
if not 1/data[i]["Beta"] in M_l[data[i]["Size"]][0]:
M_l[data[i]["Size"]][0].append(1/data[i]["Beta"])
M_l[data[i]["Size"]][1].append(y)
M_l[data[i]["Size"]][2].append(1)
M_l[data[i]["Size"]][3].append(y**2)
else:
ind = M_l[data[i]["Size"]][0].index(1/data[i]["Beta"])
M_l[data[i]["Size"]][1][ind] += y
M_l[data[i]["Size"]][2][ind] += 1
M_l[data[i]["Size"]][3][ind] += y**2
del E
del y
del data
plt.xlabel(r"Temperature / $T^*$")
plt.ylabel(r"Magnetic susceptibility / $\chi(T)$")
for i in M_l:
x = array(M_l[i][0])
y = array(M_l[i][1])
N = array(M_l[i][2])
z = array(M_l[i][3])
y = y/N
z = z/N
ind = argsort(x)
x2 = x[ind]
y2 = y[ind]
z2 = z[ind]
M_l[i][0] = x2
M_l[i][1] = y2
M_l[i][3] = sqrt(z2-y2*y2)
plt.xlim([4.0,5.5])
for m in M_l:
plt.errorbar(M_l[m][0], M_l[m][1], fmt=color[m], yerr=M_l[m][3], label="L = %d" % m, linewidth=1.2)
#plt.fill_between(M_l[m][0], M_l[m][1]-M_l[m][3], M_l[m][1]+M_l[m][3], interpolate=True)
plt.title("Magnetic susceptibility vs Temperature\n")
plt.legend(loc=0, numpoints=1)
plt.savefig(savefile)
plt.clf()
if dim == 2:
T_c = linspace(2.267,2.27,3)
gamma = linspace(1.74,1.77,3)
nu = linspace(0.98,1.01,3)
else:
T_c = linspace(4.51,4.52,3)
gamma = linspace(1.2368,1.2371,3)
nu = linspace(0.628,0.631,3)
#"""
nf = 0
for i in gamma:
for j in nu:
for k in T_c:
print(nf)
for m in M_l:
t = (M_l[m][0]-k)/k
#plt.plot(M_l[m][0], M_l[m][1], color[m], label="L = %d" % m, linewidth=1.2)
plt.errorbar(t*m**(1/j), M_l[m][1]*m**(-i/j), yerr=M_l[m][3]*m**(-i/j), fmt=color[m], label="L = %d" % m, linewidth=1.2)
plt.xlim([-10,20])
plt.title("Magnetic susceptibility vs Temperature\n"+r"$\gamma = %1.3f, \nu = %1.3f, T_c = %1.3f$" % (i, j, k))
plt.legend(loc=0, numpoints=1)
plt.savefig("report/graphs/fit-susc-%d/fit-%d.pdf" % (dim,nf))
plt.show()
nf += 1
#"""
return "Success."
def AutovsN(name, savefile, filename):
try:
f = open(filename,'r')
data = json.loads(f.read())
f.close()
except Exception as e:
return str(e)
print("T = %f" % (1/data["Beta"]))
auto_fun = abs(array(data["Autocorrelation"][name]))
plt.plot(linspace(0,data["AutoT"][1]-data["AutoT"][0],len(auto_fun)), auto_fun, "r-", linewidth=1.2)
plt.title(r"Autocorrelation function vs Simulation steps")
try:
if data["Metropolis"]:
plt.xlabel("Simulation steps per lattice point")
else:
plt.xlabel("Simulation steps")
    except KeyError:
plt.xlabel("Simulation steps")
plt.savefig(savefile)
plt.show()
return "Success."
def CorrvsT(end, savefile, filenames):
try:
data = []
for filename in filenames:
f = open(filename,'r')
data.append(json.loads(f.read()))
f.close()
except Exception as e:
return str(e)
end = int(end)
M_l = {}
for i in range(len(filenames)):
if data[i]["Size"] == 20: continue
auto_fun = array(data[i]["Autocorrelation"]["M"])
t = linspace(0,data[i]["AutoT"][1]-data[i]["AutoT"][0],auto_fun.size)
y = trapz(auto_fun[:end], t[:end])
if not data[i]["Size"] in M_l:
M_l[data[i]["Size"]] = [[],[],[],[]]
if not 1/data[i]["Beta"] in M_l[data[i]["Size"]][0]:
M_l[data[i]["Size"]][0].append(1/data[i]["Beta"])
M_l[data[i]["Size"]][1].append(y)
M_l[data[i]["Size"]][2].append(1)
M_l[data[i]["Size"]][3].append(y**2)
else:
ind = M_l[data[i]["Size"]][0].index(1/data[i]["Beta"])
M_l[data[i]["Size"]][1][ind] += y
M_l[data[i]["Size"]][2][ind] += 1
M_l[data[i]["Size"]][3][ind] += y**2
del auto_fun
del t
del data
plt.title("Correlation time vs Temperature")
plt.xlabel(r"Temperature / $T^*$")
plt.ylabel(r"Correlation time / $\tau(T)$")
for i in M_l:
x = array(M_l[i][0])
y = array(M_l[i][1])
N = array(M_l[i][2])
z = array(M_l[i][3])
y = y/N
z = z/N
ind = argsort(x)
x2 = x[ind]
y2 = y[ind]
z2 = z[ind]
M_l[i][0] = x2
M_l[i][1] = y2
M_l[i][3] = sqrt(z2-y2*y2)
plt.xlim([1.5,3.0])
for m in M_l:
plt.plot(M_l[m][0], M_l[m][1], color[m], label="L = %d" % m, linewidth=1.2)
#plt.errorbar(M_l[m][0], M_l[m][1], fmt=".", yerr=M_l[m][3], label="L = %d" % m, linewidth=1.2)
plt.fill_between(M_l[m][0], M_l[m][1]-M_l[m][3], M_l[m][1]+M_l[m][3], interpolate=True, color=color[m][0])
plt.legend(loc=0, numpoints=1)
plt.savefig(savefile)
plt.show()
return "Success."
def ParseInput(argv):
if len(argv) > 1:
if argv[1] == "-MvsN":
print(MagnetizationVsStep(sys.argv[2], sys.argv[3:]))
elif argv[1] == "-MvsT":
print(MagnetizationVsT(sys.argv[2], sys.argv[3], sys.argv[4:]))
elif argv[1] == "-EvsN":
print(EnergyVsStep(sys.argv[2], sys.argv[3:]))
elif argv[1] == "-EvsT":
print(EnergyVsT(sys.argv[2], sys.argv[3], sys.argv[4:]))
elif argv[1] == "-XvsT":
print(SuscVsT(sys.argv[2], sys.argv[3], sys.argv[4:]))
elif argv[1] == "-CvsT":
print(HeatVsT(sys.argv[2], sys.argv[3], sys.argv[4:]))
elif argv[1] == "-AvsN":
print(AutovsN(sys.argv[2], sys.argv[3], sys.argv[4]))
elif argv[1] == "-TauvsT":
print(CorrvsT(sys.argv[2], sys.argv[3], sys.argv[4:]))
else:
print("Wrong argument.")
ParseInput(sys.argv)
| mit | -4,814,993,212,101,208,000 | 27.998004 | 130 | 0.52712 | false |
tuxfux-hlp-notes/python-batches | batch-67/14-Logging/seventh.py | 1 | 2186 | #!/usr/bin/python
# l.basicConfig?
# l.Formatter?
# man date or time.strftime()
# subprocess:https://docs.python.org/2/library/subprocess.html
# crontab or scheduler
# shelx
# handlers: https://docs.python.org/2/howto/logging.html#useful-handlers
# lh.SysLogHandler?
import logging
from logging.handlers import SysLogHandler
from subprocess import Popen,PIPE
# l.basicConfig(filename='myapp.log',filemode='a',level=l.DEBUG,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# datefmt='%c')
# create logger
logger = logging.getLogger('disk_monitor') # logger name
logger.setLevel(logging.DEBUG) # filter for logger name.
# create console handler and set level to debug
ch = SysLogHandler(address="/dev/log") # handler
ch.setLevel(logging.DEBUG) # filter for handler.
# create formatter
formatter = logging.Formatter('- %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter) # handler and formatter
# add ch to logger
logger.addHandler(ch) # logger and handler
# Loggers expose the interface that application code directly uses.
# ex: root
# Handlers send the log records (created by loggers) to the appropriate destination.
# ex: filename='myapp.log',filemode='a'
# Filters provide a finer grained facility for determining which log records to output.
# ex: level=l.DEBUG
# Formatters specify the layout of log records in the final output.
# ex: format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c'
if __name__ == '__main__':
#disk_size = int(raw_input("please enter the disk size:"))
p1 = Popen(['df','-h','/'],stdout=PIPE)
p2 = Popen(['tail','-n','1'],stdin=p1.stdout,stdout=PIPE)
disk_size = int(p2.communicate()[0].split()[-2].split('%')[0])
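	# Hedged sketch of what the pipeline above parses; the shell equivalent is
	# `df -h / | tail -n 1`, whose last line looks like (numbers illustrative):
	#   /dev/sda1   50G   20G   28G   42% /
	# split() makes '42%' the [-2] field, and split('%')[0] drops the percent sign.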
if disk_size < 50:
logger.info("Your disk looks healthy at {}.".format(disk_size))
elif disk_size < 70:
logger.warning("seems our disk is incresing {}.Make sure you keep a track".format(disk_size))
elif disk_size < 80:
logger.error("our application is about to choke at {}.".format(disk_size))
elif disk_size < 99:
logger.critical("our application is down - {}".format(disk_size)) | gpl-3.0 | -5,970,782,362,328,364,000 | 34.274194 | 95 | 0.692132 | false |
martysyuk/PY-3-Learning | homeworks/lesson2-5.py | 1 | 1229 | # -*- coding: UTF-8 -*-
# Homework for lesson 2.5
# "Working with folders and paths"
# Completed by Ilya Martysyuk, PY-3
import glob
from os.path import join
def search_in_files(search, files_list):
temp_filtered_list = list()
for index, file in enumerate(files_list):
with open(file, 'r') as work_file:
data = work_file.read().lower()
if search in data:
temp_filtered_list.append(file)
return temp_filtered_list
def print_result(data):
file_count = 0
    print('\nThe search phrase occurs in these files:')
for index, data_stdout in enumerate(data):
print(str(index+1), ')', data_stdout)
file_count += 1
    print('Total files found: {}\n'.format(file_count))
work_dir = ['lesson2-5', 'homework', 'Advanced']
files = glob.glob(join(*work_dir, '*.sql'))
filtered_list = files
while True:
    search_str = input('Enter the search word: ')
if search_str == '':
exit(0)
search_str = search_str.lower()
filtered_list = search_in_files(search_str, filtered_list)
print_result(filtered_list)
| mit | 4,530,041,229,361,432,600 | 25.756098 | 62 | 0.630811 | false |
JorgeDeLosSantos/NanchiPlot | nanchi/uiaux.py | 1 | 25587 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import wx
import wx.html as html
import wx.grid as wxgrid
import wx.lib.floatbar as wxfb
import webbrowser
try:
import uibase as ui
import iodata as io
from _const_ import *
from util import isempty
except:
import nanchi.uibase as ui
import nanchi.iodata as io
from nanchi._const_ import *
from nanchi.util import isempty
class FunctionDialog(wx.Dialog):
def __init__(self,parent,**kwargs):
#_styles = (wx.CLOSE_BOX|wx.CAPTION)
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(200,180))
self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.initCtrls()
self.initSizers()
# Output properties
self.data = ""
self.out_fun = ""
self.out_a = ""
self.out_b = ""
self.out_points = ""
self.Centre(True)
#self.Show()
def initSizers(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.pfunsz = wx.BoxSizer(wx.HORIZONTAL)
self.prangesz = wx.BoxSizer(wx.HORIZONTAL)
self.pointssz = wx.BoxSizer(wx.HORIZONTAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
self.pfunsz.Add(self._fun, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.pfunsz.Add(self.fun, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangesz.Add(self._a, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangesz.Add(self.a, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangesz.Add(self._b, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangesz.Add(self.b, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.pointssz.Add(self._points, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.pointssz.Add(self.points, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
for panel in [self.pfun, self.prange, self.pointssz, self.pbutton]:
self.mainsz.Add(panel, 1, wx.EXPAND)
self.pfun.SetSizer(self.pfunsz)
self.prange.SetSizer(self.prangesz)
self.pbutton.SetSizer(self.pbuttonsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
# Panels
self.pfun = wx.Panel(self, -1)
self.prange = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
# Controls
self._fun = wx.StaticText(self.pfun, -1, u"f(x)", size=(-1,25))
self.fun = wx.TextCtrl(self.pfun, -1, u"15*x^2-x^3", size=(-1,25))
self._a = wx.StaticText(self.prange, -1, u"a", size=(-1,25))
self.a = wx.TextCtrl(self.prange, -1, u"0", size=(50,25))
self._b = wx.StaticText(self.prange, -1, u"b", size=(-1,25))
self.b = wx.TextCtrl(self.prange, -1, u"10", size=(50,25))
self._points = wx.StaticText(self, -1, u"Points", size=(-1,25))
self.points = wx.TextCtrl(self, -1, u"100", size=(80,25))
self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(-1,25))
        self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL, size=(-1,25))
for ctrl in [self._fun,self._a,self._b, self._points]:
ctrl.SetFont(self.LABEL_FONT)
def GetData(self):
self.out_fun = self.fun.GetValue().replace("^","**")
self.out_a = self.a.GetValue()
self.out_b = self.b.GetValue()
self.out_points = self.points.GetValue()
self.data = (self.out_fun, self.out_a, self.out_b, self.out_points)
return self.data
class BivariableFunctionDialog(wx.Dialog):
def __init__(self,parent,**kwargs):
#_styles = (wx.CLOSE_BOX|wx.CAPTION)
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(220,200))
self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.initCtrls()
self.initSizers()
# Output properties
self.data = ""
self.out_fun = ""
self.out_x = ""
self.out_y = ""
self.out_points = ""
self.Centre(True)
#self.Show()
def initSizers(self):
# Sizers
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.pfunsz = wx.BoxSizer(wx.HORIZONTAL)
self.prangexsz = wx.BoxSizer(wx.HORIZONTAL)
self.prangeysz = wx.BoxSizer(wx.HORIZONTAL)
self.pointssz = wx.BoxSizer(wx.HORIZONTAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
# add to sizers
self.pfunsz.Add(self._fun, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.pfunsz.Add(self.fun, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangexsz.Add(self._x1, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangexsz.Add(self.x1, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangexsz.Add(self._x2, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangexsz.Add(self.x2, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangeysz.Add(self._y1, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangeysz.Add(self.y1, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangeysz.Add(self._y2, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.prangeysz.Add(self.y2, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.pointssz.Add(self._points, 1, wx.ALIGN_LEFT|wx.ALL, 5)
self.pointssz.Add(self.points, 4, wx.ALIGN_LEFT|wx.ALL, 5)
self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
        # As above, self.pointssz is a nested sizer rather than a panel.
        for item in [self.pfun, self.prangex, self.prangey, self.pointssz, self.pbutton]:
            self.mainsz.Add(item, 1, wx.EXPAND)
self.pfun.SetSizer(self.pfunsz)
self.prangex.SetSizer(self.prangexsz)
self.prangey.SetSizer(self.prangeysz)
self.pbutton.SetSizer(self.pbuttonsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
self.pfun = wx.Panel(self, -1)
self.prangex = wx.Panel(self, -1)
self.prangey = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
self._fun = wx.StaticText(self.pfun, -1, u"f(x,y)", size=(-1,25))
self.fun = wx.TextCtrl(self.pfun, -1, u"(x*y)/(x^2+y^2)", size=(-1,25))
self._x1 = wx.StaticText(self.prangex, -1, u"x1", size=(-1,25))
self.x1 = wx.TextCtrl(self.prangex, -1, u"-10", size=(50,25))
self._x2 = wx.StaticText(self.prangex, -1, u"x2", size=(-1,25))
self.x2 = wx.TextCtrl(self.prangex, -1, u"10", size=(50,25))
self._y1 = wx.StaticText(self.prangey, -1, u"y1", size=(-1,25))
self.y1 = wx.TextCtrl(self.prangey, -1, u"-10", size=(50,25))
self._y2 = wx.StaticText(self.prangey, -1, u"y2", size=(-1,25))
self.y2 = wx.TextCtrl(self.prangey, -1, u"10", size=(50,25))
self._points = wx.StaticText(self, -1, u"Points", size=(-1,25))
self.points = wx.TextCtrl(self, -1, u"100", size=(80,25))
self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(-1,25))
self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL, size=(-1,25),
style=wx.ID_CANCEL)
for ctrl in [self._fun,self._x1, self._x2, self._y1, self._y2, self._points]:
ctrl.SetFont(self.LABEL_FONT)
def GetData(self):
self.out_fun = self.fun.GetValue().replace("^","**")
self.out_x = [self.x1.GetValue(), self.x2.GetValue()]
self.out_y = [self.y1.GetValue(), self.y2.GetValue()]
self.out_points = self.points.GetValue()
self.data = (self.out_fun, self.out_x, self.out_y, self.out_points)
return self.data
class AboutDialog(wx.Frame):
def __init__(self,parent,*args,**kwargs):
_styles = wx.CAPTION|wx.CLOSE_BOX
wx.Frame.__init__(self,parent=parent,title=NANCHI_MAIN_CAPTION,
size=(350,220), style=_styles)
self.winhtml = HTMLWindow(self)
self.winhtml.LoadPage(PATH_ABOUT_HTML)
self.Centre(True)
self.Show()
class HTMLWindow(html.HtmlWindow):
def __init__(self,parent,**kwargs):
html.HtmlWindow.__init__(self,parent=parent,**kwargs)
def OnLinkClicked(self, link):
webbrowser.open(link.GetHref())
class StatusBar(wx.StatusBar):
def __init__(self,*args,**kwargs):
wx.StatusBar.__init__(self,*args,**kwargs)
class BusyInfo(object):
def __init__(self, msg, parent=None, bgColour="#f0f0f0", fgColour="#8080ff"):
self.frame = _InfoFrame(parent, msg, bgColour, fgColour)
self.frame.Show()
self.frame.Refresh()
self.frame.Update()
def __del__(self):
self.Close()
def Close(self):
"""
Hide and close the busy info box
"""
if self.frame:
self.frame.Hide()
self.frame.Close()
self.frame = None
# Magic methods for using this class as a Context Manager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.Close()
return False
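# A minimal usage sketch for BusyInfo, shown here because the class above
# implements the context-manager protocol. ``do_expensive_work`` is a
# hypothetical callable supplied by the caller; a wx.App must already be
# running.
def _busyinfo_example(do_expensive_work):
    with BusyInfo(u"Working, please wait..."):
        do_expensive_work()  # the frame is closed even if this raises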
class _InfoFrame(wx.Frame):
def __init__(self, parent, msg, bgColour=None, fgColour=None):
wx.Frame.__init__(self, parent, style=wx.BORDER_SIMPLE|wx.FRAME_TOOL_WINDOW|wx.STAY_ON_TOP)
bgColour = bgColour if bgColour is not None else wx.Colour(253, 255, 225)
fgColour = fgColour if fgColour is not None else wx.BLACK
panel = wx.Panel(self)
text = wx.StaticText(panel, -1, msg)
for win in [panel, text]:
win.SetCursor(wx.HOURGLASS_CURSOR)
win.SetBackgroundColour(bgColour)
win.SetForegroundColour(fgColour)
size = text.GetBestSize()
self.SetClientSize((size.width + 60, size.height + 40))
panel.SetSize(self.GetClientSize())
text.Center()
self.Center()
class LogCtrl(wx.TextCtrl):
def __init__(self,parent,**kwargs):
wx.TextCtrl.__init__(self, parent=parent, id=wx.ID_ANY,
style=wx.TE_MULTILINE, **kwargs)
self.font = wx.Font(9, wx.MODERN, wx.NORMAL, wx.BOLD)
self.SetFont(self.font)
self.SetForegroundColour("#ff5050")
def write(self,string):
_nvalue = ">>> %s"%(string)
self.SetValue(_nvalue)
class ImportDialog(wx.Dialog):
def __init__(self,parent,**kwargs):
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(800,500))
self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.VALUE_FONT = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)
self.initCtrls()
self.initSizers()
self.Centre(True)
def initSizers(self):
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.panelsz = wx.BoxSizer(wx.HORIZONTAL)
self.plogsz = wx.BoxSizer(wx.HORIZONTAL)
self.pctrlssz = wx.BoxSizer(wx.VERTICAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
#
self.pctrlssz.Add(self._dlm, 0, wx.EXPAND|wx.ALL, 5)
self.pctrlssz.Add(self.dlm, 0, wx.EXPAND|wx.ALL, 5)
self.pctrlssz.Add(self._skiprows, 0, wx.EXPAND|wx.ALL, 5)
self.pctrlssz.Add(self.skiprows, 0, wx.EXPAND|wx.ALL, 5)
self.pctrlssz.Add(self.preview, 0, wx.ALIGN_CENTRE|wx.ALL, 10)
self.panelsz.Add(self.fctrl, 1, wx.EXPAND|wx.ALL, 5)
self.panelsz.Add(self.pctrls, 1, wx.EXPAND|wx.ALL, 5)
self.panelsz.Add(self.grid, 2, wx.EXPAND|wx.ALL, 5)
self.plogsz.Add(self.log, 1, wx.EXPAND|wx.ALL, 5)
self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
self.mainsz.Add(self.panel, 5, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.plog, 1, wx.EXPAND|wx.ALL, 5)
self.mainsz.Add(self.pbutton, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.pctrls.SetSizer(self.pctrlssz)
self.panel.SetSizer(self.panelsz)
self.plog.SetSizer(self.plogsz)
self.pbutton.SetSizer(self.pbuttonsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
self.panel = wx.Panel(self, -1)
self.plog = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
self.pctrls = wx.Panel(self.panel, -1)
wc = IMPORT_DIALOG_WILDCARD
self.fctrl = wx.FileCtrl(self.panel, -1, wildCard=wc)
self.grid = ui.DataGrid(self.panel, (10,1))
self.grid.SetRowLabelSize(0)
self.grid.SetColLabelSize(0)
        # Configuration controls
self._dlm = wx.StaticText(self.pctrls, -1, u"Delimiter", size=(-1,25))
self.dlm = wx.TextCtrl(self.pctrls, -1, u",", size=(-1,25))
self.dlm.SetFont(self.VALUE_FONT)
self._skiprows = wx.StaticText(self.pctrls, -1, u"Start reading from row...", size=(-1,25))
self.skiprows = wx.SpinCtrl(self.pctrls, -1, min=1, max=100)
self.preview = wx.Button(self.pctrls, -1, u"Preview")
# Set labels
for label in [self._dlm, self._skiprows]:
label.SetFont(self.LABEL_FONT)
label.SetForegroundColour("#556655")
# Log
self.log = LogCtrl(self.plog)
        # Buttons
self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(100,25))
self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL,
size=(100,25), style=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON, self.OnPreview, self.preview)
def OnPreview(self,event):
self.grid.SetArrayData(np.array(([[],[]])))
filename = self.fctrl.GetPath()
delimiter = self.dlm.GetValue()
skipr = self.skiprows.GetValue()
mps = 100 # max preview size
try:
data = io.read_txt(filename, delimiter=delimiter, skiprows=skipr)
            if data is not None:
                # numpy clips slices that run past the array bounds, so a
                # single slice covers every size combination up to the cap
                self.grid.SetArrayData(data[:mps, :mps])
infostr = u"Preview from '%s'\nSize: (%g,%g)"%(
filename,
data.shape[0],
data.shape[1])
self.log.write(infostr)
else:
self.log.write(u"Unable to read data")
except Exception as exc:
self.log.write(exc)
def GetData(self):
filename = self.fctrl.GetPath()
delimiter = self.dlm.GetValue()
skiprows = self.skiprows.GetValue()
try:
data = io.read_txt(filename, delimiter=delimiter, skiprows=skiprows)
            if data is not None:
return data
else:
self.log.write("Unable to read data")
except Exception as exc:
self.log.write(exc)
class TickDialog(wx.Dialog):
def __init__(self,parent,axes,xy,**kwargs):
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(200,400))
#~ self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.xy = xy
self.ctick = axes.get_xticks() if xy=="x" else axes.get_yticks()
self.clabel = axes.get_xticklabels() if xy=="x" else axes.get_yticklabels()
self.axes = axes
self.initCtrls()
self.initSizers()
self.initConfig()
self.Centre(True)
def initCtrls(self):
self.panel = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
self.grid = TickGrid(self.panel)
self.okbt = wx.Button(self.pbutton, wx.ID_OK, u"OK")
self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL, u"Cancel")
def initSizers(self):
self.sz = wx.BoxSizer(wx.VERTICAL)
self.panelsz = wx.BoxSizer(wx.VERTICAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
self.panelsz.Add(self.grid, 1, wx.EXPAND|wx.ALL, 5)
self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 5)
self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 5)
self.sz.Add(self.panel, 8, wx.EXPAND|wx.ALL, 5)
self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 5)
self.SetSizer(self.sz)
self.panel.SetSizer(self.panelsz)
self.pbutton.SetSizer(self.pbuttonsz)
def initConfig(self):
nrows = len(self.ctick)
self.grid.UpdateGridSize(nrows,2)
for ii in range(nrows):
self.grid.SetCellValue(ii,0,str(self.ctick[ii]))
label = self.clabel[ii].get_text()
if not label:
self.grid.SetCellValue(ii,1,str(self.ctick[ii]))
else:
self.grid.SetCellValue(ii,1,label)
def GetData(self):
        data = list(zip(*self.grid.GetArrayData()))
ticks = [float(xt) for xt in data[0]]
labels = data[1]
return ticks,labels
class TickGrid(wxgrid.Grid):
def __init__(self,parent,**kwargs):
wxgrid.Grid.__init__(self,parent=parent,id=-1,**kwargs)
gridsize = (2,2)
rows = int(gridsize[0])
cols = int(gridsize[1])
self.CreateGrid(rows,cols)
self.SetRowLabelSize(0)
self.SetColLabelValue(0, "Tick")
self.SetColLabelValue(1, "TickLabel")
self.Bind(wxgrid.EVT_GRID_CELL_CHANGE, self.OnCellEdit)
self.Bind(wxgrid.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightClick)
def GetArrayData(self):
nrows = self.GetNumberRows()
ncols = self.GetNumberCols()
X = []
for i in range(nrows):
row = []
for j in range(ncols):
cval = self.GetCellValue(i,j)
if not isempty(cval):
row.append(cval)
else:
row.append("")
X.append(row)
return X
def UpdateGridSize(self,rows,cols):
self.ClearGrid()
ccols = self.GetNumberCols()
crows = self.GetNumberRows()
if rows > crows:
self.AppendRows(rows-crows)
elif rows < crows:
self.DeleteRows(0,crows-rows)
if cols > ccols:
self.AppendCols(cols-ccols)
elif cols < ccols:
self.DeleteCols(0,ccols-cols)
def GetSelectedRows(self):
srows = []
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
if not isempty(bottom_right) and not isempty(top_left):
max_row = bottom_right[0][0]
min_row = top_left[0][0]
srows = range(min_row,max_row+1)
return srows
def OnCellEdit(self,event):
pass
def OnRightClick(self,event):
pum = wx.Menu()
addrow = wx.MenuItem(pum, -1, "Add row...")
pum.AppendItem(addrow)
pum.AppendSeparator()
delrows = wx.MenuItem(pum, -1, "Delete rows")
pum.AppendItem(delrows)
pum.AppendSeparator()
clearcell = wx.MenuItem(pum, -1, "Clear cells")
pum.AppendItem(clearcell)
# Binds
pum.Bind(wx.EVT_MENU, self.del_rows, delrows)
pum.Bind(wx.EVT_MENU, self.add_row, addrow)
pum.Bind(wx.EVT_MENU, self.clear_cell, clearcell)
# Show
self.PopupMenu(pum)
pum.Destroy()
def del_rows(self,event):
rows = self.GetSelectedRows()
self.DeleteRows(rows[0],len(rows))
def add_row(self,event):
self.AppendRows(1)
def clear_cell(self,event):
top_left = self.GetSelectionBlockTopLeft()
bottom_right = self.GetSelectionBlockBottomRight()
row_range = range(top_left[0][0], bottom_right[0][0] + 1)
col_range = range(top_left[0][1], bottom_right[0][1] + 1)
for ii in row_range:
for jj in col_range:
self.SetCellValue(ii,jj,u"")
class LineStyleDialog(wx.Dialog):
def __init__(self,parent,**kwargs):
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(200,120))
self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.initCtrls()
self.initSizers()
self.Centre(True)
def initCtrls(self):
self.panel = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
self._label = wx.StaticText(self.panel, -1, u"Select a line style")
self._lstyles = "-|--|:|-.".split("|")
self.options = wx.ComboBox(self.panel, -1, choices=self._lstyles)
self.options.SetFont(self.LABEL_FONT)
self.okbt = wx.Button(self.pbutton, wx.ID_OK)
self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL)
def initSizers(self):
self.sz = wx.BoxSizer(wx.VERTICAL)
self.panelsz = wx.BoxSizer(wx.VERTICAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
self.panelsz.Add(self._label, 1, wx.EXPAND|wx.ALL, 2)
self.panelsz.Add(self.options, 1, wx.EXPAND|wx.ALL, 2)
self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 3)
self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 3)
self.sz.Add(self.panel, 2, wx.EXPAND|wx.ALL, 2)
self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 2)
self.SetSizer(self.sz)
self.panel.SetSizer(self.panelsz)
self.pbutton.SetSizer(self.pbuttonsz)
def GetData(self):
_ls = self.options.GetValue()
        if _ls not in self._lstyles:
_ls = "-"
return _ls
class PieLabelsDialog(wx.Dialog):
def __init__(self,parent,labels,**kwargs):
wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION,
size=(200,300))
self.labels = labels
self.initCtrls()
self.initSizers()
self.initConfig()
self.Centre(True)
def initCtrls(self):
self.panel = wx.Panel(self, -1)
self.pbutton = wx.Panel(self, -1)
self.grid = wxgrid.Grid(self.panel)
self.okbt = wx.Button(self.pbutton, wx.ID_OK)
self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL)
def initSizers(self):
self.sz = wx.BoxSizer(wx.VERTICAL)
self.panelsz = wx.BoxSizer(wx.VERTICAL)
self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL)
self.panelsz.Add(self.grid, 1, wx.EXPAND|wx.ALL, 5)
self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 5)
self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 5)
self.sz.Add(self.panel, 8, wx.EXPAND|wx.ALL, 5)
self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 5)
self.SetSizer(self.sz)
self.panel.SetSizer(self.panelsz)
self.pbutton.SetSizer(self.pbuttonsz)
def initConfig(self):
_rows = len(self.labels)
self.grid.CreateGrid(_rows,1)
self.grid.SetRowLabelSize(0)
self.grid.SetColLabelSize(0)
self.grid.SetColSize(0,160)
for ii in range(_rows):
self.grid.SetCellValue(ii,0,str(self.labels[ii].get_text()))
def GetData(self):
for k,ii in enumerate(range(len(self.labels))):
val = self.grid.GetCellValue(ii,0)
self.labels[k].set_text(val)
return self.labels
def test_about():
app=wx.App()
fr = AboutDialog(None)
app.MainLoop()
def test_function():
app = wx.App()
fr = BivariableFunctionDialog(None)
if fr.ShowModal() == wx.ID_OK:
        print(fr.GetData())
fr.Destroy()
app.MainLoop()
def test_import():
app = wx.App()
fr = ImportDialog(None)
if fr.ShowModal() == wx.ID_OK:
        print(fr.GetData())
fr.Destroy()
app.MainLoop()
def test_tick():
f = plt.figure()
ax = f.add_subplot(111)
app = wx.App()
fr = TickDialog(None,ax,"x")
if fr.ShowModal() == wx.ID_OK:
        print(fr.GetData())
fr.Destroy()
app.MainLoop()
def test_axestoolbar():
app = wx.App()
fr = wx.Frame(None, -1, "Hi !!!", size=(800,600))
sz = wx.BoxSizer(wx.VERTICAL)
    tb = ui.AxesToolbar(fr)  # assumed to live in nanchi.uibase (imported as ui)
sz.Add(tb, 0, wx.EXPAND)
fr.SetSizer(sz)
tb.Realize()
fr.Show()
app.MainLoop()
def test_linestyle():
app = wx.App()
fr = LineStyleDialog(None)
if fr.ShowModal() == wx.ID_OK:
        print(fr.GetData())
fr.Destroy()
app.MainLoop()
def test_pie():
f = plt.figure()
ax = f.add_subplot(111)
_, lbl = ax.pie([1,2,3])
app = wx.App()
fr = PieLabelsDialog(None, lbl)
if fr.ShowModal() == wx.ID_OK:
        print(fr.GetData())
fr.Destroy()
app.MainLoop()
if __name__=='__main__':
test_function()
| mit | 7,859,190,531,030,774,000 | 34.098765 | 99 | 0.559425 | false |
hugs/selenium | firefox/src/py/extensionconnection.py | 1 | 2732 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# Some old JSON libraries don't have "dumps", make sure we have a good one
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
"""This class maintains a connection to the firefox extension.
"""
def __init__(self, timeout=_DEFAULT_TIMEOUT):
RemoteConnection.__init__(
self, "http://localhost:%d/hub" % _DEFAULT_PORT)
LOGGER.debug("extension connection initiated")
self.timeout = timeout
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId':sessionId})
while self.is_connectable():
logging.info("waiting to quit")
time.sleep(1)
def connect(self):
"""Connects to the extension and retrieves the session id."""
return self.execute(Command.NEW_SESSION, {'desiredCapabilities':{
'browserName': 'firefox',
'platform': 'ANY',
'version': '',
'javascriptEnabled': True}})
def connect_and_quit(self):
"""Connects to an running browser and quit immediately."""
self._request('%s/extensions/firefox/quit' % self._url)
def is_connectable(self):
"""Trys to connect to the extension but do not retrieve context."""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("localhost", _DEFAULT_PORT))
socket_.close()
return True
except socket.error:
return False
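def _connection_example():
    """Hedged usage sketch for ExtensionConnection.

    Assumes some launcher code (not shown here) has already started
    Firefox with the WebDriver extension listening on the default port.
    """
    connection = ExtensionConnection(timeout=_DEFAULT_TIMEOUT)
    while not connection.is_connectable():
        time.sleep(0.2)  # poll until the extension accepts connections
    return connection.connect()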
class ExtensionConnectionError(Exception):
"""An internal error occurred int the extension.
Might be caused by bad input or bugs in webdriver
"""
pass
| apache-2.0 | -2,958,718,092,136,119,300 | 33.15 | 75 | 0.675695 | false |
openfisca/openfisca-matplotlib | openfisca_matplotlib/tests/test_inversion.py | 1 | 5053 | # -*- coding: utf-8 -*-
from __future__ import division
import datetime
from openfisca_core import periods
from openfisca_core.tools import assert_near
from openfisca_france import FranceTaxBenefitSystem
from openfisca_france.reforms.inversion_directe_salaires import inversion_directe_salaires
# Assumed import: the plot helpers below call inversion_revenus.build_reform,
# which is expected to live in openfisca_france.reforms in this version.
from openfisca_france.reforms import inversion_revenus
from matplotlib import pyplot
tax_benefit_system = inversion_directe_salaires(FranceTaxBenefitSystem())
def brut_plot(revenu, count = 11, max_revenu = 5000, min_revenu = 0):
year = 2014
period = periods.period("{}-01".format(year))
if revenu == 'chomage':
brut_name = 'chobrut'
imposable_name = 'cho'
inversible_name = 'choi'
elif revenu == 'retraite':
brut_name = 'rstbrut'
imposable_name = 'rst'
inversible_name = 'rsti'
elif revenu == 'salaire':
brut_name = 'salaire_de_base'
imposable_name = 'salaire_imposable'
inversible_name = 'salaire_imposable_pour_inversion'
else:
return
single_entity_kwargs = dict(
axes = [dict(count = count, max = max_revenu, min = min_revenu, name = brut_name)],
period = period,
parent1 = dict(
date_naissance = datetime.date(year - 40, 1, 1),
),
)
print(single_entity_kwargs)
simulation = tax_benefit_system.new_scenario().init_single_entity(
**single_entity_kwargs).new_simulation()
brut = simulation.get_holder(brut_name).array
imposable = simulation.calculate(imposable_name)
inversion_reform = inversion_revenus.build_reform(tax_benefit_system)
inverse_simulation = inversion_reform.new_scenario().init_single_entity(
**single_entity_kwargs).new_simulation(debug = True)
inverse_simulation.get_holder(brut_name).delete_arrays()
inverse_simulation.get_or_new_holder(inversible_name).array = imposable.copy()
new_brut = inverse_simulation.calculate(brut_name)
pyplot.subplot(2, 1, 1)
pyplot.plot(brut, imposable, 'ro', label = "direct")
pyplot.plot(new_brut, imposable, 'db', label = "inversed")
pyplot.legend()
pyplot.subplot(2, 1, 2)
pyplot.plot(brut, new_brut - brut, 'r-')
pyplot.show()
assert_near(new_brut, brut, absolute_error_margin = 1)
def net_plot(revenu, count = 11, max_revenu = 5000, min_revenu = 0):
year = 2014
period = periods.period("{}-01".format(year))
if revenu == 'chomage':
brut_name = 'chobrut'
net_name = 'chonet'
elif revenu == 'retraite':
brut_name = 'rstbrut'
net_name = 'rstnet'
elif revenu == 'salaire':
brut_name = 'salaire_de_base'
net_name = 'salaire_net'
else:
return
single_entity_kwargs = dict(
axes = [[
dict(count = count, max = max_revenu, min = min_revenu, name = brut_name)
]],
period = period,
parent1 = dict(
date_naissance = datetime.date(year - 40, 1, 1),
),
)
simulation = tax_benefit_system.new_scenario().init_single_entity(
**single_entity_kwargs).new_simulation(debug = True)
smic_horaire = simulation.legislation_at(period.start).cotsoc.gen.smic_h_b
smic_mensuel = smic_horaire * 35 * 52 / 12
brut = simulation.get_holder(brut_name).array
    simulation.get_or_new_holder('contrat_de_travail').array = brut < smic_mensuel  # full time or part time
    simulation.get_or_new_holder('heures_remunerees_volume').array = brut // smic_horaire  # full time or part time
net = simulation.calculate(net_name)
inversion_reform = inversion_revenus.build_reform(tax_benefit_system)
inverse_simulation = inversion_reform.new_scenario().init_single_entity(
**single_entity_kwargs).new_simulation(debug = True)
inverse_simulation.get_holder(brut_name).delete_arrays()
inverse_simulation.get_or_new_holder(net_name).array = net.copy()
    inverse_simulation.get_or_new_holder('contrat_de_travail').array = brut < smic_mensuel  # full time or part time
inverse_simulation.get_or_new_holder('heures_remunerees_volume').array = (
(brut // smic_horaire) * (brut < smic_mensuel)
)
print(inverse_simulation.get_or_new_holder('contrat_de_travail').array)
print(inverse_simulation.get_or_new_holder('heures_remunerees_volume').array)
new_brut = inverse_simulation.calculate(brut_name)
pyplot.subplot(2, 1, 1)
pyplot.plot(brut, net, 'ro', label = "direct")
pyplot.plot(new_brut, net, 'db', label = "inversed")
pyplot.legend()
pyplot.subplot(2, 1, 2)
pyplot.plot(brut, new_brut - brut, 'r-')
pyplot.show()
assert_near(new_brut, brut, absolute_error_margin = 1)
if __name__ == '__main__':
    # unemployment: OK
    # brut_plot('chomage', count = 5000)
    # pension: OK (but slow!)
    # brut_plot('retraite', count = 10000)
    brut_plot('salaire', count = 101, max_revenu = 2000, min_revenu = 0)
    # pension: OK
    # net_plot('retraite', count = 100)
    # net_plot('chomage', count = 101, max_revenu = 4000, min_revenu = 0)
| agpl-3.0 | -3,216,029,010,520,110,000 | 35.092857 | 116 | 0.645953 | false |
dag10/EventLCD | website/app/events.py | 1 | 2672 | import requests
from datetime import datetime
import dateutil.parser
import pytz
import urllib
import yaml
import re
class Events:
def __init__(self):
        try:
            with open('app.yaml', 'r') as f:
                self.config = yaml.safe_load(f)
        except (IOError, yaml.YAMLError):
            raise Exception('Missing/invalid config')
def getNextEvents(self, location, num):
"""Gets the next events at a location.
The location is a partial string match.
Args:
location: String of location.
num: Number of events to retrieve.
Returns:
List of events.
"""
events = self.getEvents(20)
# Filter by location
events = filter(
lambda e: 'location' in e and location.lower() in e['location'].lower(),
events)
        # Filter out all-day events (formatEvent converts only timed events'
        # 'start' value into a datetime; all-day events keep their raw dict)
        events = filter(
            lambda e: isinstance(e.get('start'), datetime),
            events)
return events[:num]
def getEvents(self, num):
"""Gets the a list of events from the calendar.
Args:
num: Number of events to retrieve.
Returns:
List of events, or an empty list.
Throws:
Exception if an error occurs.
"""
calendar = self.config['calendar_id']
isotime = datetime.utcnow().replace(
microsecond=0, tzinfo=pytz.utc).isoformat('T')
params = {
'maxResults': num,
'orderBy': 'startTime',
'showDeleted': False,
'singleEvents': True,
'timeMin': isotime,
'fields': 'items(location, start, end, summary, description)',
'key': self.config['token'],
}
url = (
'https://www.googleapis.com/calendar/v3/calendars/{0}/events?{1}'
).format(calendar, urllib.urlencode(params))
response = requests.get(url);
        if response.status_code != 200:
raise Exception('Google %d' % response.status_code)
events = response.json()['items']
events = map(self.formatEvent, events)
return events
def formatEvent(self, e):
"""Formats an event dictionary from Google's format to our own.
Args:
e: Dictionary with Google's event structure.
Returns:
Restructured dictionary.
"""
if 'start' in e and 'dateTime' in e['start']:
e['start'] = dateutil.parser.parse(e['start']['dateTime'])
if 'end' in e and 'dateTime' in e['end']:
e['end'] = dateutil.parser.parse(e['end']['dateTime'])
# If event description contains special tag [lcd: <value>], display that
# value instead of the event summary.
if 'description' in e:
lcdValue = re.findall(r'\[lcd:\s*([^\]]+)\]', e['description'])
if lcdValue:
e['summary'] = lcdValue[0]
return e
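if __name__ == '__main__':
    # Hedged smoke test: needs a valid app.yaml (calendar_id, token) and
    # network access. 'Conference Room' is a placeholder location filter.
    lcd_events = Events()
    for event in lcd_events.getNextEvents('Conference Room', 3):
        print('%s starts at %s' % (event.get('summary'), event.get('start')))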
| mit | -1,262,797,832,440,862,700 | 23.740741 | 80 | 0.611901 | false |
Lattyware/unrest | unrest.py | 1 | 5481 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright © 2011: Lattyware <[email protected]>
This file is part of unrest.
unrest is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
unrest is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
Just as a quick note, please remember if you are reading this, 48 hours. It's
not clean, well documented or particularly well done in general, but that wasn't
the point. Hopefully it's a bit of fun.
"""
import sys
import random
import os
import sf
import scenes
from interface import Cursor
from interface import Stats
class Quit(Exception):
pass
class LeeroyJenkins:
def __init__(self, name):
random.seed()
video_mode = sf.VideoMode(800, 600, 32)
style = sf.Style.CLOSE
self.window_image = sf.Image.load_from_file(bytes(os.path.join("assets", "you.png"), 'UTF-8'))
self.window = sf.RenderWindow(video_mode, bytes(name, 'UTF-8'), style)
self.window.set_icon(40, 40, self.window_image.get_pixels())
self.window.framerate_limit = 60
self.window.show_mouse_cursor = False
self.state = State()
self.scene = scenes.Intro(self.state)
self.cursor = Cursor()
self.pause_img = sf.Texture.load_from_file(bytes(os.path.join("assets", "paused.png"), 'UTF-8'))
self.pause_sprite = sf.Sprite(self.pause_img)
self.pause = False
self.run()
def run(self):
while True:
try:
self.step()
except Quit:
self.window.close()
sys.exit()
def step(self):
if self.scene.finished:
self.scene = self.scene.next
self.handle_events()
if not self.pause:
self.update()
self.render()
def handle_events(self):
for event in self.window.iter_events():
if event.type == sf.Event.CLOSED or (event.type ==
sf.Event.KEY_PRESSED and event.code == sf.Keyboard.ESCAPE):
self.quit()
elif (event.type == sf.Event.KEY_PRESSED and
event.code == sf.Keyboard.P):
self.pause = not self.pause
elif event.type == sf.Event.MOUSE_BUTTON_RELEASED and self.pause:
self.pause = False
elif event.type == sf.Event.LOST_FOCUS:
self.pause = True
elif event.type == sf.Event.GAINED_FOCUS:
self.pause = False
elif not self.pause:
self.scene.handle_event(self.cursor, event)
def render(self):
self.window.clear(sf.Color.BLACK)
self.scene.render(self.window)
self.cursor.draw(self.window)
if self.pause:
self.window.draw(self.pause_sprite)
self.window.display()
def update(self):
self.scene._update(self.cursor, self.window.frame_time)
self.cursor.position = sf.Mouse.get_position(self.window)
def quit(self):
raise Quit
class State:
def __init__(self):
self.achievements = {
"Educated": False,
"Have Home": False,
"Girlfriend": False,
"Father": False,
}
self.issues = {
"Poor": 5,
"Uneducated": 10,
"Stressed": 0,
"Overweight": 5,
"Guilty": 0,
"Bored": 3,
"Lonely": 6,
"Addiction": 1,
}
self.stats = Stats(self)
def _get_total(self):
total = 0
for count in self.issues.values():
total += count
return total
total = property(_get_total)
def increase(self, scene, name, amount=1):
self.issues[name] += amount
self.stats.show(name)
self.check_victory(scene)
def decrease(self, scene, name, amount=1):
self.issues[name] -= amount
if self.issues[name] < 1:
self.issues[name] = 0
self.stats.hide(name)
if self.issues["Uneducated"] == 0:
self.achievements["Educated"] = True
self.check_victory(scene)
def check_victory(self, scene):
if self.total == 0:
scene.finish(scenes.Endgame, True)
else:
if self.issues["Poor"] > 10:
scene.finish(scenes.Endgame, False, ["With no money,", "hopelessness set in,", "and you took your own life."])
elif self.issues["Stressed"] > 10:
scene.finish(scenes.Endgame, False, ["Hopelessly stressed,", "you could no longer take living,", "and took your own life."])
elif self.issues["Overweight"] > 10:
scene.finish(scenes.Endgame, False, ["You died due to a", "hopelessly unhealthy lifestyle."])
elif self.issues["Bored"] > 10:
scene.finish(scenes.Endgame, False, ["Hopelessly bored with life,", "you saw no reason to go on."])
elif self.issues["Lonely"] > 10:
scene.finish(scenes.Endgame, False, ["Hopelessly lonely,", "you saw no reason to go on."])
elif self.issues["Addiction"] > 10:
scene.finish(scenes.Endgame, False, ["Your addictions lead you", "to a to die a hopeless death", "desperately seeking a fix."])
elif self.total > 35:
scene.finish(scenes.Endgame, False, ["Your life became hopeless", "as your issues built up,", "you saw no reason to go on."])
def opacity(obj, change=None, relative=False):
col = obj.color
    if change is None:
return col.a
if relative:
if col.a + change > 255:
col.a = 255
elif col.a + change < 0:
col.a = 0
else:
col.a += change
else:
if change > 255:
col.a = 255
elif change < 0:
col.a = 0
else:
col.a = change
obj.color = col
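def _opacity_example(sprite):
    # Minimal sketch of the helper above; ``sprite`` stands for any object
    # with a ``color`` attribute, e.g. an sf.Sprite. Values clamp to 0..255.
    opacity(sprite, 128)                  # set absolute opacity
    opacity(sprite, -16, relative=True)   # fade out a little
    return opacity(sprite)                # read the current opacity back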
if __name__ == "__main__":
lets_do_this = LeeroyJenkins("Unrest")
| gpl-3.0 | 8,282,483,104,794,054,000 | 27.393782 | 131 | 0.679015 | false |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2021_02_01_preview/operations/_storage_account_credentials_operations.py | 1 | 21775 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountCredentialsOperations(object):
"""StorageAccountCredentialsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2021_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.StorageAccountCredentialList"]
"""Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountCredentialList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2021_02_01_preview.models.StorageAccountCredentialList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredentialList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountCredentialList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageAccountCredential"
"""Gets the properties of the specified storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountCredential, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.StorageAccountCredential
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.StorageAccountCredential"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccountCredential"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(storage_account_credential, 'StorageAccountCredential')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.StorageAccountCredential"]
"""Creates or updates the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param storage_account_credential: The storage account credential.
:type storage_account_credential: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.StorageAccountCredential
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either StorageAccountCredential or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2021_02_01_preview.models.StorageAccountCredential]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
storage_account_credential=storage_account_credential,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_delete(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
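def _example_usage(client):
    # Hedged sketch, not part of the generated surface: ``client`` is assumed
    # to be a DataBoxEdgeManagementClient whose ``storage_account_credentials``
    # attribute is an instance of the operations class above; the device,
    # resource group and credential names are placeholders.
    poller = client.storage_account_credentials.begin_delete(
        device_name="testedgedevice",
        name="sac1",
        resource_group_name="GroupForEdgeAutomation",
    )
    poller.result()  # block until the long-running operation finishes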
| mit | 3,678,572,357,001,889,000 | 48.488636 | 231 | 0.641883 | false |
wontfix-org/wtf | _setup/py3/ext.py | 1 | 7479 | # -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===================
C extension tools
===================
C extension tools.
"""
__author__ = "Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
__test__ = False
from distutils import core as _core
from distutils import errors as _distutils_errors
from distutils import log
import os as _os
import posixpath as _posixpath
import shutil as _shutil
import tempfile as _tempfile
from _setup import commands as _commands
def _install_finalizer(installer):
if installer.without_c_extensions:
installer.distribution.ext_modules = []
def _build_finalizer(builder):
if builder.without_c_extensions:
builder.extensions = []
class Extension(_core.Extension):
"""
Extension with prerequisite check interface
If your check is cacheable (during the setup run), override
`cached_check_prerequisites`, `check_prerequisites` otherwise.
:IVariables:
`cached_check` : ``bool``
The cached check result
"""
cached_check = None
def __init__(self, *args, **kwargs):
""" Initialization """
if 'depends' in kwargs:
self.depends = kwargs['depends'] or []
else:
self.depends = []
_core.Extension.__init__(self, *args, **kwargs)
# add include path
included = _posixpath.join('_setup', 'include')
if included not in self.include_dirs:
self.include_dirs.append(included)
# add cext.h to the dependencies
cext_h = _posixpath.join(included, 'cext.h')
if cext_h not in self.depends:
self.depends.append(cext_h)
_commands.add_option('install_lib', 'without-c-extensions',
help_text='Don\'t install C extensions',
inherit='install',
)
_commands.add_finalizer('install_lib', 'c-extensions',
_install_finalizer
)
_commands.add_option('build_ext', 'without-c-extensions',
help_text='Don\'t build C extensions',
inherit=('build', 'install_lib'),
)
_commands.add_finalizer('build_ext', 'c-extensions', _build_finalizer)
def check_prerequisites(self, build):
"""
Check prerequisites
The check should cover all dependencies needed for the extension to
be built and run. The method can do the following:
- return a false value: the extension will be built
- return a true value: the extension will be skipped. This is useful
for optional extensions
- raise an exception. This is useful for mandatory extensions
If the check result is cacheable (during the setup run), override
`cached_check_prerequisites` instead.
:Parameters:
`build` : `BuildExt`
The extension builder
:Return: Skip the extension?
:Rtype: ``bool``
"""
if self.cached_check is None:
log.debug("PREREQ check for %s" % self.name)
self.cached_check = self.cached_check_prerequisites(build)
else:
log.debug("PREREQ check for %s (cached)" % self.name)
return self.cached_check
def cached_check_prerequisites(self, build):
"""
Check prerequisites
The check should cover all dependencies needed for the extension to
be built and run. The method can do the following:
- return a false value: the extension will be built
- return a true value: the extension will be skipped. This is useful
for optional extensions
- raise an exception. This is useful for mandatory extensions
If the check result is *not* cacheable (during the setup run),
override `check_prerequisites` instead.
:Parameters:
`build` : `BuildExt`
The extension builder
:Return: Skip the extension?
:Rtype: ``bool``
"""
# pylint: disable = W0613
log.debug("Nothing to check for %s!" % self.name)
return False
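class _ExampleOptionalExtension(Extension):
    """Hedged example subclass: skip the extension if a header is missing.

    ``my_header.h`` is a placeholder; a real check would probe whatever
    the extension actually depends on (see `ConfTest` below).
    """
    def cached_check_prerequisites(self, build):
        probe = ConfTest(build, "#include <my_header.h>\nint main(void) {return 0;}\n")
        try:
            return not probe.compile()  # a true value means: skip building
        finally:
            probe.destroy()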
class ConfTest(object):
"""
Single conftest abstraction
:IVariables:
`_tempdir` : ``str``
The tempdir created for this test
`src` : ``str``
Name of the source file
`target` : ``str``
Target filename
`compiler` : ``CCompiler``
compiler instance
`obj` : ``list``
List of object filenames (``[str, ...]``)
"""
_tempdir = None
def __init__(self, build, source):
"""
Initialization
:Parameters:
`build` : ``distuils.command.build_ext.build_ext``
builder instance
`source` : ``str``
Source of the file to compile
"""
self._tempdir = tempdir = _tempfile.mkdtemp()
src = _os.path.join(tempdir, 'conftest.c')
fp = open(src, 'w')
try:
fp.write(source)
finally:
fp.close()
self.src = src
self.compiler = compiler = build.compiler
self.target = _os.path.join(tempdir, 'conftest')
self.obj = compiler.object_filenames([src], output_dir=tempdir)
def __del__(self):
""" Destruction """
self.destroy()
def destroy(self):
""" Destroy the conftest leftovers on disk """
tempdir, self._tempdir = self._tempdir, None
if tempdir is not None:
_shutil.rmtree(tempdir)
def compile(self, **kwargs):
"""
Compile the conftest
:Parameters:
`kwargs` : ``dict``
Optional keyword parameters for the compiler call
:Return: Was the compilation successful?
:Rtype: ``bool``
"""
kwargs['output_dir'] = self._tempdir
try:
self.compiler.compile([self.src], **kwargs)
except _distutils_errors.CompileError:
return False
return True
def link(self, **kwargs):
r"""
Link the conftest
Before you can link the conftest objects they need to be `compile`\d.
:Parameters:
`kwargs` : ``dict``
Optional keyword parameters for the linker call
:Return: Was the linking successful?
:Rtype: ``bool``
"""
try:
self.compiler.link_executable(self.obj, self.target, **kwargs)
except _distutils_errors.LinkError:
return False
return True
def pipe(self, mode="r"):
r"""
Execute the conftest binary and connect to it using a pipe
Before you can pipe to or from the conftest binary it needs to
be `link`\ed.
:Parameters:
`mode` : ``str``
Pipe mode - r/w
:Return: The open pipe
:Rtype: ``file``
"""
return _os.popen(self.compiler.executable_filename(self.target), mode)
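def _conftest_example(build):
    # Hedged sketch of the compile/link/pipe workflow described in the
    # docstrings above; ``build`` is the build_ext command instance and the
    # C snippet is a placeholder probe.
    test = ConfTest(build, '#include <stdio.h>\n'
                           'int main(void) {puts("ok"); return 0;}\n')
    try:
        if not (test.compile() and test.link()):
            return None
        fp = test.pipe("r")
        try:
            return fp.read().strip()
        finally:
            fp.close()
    finally:
        test.destroy()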
| apache-2.0 | 1,383,966,553,002,570,800 | 28.561265 | 78 | 0.594732 | false |
xZise/pywikibot-core | scripts/unlink.py | 1 | 7673 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot unlinks a page on every page that links to it.
This script understands this command-line argument:
-namespace:n Number of namespace to process. The parameter can be used
multiple times. It works in combination with all other
parameters, except for the -start parameter. If you e.g.
want to iterate over all user pages starting at User:M, use
-start:User:M.
Any other parameter will be regarded as the title of the page
that should be unlinked.
Example:
python unlink.py "Foo bar" -namespace:0 -namespace:6
Removes links to the page [[Foo bar]] in articles and image descriptions.
"""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import re
import pywikibot
from pywikibot.editor import TextEditor
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
class UnlinkBot(SingleSiteBot):
"""Page unlinking bot."""
def __init__(self, pageToUnlink, **kwargs):
"""Initialize a UnlinkBot instance with the given page to unlink."""
self.availableOptions.update({
'namespaces': [],
# Which namespaces should be processed?
# default to [] which means all namespaces will be processed
})
super(UnlinkBot, self).__init__(site=pageToUnlink.site, **kwargs)
self.pageToUnlink = pageToUnlink
linktrail = self.pageToUnlink.site.linktrail()
self.generator = pageToUnlink.getReferences(
namespaces=self.getOption('namespaces'), content=True)
# The regular expression which finds links. Results consist of four
# groups:
#
# group title is the target page title, that is, everything
# before | or ].
#
# group section is the page section.
# It'll include the # to make life easier for us.
#
# group label is the alternative link title, that's everything
# between | and ].
#
# group linktrail is the link trail, that's letters after ]] which are
# part of the word.
# note that the definition of 'letter' varies from language to language.
self.linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?(\|(?P<label>[^\]]*))?\]\](?P<linktrail>%s)'
% linktrail)
self.comment = i18n.twtranslate(self.pageToUnlink.site, 'unlink-unlinking',
self.pageToUnlink.title())
def handleNextLink(self, text, match, context=100):
"""
Return a tuple (text, jumpToBeginning).
text is the unicode string after the current link has been processed.
jumpToBeginning is a boolean which specifies if the cursor position
should be reset to 0. This is required after the user has edited the
article.
"""
# ignore interwiki links and links to sections of the same page as well
# as section links
if not match.group('title') \
or self.pageToUnlink.site.isInterwikiLink(match.group('title')) \
or match.group('section'):
return text, False
linkedPage = pywikibot.Page(self.pageToUnlink.site,
match.group('title'))
# Check whether the link found is to the current page itself.
if linkedPage != self.pageToUnlink:
# not a self-link
return text, False
else:
# at the beginning of the link, start red color.
# at the end of the link, reset the color to default
if self.getOption('always'):
choice = 'a'
else:
pywikibot.output(
text[max(0, match.start() - context):match.start()] +
'\03{lightred}' + text[match.start():match.end()] +
'\03{default}' + text[match.end():match.end() + context])
choice = pywikibot.input_choice(
u'\nWhat shall be done with this link?\n',
[('unlink', 'u'), ('skip', 's'), ('edit', 'e'),
('more context', 'm'), ('unlink all', 'a')], 'u')
pywikibot.output(u'')
if choice == 's':
# skip this link
return text, False
elif choice == 'e':
editor = TextEditor()
newText = editor.edit(text, jumpIndex=match.start())
# if user didn't press Cancel
if newText:
return newText, True
else:
return text, True
elif choice == 'm':
# show more context by recursive self-call
return self.handleNextLink(text, match,
context=context + 100)
elif choice == 'a':
self.options['always'] = True
new = match.group('label') or match.group('title')
new += match.group('linktrail')
return text[:match.start()] + new + text[match.end():], False
def treat(self, page):
"""Remove links pointing to the configured page from the given page."""
self.current_page = page
try:
oldText = page.get()
text = oldText
curpos = 0
while curpos < len(text):
match = self.linkR.search(text, pos=curpos)
if not match:
break
# Make sure that next time around we will not find this same
# hit.
curpos = match.start() + 1
text, jumpToBeginning = self.handleNextLink(text, match)
if jumpToBeginning:
curpos = 0
if oldText == text:
pywikibot.output(u'No changes necessary.')
else:
pywikibot.showDiff(oldText, text)
page.text = text
page.save(self.comment)
except pywikibot.NoPage:
pywikibot.output(u"Page %s does not exist?!"
% page.title(asLink=True))
except pywikibot.IsRedirectPage:
pywikibot.output(u"Page %s is a redirect; skipping."
% page.title(asLink=True))
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked?!" % page.title(asLink=True))
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# This temporary string is used to read the title
# of the page that should be unlinked.
page_title = None
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-namespace:'):
if 'namespaces' not in options:
options['namespaces'] = []
try:
options['namespaces'].append(int(arg[11:]))
except ValueError:
options['namespaces'].append(arg[11:])
elif arg == '-always':
options['always'] = True
else:
page_title = arg
if page_title:
page = pywikibot.Page(pywikibot.Site(), page_title)
bot = UnlinkBot(page, **options)
bot.run()
else:
pywikibot.showHelp()
if __name__ == "__main__":
main()
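# Illustration (not executed) of how the linkR pattern built in
# UnlinkBot.__init__ decomposes a wikilink; the linktrail r'[a-z]*' is
# just an assumed example value.
#
#   linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?'
#                      r'(\|(?P<label>[^\]]*))?\]\](?P<linktrail>[a-z]*)')
#   m = linkR.search(u'See [[Foo bar|label]]s here.')
#   # m.group('title') == 'Foo bar'; m.group('label') == 'label'
#   # m.group('linktrail') == 's'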
| mit | 3,933,467,441,758,765,600 | 36.612745 | 125 | 0.545158 | false |
jmbeuken/abinit | tests/v7/__init__.py | 1 | 1266 | """Global variables associated to the test suite."""
#: List of CPP variables that should be defined in config.h in order to enable this suite.
need_cpp_vars = [
]
#: List of keywords that are automatically added to all the tests of this suite.
keywords = [
]
#: List of input files
inp_files = [
"t01.in",
"t02.in",
"t03.in",
"t04.in",
"t05.in",
"t06.in",
"t07.in",
"t08.in",
"t09.in",
"t10.in",
"t11.in",
"t12.in",
"t13.in",
"t14.in",
"t15.in",
"t16.in",
"t17.in",
"t21.in",
"t22.in",
"t23.in",
"t24.in",
"t25.in",
"t26.in",
"t27.in",
"t28.in",
"t29.in",
"t30.in",
"t31.in",
"t32.in",
"t35.in",
"t36.in",
"t41.in",
"t42.in",
"t43.in",
#"t44.in", # Will be added
"t45.in",
"t46.in",
"t47.in",
"t48.in",
"t50.in",
"t51.in",
"t52.in",
"t53.in",
"t54.in",
"t55.in",
#"t56.in", # To be activated when python scripts can be tested.
"t57.in",
"t58.in",
"t59.in",
"t60.in",
"t61.in",
"t62.in",
"t63.in",
"t64.in",
"t65.in",
"t66.in",
"t68.in",
"t69.in",
"t70.in",
"t71.in",
"t72.in",
"t80.in",
"t81.in",
"t82.in",
"t83.in",
#"t84.in", # To be activated when python scripts can be tested
"t85.in",
"t86.in",
"t87.in",
"t88.in",
"t89.in",
"t90.in",
"t91.in",
"t92.in",
"t93.in",
"t94.in",
"t95.in",
"t96.in",
"t97.in",
"t98.in",
"t99.in",
]
| gpl-3.0 | -1,178,226,654,721,699,300 | 12.468085 | 90 | 0.563191 | false |
nemerle/reko | src/tools/Dll2Lib.py | 3 | 1769 | TOOLCHAIN = "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\amd64\\"
import os
import platform
import subprocess
import sys
def isCygwin():
    return platform.system().upper().startswith("CYGWIN")
def getNativePath(filePath):
    # Convert a POSIX path to its Windows form when running under Cygwin.
    if isCygwin():
        return subprocess.check_output(["cygpath", "-w", filePath],
                                       universal_newlines=True).rstrip()
    return filePath
def getLocalPath(filePath):
    # Convert a Windows path to its POSIX form when running under Cygwin.
    if isCygwin():
        return subprocess.check_output(["cygpath", "-u", filePath],
                                       universal_newlines=True).rstrip()
    return filePath
localToolchain = getLocalPath(TOOLCHAIN)
print("TOOLCHAIN @ %s" % localToolchain)
inDll = sys.argv[1]
outBase = os.path.join(os.getcwd(), os.path.splitext(os.path.basename(inDll))[0])
outDef = outBase + ".def"
outLib = outBase + ".lib"
nativeDll = getNativePath(inDll)
nativeDef = getNativePath(outDef)
nativeLib = getNativePath(outLib)
# Dump the DLL's export table into the raw .def listing.
subprocess.check_call([os.path.join(localToolchain, "dumpbin.exe"),
                       "/EXPORTS", "/OUT:" + nativeDef, nativeDll])
found = False
with open(outDef) as _def, open(outBase + "_fixed.def", "w") as newdef:
    for line in _def:
        line = line.strip()
        if not line:
            continue
        if not found and line.startswith("ordinal"):
            # Header row of the export table: emit the .def preamble.
            found = True
            libName = os.path.basename(outBase)
            newdef.write("LIBRARY\t%s\r\n" % libName)
            newdef.write("EXPORTS\r\n")
            continue
        if found:
            parts = line.split()
            if len(parts) < 4 or not parts[0].isdigit():
                sys.stderr.write("Skip '%s'\n" % line)
                continue
            if parts[2] == "[NONAME]":
                parts[2] = parts[2][1:-1]  # strip the surrounding brackets
            newdef.write("%s @ %s %s\r\n" % (parts[3], parts[0], parts[2]))
nativeDef = getNativePath(outBase + "_fixed.def")
# Build the import library from the fixed .def file.
subprocess.check_call([os.path.join(localToolchain, "lib.exe"),
                       "/DEF:" + nativeDef, "/OUT:" + nativeLib])
| gpl-2.0 | -3,016,731,800,723,802,600 | 28.483333 | 85 | 0.670435 | false |
mdj2/django | django/views/generic/edit.py | 2 | 8429 | import warnings
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(ContextMixin):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial.copy()
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""
If the form is invalid, re-render the context data with the
data-filled form and errors.
"""
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view.
"""
if self.form_class:
return self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
fields = getattr(self, 'fields', None)
if fields is None:
warnings.warn("Using ModelFormMixin (base class of %s) without "
"the 'fields' attribute is deprecated." % self.__class__.__name__,
PendingDeprecationWarning)
return model_forms.modelform_factory(model, fields=fields)
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
"""
Returns the supplied URL.
"""
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
class ProcessFormView(View):
"""
A mixin that renders a form on GET and processes it on POST.
"""
def get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""
A base view for displaying a form
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating an new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template.
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
return HttpResponseRedirect(success_url)
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url % self.object.__dict__
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
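# Usage sketch (illustrative; ``Article`` is a hypothetical model):
#
#   class ArticleCreateView(CreateView):
#       model = Article
#       fields = ['title', 'body']
#       success_url = '/articles/'
#
# GET renders a blank ModelForm via ProcessFormView.get(); a valid POST
# saves the instance in ModelFormMixin.form_valid() and then redirects
# to get_success_url().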
| bsd-3-clause | -5,578,284,978,364,074,000 | 30.334572 | 96 | 0.606596 | false |
Samweli/inasafe | safe/impact_functions/loader.py | 1 | 6064 | # coding=utf-8
# Earthquake
from safe.impact_functions.earthquake.earthquake_building\
.impact_function import EarthquakeBuildingFunction
from safe.impact_functions.earthquake.itb_earthquake_fatality_model\
.impact_function import ITBFatalityFunction
from safe.impact_functions.earthquake.pager_earthquake_fatality_model\
.impact_function import PAGFatalityFunction
from safe.impact_functions.earthquake.itb_bayesian_earthquake_fatality_model\
.impact_function import ITBBayesianFatalityFunction
# Generic
from safe.impact_functions.generic.classified_raster_building\
.impact_function import ClassifiedRasterHazardBuildingFunction
from safe.impact_functions.generic.classified_polygon_population\
.impact_function import ClassifiedPolygonHazardPopulationFunction
from safe.impact_functions.generic.classified_raster_population\
.impact_function import ClassifiedRasterHazardPopulationFunction
from safe.impact_functions.generic.continuous_hazard_population\
.impact_function import ContinuousHazardPopulationFunction
from safe.impact_functions.generic.classified_polygon_building\
.impact_function import ClassifiedPolygonHazardBuildingFunction
from safe.impact_functions.generic.classified_polygon_people\
.impact_function import ClassifiedPolygonHazardPolygonPeopleFunction
from safe.impact_functions.generic.classified_polygon_landcover\
.impact_function import ClassifiedPolygonHazardLandCoverFunction
# Inundation
from safe.impact_functions.inundation.flood_raster_osm_building_impact\
.impact_function import FloodRasterBuildingFunction
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
from safe.impact_functions.inundation.flood_raster_road\
.impact_function import FloodRasterRoadsFunction
from safe.impact_functions.inundation.flood_vector_building_impact\
.impact_function import FloodPolygonBuildingFunction
from safe.impact_functions.inundation.flood_polygon_roads\
.impact_function import FloodPolygonRoadsFunction
from safe.impact_functions.inundation.flood_raster_population.impact_function\
import FloodEvacuationRasterHazardFunction
from safe.impact_functions.inundation.flood_polygon_population\
.impact_function import FloodEvacuationVectorHazardFunction
from safe.impact_functions.inundation\
.tsunami_population_evacuation_raster.impact_function import \
TsunamiEvacuationFunction
from safe.impact_functions.inundation.tsunami_raster_building.impact_function \
import TsunamiRasterBuildingFunction
from safe.impact_functions.inundation.tsunami_raster_road.impact_function \
import TsunamiRasterRoadsFunction
from safe.impact_functions.inundation.tsunami_raster_landcover.impact_function\
import TsunamiRasterLandcoverFunction
# Volcanic
from safe.impact_functions.volcanic.volcano_point_building.impact_function \
import VolcanoPointBuildingFunction
from safe.impact_functions.volcanic.volcano_polygon_building.impact_function \
import VolcanoPolygonBuildingFunction
from safe.impact_functions.volcanic.volcano_polygon_population\
.impact_function import VolcanoPolygonPopulationFunction
from safe.impact_functions.volcanic.volcano_point_population\
.impact_function import VolcanoPointPopulationFunction
# Volcanic Ash
from safe.impact_functions.ash.ash_raster_landcover.impact_function import \
AshRasterLandCoverFunction
from safe.impact_functions.ash.ash_raster_population.impact_function import \
AshRasterPopulationFunction
from safe.impact_functions.ash.ash_raster_places.impact_function import \
AshRasterPlacesFunction
__author__ = 'Rizky Maulana Nugraha <[email protected]>'
__date__ = '5/25/16'
def register_impact_functions():
"""Register all the impact functions available."""
impact_function_registry = ImpactFunctionManager().registry
# Earthquake
impact_function_registry.register(EarthquakeBuildingFunction)
impact_function_registry.register(ITBFatalityFunction)
impact_function_registry.register(PAGFatalityFunction)
# Added in 3.3
impact_function_registry.register(ITBBayesianFatalityFunction)
# Generic IF's
impact_function_registry.register(ClassifiedRasterHazardBuildingFunction)
impact_function_registry.register(ClassifiedRasterHazardPopulationFunction)
impact_function_registry.register(ContinuousHazardPopulationFunction)
impact_function_registry.register(
ClassifiedPolygonHazardPopulationFunction)
impact_function_registry.register(ClassifiedPolygonHazardBuildingFunction)
# Added in 3.3
impact_function_registry.register(
ClassifiedPolygonHazardPolygonPeopleFunction)
# Added in 3.4
impact_function_registry.register(ClassifiedPolygonHazardLandCoverFunction)
# Inundation IF's
impact_function_registry.register(FloodPolygonBuildingFunction)
impact_function_registry.register(FloodPolygonRoadsFunction)
impact_function_registry.register(FloodEvacuationVectorHazardFunction)
impact_function_registry.register(FloodEvacuationRasterHazardFunction)
impact_function_registry.register(FloodRasterBuildingFunction)
impact_function_registry.register(FloodRasterRoadsFunction)
impact_function_registry.register(TsunamiEvacuationFunction)
# Added in 3.3
impact_function_registry.register(TsunamiRasterBuildingFunction)
impact_function_registry.register(TsunamiRasterRoadsFunction)
# Added in 3.4
impact_function_registry.register(TsunamiRasterLandcoverFunction)
# Volcanic IF's
impact_function_registry.register(VolcanoPointBuildingFunction)
impact_function_registry.register(VolcanoPolygonBuildingFunction)
impact_function_registry.register(VolcanoPointPopulationFunction)
impact_function_registry.register(VolcanoPolygonPopulationFunction)
# Volcanic Ash IF's
# Added in 3.4
impact_function_registry.register(AshRasterLandCoverFunction)
# Added in 3.5
impact_function_registry.register(AshRasterPopulationFunction)
impact_function_registry.register(AshRasterPlacesFunction)
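# Usage sketch: call once at startup so the registry consulted by
# ImpactFunctionManager is populated (anything beyond the ``registry``
# attribute used above is an assumption):
#
#   register_impact_functions()
#   registry = ImpactFunctionManager().registry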
| gpl-3.0 | -4,222,240,589,647,584,000 | 47.512 | 79 | 0.828826 | false |
yaybu/touchdown | touchdown/goals/refresh.py | 1 | 1493 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import errors
from touchdown.core.goals import Goal, register
class Refresh(Goal):
""" Replace a configuration variable with its default setting """
name = "refresh"
mutator = False
def get_plan_class(self, resource):
plan_class = resource.meta.get_plan("refresh")
if not plan_class:
plan_class = resource.meta.get_plan("describe")
if not plan_class:
plan_class = resource.meta.get_plan("null")
return plan_class
@classmethod
def setup_argparse(cls, parser):
parser.add_argument(
"name", metavar="NAME", type=str, help="The setting to refresh"
)
def execute(self, name):
settings = self.collect_as_dict("refresh")
if name not in settings:
raise errors.Error('No such setting "{}"'.format(name))
settings[name].execute()
register(Refresh)
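# Usage sketch (assuming the standard touchdown CLI dispatches goals by
# name): ``touchdown refresh some_setting`` looks up "some_setting" in
# the plans collected by collect_as_dict("refresh") and calls execute()
# on it, raising errors.Error when the setting does not exist.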
| apache-2.0 | 3,562,553,157,625,070,600 | 30.765957 | 75 | 0.679169 | false |
spiralray/kondo | scripts/kondo.py | 1 | 1370 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015/02/26
@author: spiralray
'''
import sys
import yaml
import roslib
roslib.load_manifest("kondo");
import rospy
from _servo import servo
import serial
import struct
import time
import threading
import math
class kondo(serial.Serial):
def setAngle(self, id, value):
self.cmd = chr(0x80 | id) + chr(int(value)/128) + chr(int(value)%128)
self.write( self.cmd )
self.read(3)
self.rec = self.read(3)
return ord(self.rec[1])*0x80 + ord(self.rec[2])
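# Worked example of the command framing above: for id=1 and the centre
# position 7500, the three bytes are 0x80|1, 7500/128 and 7500%128
# (Python 2 integer division), and the reply position is rebuilt as
# rec[1]*0x80 + rec[2].
#
#   >>> 0x80 | 1, 7500 / 128, 7500 % 128
#   (129, 58, 76)
#   >>> 58 * 0x80 + 76
#   7500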
def callback(msg):
pubmsg = servo()
pubmsg.stamp = rospy.get_rostime()
pubmsg.id = msg.id
pubmsg.angle = (k.setAngle(msg.id, 7500 + msg.angle/0.000460194 )-7500) * 0.000460194
pub.publish( pubmsg )
if __name__ == '__main__':
argv = rospy.myargv(sys.argv)
rospy.init_node('kondo')
try:
port = rospy.get_param('~port')
rospy.loginfo('Parameter %s has value %s', rospy.resolve_name('~port'), port)
except:
rospy.logerr("Set correct port to %s", rospy.resolve_name('~port'))
exit()
k = kondo(port, 115200, timeout=0.1, parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE)
pub = rospy.Publisher('/servo/rx', servo, queue_size=100)
rospy.Subscriber("/servo/tx", servo, callback)
rospy.spin()
    k.close()
| mit | 1,974,135,662,869,258,200 | 23.482143 | 97 | 0.622628 | false |
adsr303/unjabber | unjabberlib/tkui.py | 1 | 2101 | from tkinter import *
from tkinter.scrolledtext import ScrolledText
from tkinter.ttk import *
from unjabberlib import formatters
class ScrolledTextFormatter(formatters.Formatter):
def __init__(self, scrolled_text):
super().__init__()
self.text = scrolled_text
self.text.tag_configure(formatters.DAY, foreground='red',
justify='center')
self.text.tag_configure(formatters.HOUR, foreground='blue')
self.text.tag_configure(formatters.NAME, foreground='green')
def append(self, text, tag=None):
self.text.insert(END, text, tag)
class UnjabberTk(Tk):
def __init__(self, queries, *args, title=None, **kwargs):
super().__init__(*args, **kwargs)
self.title(title)
self.queries = queries
top_frame = Frame(self)
top_frame.pack(fill=X)
self.who_narrow_var = StringVar(self)
self.who_narrow_var.trace_add('write', lambda *_: self.narrow_who())
e = Entry(top_frame, textvariable=self.who_narrow_var)
e.pack(side=LEFT)
self.text = ScrolledText(self)
self.text.pack(expand=True, fill=BOTH)
self.formatter = ScrolledTextFormatter(self.text)
self.who_var = StringVar(self)
self.who_var.trace_add('write', lambda *_: self.who())
self.who_menu = OptionMenu(top_frame, self.who_var, '',
*self.queries.who(None))
self.who_menu.pack(side=LEFT)
def who(self):
self.text.delete('1.0', END)
previous = None
for message in self.queries.messages_for_whom(self.who_var.get()):
day, hour, name = message.after(previous)
self.formatter.show(previous, day, hour, name, message.what)
previous = message
def narrow_who(self):
menu = self.who_menu['menu']
menu.delete(0, END)
like = self.who_narrow_var.get() or None
for name in self.queries.who(like):
menu.add_command(label=name,
command=lambda x=name: self.who_var.set(x))
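# Launch sketch (``queries`` must provide who(like) and
# messages_for_whom(name) as used above; SomeQueries is hypothetical):
#
#   app = UnjabberTk(SomeQueries(), title='Unjabber')
#   app.mainloop()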
| mit | -5,649,839,388,989,175,000 | 34.610169 | 76 | 0.60019 | false |
esistgut/django-content-toolkit | accounts/migrations/0003_auto_20150608_1342.py | 1 | 1142 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150520_2341'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=75, verbose_name='email address', unique=True),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', related_name='user_set', verbose_name='groups', blank=True, to='auth.Group'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login'),
preserve_default=True,
),
]
| mit | 5,197,411,857,469,430,000 | 33.606061 | 257 | 0.609457 | false |
zeraien/pyjade | pyjade/parser.py | 1 | 10550 | from __future__ import absolute_import
from .lexer import Lexer
from . import nodes
import six
textOnly = ('script','style')
class Parser(object):
def __init__(self,str,filename=None,**options):
self.input = str
self.lexer = Lexer(str,**options)
self.filename = filename
self.bloks = {}
self.options = options
self.contexts = [self]
self.extending = False
self._spaces = None
    def context(self,parser=None):
        if parser: self.contexts.append(parser)
else: self.contexts.pop()
def advance(self):
return self.lexer.advance()
def skip(self,n):
while n>1: # > 0?
self.advance()
n -= 1
def peek(self):
p = self.lookahead(1)
return p
def line(self):
return self.lexer.lineno
def lookahead(self,n):
return self.lexer.lookahead(n)
def parse(self):
block = nodes.Block()
parser = None
block.line = self.line()
while 'eos' != self.peek().type:
if 'newline' == self.peek().type: self.advance()
else: block.append(self.parseExpr())
parser = self.extending
if parser:
self.context(parser)
ast = parser.parse()
self.context()
return ast
return block
def expect(self,type):
t = self.peek().type
if t == type: return self.advance()
else:
raise Exception('expected "%s" but got "%s" in file %s on line %d' %
(type, t, self.filename, self.line()))
def accept(self,type):
if self.peek().type == type: return self.advance()
def parseExpr(self):
t = self.peek().type
if 'yield' == t:
self.advance()
block = nodes.Block()
block._yield = True
return block
elif t in ('id','class'):
tok = self.advance()
new_div = self.lexer.tok('tag','div')
new_div.inline_level = tok.inline_level
self.lexer.stash.append(new_div)
self.lexer.stash.append(tok)
return self.parseExpr()
funcName = 'parse%s'%t.capitalize()
if hasattr(self,funcName):
return getattr(self,funcName)()
else:
raise Exception('unexpected token "%s" in file %s on line %d' %
(t, self.filename, self.line()))
def parseString(self):
tok = self.expect('string')
node = nodes.String(tok.val, inline=tok.inline_level > 0)
node.line = self.line()
return node
def parseText(self):
tok = self.expect('text')
node = nodes.Text(tok.val)
node.line = self.line()
return node
def parseBlockExpansion(self):
if ':'== self.peek().type:
self.advance()
return nodes.Block(self.parseExpr())
else:
return self.block()
def parseAssignment(self):
tok = self.expect('assignment')
return nodes.Assignment(tok.name,tok.val)
def parseCode(self):
tok = self.expect('code')
node = nodes.Code(tok.val,tok.buffer,tok.escape) #tok.escape
block,i = None,1
node.line = self.line()
while self.lookahead(i) and 'newline'==self.lookahead(i).type:
i+= 1
block = 'indent' == self.lookahead(i).type
if block:
self.skip(i-1)
node.block = self.block()
return node
def parseComment(self):
tok = self.expect('comment')
if 'indent'==self.peek().type:
node = nodes.BlockComment(tok.val, self.block(), tok.buffer)
else:
node = nodes.Comment(tok.val,tok.buffer)
node.line = self.line()
return node
def parseDoctype(self):
tok = self.expect('doctype')
node = nodes.Doctype(tok.val)
node.line = self.line()
return node
def parseFilter(self):
tok = self.expect('filter')
attrs = self.accept('attrs')
self.lexer.pipeless = True
block = self.parseTextBlock()
self.lexer.pipeless = False
node = nodes.Filter(tok.val, block, attrs and attrs.attrs)
node.line = self.line()
return node
def parseASTFilter(self):
tok = self.expect('tag')
attrs = self.accept('attrs')
self.expect(':')
block = self.block()
node = nodes.Filter(tok.val, block, attrs and attrs.attrs)
node.line = self.line()
return node
def parseEach(self):
tok = self.expect('each')
node = nodes.Each(tok.code, tok.keys)
node.line = self.line()
node.block = self.block()
return node
def parseConditional(self):
tok = self.expect('conditional')
node = nodes.Conditional(tok.val, tok.sentence)
node.line = self.line()
node.block = self.block()
while True:
t = self.peek()
if 'conditional' == t.type and node.can_append(t.val):
node.append(self.parseConditional())
else:
break
return node
def parseExtends(self):
path = self.expect('extends').val.strip('"\'')
return nodes.Extends(path)
def parseCall(self):
tok = self.expect('call')
name = tok.val
args = tok.args
if args is None:
args = ""
block = self.block() if 'indent' == self.peek().type else None
return nodes.Mixin(name,args,block,True)
def parseMixin(self):
tok = self.expect('mixin')
name = tok.val
args = tok.args
if args is None:
args = ""
block = self.block() if 'indent' == self.peek().type else None
return nodes.Mixin(name,args,block,block is None)
def parseBlock(self):
block = self.expect('block')
mode = block.mode
name = block.val.strip()
block = self.block(cls=nodes.CodeBlock) if 'indent'==self.peek().type else nodes.CodeBlock(nodes.Literal(''))
block.mode = mode
block.name = name
return block
def parseInclude(self):
path = self.expect('include').val.strip()
return nodes.Include(path)
def parseTextBlock(self, tag=None):
text = nodes.Text()
text.line = self.line()
if (tag):
            text.parent = tag
spaces = self.expect('indent').val
if not self._spaces: self._spaces = spaces
indent = ' '*(spaces-self._spaces)
while 'outdent' != self.peek().type:
t = self.peek().type
if 'newline'==t:
text.append('\n')
self.advance()
elif 'indent'==t:
text.append('\n')
for node in self.parseTextBlock().nodes: text.append(node)
text.append('\n')
else:
text.append(indent+self.advance().val)
if spaces == self._spaces: self._spaces = None
self.expect('outdent')
return text
def block(self,cls=nodes.Block):
block = cls()
block.line = self.line()
self.expect('indent')
while 'outdent' != self.peek().type:
if 'newline'== self.peek().type:
self.advance()
else:
block.append(self.parseExpr())
self.expect('outdent')
return block
def processInline(self, current_tag, current_level):
next_level = current_level + 1
while self.peek().inline_level == next_level:
current_tag.block.append(self.parseExpr())
if self.peek().inline_level > next_level:
self.processInline(current_tag, next_level)
def processTagText(self, tag):
if self.peek().inline_level < tag.inline_level:
return
if not self.lookahead(2).inline_level > tag.inline_level:
tag.text = self.parseText()
return
while self.peek().inline_level == tag.inline_level and self.peek().type == 'string':
tag.block.append(self.parseExpr())
if self.peek().inline_level > tag.inline_level:
self.processInline(tag, tag.inline_level)
def parseTag(self):
i = 2
if 'attrs'==self.lookahead(i).type: i += 1
if ':'==self.lookahead(i).type:
if 'indent' == self.lookahead(i+1).type:
raise Exception('unexpected token "indent" in file %s on line %d' %
(self.filename, self.line()))
tok = self.advance()
tag = nodes.Tag(tok.val)
tag.inline_level = tok.inline_level
dot = None
tag.line = self.line()
while True:
t = self.peek().type
if t in ('id','class'):
tok = self.advance()
tag.setAttribute(tok.type,'"%s"'%tok.val,True)
continue
elif 'attrs'==t:
tok = self.advance()
for n,v in six.iteritems(tok.attrs):
tag.setAttribute(n,v,n in tok.static_attrs)
continue
else:
break
v = self.peek().val
if '.'== v:
dot = tag.textOnly = True
self.advance()
elif '<'== v: # For inline elements
tag.inline = True
self.advance()
t = self.peek().type
if 'code'==t: tag.code = self.parseCode()
elif ':'==t:
self.advance()
tag.block = nodes.Block()
tag.block.append(self.parseExpr())
elif 'string'==t: self.processTagText(tag)
elif 'text'==t: tag.text = self.parseText()
while 'newline' == self.peek().type: self.advance()
tag.textOnly = tag.textOnly or tag.name in textOnly
if 'script'== tag.name:
type = tag.getAttribute('type')
if not dot and type and 'text/javascript' !=type.strip('"\''): tag.textOnly = False
if 'indent' == self.peek().type:
if tag.textOnly:
self.lexer.pipeless = True
tag.block = self.parseTextBlock(tag)
self.lexer.pipeless = False
else:
block = self.block()
if tag.block:
for node in block.nodes:
tag.block.append(node)
else:
tag.block = block
return tag
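# Usage sketch: parse() returns the root nodes.Block of the AST, which a
# compiler can then walk.
#
#   parser = Parser(u'div\n  p Hello')
#   block = parser.parse()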
| mit | -330,388,523,607,640,800 | 29.403458 | 117 | 0.525877 | false |
patrickm/chromium.src | mojo/public/bindings/pylib/parse/mojo_parser_unittest.py | 1 | 3720 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojo_lexer
import mojo_parser
import unittest
class MojoParserTest(unittest.TestCase):
"""Tests mojo_parser (in particular, Parse())."""
def testTrivialValidSource(self):
"""Tests a trivial, but valid, .mojom source."""
source = """\
// This is a comment.
module my_module {
}
"""
self.assertEquals(mojo_parser.Parse(source, "my_file.mojom"),
[("MODULE", "my_module", None)])
def testSourceWithCrLfs(self):
"""Tests a .mojom source with CR-LFs instead of LFs."""
source = "// This is a comment.\r\n\r\nmodule my_module {\r\n}\r\n";
self.assertEquals(mojo_parser.Parse(source, "my_file.mojom"),
[("MODULE", "my_module", None)])
def testUnexpectedEOF(self):
"""Tests a "truncated" .mojom source."""
source = """\
// This is a comment.
module my_module {
"""
with self.assertRaisesRegexp(
mojo_parser.ParseError,
r"^my_file\.mojom: Error: Unexpected end of file$"):
mojo_parser.Parse(source, "my_file.mojom")
def testSimpleStruct(self):
"""Tests a simple .mojom source that just defines a struct."""
source ="""\
module my_module {
struct MyStruct {
int32 a;
double b;
};
} // module my_module
"""
# Note: Output as pretty-printed on failure by the test harness.
expected = \
[('MODULE',
'my_module',
[('STRUCT',
'MyStruct',
None,
[('FIELD', 'int32', 'a', None, None),
('FIELD', 'double', 'b', None, None)])])]
self.assertEquals(mojo_parser.Parse(source, "my_file.mojom"), expected)
def testEnumExpressions(self):
"""Tests an enum with values calculated using simple expressions."""
source = """\
module my_module {
enum MyEnum {
MY_ENUM_1 = 1,
MY_ENUM_2 = 1 + 1,
MY_ENUM_3 = 1 * 3,
MY_ENUM_4 = 2 * (1 + 1),
MY_ENUM_5 = 1 + 2 * 2,
MY_ENUM_6 = -6 / -2,
MY_ENUM_7 = 3 | (1 << 2),
MY_ENUM_8 = 16 >> 1,
MY_ENUM_9 = 1 ^ 15 & 8,
MY_ENUM_10 = 110 % 100,
MY_ENUM_MINUS_1 = ~0
};
} // my_module
"""
self.maxDiff = 2000
expected = \
[('MODULE',
'my_module',
[('ENUM',
'MyEnum',
[('ENUM_FIELD', 'MY_ENUM_1', ('EXPRESSION', ['1'])),
('ENUM_FIELD', 'MY_ENUM_2', ('EXPRESSION', ['1', '+', '1'])),
('ENUM_FIELD', 'MY_ENUM_3', ('EXPRESSION', ['1', '*', '3'])),
('ENUM_FIELD',
'MY_ENUM_4',
('EXPRESSION',
['2', '*', '(', ('EXPRESSION', ['1', '+', '1']), ')'])),
('ENUM_FIELD',
'MY_ENUM_5',
('EXPRESSION', ['1', '+', '2', '*', '2'])),
('ENUM_FIELD',
'MY_ENUM_6',
('EXPRESSION',
['-', ('EXPRESSION', ['6', '/', '-', ('EXPRESSION', ['2'])])])),
('ENUM_FIELD',
'MY_ENUM_7',
('EXPRESSION',
['3', '|', '(', ('EXPRESSION', ['1', '<<', '2']), ')'])),
('ENUM_FIELD', 'MY_ENUM_8', ('EXPRESSION', ['16', '>>', '1'])),
('ENUM_FIELD',
'MY_ENUM_9',
('EXPRESSION', ['1', '^', '15', '&', '8'])),
('ENUM_FIELD', 'MY_ENUM_10', ('EXPRESSION', ['110', '%', '100'])),
('ENUM_FIELD',
'MY_ENUM_MINUS_1',
('EXPRESSION', ['~', ('EXPRESSION', ['0'])]))])])]
self.assertEquals(mojo_parser.Parse(source, "my_file.mojom"), expected)
def testNoConditionals(self):
"""Tests that ?: is not allowed."""
source = """\
module my_module {
enum MyEnum {
MY_ENUM_1 = 1 ? 2 : 3
};
} // my_module
"""
with self.assertRaisesRegexp(
mojo_lexer.LexError,
r"^my_file\.mojom:4: Error: Illegal character '\?'$"):
mojo_parser.Parse(source, "my_file.mojom")
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -6,125,040,359,476,793,000 | 25.956522 | 75 | 0.54086 | false |
eternallyBaffled/itrade | ext/itrade_liveupdate_euronext.py | 1 | 16717 | #!/usr/bin/env python
# ============================================================================
# Project Name : iTrade
# Module Name : itrade_liveupdate_euronext.py
#
# Description: Live update quotes from euronext.com : EURONEXT, ALTERNEXT,
# MARCHE LIBRE (PARIS & BRUXELLES)
#
# The Original Code is iTrade code (http://itrade.sourceforge.net).
#
# The Initial Developer of the Original Code is Gilles Dumortier.
#
# Portions created by the Initial Developer are Copyright (C) 2004-2008 the
# Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see http://www.gnu.org/licenses/gpl.html
#
# History Rev Description
# 2005-06-10 dgil Wrote it from scratch
# ============================================================================
# ============================================================================
# Imports
# ============================================================================
# python system
import logging
import re
import string
import thread
import urllib2
from datetime import *
# iTrade system
from itrade_logging import *
from itrade_quotes import *
from itrade_datation import Datation,jjmmaa2yyyymmdd
from itrade_defs import *
from itrade_ext import *
from itrade_market import euronextmic,convertConnectorTimeToPlaceTime
from itrade_connection import ITradeConnection
import itrade_config
# ============================================================================
# LiveUpdate_Euronext()
#
# Euronext returns all the quotes then we have to extract only the quote
# we want to return :-(
# design idea : if the quote is requested within the same second, use a
# cached data to extract !
# ============================================================================
class LiveUpdate_Euronext(object):
def __init__(self,market='EURONEXT'):
debug('LiveUpdate_Euronext:__init__')
self.m_connected = False
self.m_livelock = thread.allocate_lock()
self.m_data = None
self.m_clock = {}
self.m_dateindice = {}
self.m_dcmpd = {}
self.m_lastclock = 0
self.m_lastdate = "20070101"
self.m_market = market
self.m_url = 'https://europeanequities.nyx.com/fr/nyx_eu_listings/real-time/quote?'
self.m_connection = ITradeConnection(cookies = None,
proxy = itrade_config.proxyHostname,
proxyAuth = itrade_config.proxyAuthentication,
connectionTimeout = itrade_config.connectionTimeout
)
# ---[ reentrant ] ---
def acquire(self):
# not reentrant because of global states : m_viewstate/m_data
self.m_livelock.acquire()
def release(self):
self.m_livelock.release()
# ---[ properties ] ---
def name(self):
return self.m_market
def delay(self):
return 15
def timezone(self):
# timezone of the livedata (see pytz all_timezones)
return "CET"
# ---[ connexion ] ---
def connect(self):
return True
def disconnect(self):
pass
def alive(self):
return self.m_connected
# ---[ state ] ---
def getstate(self):
# no state
return True
# ---[ code to get data ] ---
def splitLines(self,buf):
lines = string.split(buf, '\n')
lines = filter(lambda x:x, lines)
def removeCarriage(s):
if s[-1]=='\r':
return s[:-1]
else:
return s
lines = [removeCarriage(l) for l in lines]
return lines
def euronextDate(self,date):
sp = string.split(date,' ')
#print 'euronextDate:',sp
# Date part is easy
sdate = jjmmaa2yyyymmdd(sp[0])
if len(sp)==1:
return sdate,"00:00"
return sdate,sp[1]
def convertClock(self,place,clock,date):
min = clock[3:5]
hour = clock[:2]
val = (int(hour)*60) + int(min)
#print 'clock:',clock,hour,min,val
if val>self.m_lastclock and date>=self.m_lastdate:
self.m_lastdate = date
self.m_lastclock = val
# convert from connector timezone to market place timezone
mdatetime = datetime(int(date[0:4]),int(date[4:6]),int(date[6:8]),val/60,val%60)
mdatetime = convertConnectorTimeToPlaceTime(mdatetime,self.timezone(),place)
return "%d:%02d" % (mdatetime.hour,mdatetime.minute)
def parseFValue(self,d):
val = string.split(d,',')
ret = ''
for val in val:
ret = ret+val
return string.atof(ret)
def parseLValue(self,d):
if d=='-': return 0
if ',' in d:
s = ','
else:
s = '\xA0'
val = string.split(d,s)
ret = ''
for val in val:
ret = ret+val
return string.atol(ret)
def getdata(self,quote):
self.m_connected = False
debug("LiveUpdate_Euronext:getdata quote:%s market:%s" % (quote,self.m_market))
mic = euronextmic(quote.market(),quote.place())
query = (
('isin', quote.isin()),
('mic', mic),
)
query = map(lambda (var, val): '%s=%s' % (var, str(val)), query)
query = string.join(query, '&')
url = self.m_url + query
#print 'url:',url
debug("LiveUpdate_Euronext:getdata: url=%s ",url)
try:
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20041202 Firefox/1.0')
f = urllib2.urlopen(req)
buf = f.read()
f.close()
except:
debug('LiveUpdate_Euronext:unable to connect :-(')
return None
# pull data
lines = self.splitLines(buf)
i = 0
count = 0
for eachLine in lines:
count = count + 1
if '"datetimeLastvalue">' in eachLine:
iDate = eachLine[eachLine.find('"datetimeLastvalue">')+20:eachLine.find('</span>')].replace('CET','').replace('BST','')
iDate = iDate.rstrip()
#print count,'iDate:',iDate
i = i +1
if '"lastPriceint">' in eachLine:
lastPriceint = eachLine[eachLine.find('"lastPriceint">')+15:eachLine.find('</span>')].replace(',','.')
lastPriceint = lastPriceint.replace(',','.')
i = i +1
if '"lastPricefract">' in eachLine:
lastPricefract = eachLine[eachLine.find('"lastPricefract">')+17:eachLine.find('</sup>')]
i = i +1
iLast = lastPriceint + lastPricefract
#print count,'iLast:',iLast
if '"cnDiffRelvalue">(' in eachLine:
iPercent = eachLine[eachLine.find('"cnDiffRelvalue">(')+18:eachLine.find(')</span>')]
iPercent = iPercent.replace('%','').replace(',','.').replace('+','')
i = i +1
#print count,'iPercent:',iPercent
if '"todayVolumevalue">' in eachLine:
iVolume = eachLine[eachLine.find('"todayVolumevalue">')+19:eachLine.find(' ')].replace('.','').replace(',','')
i = i +1
#print count,'iVolume:',iVolume
if '>Ouvert<' in eachLine:
eachLine = lines[count]
iOpen = eachLine[:eachLine.find('</td>')].replace('.','').replace(',','.')
if '%' in iOpen:
iOpen = iOpen[iOpen.find('%')+1:]
elif '$' in iOpen:
iOpen = iOpen[iOpen.find('$')+1:]
elif '€' in iOpen :
iOpen = iOpen[iOpen.find('€')+6:]
elif '£' in iOpen :
iOpen = iOpen[iOpen.find('£')+7:]
elif '-' in iOpen:
iOpen = '0'
return None
i = i + 1
#print count,'iOpen:',iOpen
if '"highPricevalue">' in eachLine:
iHigh = eachLine[eachLine.find('"highPricevalue">')+17:eachLine.find(' ')].replace('.','').replace(',','.')
if '-' in iHigh:
iHigh = '0'
return None
i = i +1
#print count,'iHigh:',iHigh
if '"lowPricevalue">' in eachLine:
iLow = eachLine[eachLine.find('"lowPricevalue">')+16:eachLine.find(' ')].replace('.','').replace(',','.')
if '-' in iLow:
iLow = '0'
return None
i = i +1
#print count,'iLow:',iLow
if i == 7 and (quote.list()==QLIST_INDICES):
iVolume = '0'
i = i + 1
if i == 8:
count = 0
i = 0
c_datetime = datetime.today()
c_date = "%04d%02d%02d" % (c_datetime.year,c_datetime.month,c_datetime.day)
#print 'Today is :', c_date
sdate,sclock = self.euronextDate(iDate)
# be sure we have volume (or indices)
if (quote.list() == QLIST_INDICES or iVolume != ''):
# be sure not an oldest day !
if (c_date==sdate) or (quote.list() == QLIST_INDICES):
key = quote.key()
self.m_dcmpd[key] = sdate
self.m_dateindice[key] = str(sdate[6:8]) + '/' + str(sdate[4:6]) + '/' +str(sdate[0:4])
self.m_clock[key] = self.convertClock(quote.place(),sclock,sdate)
# ISIN;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME;PERCENT
data = ';'.join([quote.key(),sdate,iOpen,iHigh,iLow,iLast,iVolume,iPercent])
return data
return None
# ---[ cache management on data ] ---
def getcacheddata(self,quote):
return None
def iscacheddataenoughfreshq(self):
return False
def cacheddatanotfresh(self):
# no cache
pass
# ---[ notebook of order ] ---
def hasNotebook(self):
return False
# ---[ status of quote ] ---
def hasStatus(self):
return itrade_config.isConnected()
def currentStatus(self,quote):
#
key = quote.key()
if not self.m_dcmpd.has_key(key):
# no data for this quote !
return "UNKNOWN","::","0.00","0.00","::"
st = 'OK'
cl = '::'
return st,cl,"-","-",self.m_clock[key]
def currentTrades(self,quote):
# clock,volume,value
return None
def currentMeans(self,quote):
# means: sell,buy,last
return "-","-","-"
def currentClock(self,quote=None):
if quote==None:
if self.m_lastclock==0:
return "::"
# hh:mm
return "%d:%02d" % (self.m_lastclock/60,self.m_lastclock%60)
#
key = quote.key()
if not self.m_clock.has_key(key):
# no data for this quote !
return "::"
else:
return self.m_clock[key]
def currentDate(self,quote=None):
key = quote.key()
if not self.m_dateindice.has_key(key):
# no date for this quote !
return "----"
else:
return self.m_dateindice[key]
# ============================================================================
# Export me
# ============================================================================
gLiveEuronext = LiveUpdate_Euronext('euronext')
gLiveAlternext = LiveUpdate_Euronext('alternext')
registerLiveConnector('EURONEXT','PAR',QLIST_BONDS,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','BRU',QLIST_BONDS,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','AMS',QLIST_BONDS,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','LIS',QLIST_BONDS,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','PAR',QLIST_INDICES,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','AMS',QLIST_INDICES,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','BRU',QLIST_INDICES,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','LIS',QLIST_INDICES,QTAG_DIFFERED,gLiveEuronext,bDefault=True)
registerLiveConnector('EURONEXT','PAR',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
registerLiveConnector('EURONEXT','BRU',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
registerLiveConnector('EURONEXT','AMS',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
registerLiveConnector('EURONEXT','LIS',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
registerLiveConnector('ALTERNEXT','PAR',QLIST_ANY,QTAG_DIFFERED,gLiveAlternext,bDefault=False)
registerLiveConnector('ALTERNEXT','BRU',QLIST_ANY,QTAG_DIFFERED,gLiveAlternext,bDefault=False)
registerLiveConnector('ALTERNEXT','AMS',QLIST_ANY,QTAG_DIFFERED,gLiveAlternext,bDefault=False)
registerLiveConnector('ALTERNEXT','LIS',QLIST_ANY,QTAG_DIFFERED,gLiveAlternext,bDefault=False)
registerLiveConnector('ALTERNEXT','PAR',QLIST_INDICES,QTAG_DIFFERED,gLiveAlternext,bDefault=True)
registerLiveConnector('ALTERNEXT','BRU',QLIST_INDICES,QTAG_DIFFERED,gLiveAlternext,bDefault=True)
registerLiveConnector('ALTERNEXT','AMS',QLIST_INDICES,QTAG_DIFFERED,gLiveAlternext,bDefault=True)
registerLiveConnector('ALTERNEXT','LIS',QLIST_INDICES,QTAG_DIFFERED,gLiveAlternext,bDefault=True)
registerLiveConnector('PARIS MARCHE LIBRE','PAR',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
registerLiveConnector('BRUXELLES MARCHE LIBRE','BRU',QLIST_ANY,QTAG_DIFFERED,gLiveEuronext,bDefault=False)
# ============================================================================
# Test ME
#
# ============================================================================
def test(ticker):
if gLiveEuronext.iscacheddataenoughfreshq():
data = gLiveEuronext.getcacheddata(ticker)
if data:
debug(data)
else:
debug("nodata")
elif gLiveEuronext.connect():
state = gLiveEuronext.getstate()
if state:
debug("state=%s" % (state))
quote = quotes.lookupTicker(ticker,'EURONEXT')
if (quote):
data = gLiveEuronext.getdata(quote)
if data!=None:
if data:
info(data)
else:
debug("nodata")
else:
print "getdata() failure :-("
else:
print "Unknown ticker %s on EURONEXT" % (ticker)
else:
print "getstate() failure :-("
gLiveEuronext.disconnect()
else:
print "connect() failure :-("
if __name__=='__main__':
setLevel(logging.DEBUG)
print 'live %s' % date.today()
# load euronext import extension
import itrade_ext
itrade_ext.loadOneExtension('itrade_import_euronext.py',itrade_config.dirExtData)
quotes.loadMarket('EURONEXT')
test('OSI')
test('GTO')
gLiveEuronext.cacheddatanotfresh()
test('GTO')
# ============================================================================
# That's all folks !
# ============================================================================
| gpl-3.0 | -481,325,598,382,542,660 | 33.568085 | 135 | 0.522402 | false |
sternshus/Arelle | arelle/plugin/xbrlDB/XbrlOpenSqlDB.py | 1 | 79112 | '''
XbrlOpenSqlDB.py implements an SQL database interface for Arelle, based
on a concrete realization of the Open Information Model and Abstract Model PWD 2.0.
This is a semantic representation of XBRL Open Information Model (instance) and
XBRL Abstract Model (DTS) information.
This module may save directly to a Postgres, MySQL, SQLite, MSSQL, or Oracle server.
This module provides the execution context for saving a dts and instances in
XBRL SQL database. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for SQL Server
port: the host port of server
user, password: if needed for server
database: the top level path segment for the SQL Server
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to use from command line:
linux
# be sure plugin is installed
arelleCmdLine --plugin '+xbrlDB|show'
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB 'myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds'
windows
rem be sure plugin is installed
arelleCmdLine --plugin "+xbrlDB|show"
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB "myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds"
examples of arguments:
store from instance into DB: -f "my_traditional_instance.xbrl" -v --plugins "xbrlDB" --store-to-XBRL-DB "localhost,8084,userid,passwd,open_db,90,pgOpenDB"
store from OIM excel instance into DB: -f "my_oim_instance.xlsx" -v --plugins "loadFromOIM.py|xbrlDB" --store-to-XBRL-DB "localhost,8084,userid,passwd,open_db,90,pgOpenDB"
load from DB save into instance: -f "output_instance.xbrl" --plugins "xbrlDB" --load-from-XBRL-DB "localhost,8084,userid,passwd,open_db,90,pgOpenDB,loadInstanceId=214147"
'''
import time, datetime, logging
from arelle.ModelDocument import Type, create as createModelDocument
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname, QName, dateTime, DATETIME
from arelle.ModelRelationshipSet import ModelRelationshipSet
from arelle.PrototypeInstanceObject import DimValuePrototype
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import collapseWhitespacePattern, UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence, xmlstring, addQnameValue, addChild
from arelle import XbrlConst, ValidateXbrlDimensions
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl,
user=None, password=None, host=None, port=None, database=None, timeout=None,
loadDBsaveToFile=None, loadInstanceId=None,
product=None, rssItem=None, **kwargs):
if getattr(modelXbrl, "blockOpenDBrecursion", False):
return None
xbrlDbConn = None
result = True
try:
xbrlDbConn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
if "rssObject" in kwargs: # initialize batch
xbrlDbConn.initializeBatch(kwargs["rssObject"])
else:
xbrlDbConn.verifyTables()
if loadDBsaveToFile:
# load modelDocument from database saving to file
result = xbrlDbConn.loadXbrlFromDB(loadDBsaveToFile, loadInstanceId)
else:
xbrlDbConn.insertXbrl(rssItem=rssItem)
xbrlDbConn.close()
except Exception as ex:
if xbrlDbConn is not None:
try:
xbrlDbConn.close(rollback=True)
except Exception as ex2:
pass
raise # reraise original exception with original traceback
return result
def isDBPort(host, port, timeout=10, product="postgres"):
return isSqlConnection(host, port, timeout)
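# Usage sketch: storing a loaded instance programmatically (connection
# parameters are placeholders, mirroring the command-line examples in
# the module docstring):
#
#   insertIntoDB(modelXbrl, user="userid", password="passwd",
#                host="localhost", port=8084, database="open_db",
#                timeout=90, product="postgres")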
XBRLDBTABLES = {
"filing", "report",
"document", "referenced_documents",
"concept", "enumeration", "data_type", "role_type", "arcrole_type",
"resource", "relationship_set", "root", "relationship",
"fact", "footnote", "entity_identifier", "period", "unit", "unit_measure", "aspect_value_set",
"message", "message_reference",
"industry", "industry_level", "industry_structure",
}
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
missingTables = XBRLDBTABLES - self.tablesInDB()
# if no tables, initialize database
if missingTables == XBRLDBTABLES:
self.create({"mssql": "xbrlOpenDBMSSql.sql",
"mysql": "xbrlOpenDBMySql.ddl",
"sqlite": "xbrlOpenSqlDBSQLite.ddl",
"orcl": "xbrlOpenSqlDBOracle.sql",
"postgres": "xbrlOpenSqlDBPostgres.ddl"}[self.product])
missingTables = XBRLDBTABLES - self.tablesInDB()
if missingTables and missingTables != {"sequences"}:
raise XPDBException("sqlDB:MissingTables",
_("The following tables are missing: %(missingTableNames)s"),
missingTableNames=', '.join(t for t in sorted(missingTables)))
def insertXbrl(self, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
# get logging entries (needed to find which concepts to identify)
self.loggingEntries = []
for handler in logging.getLogger("arelle").handlers:
if hasattr(handler, "dbHandlerLogEntries"):
self.loggingEntries = handler.dbHandlerLogEntries()
break
# must have a valid XBRL instance or document
if self.modelXbrl.modelDocument is None:
raise XPDBException("xpgDB:MissingXbrlDocument",
_("No XBRL instance or schema loaded for this filing."))
            # obtain supplemental entity information
self.entityInformation = loadEntityInformation(self.modelXbrl, rssItem)
            # identify table facts (table datapoints) (prior to locked database transaction)
self.tableFacts = tableFacts(self.modelXbrl) # for EFM & HMRC this is ( (roleType, table_code, fact) )
loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation) # load primary document facts for SEC filing
self.identifyTaxonomyRelSetsOwner()
# at this point we determine what's in the database and provide new tables
# requires locking most of the table structure
self.lockTables(('filing', 'report', 'document', 'referenced_documents'),
isSessionTransaction=True) # lock for whole transaction
# find pre-existing documents in server database
self.identifyPreexistingDocuments()
self.identifyConceptsUsed()
self.dropTemporaryTable()
startedAt = time.time()
            self.syncSequences = True # for database types that don't explicitly handle sequences
self.insertFiling(rssItem)
self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDocuments()
self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertConcepts()
self.modelXbrl.profileStat(_("XbrlSqlDB: Concepts insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertArcroleTypes()
self.insertRoleTypes()
self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertResources()
self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
startedAt = time.time()
# self.modelXbrl.profileStat(_("XbrlSqlDB: DTS insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertFacts()
self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertRelationships() # must follow data points for footnote relationships
self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertValidationResults()
self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
startedAt = time.time()
self.showStatus("Committing entries")
self.commit()
self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
raise
def identifyTaxonomyRelSetsOwner(self):
# walk down referenced document set from instance to find 'lowest' taxonomy relationship set ownership
instanceReferencedDocuments = set()
instanceDocuments = set()
inlineXbrlDocSet = None
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instanceDocuments.add(mdlDoc)
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
instanceReferencedDocuments.add(refDoc)
elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
inlineXbrlDocSet = mdlDoc
if len(instanceReferencedDocuments) > 1:
# filing must own the taxonomy set
if len(instanceDocuments) == 1:
self.taxonomyRelSetsOwner = instanceDocuments.pop()
elif inlineXbrlDocSet is not None: # manifest for inline docs can own the rel sets
self.taxonomyRelSetsOwner = inlineXbrlDocSet
            else: # no single instance, pick the entry point document
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument # entry document (instance or inline doc set)
elif len(instanceReferencedDocuments) == 1:
self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
else:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
instanceReferencedDocuments.clear() # dereference
instanceDocuments.clear()
# check whether relationship_set is completely in instance or part/all in taxonomy
self.arcroleInInstance = {}
self.arcroleHasResource = {}
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
inInstance = False
hasResource = False
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
if (not inInstance and
rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
for tgtObj in (rel.fromModelObject, rel.toModelObject))):
inInstance = True
if not hasResource and any(isinstance(resource, ModelResource)
for resource in (rel.fromModelObject, rel.toModelObject)):
hasResource = True
if inInstance and hasResource:
                        break
self.arcroleInInstance[arcrole] = inInstance
self.arcroleHasResource[arcrole] = hasResource
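        # Illustration (example arcrole URIs, for orientation only) of the two
        # maps built above:
        #
        #     self.arcroleInInstance["http://www.xbrl.org/2003/arcrole/fact-footnote"] -> True
        #     self.arcroleHasResource["http://www.xbrl.org/2003/arcrole/concept-label"] -> True
        #
        # insertResources() and insertRelationships() consult them to decide
        # whether a relationship set lives in the instance document or with
        # the (possibly pre-existing) taxonomy rel-sets owner.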
def initializeBatch(self, rssObject):
results = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
existingFilings = dict((filingNumber, timestamp)
for filingNumber, timestamp in results) # timestamp is a string
for rssItem in rssObject.rssItems:
if (rssItem.accessionNumber in existingFilings and
rssItem.acceptanceDatetime == existingFilings[rssItem.accessionNumber]):
rssItem.skipRssItem = True
def insertFiling(self, rssItem):
now = datetime.datetime.now()
entityInfo = self.entityInformation
def rssItemGet(propertyName):
if rssItem is not None:
return getattr(rssItem, propertyName, None)
return None
self.showStatus("insert filing")
LEI = None
filing_comparator = ('legal_entity_number', 'filing_number') if LEI else ('filing_number',)
_fiscalYearEnd = rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end")
_fiscalYearEndAdjusted = "02-28" if _fiscalYearEnd == "02-29" else _fiscalYearEnd
# _fiscalPeriod =
table = self.getTable('filing', 'filing_id',
('filing_number',
'legal_entity_number',
'reference_number', # CIK
'standard_industry_code',
'tax_number',
'form_type',
'accepted_timestamp', 'is_most_current', 'filing_date',
'creation_software',
'authority_html_url',
'entry_url',
'fiscal_year',
'fiscal_period',
'name_at_filing',
'legal_state_at_filing',
'restatement_index',
'period_index',
'first_5_comments',
'zip_url',
'file_number',
'phone',
'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
'fiscal_year_end',
'filer_category',
'public_float',
'trading_symbol'),
filing_comparator, # cannot compare None = None if LEI is absent, always False
((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())), # NOT NULL
LEI,
rssItemGet("cikNumber") or entityInfo.get("cik"),
rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
entityInfo.get("irs-number"),
rssItemGet("formType") or entityInfo.get("form-type"),
rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
True,
rssItemGet("filingDate") or entityInfo.get("filing-date") or now, # NOT NULL
self.modelXbrl.modelDocument.creationSoftware,
rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
rssItemGet("url") or entityInfo.get("instance-url"),
entityInfo.get("fiscal-year-focus"),
entityInfo.get("fiscal-period-focus"),
rssItemGet("companyName") or entityInfo.get("conformed-name"),
entityInfo.get("state-of-incorporation"),
None, #'restatement_index',
None, #'period_index',
None, #'first_5_comments',
rssItemGet("enclosureUrl"), # enclsure zip URL if any
rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
entityInfo.get("business-address.phone"),
entityInfo.get("business-address.street1"),
entityInfo.get("business-address.street2"),
entityInfo.get("business-address.city"),
entityInfo.get("business-address.state"),
entityInfo.get("business-address.zip"),
countryOfState.get(entityInfo.get("business-address.state")),
entityInfo.get("mail-address.street1"),
entityInfo.get("mail-address.street2"),
entityInfo.get("mail-address.city"),
entityInfo.get("mail-address.state"),
entityInfo.get("mail-address.zip"),
countryOfState.get(entityInfo.get("mail-address.state")),
_fiscalYearEnd,
entityInfo.get("filer-category"),
entityInfo.get("public-float"),
entityInfo.get("trading-symbol")
),),
checkIfExisting=True,
returnExistenceStatus=True)
if LEI:
for id, _LEI, filing_number, existenceStatus in table:
self.filingId = id
self.filingPreviouslyInDB = existenceStatus
break
else:
for id, filing_number, existenceStatus in table:
self.filingId = id
self.filingPreviouslyInDB = existenceStatus
break
self.showStatus("insert report")
table = self.getTable('report', 'report_id',
('filing_id', ),
('filing_id',),
((self.filingId,
),),
checkIfExisting=True,
returnExistenceStatus=True)
for id, foundFilingId, existenceStatus in table:
self.reportId = id
self.filingPreviouslyInDB = existenceStatus
break
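        # Note on the getTable() convention used above and throughout: the
        # helper, inherited from SqlDbConnection, is an insert-unless-matched
        # operation. It writes the supplied rows except where the comparator
        # columns already match an existing row, and yields (id, comparator...,
        # [existenceStatus]) tuples. A minimal sketch with hypothetical values:
        #
        #     table = self.getTable('filing', 'filing_id',
        #                           ('filing_number',),   # columns written
        #                           ('filing_number',),   # columns matched on
        #                           (("0001234567-19-000001",),),
        #                           checkIfExisting=True,
        #                           returnExistenceStatus=True)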
def isSemanticDocument(self, modelDocument):
if modelDocument.type == Type.SCHEMA:
# must include document items taxonomy even if not in DTS
return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
return modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
def identifyPreexistingDocuments(self):
self.existingDocumentIds = {}
self.urlDocs = {}
docUris = set()
for modelDocument in self.modelXbrl.urlDocs.values():
url = ensureUrl(modelDocument.uri)
self.urlDocs[url] = modelDocument
if self.isSemanticDocument(modelDocument):
docUris.add(self.dbStr(url))
if docUris:
results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
.format(self.dbTableName("document"),
', '.join(docUris)))
self.existingDocumentIds = dict((self.urlDocs[docUrl],docId)
for docId, docUrl in results)
# identify whether taxonomyRelsSetsOwner is existing
self.isExistingTaxonomyRelSetsOwner = (
self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
self.taxonomyRelSetsOwner in self.existingDocumentIds)
def identifyConceptsUsed(self):
# relationshipSets are a dts property
self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
conceptsUsed = set(f.concept
for f in self.modelXbrl.factsInInstance)
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
conceptsUsed.add(dim.dimension)
if dim.isExplicit:
conceptsUsed.add(dim.member)
else:
conceptsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
conceptsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
conceptsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
for relationshipSetKey in self.relationshipSets:
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
for rel in relationshipSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept):
conceptsUsed.add(rel.fromModelObject)
if isinstance(rel.toModelObject, ModelConcept):
conceptsUsed.add(rel.toModelObject)
try:
for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
conceptsUsed.add(self.modelXbrl.qnameConcepts[qn])
except KeyError:
pass # no DTS
for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
for roleUriTypes in roleTypes:
for roleType in roleUriTypes:
for qn in roleType.usedOns:
if qn in self.modelXbrl.qnameConcepts: # qname may be undefined or invalid and still 2.1 legal
conceptsUsed.add(self.modelXbrl.qnameConcepts[qn])
# add concepts referenced by logging entries
for logEntry in self.loggingEntries:
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
conceptsUsed.add(modelObject)
# add substitution groups
conceptsUsed |= set(concept.substitutionGroup
for concept in conceptsUsed
if concept is not None)
conceptsUsed -= {None} # remove None if in conceptsUsed
self.conceptsUsed = conceptsUsed
typesUsed = set()
def typeUsed(modelType):
if modelType is not None and modelType.modelDocument.inDTS: # exclude nonDTS types (schema, etc)
typesUsed.add(modelType)
typesDerivedFrom = modelType.typeDerivedFrom
if isinstance(typesDerivedFrom, list): # union derivation
for typeDerivedFrom in typesDerivedFrom:
if typeDerivedFrom not in typesUsed:
typeUsed(typeDerivedFrom)
else: # single derivation
if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
typeUsed(typesDerivedFrom)
for concept in conceptsUsed:
modelType = concept.type
if modelType is not None:
if modelType not in typesUsed:
typeUsed(modelType)
self.typesUsed = typesUsed
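        # typeUsed() above recurses through base-type derivations so that a
        # used type drags in its whole derivation chain, e.g. (hypothetical
        # names) us-gaap:fooItemType -> xbrli:monetaryItemType; union-derived
        # types recurse into every member type. Only types whose defining
        # document is in the DTS are retained.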
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[url], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
# instance documents are filing references
# update report with document references
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None, # no id column in this table
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
# referenced doc may be extension schema
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
            instSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
)
def insertConcepts(self):
self.showStatus("insert concepts")
# determine new filing documents and types they use
filingDocumentConcepts = set()
existingDocumentUsedConcepts = set()
for concept in self.modelXbrl.qnameConcepts.values():
if concept.modelDocument not in self.existingDocumentIds:
filingDocumentConcepts.add(concept)
filingDocumentConceptType = concept.type
if filingDocumentConceptType is not None and filingDocumentConceptType not in self.typesUsed:
self.typesUsed.add(filingDocumentConceptType)
elif concept in self.conceptsUsed:
existingDocumentUsedConcepts.add(concept)
filingDocumentTypes = set()
existingDocumentUsedTypes = set()
for modelType in self.modelXbrl.qnameTypes.values():
if modelType.modelDocument not in self.existingDocumentIds:
filingDocumentTypes.add(modelType)
elif modelType in self.typesUsed:
existingDocumentUsedTypes.add(modelType)
# get existing element IDs
self.typeQnameId = {}
if existingDocumentUsedTypes:
typeQnameIds = []
table = self.getTable('data_type', 'data_type_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.qname.clarkNotation)
for modelType in existingDocumentUsedTypes
if modelType.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
table = self.getTable('data_type', 'data_type_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'base_type', 'derived_from_type_id'),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.id,
elementChildSequence(modelType),
modelType.qname.clarkNotation,
modelType.name,
modelType.baseXsdType,
                                     self.typeQnameId.get(modelType.typeDerivedFrom.qname)
                                         if isinstance(modelType.typeDerivedFrom, ModelType) else None)
for modelType in filingDocumentTypes
if modelType.modelDocument in self.documentIds)
)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
updatesToDerivedFrom = set()
for modelType in filingDocumentTypes:
if isinstance(modelType.typeDerivedFrom, ModelType):
typeDerivedFrom = modelType.typeDerivedFrom
if (typeDerivedFrom in filingDocumentTypes and
modelType.qname in self.typeQnameId and
typeDerivedFrom.qname in self.typeQnameId):
updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
self.typeQnameId[typeDerivedFrom.qname]) )
# update derivedFrom's of newly added types
if updatesToDerivedFrom:
self.updateTable('data_type',
('data_type_id', 'derived_from_type_id'),
updatesToDerivedFrom)
existingDocumentUsedTypes.clear() # dereference
filingDocumentTypes.clear() # dereference
self.conceptQnameId = {}
# get existing element IDs
if existingDocumentUsedConcepts:
table = self.getTable('concept', 'concept_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[concept.modelDocument],
concept.qname.clarkNotation)
for concept in existingDocumentUsedConcepts
if concept.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for conceptId, docId, qn in table:
self.conceptQnameId[qname(qn)] = conceptId
concepts = []
for concept in filingDocumentConcepts:
niceType = concept.niceType
if niceType is not None and len(niceType) > 128:
niceType = niceType[:128]
if concept.modelDocument in self.documentIds:
concepts.append((self.documentIds[concept.modelDocument],
concept.id,
elementChildSequence(concept),
concept.qname.clarkNotation,
concept.name,
self.typeQnameId.get(concept.typeQname),
niceType[:128] if niceType is not None else None,
self.conceptQnameId.get(concept.substitutionGroupQname),
concept.balance,
concept.periodType,
concept.isAbstract,
concept.isNillable,
concept.isNumeric,
concept.isMonetary,
concept.isTextBlock))
table = self.getTable('concept', 'concept_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_concept_id',
'balance', 'period_type', 'abstract', 'nillable',
'is_numeric', 'is_monetary', 'is_text_block'),
('document_id', 'qname'),
concepts
)
for conceptId, docId, qn in table:
self.conceptQnameId[qname(qn)] = conceptId
updatesToSubstitutionGroup = set()
for concept in filingDocumentConcepts:
if concept.substitutionGroup in filingDocumentConcepts and concept.modelDocument in self.documentIds:
updatesToSubstitutionGroup.add( (self.conceptQnameId[concept.qname],
self.conceptQnameId.get(concept.substitutionGroupQname)) )
# update derivedFrom's of newly added types
if updatesToSubstitutionGroup:
self.updateTable('concept',
('concept_id', 'substitution_group_concept_id'),
updatesToSubstitutionGroup)
# enumerations
# TBD
filingDocumentConcepts.clear() # dereference
existingDocumentUsedConcepts.clear() # dereference
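        # Note on the two-pass pattern used for both data_type and concept:
        # rows are first inserted with a possibly-NULL self-referential key
        # (derived_from_type_id / substitution_group_concept_id), then patched
        # via updateTable() once every row in the batch has a database id.
        # Minimal sketch with hypothetical ids:
        #
        #     self.updateTable('concept',
        #                      ('concept_id', 'substitution_group_concept_id'),
        #                      {(101, 7), (102, 7)})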
def insertArcroleTypes(self):
self.showStatus("insert arcrole types")
# add existing arcrole types
arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
arcroleType.roleURI) # key on docId, uriId
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'arcrole_uri'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleTypeIDs[1] # uri Id
)
for arcroleTypeIDs in arcroleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.arcroleTypeIds = {}
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
# added document arcrole type
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
arcroleType.arcroleURI), # key on docId, uriId
arcroleType) # value is roleType object
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleType.id,
elementChildSequence(arcroleType),
arcroleType.arcroleURI,
arcroleType.cyclesAllowed,
arcroleType.definition)
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
table = self.getTable('used_on',
None, # no record id in this table
('object_id', 'concept_id'),
('object_id', 'concept_id'),
tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
self.conceptQnameId[usedOnQn])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
for usedOnQn in arcroleType.usedOns
if usedOnQn in self.conceptQnameId),
checkIfExisting=True)
def insertRoleTypes(self):
self.showStatus("insert role types")
# add existing role types
roleTypesByIds = set((self.documentIds[roleType.modelDocument],
roleType.roleURI) # key on docId, uriId
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'role_uri'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0], # doc Id
roleTypeIDs[1] # uri Id
)
for roleTypeIDs in roleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.roleTypeIds = {}
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
# new document role types
roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
roleType.roleURI), # key on docId, uriId
roleType) # value is roleType object
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0], # doc Id
roleType.id,
elementChildSequence(roleType),
roleTypeIDs[1], # uri Id
roleType.definition)
for roleTypeIDs, roleType in roleTypesByIds.items()))
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
table = self.getTable('used_on',
None, # no record id in this table
('object_id', 'concept_id'),
('object_id', 'concept_id'),
tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
self.conceptQnameId[usedOnQn])
for roleTypeIDs, roleType in roleTypesByIds.items()
for usedOnQn in roleType.usedOns
if usedOnQn in self.conceptQnameId),
checkIfExisting=True)
def insertResources(self):
self.showStatus("insert resources")
# deduplicate resources (may be on multiple arcs)
arcroles = [arcrole
# check whether relationship_set is completely in instance or part/all in taxonomy
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
and self.arcroleHasResource[arcrole]
and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
# note that lxml has no column numbers, use objectIndex as pseudo-column number
uniqueResources = dict(((self.documentIds[resource.modelDocument],
resource.objectIndex), resource)
for arcrole in arcroles
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
for resource in (rel.fromModelObject, rel.toModelObject)
if isinstance(resource, ModelResource))
table = self.getTable('resource', 'resource_id',
('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
('document_id', 'xml_child_seq'),
tuple((self.documentIds[resource.modelDocument],
resource.id,
elementChildSequence(resource),
resource.qname.clarkNotation,
resource.role,
resource.textValue,
resource.xmlLang)
for resource in uniqueResources.values()),
checkIfExisting=True)
self.resourceId = dict(((docId, xml_child_seq), id)
for id, docId, xml_child_seq in table)
uniqueResources.clear()
def modelObjectId(self, modelObject):
if isinstance(modelObject, ModelConcept):
return self.conceptQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelType):
return self.conceptTypeIds.get(modelObject.qname)
elif isinstance(modelObject, ModelResource):
return self.resourceId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelFact):
return self.factId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
else:
return None
def insertRelationships(self):
self.showStatus("insert relationship sets")
table = self.getTable('relationship_set', 'relationship_set_id',
('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
else self.taxonomyRelSetsOwner],
ELR,
arcrole,
linkqname.clarkNotation,
arcqname.clarkNotation)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
# do tree walk to build relationships with depth annotated, no targetRole navigation
dbRels = []
def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
for rel in rels:
if rel not in visited and isinstance(rel.toModelObject, ModelObject):
visited.add(rel)
dbRels.append((rel, seq, depth, relSetId))
seq += 1
seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
visited.remove(rel)
return seq
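        # walkTree() is a depth-first traversal producing (rel, sequence,
        # depth, relSetId) rows; the visited set is pushed and popped around
        # each descent so a cyclic linkbase (e.g. a presentation loop) cannot
        # recurse forever, while legitimate re-use of a node on another
        # branch is still walked.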
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
relSetId = self.relSetId[(ELR,
arcrole,
linkqname.clarkNotation,
arcqname.clarkNotation)]
relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
seq = 1
for rootConcept in relationshipSet.rootConcepts:
seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
def resourceResourceId(resource):
if isinstance(resource, ModelResource):
                return self.resourceId.get((self.documentIds[resource.modelDocument],
                                            elementChildSequence(resource)))
else:
return None
table = self.getTable('relationship', 'relationship_id',
('document_id', 'xml_id', 'xml_child_seq',
'relationship_set_id', 'reln_order',
'from_id', 'to_id', 'calculation_weight',
'tree_sequence', 'tree_depth', 'preferred_label_role'),
('relationship_set_id', 'document_id', 'xml_child_seq'),
tuple((self.documentIds[rel.modelDocument],
rel.id,
elementChildSequence(rel.arcElement),
relSetId,
self.dbNum(rel.order),
self.modelObjectId(rel.fromModelObject),
self.modelObjectId(rel.toModelObject),
self.dbNum(rel.weight), # none if no weight
sequence,
depth,
rel.preferredLabel)
for rel, sequence, depth, relSetId in dbRels
if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
self.relationshipId = dict(((docId,xml_child_seq), relationshipId)
for relationshipId, relSetId, docId, xml_child_seq in table)
table = self.getTable('root', None,
('relationship_set_id', 'relationship_id'),
('relationship_set_id', 'relationship_id'),
tuple((relSetId,
self.relationshipId[self.documentIds[rel.modelDocument],
elementChildSequence(rel.arcElement)])
for rel, sequence, depth, relSetId in dbRels
if depth == 1 and
isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        del dbRels[:] # dereference
def insertFacts(self):
reportId = self.reportId
if self.filingPreviouslyInDB:
self.showStatus("deleting prior facts of this report")
# remove prior facts
self.lockTables(("fact", "entity_identifier", "period", "unit_measure", "unit",
"aspect_value_set", "aspect_value_report_set", # report_set is for id assignment
"footnote" "table_facts"))
for _tableName, _id in (("entity_identifier", "entity_identifier_id"),
("period", "period_id"),
("unit", "unit_id"),
("unit_measure", "unit_id"),
("aspect_value_set", "aspect_value_set_id"),
("aspect_value_report_set", "aspect_value_set_id"),
("footnote", "fact_id")):
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {0}.{3} = {1}.{3}"
.format( self.dbTableName(_tableName),
self.dbTableName("fact"),
reportId,
_id),
close=False, fetch=False)
for _tableName in ("fact", "table_facts"):
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName(_tableName), reportId),
close=False, fetch=False)
self.showStatus("insert data points")
# must only store used contexts and units (as they are removed only by being used)
contextsUsed = set()
        unitsUsed = {} # deduplicate by md5hash
for f in self.modelXbrl.factsInInstance:
if f.context is not None:
contextsUsed.add(f.context)
if f.unit is not None:
unitsUsed[f.unit.md5hash] = f.unit
# units
table = self.getTable('unit', 'unit_id',
('xml_id', 'xml_child_seq', 'measures_hash'),
('measures_hash',),
tuple((unit.id, # unit's xml_id
elementChildSequence(unit),
unit.md5hash)
for unit in unitsUsed.values()))
self.unitId = dict((_measuresHash, id)
for id, _measuresHash in table)
# measures
table = self.getTable('unit_measure',
None,
('unit_id', 'qname', 'is_multiplicand'),
('unit_id', 'qname', 'is_multiplicand'),
tuple((self.unitId[unit.md5hash],
measure.clarkNotation,
i == 0)
for unit in unitsUsed.values()
for i in range(2)
for measure in unit.measures[i]))
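        # In the unit_measure rows above, i == 0 flags numerator measures
        # (is_multiplicand true) and i == 1 denominator measures, mirroring
        # unit.measures, which is a (multiply-by, divide-by) pair of lists;
        # e.g. USD per share yields one row per measure, one flagged each way.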
table = self.getTable('entity_identifier', 'entity_identifier_id',
('scheme', 'identifier'),
('scheme', 'identifier'),
set((cntx.entityIdentifier[0],
cntx.entityIdentifier[1])
for cntx in contextsUsed)) # not shared across reports
self.entityIdentifierId = dict(((entScheme, entIdent), id)
for id, entScheme, entIdent in table)
table = self.getTable('period', 'period_id',
('start_date', 'end_date', 'is_instant', 'is_forever'),
('start_date', 'end_date', 'is_instant', 'is_forever'),
set((cntx.startDatetime if cntx.isStartEndPeriod else None,
cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
cntx.isInstantPeriod,
cntx.isForeverPeriod)
for cntx in contextsUsed)) # periods not shared across multiple instance/inline docs
self.periodId = dict(((start, end, isInstant, isForever), id)
for id, start, end, isInstant, isForever in table)
def cntxDimsSet(cntx):
return frozenset((self.conceptQnameId[modelDimValue.dimensionQname],
self.conceptQnameId.get(modelDimValue.memberQname), # null if typed
modelDimValue.isTyped,
None if not modelDimValue.isTyped else ( # typed_value is null if not typed dimension
modelDimValue.typedMember.xValue.clarkNotation # if typed member is QName use clark name because QName is not necessarily a concept in the DTS
if (modelDimValue.typedMember is not None and getattr(modelDimValue.typedMember, "xValid", UNVALIDATED) >= VALID and isinstance(modelDimValue.typedMember.xValue,QName))
else modelDimValue.stringValue)) # otherwise typed member is string value of the typed member
for modelDimValue in cntx.qnameDims.values()
if modelDimValue.dimensionQname in self.conceptQnameId)
cntxAspectValueSets = dict((cntx, cntxDimsSet(cntx))
for cntx in contextsUsed)
aspectValueSelections = set(aspectValueSelectionSet
for cntx, aspectValueSelectionSet in cntxAspectValueSets.items()
if aspectValueSelectionSet)
# allocate an aspect_value_set_id for each aspect_value_set in report (independent of SQL of database)
table = self.getTable('aspect_value_report_set', 'aspect_value_set_id',
('report_id', ),
('report_id', ),
tuple((reportId,)
for aspectValueSelection in aspectValueSelections)
)
# assure we only get single entry per result (above gives cross product)
table = self.execute("SELECT aspect_value_set_id, report_id from {0} "
"WHERE report_id = {1}"
.format(self.dbTableName("aspect_value_report_set"), reportId))
aspectValueReportSets = dict((aspectValueSelections.pop(), id)
for id, _reportId in table)
cntxAspectValueSetId = dict((cntx, aspectValueReportSets[_cntxDimsSet])
for cntx, _cntxDimsSet in cntxAspectValueSets.items()
if _cntxDimsSet)
table = self.getTable('aspect_value_set',
None,
('aspect_value_set_id', 'aspect_concept_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
('aspect_value_set_id', ),
tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
for aspectValueSelection, aspectValueSetId in aspectValueReportSets.items()
for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
)
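        # The aspect_value_report_set rows exist only to mint one
        # aspect_value_set_id per distinct dimension combination in a
        # database-portable way (no reliance on product-specific sequences);
        # the payload rows keyed by those ids then land in aspect_value_set.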
del contextsUsed, unitsUsed # dereference objects
# facts
def insertFactSet(modelFacts, tupleFactId):
facts = []
for fact in modelFacts:
if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
cntx = fact.context
documentId = self.documentIds[fact.modelDocument]
facts.append((reportId,
documentId,
fact.id, # fact's xml_id
elementChildSequence(fact),
fact.sourceline,
tupleFactId, # tuple (parent) fact's database fact_id
self.conceptQnameId.get(fact.qname),
fact.contextID,
self.entityIdentifierId.get((cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
if cntx is not None else None,
self.periodId.get((
cntx.startDatetime if cntx.isStartEndPeriod else None,
cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
cntx.isInstantPeriod,
cntx.isForeverPeriod)) if cntx is not None else None,
cntxAspectValueSetId.get(cntx) if cntx is not None else None,
self.unitId.get(fact.unit.md5hash) if fact.unit is not None else None,
fact.isNil,
fact.precision,
fact.decimals,
roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
fact.xmlLang if not fact.isNumeric and not fact.isNil else None,
collapseWhitespacePattern.sub(' ', fact.value.strip()) if fact.value is not None else None,
fact.value,
))
table = self.getTable('fact', 'fact_id',
('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
'tuple_fact_id', # tuple
'concept_id',
'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_set_id', 'unit_id',
'is_nil', 'precision_value', 'decimals_value', 'effective_value',
'language', 'normalized_string_value', 'value'),
('document_id', 'xml_child_seq'),
facts)
xmlIdFactId = dict(((docId, xml_child_seq), _factId)
for _factId, docId, xml_child_seq in table)
self.factId.update(xmlIdFactId)
for fact in modelFacts:
if fact.isTuple:
try:
insertFactSet(fact.modelTupleFacts,
xmlIdFactId[(self.documentIds[fact.modelDocument],
elementChildSequence(fact))])
except KeyError:
self.modelXbrl.info("xpDB:warning",
_("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
modelObject=fact, tuple=fact.qname)
self.factId = {}
insertFactSet(self.modelXbrl.facts, None)
# hashes
if self.tableFacts: # if any entries
_tableFacts = []
for roleType, tableCode, fact in self.tableFacts:
try:
_tableFacts.append((reportId,
self.roleTypeIds[(self.documentIds[roleType.modelDocument],
roleType.roleURI)],
tableCode,
self.factId[(self.documentIds[fact.modelDocument],
elementChildSequence(fact))]))
except KeyError:
# print ("missing table facts role or fact")
pass
table = self.getTable('table_facts', None,
('report_id', 'object_id', 'table_code', 'fact_id'),
('report_id', 'object_id', 'fact_id'),
_tableFacts)
# footnotes
footnotesRelationshipSet = ModelRelationshipSet(self.modelXbrl, "XBRL-footnotes")
table = self.getTable('footnote', None,
('fact_id', 'footnote_group', 'type', 'footnote_value_id', 'language', 'normalized_string_value', 'value'),
('fact_id', 'footnote_group', 'type', 'footnote_value_id', 'language', 'normalized_string_value', 'value'),
tuple((self.factId[(self.documentIds[fact.modelDocument], elementChildSequence(fact))],
footnoteRel.arcrole,
None if isinstance(toObj, ModelFact) else toObj.role,
self.factId[(self.documentIds[toObj.modelDocument], elementChildSequence(toObj))] if isinstance(toObj, ModelFact) else None,
None if isinstance(toObj, ModelFact) else toObj.xmlLang,
None if isinstance(toObj, ModelFact) else collapseWhitespacePattern.sub(' ', xmlstring(toObj, stripXmlns=True, contentsOnly=True, includeText=True)),
None if isinstance(toObj, ModelFact) else xmlstring(toObj, stripXmlns=True, contentsOnly=True, includeText=True))
for fact in self.modelXbrl.factsInInstance
for footnoteRel in footnotesRelationshipSet.fromModelObject(fact)
for toObj in (footnoteRel.toModelObject,)
if toObj is not None)
)
def insertValidationResults(self):
reportId = self.reportId
if self.filingPreviouslyInDB:
self.showStatus("deleting prior messages of this report")
# remove prior messages for this report
self.lockTables(("message", "message_reference"))
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
.format(self.dbTableName("message_reference"),
self.dbTableName("message"),
reportId),
close=False, fetch=False)
self.execute("DELETE FROM {} WHERE message.report_id = {}"
.format(self.dbTableName("message"),reportId),
close=False, fetch=False)
messages = []
messageRefs = defaultdict(set) # direct link to objects
for i, logEntry in enumerate(self.loggingEntries):
sequenceInReport = i+1
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
# for now just find a concept
objectId = None
if isinstance(modelObject, ModelFact):
objectId = self.factId.get((self.documentIds.get(modelObject.modelDocument),
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelRelationship):
objectId = self.relSetId.get((modelObject.linkrole,
modelObject.arcrole,
modelObject.linkQname.clarkNotation,
modelObject.arcElement.qname.clarkNotation))
elif isinstance(modelObject, ModelConcept):
objectId = self.conceptQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelXbrl):
objectId = reportId
elif hasattr(modelObject, "modelDocument"):
objectId = self.documentIds.get(modelObject.modelDocument)
if objectId is not None:
messageRefs[sequenceInReport].add(objectId)
messages.append((reportId,
sequenceInReport,
logEntry['code'],
logEntry['level'],
logEntry['message']['text']))
if messages:
self.showStatus("insert validation messages")
table = self.getTable('message', 'message_id',
('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
('report_id', 'sequence_in_report'),
messages)
messageIds = dict((sequenceInReport, messageId)
for messageId, _reportId, sequenceInReport in table)
table = self.getTable('message_reference', None,
('message_id', 'object_id'),
('message_id', 'object_id'),
tuple((messageId,
objectId)
for sequenceInReport, objectIds in messageRefs.items()
for objectId in objectIds
for messageId in (messageIds[sequenceInReport],)))
def loadXbrlFromDB(self, loadDBsaveToFile, loadReportId):
# load from database
modelXbrl = self.modelXbrl
# find instance in DB
self.showStatus("finding loadReportId in database")
if loadReportId and loadReportId.isnumeric():
# use report ID to get specific report
results = self.execute("select r.report_id, d.document_url from report r, document d "
"where r.report_id = {} and r.report_schema_doc_id = d.document_id"
.format(loadReportId))
else:
# use filename to get instance
instanceURI = os.path.basename(loadDBsaveToFile)
results = self.execute("select r.report_id, d.document_url from report r, document d "
"where r.report_schema_doc_id = d.document_id")
        reportId = xbrlSchemaRef = None
        for reportId, xbrlSchemaRef in results:
            break
        if not reportId:
            raise XPDBException("sqlDB:MissingReport",
                                _("The report was not found in table report"))
        if not xbrlSchemaRef:
            raise XPDBException("sqlDB:MissingSchemaRef",
                                _("The report schemaRef was not found in table report"))
# create the instance document and resulting filing
modelXbrl.blockOpenDBrecursion = True
modelXbrl.modelDocument = createModelDocument(
modelXbrl,
Type.INSTANCE,
loadDBsaveToFile,
schemaRefs=[xbrlSchemaRef],
isEntry=True,
initialComment="Generated by Arelle(r) for Data Act project",
documentEncoding="utf-8")
ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl) # needs dimension defaults
prefixes = modelXbrl.prefixedNamespaces
prefixes["iso4217"] = XbrlConst.iso4217
prefixes["xbrli"] = XbrlConst.xbrli
prefixes[None] = XbrlConst.xbrli # createInstance expects default prefix for xbrli
# make prefixes reverse lookupable for qname function efficiency
prefixes.update(dict((ns,prefix) for prefix, ns in prefixes.items()))
        # add roleRef and arcroleRef (e.g. for footnotes, if any, as in inline XBRL documents)
# facts in this instance
self.showStatus("finding facts in database")
factsTbl = self.execute(_(
"select f.fact_id, fc.qname, f.value, f.decimals_value, "
"avd.qname as dim_name, avm.qname as mem_name, av.typed_value, "
"um.qname as u_measure, um.is_multiplicand as u_mul,p.start_date, p.end_date, p.is_instant, "
"ei.scheme, ei.identifier "
"from fact f "
"join concept fc on f.concept_id = fc.concept_id "
"and f.report_id = {} "
"left join aspect_value_set av on av.aspect_value_set_id = f.aspect_value_set_id "
"join concept avd on av.aspect_concept_id = avd.concept_id "
"left join concept avm on av.aspect_value_id = avm.concept_id "
"left join unit_measure um on um.unit_id = f.unit_id "
"left join period p on p.period_id = f.period_id "
"left join entity_identifier ei on ei.entity_identifier_id = f.entity_identifier_id ")
.format(reportId))
prevId = None
factRows = []
cntxTbl = {}
unitTbl = {}
def storeFact():
unitMul = set()
unitDiv = set()
dims = set()
for _dbFactId, _qname, _value, _decimals, _dimQName, _memQName, _typedValue, \
_unitMeasure, _unitIsMul, \
_perStart, _perEnd, _perIsInstant, \
_scheme, _identifier in factRows:
if _unitMeasure:
if _unitIsMul:
unitMul.add(_unitMeasure)
else:
unitDiv.add(_unitMeasure)
if _dimQName:
dims.add((_dimQName, _memQName, _typedValue))
cntxKey = (_perStart, _perEnd, _perIsInstant, _scheme, _identifier) + tuple(sorted(dims))
if cntxKey in cntxTbl:
_cntx = cntxTbl[cntxKey]
else:
cntxId = 'c-{:02}'.format(len(cntxTbl) + 1)
qnameDims = {}
for _dimQn, _memQn, _dimVal in dims:
dimQname = qname(_dimQn, prefixes)
dimConcept = modelXbrl.qnameConcepts.get(dimQname)
if _memQn:
mem = qname(_memQn, prefixes) # explicit dim
elif dimConcept.isTypedDimension:
# a modelObject xml element is needed for all of the instance functions to manage the typed dim
mem = addChild(modelXbrl.modelDocument, dimConcept.typedDomainElement.qname, text=_dimVal, appendChild=False)
qnameDims[dimQname] = DimValuePrototype(modelXbrl, None, dimQname, mem, "segment")
_cntx = modelXbrl.createContext(
_scheme,
_identifier,
("duration","instant")[_perIsInstant],
None if _perIsInstant else dateTime(_perStart, type=DATETIME),
dateTime(_perEnd, type=DATETIME),
None, # no dimensional validity checking (like formula does)
qnameDims, [], [],
id=cntxId)
cntxTbl[cntxKey] = _cntx
if unitMul or unitDiv:
unitKey = (tuple(sorted(unitMul)),tuple(sorted(unitDiv)))
if unitKey in unitTbl:
unit = unitTbl[unitKey]
else:
mulQns = [qname(u, prefixes) for u in sorted(unitMul) if u]
divQns = [qname(u, prefixes) for u in sorted(unitDiv) if u]
unitId = 'u-{:02}'.format(len(unitTbl) + 1)
for _measures in mulQns, divQns:
for _measure in _measures:
addQnameValue(modelXbrl.modelDocument, _measure)
unit = modelXbrl.createUnit(mulQns, divQns, id=unitId)
unitTbl[unitKey] = unit
else:
unit = None
attrs = {"contextRef": _cntx.id}
conceptQn = qname(_qname,prefixes)
concept = modelXbrl.qnameConcepts.get(conceptQn)
if _value is None or (
len(_value) == 0 and concept.baseXbrliType not in ("string", "normalizedString", "token")):
attrs[XbrlConst.qnXsiNil] = "true"
text = None
else:
text = _value
if concept.isNumeric:
if unit is not None:
attrs["unitRef"] = unit.id
if _decimals:
attrs["decimals"] = _decimals
# is value a QName?
if concept.baseXbrliType == "QName":
addQnameValue(modelXbrl.modelDocument, qname(text.strip(), prefixes))
f = modelXbrl.createFact(conceptQn, attributes=attrs, text=text)
del factRows[:]
prevId = None
for fact in factsTbl:
id = fact[0]
if id != prevId and prevId:
storeFact()
factRows.append(fact)
prevId = id
if prevId and factRows:
storeFact()
self.showStatus("saving XBRL instance")
modelXbrl.saveInstance(overrideFilepath=loadDBsaveToFile, encoding="utf-8")
self.showStatus(_("Saved extracted instance"), 5000)
return modelXbrl.modelDocument
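# Illustrative extraction sketch (not from the original source; values are
# placeholders): loadXbrlFromDB is reached through insertIntoDB by passing
# loadDBsaveToFile, which round-trips a stored report back into an instance.
#
#     insertIntoDB(modelXbrl, user="pguser", password="pgpasswd",
#                  host="localhost", port=8084, database="open_db",
#                  timeout=90, product="postgres",
#                  loadDBsaveToFile="output_instance.xbrl",
#                  loadInstanceId="214147")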
countryOfState = {
"AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
"FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
"KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
"MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
"NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
"SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
"WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
"AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
"QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| apache-2.0 | 6,938,166,424,874,139,000 | 56.452433 | 201 | 0.514562 | false |
mvaled/sentry | src/sentry/testutils/factories.py | 1 | 32360 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.conf import settings
import copy
import io
import os
import petname
import random
import six
import warnings
from importlib import import_module
from django.utils import timezone
from django.utils.text import slugify
from hashlib import sha1
from loremipsum import Generator
from uuid import uuid4
from sentry.event_manager import EventManager
from sentry.constants import SentryAppStatus
from sentry.incidents.models import (
Incident,
IncidentGroup,
IncidentProject,
IncidentSeen,
IncidentActivity,
)
from sentry.mediators import (
sentry_apps,
sentry_app_installations,
sentry_app_installation_tokens,
service_hooks,
)
from sentry.models import (
Activity,
Environment,
Event,
EventError,
Group,
Organization,
OrganizationMember,
OrganizationMemberTeam,
Project,
ProjectBookmark,
Team,
User,
UserEmail,
Release,
Commit,
ReleaseCommit,
CommitAuthor,
Repository,
CommitFileChange,
ProjectDebugFile,
File,
UserPermission,
EventAttachment,
UserReport,
PlatformExternalIssue,
)
from sentry.models.integrationfeature import Feature, IntegrationFeature
from sentry.utils import json
from sentry.utils.canonical import CanonicalKeyDict
loremipsum = Generator()
def get_fixture_path(name):
return os.path.join(
os.path.dirname(__file__), # src/sentry/testutils/
os.pardir, # src/sentry/
os.pardir, # src/
os.pardir,
"tests",
"fixtures",
name,
)
def make_sentence(words=None):
if words is None:
words = int(random.weibullvariate(8, 3))
return " ".join(random.choice(loremipsum.words) for _ in range(words))
def make_word(words=None):
if words is None:
words = int(random.weibullvariate(8, 3))
return random.choice(loremipsum.words)
DEFAULT_EVENT_DATA = {
"extra": {
"loadavg": [0.97607421875, 0.88330078125, 0.833984375],
"sys.argv": [
"/Users/dcramer/.virtualenvs/sentry/bin/raven",
"test",
"https://ebc35f33e151401f9deac549978bda11:[email protected]/1",
],
"user": "dcramer",
},
"modules": {"raven": "3.1.13"},
"request": {
"cookies": {},
"data": {},
"env": {},
"headers": {},
"method": "GET",
"query_string": "",
"url": "http://example.com",
},
"stacktrace": {
"frames": [
{
"abs_path": "www/src/sentry/models/foo.py",
"context_line": " string_max_length=self.string_max_length)",
"filename": "sentry/models/foo.py",
"function": "build_msg",
"in_app": True,
"lineno": 29,
"module": "raven.base",
"post_context": [
" },",
" })",
"",
" if 'stacktrace' in data:",
" if self.include_paths:",
],
"pre_context": [
"",
" data.update({",
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
" list_max_length=self.list_max_length,",
],
"vars": {
"culprit": "raven.scripts.runner",
"date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)",
"event_id": "598fb19363e745ec8be665e6ba88b1b2",
"event_type": "raven.events.Message",
"frames": "<generator object iter_stack_frames at 0x103fef050>",
"handler": "<raven.events.Message object at 0x103feb710>",
"k": "logentry",
"public_key": None,
"result": {
"logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
"self": "<raven.base.Client object at 0x104397f10>",
"stack": True,
"tags": None,
"time_spent": None,
},
},
{
"abs_path": "/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py",
"context_line": " string_max_length=self.string_max_length)",
"filename": "raven/base.py",
"function": "build_msg",
"in_app": False,
"lineno": 290,
"module": "raven.base",
"post_context": [
" },",
" })",
"",
" if 'stacktrace' in data:",
" if self.include_paths:",
],
"pre_context": [
"",
" data.update({",
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
" list_max_length=self.list_max_length,",
],
"vars": {
"culprit": "raven.scripts.runner",
"date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)",
"event_id": "598fb19363e745ec8be665e6ba88b1b2",
"event_type": "raven.events.Message",
"frames": "<generator object iter_stack_frames at 0x103fef050>",
"handler": "<raven.events.Message object at 0x103feb710>",
"k": "logentry",
"public_key": None,
"result": {
"logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
"self": "<raven.base.Client object at 0x104397f10>",
"stack": True,
"tags": None,
"time_spent": None,
},
},
]
},
"tags": [],
"platform": "python",
}
def _patch_artifact_manifest(path, org, release, project=None):
    with open(path, "rb") as f:
        manifest = json.loads(f.read())
manifest["org"] = org
manifest["release"] = release
if project:
manifest["project"] = project
return json.dumps(manifest)
# TODO(dcramer): consider moving to something more scaleable like factoryboy
class Factories(object):
@staticmethod
def create_organization(name=None, owner=None, **kwargs):
if not name:
name = petname.Generate(2, " ", letters=10).title()
org = Organization.objects.create(name=name, **kwargs)
if owner:
Factories.create_member(organization=org, user=owner, role="owner")
return org
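    # Hedged usage sketch (illustrative only; names are placeholders) of a
    # typical test bootstrap built from these factories:
    #
    #     user = Factories.create_user(email="[email protected]")
    #     org = Factories.create_organization(name="Acme", owner=user)
    #     team = Factories.create_team(org, members=[user])
    #     project = Factories.create_project(teams=[team], name="Backend")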
@staticmethod
def create_member(teams=None, **kwargs):
kwargs.setdefault("role", "member")
om = OrganizationMember.objects.create(**kwargs)
if teams:
for team in teams:
Factories.create_team_membership(team=team, member=om)
return om
@staticmethod
def create_team_membership(team, member=None, user=None):
if member is None:
member, _ = OrganizationMember.objects.get_or_create(
user=user, organization=team.organization, defaults={"role": "member"}
)
return OrganizationMemberTeam.objects.create(
team=team, organizationmember=member, is_active=True
)
@staticmethod
def create_team(organization, **kwargs):
if not kwargs.get("name"):
kwargs["name"] = petname.Generate(2, " ", letters=10).title()
if not kwargs.get("slug"):
kwargs["slug"] = slugify(six.text_type(kwargs["name"]))
members = kwargs.pop("members", None)
team = Team.objects.create(organization=organization, **kwargs)
if members:
for user in members:
Factories.create_team_membership(team=team, user=user)
return team
@staticmethod
def create_environment(project, **kwargs):
name = kwargs.get("name", petname.Generate(3, " ", letters=10)[:64])
env = Environment.objects.create(
organization_id=project.organization_id, project_id=project.id, name=name
)
env.add_project(project, is_hidden=kwargs.get("is_hidden"))
return env
@staticmethod
def create_project(organization=None, teams=None, **kwargs):
if not kwargs.get("name"):
kwargs["name"] = petname.Generate(2, " ", letters=10).title()
if not kwargs.get("slug"):
kwargs["slug"] = slugify(six.text_type(kwargs["name"]))
if not organization and teams:
organization = teams[0].organization
project = Project.objects.create(organization=organization, **kwargs)
if teams:
for team in teams:
project.add_team(team)
return project
@staticmethod
def create_project_bookmark(project, user):
return ProjectBookmark.objects.create(project_id=project.id, user=user)
@staticmethod
def create_project_key(project):
return project.key_set.get_or_create()[0]
@staticmethod
def create_release(project, user=None, version=None, date_added=None):
if version is None:
version = os.urandom(20).encode("hex")
if date_added is None:
date_added = timezone.now()
release = Release.objects.create(
version=version, organization_id=project.organization_id, date_added=date_added
)
release.add_project(project)
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=Activity.get_version_ident(version),
user=user,
data={"version": version},
)
# add commits
if user:
author = Factories.create_commit_author(project=project, user=user)
repo = Factories.create_repo(project, name="organization-{}".format(project.slug))
commit = Factories.create_commit(
project=project,
repo=repo,
author=author,
release=release,
key="deadbeef",
message="placeholder commit message",
)
release.update(
authors=[six.text_type(author.id)], commit_count=1, last_commit_id=commit.id
)
return release
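    # Illustrative note: when a user is supplied, create_release also
    # fabricates a repo, a commit author, and a single "deadbeef" commit,
    # then stamps the release accordingly, so (sketch, placeholder version):
    #
    #     release = Factories.create_release(project, user=user, version="1.0.0")
    #     assert release.commit_count == 1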
@staticmethod
def create_artifact_bundle(org, release, project=None):
import zipfile
bundle = io.BytesIO()
bundle_dir = get_fixture_path("artifact_bundle")
        with zipfile.ZipFile(bundle, "w", zipfile.ZIP_DEFLATED) as zf:
            for path, _, files in os.walk(bundle_dir):
                for filename in files:
                    fullpath = os.path.join(path, filename)
                    relpath = os.path.relpath(fullpath, bundle_dir)
                    if filename == "manifest.json":
                        manifest = _patch_artifact_manifest(fullpath, org, release, project)
                        zf.writestr(relpath, manifest)
                    else:
                        zf.write(fullpath, relpath)
return bundle.getvalue()
@staticmethod
def create_repo(project, name=None):
repo = Repository.objects.create(
organization_id=project.organization_id,
name=name
or "{}-{}".format(petname.Generate(2, "", letters=10), random.randint(1000, 9999)),
)
return repo
@staticmethod
def create_commit(
repo, project=None, author=None, release=None, message=None, key=None, date_added=None
):
commit = Commit.objects.get_or_create(
organization_id=repo.organization_id,
repository_id=repo.id,
key=key or sha1(uuid4().hex).hexdigest(),
defaults={
"message": message or make_sentence(),
"author": author
or Factories.create_commit_author(organization_id=repo.organization_id),
"date_added": date_added or timezone.now(),
},
)[0]
if release:
assert project
ReleaseCommit.objects.create(
organization_id=repo.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
Factories.create_commit_file_change(commit=commit, filename="/models/foo.py")
Factories.create_commit_file_change(commit=commit, filename="/worsematch/foo.py")
Factories.create_commit_file_change(commit=commit, filename="/models/other.py")
return commit
@staticmethod
def create_commit_author(organization_id=None, project=None, user=None):
return CommitAuthor.objects.get_or_create(
organization_id=organization_id or project.organization_id,
email=user.email if user else "{}@example.com".format(make_word()),
defaults={"name": user.name if user else make_word()},
)[0]
@staticmethod
def create_commit_file_change(commit, filename):
return CommitFileChange.objects.get_or_create(
organization_id=commit.organization_id, commit=commit, filename=filename, type="M"
)
@staticmethod
def create_user(email=None, **kwargs):
if email is None:
email = uuid4().hex + "@example.com"
kwargs.setdefault("username", email)
kwargs.setdefault("is_staff", True)
kwargs.setdefault("is_active", True)
kwargs.setdefault("is_superuser", False)
user = User(email=email, **kwargs)
if not kwargs.get("password"):
user.set_password("admin")
user.save()
# UserEmail is created by a signal
assert UserEmail.objects.filter(user=user, email=email).update(is_verified=True)
return user
@staticmethod
def create_useremail(user, email, **kwargs):
if not email:
email = uuid4().hex + "@example.com"
kwargs.setdefault("is_verified", True)
useremail = UserEmail(user=user, email=email, **kwargs)
useremail.save()
return useremail
@staticmethod
def create_event(group=None, project=None, event_id=None, normalize=True, **kwargs):
# XXX: Do not use this method for new tests! Prefer `store_event`.
if event_id is None:
event_id = uuid4().hex
kwargs.setdefault("project", project if project else group.project)
kwargs.setdefault("data", copy.deepcopy(DEFAULT_EVENT_DATA))
kwargs.setdefault("platform", kwargs["data"].get("platform", "python"))
kwargs.setdefault("message", kwargs["data"].get("message", "message"))
if kwargs.get("tags"):
tags = kwargs.pop("tags")
if isinstance(tags, dict):
tags = list(tags.items())
kwargs["data"]["tags"] = tags
if kwargs.get("stacktrace"):
stacktrace = kwargs.pop("stacktrace")
kwargs["data"]["stacktrace"] = stacktrace
user = kwargs.pop("user", None)
if user is not None:
kwargs["data"]["user"] = user
kwargs["data"].setdefault("errors", [{"type": EventError.INVALID_DATA, "name": "foobar"}])
# maintain simple event Factories by supporting the legacy message
# parameter just like our API would
if "logentry" not in kwargs["data"]:
kwargs["data"]["logentry"] = {"message": kwargs["message"] or "<unlabeled event>"}
if normalize:
manager = EventManager(CanonicalKeyDict(kwargs["data"]))
manager.normalize()
kwargs["data"] = manager.get_data()
kwargs["data"].update(manager.materialize_metadata())
kwargs["message"] = manager.get_search_message()
        # This is needed so that create_event saves the event in nodestore
        # under the correct key. This is usually done in EventManager.save()
kwargs["data"].setdefault("node_id", Event.generate_node_id(kwargs["project"].id, event_id))
event = Event(event_id=event_id, group=group, **kwargs)
# emulate EventManager refs
event.data.bind_ref(event)
event.save()
return event
@staticmethod
def store_event(data, project_id, assert_no_errors=True):
# Like `create_event`, but closer to how events are actually
        # ingested. Prefer to use this method over `create_event`.
manager = EventManager(data)
manager.normalize()
if assert_no_errors:
errors = manager.get_data().get("errors")
assert not errors, errors
event = manager.save(project_id)
if event.group:
event.group.save()
return event
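    # Minimal sketch of the preferred flow (field values are illustrative and
    # assume a normalizable payload):
    #
    #   event = Factories.store_event(
    #       data={"message": "hello", "platform": "python"},
    #       project_id=project.id,
    #   )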
@staticmethod
def create_full_event(group, event_id="a", **kwargs):
payload = """
{
"event_id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"logentry": {"formatted": "TypeError: Cannot read property '1' of null"},
"tags": [
["environment", "prod"],
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0",
"id": "41656",
"email": "[email protected]"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
event = Factories.create_event(
group=group,
event_id=event_id,
platform="javascript",
data=json.loads(payload),
            # This payload already went through sourcemap
            # processing; normalizing it would remove
            # frame.data (orig_filename, etc.)
normalize=False,
)
return event
@staticmethod
def create_group(project, checksum=None, **kwargs):
if checksum:
warnings.warn("Checksum passed to create_group", DeprecationWarning)
kwargs.setdefault("message", "Hello world")
kwargs.setdefault("data", {})
if "type" not in kwargs["data"]:
kwargs["data"].update({"type": "default", "metadata": {"title": kwargs["message"]}})
if "short_id" not in kwargs:
kwargs["short_id"] = project.next_short_id()
return Group.objects.create(project=project, **kwargs)
@staticmethod
def create_file(**kwargs):
return File.objects.create(**kwargs)
@staticmethod
def create_file_from_path(path, name=None, **kwargs):
if name is None:
name = os.path.basename(path)
file = Factories.create_file(name=name, **kwargs)
with open(path) as f:
file.putfile(f)
return file
@staticmethod
def create_event_attachment(event, file=None, **kwargs):
if file is None:
file = Factories.create_file(
name="log.txt",
size=32,
headers={"Content-Type": "text/plain"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
return EventAttachment.objects.create(
project_id=event.project_id, event_id=event.event_id, file=file, **kwargs
)
@staticmethod
def create_dif_file(
project,
debug_id=None,
object_name=None,
features=None,
data=None,
file=None,
cpu_name=None,
code_id=None,
**kwargs
):
if debug_id is None:
debug_id = six.text_type(uuid4())
if object_name is None:
object_name = "%s.dSYM" % debug_id
if features is not None:
if data is None:
data = {}
data["features"] = features
if file is None:
file = Factories.create_file(
name=object_name,
size=42,
headers={"Content-Type": "application/x-mach-binary"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
return ProjectDebugFile.objects.create(
debug_id=debug_id,
code_id=code_id,
project=project,
object_name=object_name,
cpu_name=cpu_name or "x86_64",
file=file,
data=data,
**kwargs
)
@staticmethod
def create_dif_from_path(path, object_name=None, **kwargs):
if object_name is None:
object_name = os.path.basename(path)
headers = {"Content-Type": "application/x-mach-binary"}
file = Factories.create_file_from_path(path, name=object_name, headers=headers)
return Factories.create_dif_file(file=file, object_name=object_name, **kwargs)
@staticmethod
def add_user_permission(user, permission):
UserPermission.objects.create(user=user, permission=permission)
@staticmethod
def create_sentry_app(**kwargs):
app = sentry_apps.Creator.run(**Factories._sentry_app_kwargs(**kwargs))
if kwargs.get("published"):
app.update(status=SentryAppStatus.PUBLISHED)
return app
@staticmethod
def create_internal_integration(**kwargs):
return sentry_apps.InternalCreator.run(**Factories._sentry_app_kwargs(**kwargs))
@staticmethod
def create_internal_integration_token(install, **kwargs):
return sentry_app_installation_tokens.Creator.run(sentry_app_installation=install, **kwargs)
@staticmethod
def _sentry_app_kwargs(**kwargs):
_kwargs = {
"user": kwargs.get("user", Factories.create_user()),
"name": kwargs.get("name", petname.Generate(2, " ", letters=10).title()),
"organization": kwargs.get("organization", Factories.create_organization()),
"author": kwargs.get("author", "A Company"),
"scopes": kwargs.get("scopes", ()),
"verify_install": kwargs.get("verify_install", True),
"webhook_url": kwargs.get("webhook_url", "https://example.com/webhook"),
"events": [],
"schema": {},
}
_kwargs.update(**kwargs)
return _kwargs
@staticmethod
def create_sentry_app_installation(organization=None, slug=None, user=None):
if not organization:
organization = Factories.create_organization()
Factories.create_project(organization=organization)
return sentry_app_installations.Creator.run(
slug=(slug or Factories.create_sentry_app().slug),
organization=organization,
user=(user or Factories.create_user()),
)
@staticmethod
def create_issue_link_schema():
return {
"type": "issue-link",
"link": {
"uri": "/sentry/issues/link",
"required_fields": [
{
"type": "select",
"name": "assignee",
"label": "Assignee",
"uri": "/sentry/members",
}
],
},
"create": {
"uri": "/sentry/issues/create",
"required_fields": [
{"type": "text", "name": "title", "label": "Title"},
{"type": "text", "name": "summary", "label": "Summary"},
],
"optional_fields": [
{
"type": "select",
"name": "points",
"label": "Points",
"options": [["1", "1"], ["2", "2"], ["3", "3"], ["5", "5"], ["8", "8"]],
},
{
"type": "select",
"name": "assignee",
"label": "Assignee",
"uri": "/sentry/members",
},
],
},
}
@staticmethod
def create_alert_rule_action_schema():
return {
"type": "alert-rule-action",
"required_fields": [{"type": "text", "name": "channel", "label": "Channel"}],
}
@staticmethod
def create_service_hook(actor=None, org=None, project=None, events=None, url=None, **kwargs):
if not actor:
actor = Factories.create_user()
if not org:
org = Factories.create_organization(owner=actor)
if not project:
project = Factories.create_project(organization=org)
if events is None:
events = ("event.created",)
if not url:
url = "https://example.com/sentry/webhook"
_kwargs = {
"actor": actor,
"projects": [project],
"organization": org,
"events": events,
"url": url,
}
_kwargs.update(kwargs)
return service_hooks.Creator.run(**_kwargs)
@staticmethod
def create_sentry_app_feature(feature=None, sentry_app=None, description=None):
if not sentry_app:
sentry_app = Factories.create_sentry_app()
integration_feature = IntegrationFeature.objects.create(
sentry_app=sentry_app, feature=feature or Feature.API
)
if description:
integration_feature.update(user_description=description)
return integration_feature
@staticmethod
def create_userreport(group, project=None, event_id=None, **kwargs):
return UserReport.objects.create(
group=group,
event_id=event_id or "a" * 32,
project=project or group.project,
name="Jane Doe",
email="[email protected]",
comments="the application crashed",
**kwargs
)
@staticmethod
def create_session():
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
return session
@staticmethod
def create_platform_external_issue(
group=None, service_type=None, display_name=None, web_url=None
):
return PlatformExternalIssue.objects.create(
group_id=group.id, service_type=service_type, display_name=display_name, web_url=web_url
)
@staticmethod
def create_incident(
organization,
projects,
detection_uuid=None,
status=1,
title=None,
query="test query",
date_started=None,
date_detected=None,
date_closed=None,
groups=None,
seen_by=None,
):
if not title:
title = petname.Generate(2, " ", letters=10).title()
incident = Incident.objects.create(
organization=organization,
detection_uuid=detection_uuid,
status=status,
title=title,
query=query,
date_started=date_started or timezone.now(),
date_detected=date_detected or timezone.now(),
date_closed=date_closed or timezone.now(),
)
for project in projects:
IncidentProject.objects.create(incident=incident, project=project)
if groups:
for group in groups:
IncidentGroup.objects.create(incident=incident, group=group)
if seen_by:
for user in seen_by:
IncidentSeen.objects.create(incident=incident, user=user, last_seen=timezone.now())
return incident
@staticmethod
def create_incident_activity(incident, type, comment=None, user=None):
return IncidentActivity.objects.create(
incident=incident, type=type, comment=comment, user=user
)
| bsd-3-clause | 1,358,003,128,335,061,000 | 35.156425 | 324 | 0.522404 | false |
RyanSkraba/beam | sdks/python/apache_beam/examples/snippets/transforms/elementwise/tostring_test.py | 1 | 2310 | # coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import tostring
def check_plants(actual):
expected = '''[START plants]
🍓,Strawberry
🥕,Carrot
🍆,Eggplant
🍅,Tomato
🥔,Potato
[END plants]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_plant_lists(actual):
expected = '''[START plant_lists]
['🍓', 'Strawberry', 'perennial']
['🥕', 'Carrot', 'biennial']
['🍆', 'Eggplant', 'perennial']
['🍅', 'Tomato', 'annual']
['🥔', 'Potato', 'perennial']
[END plant_lists]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_plants_csv(actual):
expected = '''[START plants_csv]
🍓,Strawberry,perennial
🥕,Carrot,biennial
🍆,Eggplant,perennial
🍅,Tomato,annual
🥔,Potato,perennial
[END plants_csv]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
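# Sketch of the transform family these checks exercise (illustrative only; the
# real pipelines live in the `tostring` snippets module, and this assumes a
# local runner):
#
#   import apache_beam as beam
#   with beam.Pipeline() as pipeline:
#       _ = (pipeline
#            | beam.Create([('🍓', 'Strawberry')])
#            | beam.ToString.Kvs()
#            | beam.Map(print))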
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
'apache_beam.examples.snippets.transforms.elementwise.tostring.print', str)
class ToStringTest(unittest.TestCase):
def test_tostring_kvs(self):
tostring.tostring_kvs(check_plants)
def test_tostring_element(self):
tostring.tostring_element(check_plant_lists)
def test_tostring_iterables(self):
tostring.tostring_iterables(check_plants_csv)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,804,167,728,325,690,000 | 27.3125 | 79 | 0.735982 | false |
chanmodoi/plugin.video.chantube | lib/youtube.py | 1 | 1088 | import xbmcaddon
import urllib
import xbmc
from bs4 import BeautifulSoup
import requests
import html5lib
class Item:
def __init__(self, link, title, img):
self.link = link
self.title = title
self.img = img
def getListVideos(url):
    # Scrape the channel's /videos page and collect watch links plus thumbnails.
    r = requests.get(url + "videos")
    html = r.text
    #xbmc.log(html.encode("utf-8"))
    soup = BeautifulSoup(html, "html5lib")
    list_a = soup.findAll('a')
    list_links = []
    for a in list_a:
        a_href = a.get("href")
        a_title = a.get("title")
        if (a_href is not None) and a_href.startswith("/watch?v=") and (a_title is not None):
            # "/watch?v=" is 9 characters; the remainder is the video id.
            a_img = "https://i.ytimg.com/vi/" + a_href[9:] + "/mqdefault.jpg"
            list_links.append(Item("https://www.youtube.com" + a_href, a_title, a_img))
    return list_links
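# Usage sketch (hypothetical channel URL; both helpers are plain HTTP/HTML
# scraping, so they can be exercised outside Kodi):
#
#   for video in getListVideos("https://www.youtube.com/user/somechannel/"):
#       print(video.title, video.link, video.img)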
def getLinkFromKeepVid(link):
    # Query keepvid.com for downloadable stream URLs of the given video link.
    r = requests.get("http://keepvid.com/" + '?' + urllib.urlencode({"url": link}))
    html = r.text
    soup = BeautifulSoup(html, "html5lib")
    list_a = soup.findAll('a', attrs={"class": "l"})
    #xbmc.log(list_a.text)
    links = []
    for a in list_a:
        links.append(a.get("href"))
    return links | gpl-2.0 | -879,495,629,093,435,400 | 26.225 | 79 | 0.669118 | false |
GEverding/touchVision | io/cleaner/plotter.py | 1 | 1125 | import numpy as np
from matplotlib import pyplot as plt
import time
class Plotter:
def __init__(self, id):
self.id = id
self.initialize_canvas()
def initialize_canvas(self):
plt.ion()
self.fig = plt.figure()
self.ay = self.fig.add_subplot(211)
self.dline, = plt.plot([0], [0])
# ay.set_xlim([-1, 99])
# ay.set_ylim([0,220])
self.ax = self.fig.add_subplot(212)
self.dlinef, = plt.plot([0], [0])
plt.show(block=False)
self.data1 = np.array([0])
self.freq = np.array([0])
    def update_canvas(self, time, data, data2):
        # NOTE: the `time` parameter shadows the imported `time` module inside
        # this method; it holds the x-axis sample times.
self.dline.set_xdata(time)
self.dline.set_ydata(data)
self.ay.set_xlim([min(time), max(time)])
self.ay.set_ylim([min(data), max(data)])
# self.dlinef.set_xdata(self.freq)
# self.dlinef.set_ydata(10 * np.log(self.data1))
# self.ax.set_xlim([min(self.freq), max(self.freq)])
# self.ax.set_ylim([-20, 20.0])
self.dlinef.set_xdata(time)
self.dlinef.set_ydata(data2)
self.ax.set_xlim([min(time), max(time)])
self.ax.set_ylim([min(data2), max(data2)])
self.fig.canvas.draw()
    def update_canvasf(self, freq, data1):
        # Store the latest spectrum; the frequency subplot redraw is currently
        # handled (commented out) in update_canvas above.
        self.freq = freq
        self.data1 = data1
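# Minimal demo sketch (assumes an interactive matplotlib backend; the data is
# synthetic and the 2-second pause only keeps the window visible):
if __name__ == "__main__":
    demo = Plotter(id=0)
    t = np.linspace(0.0, 1.0, 100)
    demo.update_canvas(t, np.sin(2 * np.pi * 5 * t), np.cos(2 * np.pi * 5 * t))
    plt.pause(2)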
| mit | -520,473,535,467,576,700 | 24.568182 | 54 | 0.650667 | false |
programa-stic/barf-project | examples/kaos-toy-project/solve.py | 1 | 5341 | #! /usr/bin/env python
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import print_function
import struct
from barf.analysis.symbolic.emulator import ReilSymbolicEmulator
from barf.analysis.symbolic.emulator import State
from barf.analysis.symbolic.emulator import SymExecResult
from barf.arch.x86 import X86ArchitectureInformation
from barf.core.binary import BinaryFile
from barf.utils.reil import ReilContainerBuilder
def __get_in_array():
# Taken from: https://github.com/Cr4sh/openreil/blob/master/tests/test_kao.py
in_data = bytearray()
kao_installation_id = '97FF58287E87FB74-979950C854E3E8B3-55A3F121A5590339-6A8DF5ABA981F7CE'
# convert installation ID into the binary form
for s in kao_installation_id.split('-'):
in_data.extend(struct.pack('I', int(s[:8], 16)))
in_data.extend(struct.pack('I', int(s[8:], 16)))
return in_data
def __get_out_array():
return "0how4zdy81jpe5xfu92kar6cgiq3lst7"
def __save_path(trace, index):
print("[+] Saving trace... " + "trace_{:02d}.log".format(index))
with open("trace_{:02d}.log".format(index), "w") as trace_file:
for reil_instr, _ in trace:
line = "{:08x}.{:02x} {}\n".format(reil_instr.address >> 0x8, reil_instr.address & 0xff, reil_instr)
trace_file.write(line)
def solve():
#
# Load Binary
#
binary = BinaryFile("bin/toyproject.exe")
arch_info = X86ArchitectureInformation(binary.architecture_mode)
# Identify functions of interest
functions = [
("sub_4010ec", 0x004010ec, 0x004010ec + 0x3a)
]
# Create a REIL container
reil_container_builder = ReilContainerBuilder(binary)
reil_container = reil_container_builder.build(functions)
#
# Set up initial state
#
initial_state = State(arch_info, mode="initial")
# Set up stack
esp = 0x00001500
initial_state.write_register("esp", esp)
# Set up parameters
out_array_addr = esp - 0x25
in_array_addr = 0x4093a8
initial_state.write_memory(esp + 0x0, 4, 0x41414141) # fake return address
# TODO: Find a way to mark 'x' and 'y' as symbolic variables.
# initial_state.write_memory(esp + 0x4, 4, x) # Mark as Symbolic
# initial_state.write_memory(esp + 0x8, 4, y) # Mark as Symbolic
# Set the A array
in_array_expected = bytearray(__get_in_array())
for i in range(len(in_array_expected)):
initial_state.write_memory(in_array_addr + i, 1, in_array_expected[i])
#
# Set up final state
#
final_state = State(arch_info, mode="final")
# Set the B array
out_array_expected = bytearray(__get_out_array(), encoding='ascii')
for i in range(32):
# Avoid trivial solution
initial_state.write_memory(out_array_addr + i, 1, 0)
# Assert final (desired) state
final_state.write_memory(out_array_addr + i, 1, out_array_expected[i])
#
# Run concolic execution
#
sym_exec = ReilSymbolicEmulator(arch_info)
paths = sym_exec.find_state(reil_container, start=0x004010ec, end=0x0040111d,
initial_state=initial_state, final_state=final_state)
# There's only one way to reach the final state.
# assert len(paths) == 1
print("[+] Number of paths: {}".format(len(paths)))
# for index, path in enumerate(paths):
# __save_path(path, index)
#
# Query input buffer and print content
#
print("A (in) : {:s}".format(" ".join(["{:02x}".format(b) for b in in_array_expected])))
print("B (out) : {:s}".format(" ".join(["{:02x}".format(b) for b in out_array_expected])))
if len(paths) > 0:
se_res = SymExecResult(arch_info, initial_state, paths[0], final_state)
print("x: {0:#010x} ({0:d})".format(se_res.query_memory(esp + 0x4, 4)))
print("y: {0:#010x} ({0:d})".format(se_res.query_memory(esp + 0x8, 4)))
else:
print("[-] State Not Found!")
if __name__ == "__main__":
solve()
| bsd-2-clause | -998,339,934,289,517,700 | 33.019108 | 115 | 0.677214 | false |
mostaszewski/SleepWell | app.py | 1 | 3919 | """
The script scrapes hotel prices from the Trivago website and saves the
results in an easy-to-read HTML file.
"""
import argparse
import time
from selenium.common.exceptions import NoSuchElementException, \
StaleElementReferenceException
from tqdm import tqdm
from browser import Browser
from report import save
from urlbuilder import make_url
class SleepWell:
def __init__(self, location, date_from, date_to, stars, reviews, distance, max_price):
self.location = self.get_location_id(location)
self.date_from = date_from
self.date_to = date_to
self.stars = stars
self.reviews = reviews
self.distance = distance
self.max_price = max_price
self.report = '{} - {}'.format(location, time.strftime("%Y-%m-%d %H%M"))
@staticmethod
def get_location_id(location):
with Browser() as browser:
browser.get("http://trivago.pl")
browser.find_element_by_css_selector("input.input.horus__querytext.js-query"
"-input").send_keys(location)
browser.find_element_by_css_selector("div.horus__col.horus__col--search >"
" button > span").click()
time.sleep(3)
location_id = [x.replace("iPathId=", "") for x in browser.current_url.split(
"&") if "iPathId" in x][0]
return location_id
@staticmethod
def get_data(browser):
while True:
date = browser.find_element_by_class_name("btn-horus__value")
hotels = browser.find_elements_by_class_name("hotel")
for hotel in hotels:
try:
name = hotel.find_element_by_class_name("name__copytext")
price = hotel.find_element_by_class_name("item__best-price")
website = hotel.find_element_by_class_name("item__deal-best-ota")
data = {
'Name': name.text,
'Price': int(price.text.translate(str.maketrans('', '', 'zł '))),
'Website': website.text,
'Date': date.text.split(",")[1]
}
yield data
except StaleElementReferenceException:
pass
try:
next_page = browser.find_element_by_class_name("btn--next")
next_page.click()
time.sleep(5)
except NoSuchElementException:
break
def run(self):
urls = make_url(self.location, self.date_from, self.date_to, self.stars,
self.reviews, self.distance)
hotels = []
with tqdm(total=len(urls)) as pbar:
for url in urls:
with Browser() as browser:
browser.get(url)
for hotel in self.get_data(browser):
if hotel['Price'] < self.max_price:
hotels.append(hotel)
else:
break
pbar.update(1)
hotels = sorted(hotels, key=lambda k: k['Price'])
save(self.report, hotels)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--location")
parser.add_argument("-df", "--date_from")
parser.add_argument("-dt", "--date_to")
parser.add_argument("-s", "--stars", default="1,2,3,4,5")
parser.add_argument("-r", "--reviews", default="1,2,3,4,5")
parser.add_argument("-d", "--distance", nargs="?", default=None)
parser.add_argument("-mp", "--max_price", nargs="?", default=100000, type=int)
args = parser.parse_args()
SleepWell(args.location, args.date_from, args.date_to, args.stars, args.reviews,
args.distance, args.max_price).run()
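# Example invocation (hypothetical values; requires the Selenium-backed
# Browser helper and its web driver to be installed):
#
#   python app.py -l "Warsaw" -df 2018-05-01 -dt 2018-05-03 -s 4,5 -mp 500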
| mit | 6,142,783,174,376,353,000 | 41.129032 | 90 | 0.53803 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QReadWriteLock.py | 1 | 1655 | # encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QReadWriteLock(): # skipped bases: <class 'sip.simplewrapper'>
"""
QReadWriteLock()
QReadWriteLock(QReadWriteLock.RecursionMode)
"""
def lockForRead(self): # real signature unknown; restored from __doc__
""" QReadWriteLock.lockForRead() """
pass
def lockForWrite(self): # real signature unknown; restored from __doc__
""" QReadWriteLock.lockForWrite() """
pass
def tryLockForRead(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
QReadWriteLock.tryLockForRead() -> bool
QReadWriteLock.tryLockForRead(int) -> bool
"""
return False
def tryLockForWrite(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
QReadWriteLock.tryLockForWrite() -> bool
QReadWriteLock.tryLockForWrite(int) -> bool
"""
return False
def unlock(self): # real signature unknown; restored from __doc__
""" QReadWriteLock.unlock() """
pass
def __init__(self, QReadWriteLock_RecursionMode=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
NonRecursive = 0
RecursionMode = None # (!) real value is ''
Recursive = 1
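# Usage sketch (assumes PyQt4 is installed; the stub above only mirrors the
# real API for IDE completion):
#
#   lock = QReadWriteLock()
#   lock.lockForRead()
#   try:
#       pass  # read shared state
#   finally:
#       lock.unlock()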
| gpl-2.0 | -2,338,360,780,281,634,300 | 30.226415 | 130 | 0.645921 | false |
martinkirch/tofbot | plugins/lag.py | 1 | 5024 | # -*- coding: utf-8 -*-
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2015 Christophe-Marie Duquesne <[email protected]>
"See PluginLag"
from toflib import Plugin, cmd
import datetime
import time
import collections
Mention = collections.namedtuple('Mention', "timestamp author msg pending")
class PluginLag(Plugin):
"Lag: time between a mention and the answer"
def __init__(self, bot):
# A dictionary of nick -> dict
# Values are like this:
# {
# "mentions": list(Mention)
# "previous_lag": timedelta
# "last_active": timestamp
# }
super(PluginLag, self).__init__(bot)
self.data = {}
def timeformat(self, t):
"return a formatted time element without microseconds"
return str(t).split(".")[0]
def gc(self):
"Limit memory usage"
# don't watch more than 20 nicks
while len(self.data) > 20:
            # evict the nick whose last activity is the oldest
            least_active_nick = min(self.data.keys(),
                                    key=lambda x: self.data[x]["last_active"])
            del self.data[least_active_nick]
        # don't keep more than 10 mentions per nick
for nick in self.data:
while len(self.data[nick]["mentions"]) > 10:
del self.data[nick]["mentions"][0]
def set_active(self, nick):
"Update the last moment the nick was active"
# If the nick did not exist, add it
if nick == self.bot.nick:
return
if nick not in self.data:
self.data[nick] = {
"mentions": [],
"previous_lag": None
}
self.data[nick]["last_active"] = datetime.datetime.now()
self.gc()
def on_join(self, chan, nick):
"When a nick joins, mark it as active"
self.set_active(nick)
def add_mention(self, msg_text, author, to, pending=True):
"Add a mention to the nick"
self.data[to]["mentions"].append(Mention(
timestamp=datetime.datetime.now(),
author=author,
msg=msg_text,
pending=pending
))
self.gc()
def lag(self, nick):
"Returns the time between now and the oldest pending mention"
now = datetime.datetime.now()
for m in self.data[nick]["mentions"]:
if m.pending:
return now - m.timestamp
return None
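    # Worked example: if "alice" was mentioned at 12:00:00 and it is now
    # 12:03:30 with the mention still pending, lag("alice") yields a timedelta
    # of 0:03:30; handle_msg() clears the pending flags once alice speaks.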
def handle_msg(self, msg_text, _chan, me):
"Process mentions and update previous lag"
self.set_active(me)
words = set(msg_text
.replace(":", " ")
.replace(",", " ")
.strip()
.split(" "))
is_cmd = msg_text.strip().startswith('!')
# did I mention anybody?
if not is_cmd:
for nick in self.data:
if nick != me and nick in words:
self.add_mention(msg_text, me, nick)
# update the lag
lag = self.lag(me)
if lag is not None:
self.data[me]["previous_lag"] = lag
# my mentions are no longer pending since I just answered
mentions = self.data[me]["mentions"]
for i in range(len(mentions)):
mentions[i] = mentions[i]._replace(pending=False)
@cmd(1)
def cmd_lag(self, chan, args):
"Report the lag of the given nick"
who = args[0]
if who in self.data:
lag = self.lag(who)
if lag is not None:
self.say("Le %s-lag du moment est de %s." % (who,
self.timeformat(lag)))
else:
previous_lag = self.data[who]["previous_lag"]
if previous_lag is not None:
self.say("Pas de lag pour %s (lag précédent: %s)." %
(who, self.timeformat(previous_lag)))
else:
self.say("Pas de lag pour %s." % who)
else:
self.say("Pas d'infos sur %s." % who)
@cmd(1)
def cmd_mentions(self, chan, args, sender_nick):
"Report the recent mentions of the given nick"
who = args[0]
if who in self.data:
mentions = self.data[who]["mentions"]
if len(mentions) > 0:
self.private(sender_nick, "Dernières mentions de %s:" % who)
for m in mentions:
status = "✗" if m.pending else "✓"
time.sleep(0.5)
self.private(sender_nick, "[%s] %s <%s> %s" % (status,
self.timeformat(m.timestamp), m.author, m.msg))
else:
self.private(sender_nick, "Pas de mentions pour %s." % who)
else:
self.private(sender_nick, "Pas d'infos sur %s." % who)
| bsd-2-clause | -3,239,078,850,862,541,300 | 31.79085 | 76 | 0.518437 | false |
gavincyi/Telex | main.py | 1 | 3535 | #!/bin/python
from telegram.ext import Updater, CommandHandler, Filters, MessageHandler
from src.handler import handler
from src.db_client import db_client
from src.config import config
from src.user_interface import user_interface
import logging
import sys
def usage(error=''):
ret = ""
if error != '':
ret += error + "\n"
ret += "Usage: main.py -c <config_file> -i <interface_file> [-m COLD]\n"
ret += "Arguments:\n"
ret += " -c Configuration file path.\n"
ret += " -i Interface file path.\n"
ret += " -m Cold start mode. Run it in the first time.\n"
return ret
def validate_args():
if len(sys.argv) % 2 == 0:
print(usage("Invalid number of arguments"))
exit(1)
conf_path = ''
ui_path = ''
mode = 'NORMAL'
for i in range(1, len(sys.argv)-1, 2):
arg_item = sys.argv[i]
arg_val = sys.argv[i+1]
if arg_item == "-c":
conf_path = arg_val
elif arg_item == "-i":
ui_path = arg_val
elif arg_item == "-m":
if arg_val == "COLD":
mode = arg_val
else:
print(usage("Currently only supports cold start mode."))
exit(1)
conf = config(conf_path, mode=mode)
ui = user_interface(conf.platform, conf.channel_name, ui_path)
return conf, ui
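# Example invocation (hypothetical paths; -m COLD is only needed on first run):
#
#   python main.py -c conf/telex.conf -i conf/interface.json -m COLD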
if __name__ == '__main__':
conf, ui = validate_args()
# Set up logger
if len(conf.log_file) > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=conf.log_file)
else:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.info("Process is started")
# Set up db client
database_client = db_client(logger, conf)
if not database_client.init():
        logger.warn('Database failed to initialise')
# Set up handler
msg_handler = handler(logger, conf, ui)
msg_handler.init_db(database_client)
# Set up telegram bot
updater = Updater(conf.api_token)
updater.dispatcher.add_handler(CommandHandler('start', msg_handler.start_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.query_key_name(False), msg_handler.query_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.response_key_name(False), msg_handler.response_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.help_key_name(False), msg_handler.help_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.yes_key_name(False), msg_handler.yes_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.no_key_name(False), msg_handler.no_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.back_key_name(False), msg_handler.no_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.match_key_name(False), msg_handler.match_handler))
updater.dispatcher.add_handler(CommandHandler(msg_handler.ui.unmatch_key_name(False), msg_handler.unmatch_handler))
updater.dispatcher.add_handler(MessageHandler([Filters.text], msg_handler.set_value_handler))
updater.dispatcher.add_handler(MessageHandler([Filters.contact], msg_handler.yes_handler))
logger.info("Polling is started")
updater.start_polling()
updater.idle()
| apache-2.0 | 1,815,714,886,574,012,000 | 35.822917 | 121 | 0.639038 | false |
hackersql/sq1map | plugins/dbms/hsqldb/fingerprint.py | 1 | 4673 | #!/usr/bin/env python
#coding=utf-8
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import unArrayizeValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import HSQLDB_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.HSQLDB)
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp and not conf.api:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp and not conf.api:
value += "%s\n" % dbmsOsFp
value += "back-end DBMS: "
actVer = Format.getDbms()
if not conf.extensiveFp:
value += actVer
return value
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
if re.search("-log$", kb.data.banner):
banVer += ", logging enabled"
banVer = Format.getDbms([banVer] if banVer else None)
value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
"""
References for fingerprint:
DATABASE_VERSION()
        version 2.2.6 added two-arg REPLACE function REPLACE('a','a') compared to REPLACE('a','a','d')
        version 2.2.5 added SYSTIMESTAMP function
        version 2.2.3 added REGEXPR_SUBSTRING and REGEXPR_SUBSTRING_ARRAY functions
        version 2.2.0 added support for ROWNUM() function
        version 2.1.0 added MEDIAN aggregate function
        version < 2.0.1 added support for datetime ROUND and TRUNC functions
        version 2.0.0 added VALUES support
        version 1.8.0.4 added org.hsqldbdb.Library function getDatabaseFullProductVersion to return the
                        full version string, including the 4th digit (e.g. 1.8.0.4).
        version 1.7.2 added CASE statements and INFORMATION_SCHEMA
"""
if not conf.extensiveFp and Backend.isDbmsWithin(HSQLDB_ALIASES):
setDbms("%s %s" % (DBMS.HSQLDB, Backend.getVersion()))
if Backend.isVersionGreaterOrEqualThan("1.7.2"):
kb.data.has_information_schema = True
self.getBanner()
return True
infoMsg = "testing %s" % DBMS.HSQLDB
logger.info(infoMsg)
result = inject.checkBooleanExpression("CASEWHEN(1=1,1,0)=1")
if result:
infoMsg = "confirming %s" % DBMS.HSQLDB
logger.info(infoMsg)
result = inject.checkBooleanExpression("ROUNDMAGIC(PI())>=3")
if not result:
warnMsg = "后端DBMS不是%s" % DBMS.HSQLDB
logger.warn(warnMsg)
return False
else:
kb.data.has_information_schema = True
Backend.setVersion(">= 1.7.2")
setDbms("%s 1.7.2" % DBMS.HSQLDB)
banner = self.getBanner()
if banner:
Backend.setVersion("= %s" % banner)
else:
if inject.checkBooleanExpression("(SELECT [RANDNUM] FROM (VALUES(0)))=[RANDNUM]"):
Backend.setVersionList([">= 2.0.0", "< 2.3.0"])
else:
banner = unArrayizeValue(inject.getValue("\"org.hsqldbdb.Library.getDatabaseFullProductVersion\"()", safeCharEncode=True))
if banner:
Backend.setVersion("= %s" % banner)
else:
Backend.setVersionList([">= 1.7.2", "< 1.8.0"])
return True
else:
warnMsg = "the back-end DBMS is not %s or version is < 1.7.2" % DBMS.HSQLDB
logger.warn(warnMsg)
return False
def getHostname(self):
warnMsg = "on HSQLDB it is not possible to enumerate the hostname"
logger.warn(warnMsg)
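# The boolean probes above correspond to plain SQL predicates (illustrative
# SQL, not part of the sqlmap API):
#
#   CASEWHEN(1=1,1,0)=1               -- parses only on HSQLDB
#   ROUNDMAGIC(PI())>=3               -- HSQLDB-specific numeric function
#   (SELECT 42 FROM (VALUES(0)))=42   -- VALUES support implies >= 2.0.0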
| gpl-3.0 | 1,755,618,997,009,516,500 | 33.555556 | 146 | 0.584995 | false |
szczeles/spark-oktawave | spark_oktawave/spark_oktawave.py | 1 | 3352 | import click
import configparser
import os
from .api import *
@click.group()
@click.option('--credentials', help='Path to credentials file', default='~/.spark-oktawave-credentials')
@click.pass_context
def cli(ctx, credentials):
ctx.obj['config'] = configparser.RawConfigParser()
ctx.obj['config'].read(os.path.expanduser(credentials))
ctx.obj['api'] = OktawaveApi(ctx.obj['config']['oktawave']['user'],
ctx.obj['config']['oktawave']['password'])
@cli.command()
@click.pass_context
def balance(ctx):
click.echo("Client balance: {}".format(ctx.obj['api'].get_balance()))
@cli.command()
@click.argument('cluster-name')
@click.option('--slaves', default=2, help='number of slaves')
@click.option('--disk-size', default=10, help='disk size [GB]')
@click.option('--master-class', default='v1.standard-2.2', help='master class')
@click.option('--slave-class', default='v1.highcpu-8.4', help='slave class')
@click.pass_context
def launch(ctx, cluster_name, slaves, disk_size, master_class, slave_class):
cluster = Cluster(ctx.obj['api'], cluster_name, ctx.obj['config']['oktawave']['private_ssh_key'])
cluster.upload_ssh_key(os.path.expanduser(ctx.obj['config']['oktawave']['public_ssh_key']))
cluster.launch_master(disk_size, master_class)
cluster.launch_slaves(slaves, disk_size, slave_class)
print('Waiting for cluster initialization...')
cluster.initialize(ctx.obj['config']['ocs'])
@cli.command()
@click.pass_context
def list(ctx):
master_nodes = ctx.obj['api'].list_vms(search_text='-master')
clusters = map(lambda vm: vm['name'].split('-master')[0], master_nodes)
for cluster in sorted(clusters):
ctx.invoke(info, cluster_name=cluster, verbose=False)
print()
@cli.command()
@click.argument('cluster-name')
@click.argument('graphite-host')
@click.pass_context
def enable_monitoring(ctx, cluster_name, graphite_host):
cluster = Cluster(ctx.obj['api'], cluster_name, ctx.obj['config']['oktawave']['private_ssh_key'])
cluster.install_collectd(graphite_host)
@cli.command()
@click.argument('cluster-name')
@click.pass_context
@click.option('-v', '--verbose', is_flag=True)
def info(ctx, cluster_name, verbose):
cluster = Cluster(ctx.obj['api'], cluster_name, ctx.obj['config']['oktawave']['private_ssh_key'])
print("Cluster name: {}".format(cluster.name))
master_ip = cluster.get_master_ip()
jupyter_password = cluster.get_jupyter_token()
if not jupyter_password:
print("Cluster is initilizing... Try again")
return
print("Spark Master UI: http://{}:8080/".format(master_ip))
print("Jupyter: http://{}:8888/".format(master_ip))
print("Jupyter password: {}".format(jupyter_password))
print("Price per hour: {:.2f} PLN".format(cluster.get_hourly_charge()))
print("Running for {}".format(cluster.get_uptime()))
print("Slaves: {}".format(len(cluster.get_nodes()) - 1))
if verbose:
for node in cluster.get_nodes():
if not cluster.is_master(node['name']):
print(' * {}'.format(node['ip']))
@cli.command()
@click.argument('cluster-name')
@click.pass_context
def destroy(ctx, cluster_name):
cluster = Cluster(ctx.obj['api'], cluster_name)
cluster.destroy_vms()
cluster.remove_ssh_key()
def main():
cli(obj={})
if __name__ == '__main__':
main()
| apache-2.0 | 1,290,144,154,548,453,400 | 36.244444 | 104 | 0.667363 | false |
plaidml/plaidml | plaidml2/ffi.py | 1 | 2449 | # Copyright 2019 Intel Corporation.
import logging
import os
import platform
import sys
import threading
import pkg_resources
from plaidml2._ffi import ffi
logger = logging.getLogger(__name__)
_TLS = threading.local()
_TLS.err = ffi.new('plaidml_error*')
_LIBNAME = 'plaidml2'
if os.getenv('PLAIDML_MLIR') == '1':
_LIBNAME = 'plaidml2_mlir'
if platform.system() == 'Windows':
lib_name = '{}.dll'.format(_LIBNAME)
elif platform.system() == 'Darwin':
lib_name = 'lib{}.dylib'.format(_LIBNAME)
else:
lib_name = 'lib{}.so'.format(_LIBNAME)
lib_path = os.getenv('PLAIDML_LIB_PATH')
if not lib_path:
lib_path = pkg_resources.resource_filename(__name__, lib_name)
def __load_library():
logger.debug('Loading {} from {}'.format(lib_name, lib_path))
return ffi.dlopen(lib_path)
lib = ffi.init_once(__load_library, 'plaidml_load_library')
def decode_str(ptr):
if ptr:
try:
return ffi.string(lib.plaidml_string_ptr(ptr)).decode()
finally:
lib.plaidml_string_free(ptr)
return None
class Error(Exception):
def __init__(self, err):
Exception.__init__(self)
self.code = err.code
self.msg = decode_str(err.msg)
def __str__(self):
return self.msg
def ffi_call(func, *args):
"""Calls ffi function and propagates foreign errors."""
ret = func(_TLS.err, *args)
if _TLS.err.code:
raise Error(_TLS.err)
return ret
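# Illustrative call pattern (hypothetical symbol name): every C entry point
# takes a plaidml_error* out-parameter first, which ffi_call supplies and
# checks before returning the result:
#
#   count = ffi_call(lib.plaidml_device_list_count)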
class ForeignObject(object):
__ffi_obj__ = None
__ffi_del__ = None
__ffi_repr__ = None
def __init__(self, ffi_obj):
self.__ffi_obj__ = ffi_obj
def __del__(self):
if self.__ffi_obj__ and self.__ffi_del__:
self._methodcall(self.__ffi_del__)
def __repr__(self):
if self.__ffi_obj__ is None:
return 'None'
if self.__ffi_obj__ and self.__ffi_repr__:
return decode_str(self._methodcall(self.__ffi_repr__))
return super(ForeignObject, self).__repr__()
def _methodcall(self, func, *args):
return ffi_call(func, self.__ffi_obj__, *args)
def as_ptr(self, release=False):
if self.__ffi_obj__ is None:
return ffi.NULL
ret = self.__ffi_obj__
if release:
self.__ffi_obj__ = None
return ret
def set_ptr(self, ffi_obj):
self.__ffi_obj__ = ffi_obj
def take_ptr(self, obj):
self.__ffi_obj__ = obj.as_ptr(True)
| apache-2.0 | 6,785,233,738,009,265,000 | 22.548077 | 67 | 0.584728 | false |
nikcub/Sketch | sketch/util/unicode.py | 1 | 1401 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=2:sw=2:expandtab
#
# Copyright (c) 2011, Nik Cubrilovic. All rights reserved.
#
# <[email protected]> <http://nikcub.appspot.com>
#
# Licensed under a BSD license. You may obtain a copy of the License at
#
# http://nikcub.appspot.com/bsd-license
#
"""
Sketch - unicode.py
unicode and text tools
"""
def force_unicode(text, encoding='utf-8'):
"""
@TODO encoding support
"""
  if text is None:
return u''
try:
text = unicode(text, 'utf-8')
except UnicodeDecodeError:
text = unicode(text, 'latin1')
except TypeError:
text = unicode(text)
return text
def force_utf8(text):
return str(force_unicode(text).encode('utf8'))
def to_utf8(value):
"""Returns a string encoded using UTF-8.
This function comes from `Tornado`_.
:param value:
A unicode or string to be encoded.
:returns:
The encoded string.
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def to_unicode(value):
"""Returns a unicode string from a string, using UTF-8 to decode if needed.
This function comes from `Tornado`_.
:param value:
A unicode or string to be decoded.
:returns:
The decoded string.
"""
if isinstance(value, str):
return value.decode('utf-8')
assert isinstance(value, unicode)
return value
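# Round-trip example (Python 2 semantics, matching this module's target):
#
#   assert to_unicode(to_utf8(u'caf\xe9')) == u'caf\xe9'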
| bsd-2-clause | -7,632,726,280,004,726,000 | 17.932432 | 77 | 0.65596 | false |
eayunstack/neutron | neutron/api/rpc/handlers/dhcp_rpc.py | 1 | 14401 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import operator
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from neutron._i18n import _
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import provisioning_blocks
from neutron.extensions import segment as segment_ext
from neutron.plugins.common import utils as p_utils
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
class DhcpRpcCallback(object):
"""DHCP agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface. The client
side of this interface can be found in
neutron.agent.dhcp.agent.DhcpPluginApi. For more information about
changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst.
"""
# API version history:
# 1.0 - Initial version.
# 1.1 - Added get_active_networks_info, create_dhcp_port,
# and update_dhcp_port methods.
# 1.2 - Removed get_dhcp_port. When removing a method (Making a
# backwards incompatible change) you would normally bump the
# major version. However, since the method was unused in the
# RPC client for many releases, it should be OK to bump the
# minor release instead and claim RPC compatibility with the
# last few client versions.
# 1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP
# agent since Juno, so similar rationale for not bumping the
# major version as above applies here too.
# 1.4 - Removed update_lease_expiration. It's not used by reference
# DHCP agent since Juno, so similar rationale for not bumping the
# major version as above applies here too.
# 1.5 - Added dhcp_ready_on_ports.
# 1.6 - Removed get_active_networks. It's not used by reference
# DHCP agent since Havana, so similar rationale for not bumping
# the major version as above applies here too.
target = oslo_messaging.Target(
namespace=n_const.RPC_NAMESPACE_DHCP_PLUGIN,
version='1.6')
def _get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active networks."""
host = kwargs.get('host')
plugin = directory.get_plugin()
if utils.is_extension_supported(
plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.network_auto_schedule:
plugin.auto_schedule_networks(context, host)
nets = plugin.list_active_networks_on_active_dhcp_agent(
context, host)
else:
filters = dict(admin_state_up=[True])
nets = plugin.get_networks(context, filters=filters)
return nets
def _port_action(self, plugin, context, port, action):
"""Perform port operations taking care of concurrency issues."""
try:
if action == 'create_port':
return p_utils.create_port(plugin, context, port)
elif action == 'update_port':
return plugin.update_port(context, port['id'], port)
else:
msg = _('Unrecognized action')
raise exceptions.Invalid(message=msg)
except (db_exc.DBReferenceError,
exceptions.NetworkNotFound,
exceptions.SubnetNotFound,
exceptions.InvalidInput,
exceptions.IpAddressGenerationFailure) as e:
with excutils.save_and_reraise_exception(reraise=False) as ctxt:
if isinstance(e, exceptions.IpAddressGenerationFailure):
# Check if the subnet still exists and if it does not,
# this is the reason why the ip address generation failed.
# In any other unlikely event re-raise
try:
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
plugin.get_subnet(context, subnet_id)
except exceptions.SubnetNotFound:
pass
else:
ctxt.reraise = True
if ctxt.reraise:
net_id = port['port']['network_id']
LOG.warning("Action %(action)s for network %(net_id)s "
"could not complete successfully: "
"%(reason)s",
{"action": action,
"net_id": net_id,
'reason': e})
def _group_by_network_id(self, res):
grouped = {}
keyfunc = operator.itemgetter('network_id')
for net_id, values in itertools.groupby(sorted(res, key=keyfunc),
keyfunc):
grouped[net_id] = list(values)
return grouped
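    # Worked example: [{'network_id': 'a'}, {'network_id': 'b'},
    # {'network_id': 'a'}] groups into {'a': [...], 'b': [...]}; sorting first
    # is required because itertools.groupby only merges adjacent items.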
def get_active_networks_info(self, context, **kwargs):
"""Returns all the networks/subnets/ports in system."""
host = kwargs.get('host')
LOG.debug('get_active_networks_info from %s', host)
networks = self._get_active_networks(context, **kwargs)
plugin = directory.get_plugin()
filters = {'network_id': [network['id'] for network in networks]}
ports = plugin.get_ports(context, filters=filters)
filters['enable_dhcp'] = [True]
# NOTE(kevinbenton): we sort these because the agent builds tags
# based on position in the list and has to restart the process if
# the order changes.
subnets = sorted(plugin.get_subnets(context, filters=filters),
key=operator.itemgetter('id'))
# Handle the possibility that the dhcp agent(s) only has connectivity
# inside a segment. If the segment service plugin is loaded and
# there are active dhcp enabled subnets, then filter out the subnets
# that are not on the host's segment.
seg_plug = directory.get_plugin(
segment_ext.SegmentPluginBase.get_plugin_type())
seg_subnets = [subnet for subnet in subnets
if subnet.get('segment_id')]
nonlocal_subnets = []
if seg_plug and seg_subnets:
host_segment_ids = seg_plug.get_segments_by_hosts(context, [host])
# Gather the ids of all the subnets that are on a segment that
# this host touches
seg_subnet_ids = {subnet['id'] for subnet in seg_subnets
if subnet['segment_id'] in host_segment_ids}
# Gather the ids of all the networks that are routed
routed_net_ids = {seg_subnet['network_id']
for seg_subnet in seg_subnets}
# Remove the subnets with segments that are not in the same
# segments as the host. Do this only for the networks that are
# routed because we want non-routed networks to work as
# before.
nonlocal_subnets = [subnet for subnet in seg_subnets
if subnet['id'] not in seg_subnet_ids]
subnets = [subnet for subnet in subnets
if subnet['network_id'] not in routed_net_ids or
subnet['id'] in seg_subnet_ids]
grouped_subnets = self._group_by_network_id(subnets)
grouped_nonlocal_subnets = self._group_by_network_id(nonlocal_subnets)
grouped_ports = self._group_by_network_id(ports)
for network in networks:
network['subnets'] = grouped_subnets.get(network['id'], [])
network['non_local_subnets'] = (
grouped_nonlocal_subnets.get(network['id'], []))
network['ports'] = grouped_ports.get(network['id'], [])
return networks
def get_network_info(self, context, **kwargs):
"""Retrieve and return extended information about a network."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
LOG.debug('Network %(network_id)s requested from '
'%(host)s', {'network_id': network_id,
'host': host})
plugin = directory.get_plugin()
try:
network = plugin.get_network(context, network_id)
except exceptions.NetworkNotFound:
LOG.debug("Network %s could not be found, it might have "
"been deleted concurrently.", network_id)
return
filters = dict(network_id=[network_id])
subnets = plugin.get_subnets(context, filters=filters)
seg_plug = directory.get_plugin(
segment_ext.SegmentPluginBase.get_plugin_type())
nonlocal_subnets = []
if seg_plug and subnets:
seg_subnets = [subnet for subnet in subnets
if subnet.get('segment_id')]
# If there are no subnets with segments, then this is not a routed
# network and no filtering should take place.
if seg_subnets:
segment_ids = seg_plug.get_segments_by_hosts(context, [host])
# There might be something to do if no segment_ids exist that
# are mapped to this host. However, it seems that if this
# host is not mapped to any segments and this is a routed
# network, then this host shouldn't have even been scheduled
# to.
nonlocal_subnets = [subnet for subnet in seg_subnets
if subnet['segment_id'] not in segment_ids]
subnets = [subnet for subnet in seg_subnets
if subnet['segment_id'] in segment_ids]
# NOTE(kevinbenton): we sort these because the agent builds tags
# based on position in the list and has to restart the process if
# the order changes.
network['subnets'] = sorted(subnets, key=operator.itemgetter('id'))
network['non_local_subnets'] = sorted(nonlocal_subnets,
key=operator.itemgetter('id'))
network['ports'] = plugin.get_ports(context, filters=filters)
return network
@db_api.retry_db_errors
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
LOG.debug('DHCP port deletion for %(network_id)s request from '
'%(host)s',
{'network_id': network_id, 'host': host})
plugin = directory.get_plugin()
plugin.delete_ports_by_device_id(context, device_id, network_id)
@oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure)
@db_api.retry_db_errors
@resource_registry.mark_resources_dirty
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
# Note(pbondar): Create deep copy of port to prevent operating
# on changed dict if RetryRequest is raised
port = copy.deepcopy(kwargs.get('port'))
LOG.debug('Create dhcp port %(port)s '
'from %(host)s.',
{'port': port,
'host': host})
port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
port['port'][portbindings.HOST_ID] = host
if 'mac_address' not in port['port']:
port['port']['mac_address'] = constants.ATTR_NOT_SPECIFIED
plugin = directory.get_plugin()
return self._port_action(plugin, context, port, 'create_port')
@oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure)
@db_api.retry_db_errors
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
port = kwargs.get('port')
port['id'] = kwargs.get('port_id')
port['port'][portbindings.HOST_ID] = host
plugin = directory.get_plugin()
try:
old_port = plugin.get_port(context, port['id'])
if (old_port['device_id'] != constants.DEVICE_ID_RESERVED_DHCP_PORT
and old_port['device_id'] !=
utils.get_dhcp_agent_device_id(port['port']['network_id'],
host)):
raise n_exc.DhcpPortInUse(port_id=port['id'])
LOG.debug('Update dhcp port %(port)s '
'from %(host)s.',
{'port': port,
'host': host})
return self._port_action(plugin, context, port, 'update_port')
except exceptions.PortNotFound:
LOG.debug('Host %(host)s tried to update port '
'%(port_id)s which no longer exists.',
{'host': host, 'port_id': port['id']})
return None
@db_api.retry_db_errors
def dhcp_ready_on_ports(self, context, port_ids):
for port_id in port_ids:
provisioning_blocks.provisioning_complete(
context, port_id, resources.PORT,
provisioning_blocks.DHCP_ENTITY)
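# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demonstration of the subnet partitioning and
# sorting performed in get_network_info() above, using plain dicts in place
# of real plugin objects; all sample values are invented.
if __name__ == '__main__':
    import operator as _op
    seg_subnets = [{'id': 'subnet-b', 'segment_id': 'seg-1'},
                   {'id': 'subnet-a', 'segment_id': 'seg-2'}]
    segment_ids = {'seg-1'}  # segments mapped to the requesting host
    nonlocal_subnets = [s for s in seg_subnets
                        if s['segment_id'] not in segment_ids]
    subnets = [s for s in seg_subnets if s['segment_id'] in segment_ids]
    # Sorted for a stable ordering, mirroring NOTE(kevinbenton) above.
    print(sorted(subnets, key=_op.itemgetter('id')))
    print(sorted(nonlocal_subnets, key=_op.itemgetter('id')))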
| apache-2.0 | -6,443,986,046,153,658,000 | 46.216393 | 79 | 0.593431 | false |
chrys87/orca-beep | test/keystrokes/gtk-demo/role_text_multiline_flatreview.py | 1 | 13029 | #!/usr/bin/python
"""Test of flat review of text and a toolbar."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Application main window"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Tab"))
sequence.append(TypeAction("This is a test. "))
sequence.append(KeyComboAction("Return"))
sequence.append(TypeAction("This is only a test."))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Return"))
sequence.append(TypeAction("PLEASE DO NOT PANIC."))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Subtract"))
sequence.append(utils.AssertPresentationAction(
"1. KP_Subtract to enter flat review",
["BRAILLE LINE: 'Entering flat review.'",
" VISIBLE: 'Entering flat review.', cursor=0",
"BRAILLE LINE: 'PLEASE DO NOT PANIC. $l'",
" VISIBLE: 'PLEASE DO NOT PANIC. $l', cursor=1",
"SPEECH OUTPUT: 'Entering flat review.'",
"SPEECH OUTPUT: 'PLEASE' voice=uppercase"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"2. KP_8 to flat review 'PLEASE DO NOT PANIC.'",
["BRAILLE LINE: 'PLEASE DO NOT PANIC. $l'",
" VISIBLE: 'PLEASE DO NOT PANIC. $l', cursor=1",
"SPEECH OUTPUT: 'PLEASE DO NOT PANIC.",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_5"))
sequence.append(utils.AssertPresentationAction(
"3. KP_5 to flat review 'PLEASE'",
["BRAILLE LINE: 'PLEASE DO NOT PANIC. $l'",
" VISIBLE: 'PLEASE DO NOT PANIC. $l', cursor=1",
"SPEECH OUTPUT: 'PLEASE'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_2"))
sequence.append(utils.AssertPresentationAction(
"4. KP_2 to flat review 'P'",
["BRAILLE LINE: 'PLEASE DO NOT PANIC. $l'",
" VISIBLE: 'PLEASE DO NOT PANIC. $l', cursor=1",
"SPEECH OUTPUT: 'P'"]))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(KeyComboAction("Down"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_5"))
sequence.append(utils.AssertPresentationAction(
"5. KP_5 to flat review 'This'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_5"))
sequence.append(KeyComboAction("KP_5"))
sequence.append(utils.AssertPresentationAction(
"6. KP_5 2X to spell 'This'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This'",
"SPEECH OUTPUT: 'T'",
"SPEECH OUTPUT: 'h'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_5"))
sequence.append(KeyComboAction("KP_5"))
sequence.append(KeyComboAction("KP_5"))
sequence.append(utils.AssertPresentationAction(
"7. KP_5 3X to military spell 'This'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This'",
"SPEECH OUTPUT: 'T'",
"SPEECH OUTPUT: 'h'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 'tango' voice=uppercase",
"SPEECH OUTPUT: 'hotel'",
"SPEECH OUTPUT: 'india'",
"SPEECH OUTPUT: 'sierra'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"8. KP_8 to flat review 'This is only a test.'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This is only a test.",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"9. KP_8 2X to spell 'This is only a test.'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This is only a test.",
"'",
"SPEECH OUTPUT: 'T'",
"SPEECH OUTPUT: 'h'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'o'",
"SPEECH OUTPUT: 'n'",
"SPEECH OUTPUT: 'l'",
"SPEECH OUTPUT: 'y'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'a'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 't'",
"SPEECH OUTPUT: 'e'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 't'",
"SPEECH OUTPUT: 'dot'",
"SPEECH OUTPUT: 'newline'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_8"))
sequence.append(KeyComboAction("KP_8"))
sequence.append(KeyComboAction("KP_8"))
sequence.append(utils.AssertPresentationAction(
"10. KP_8 3X to military spell 'This is only a test.'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'This is only a test.",
"'",
"SPEECH OUTPUT: 'T'",
"SPEECH OUTPUT: 'h'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'i'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'o'",
"SPEECH OUTPUT: 'n'",
"SPEECH OUTPUT: 'l'",
"SPEECH OUTPUT: 'y'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 'a'",
"SPEECH OUTPUT: 'space'",
"SPEECH OUTPUT: 't'",
"SPEECH OUTPUT: 'e'",
"SPEECH OUTPUT: 's'",
"SPEECH OUTPUT: 't'",
"SPEECH OUTPUT: 'dot'",
"SPEECH OUTPUT: 'newline'",
"SPEECH OUTPUT: 'tango' voice=uppercase",
"SPEECH OUTPUT: 'hotel'",
"SPEECH OUTPUT: 'india'",
"SPEECH OUTPUT: 'sierra'",
"SPEECH OUTPUT: ' '",
"SPEECH OUTPUT: 'india'",
"SPEECH OUTPUT: 'sierra'",
"SPEECH OUTPUT: ' '",
"SPEECH OUTPUT: 'oscar'",
"SPEECH OUTPUT: 'november'",
"SPEECH OUTPUT: 'lima'",
"SPEECH OUTPUT: 'yankee'",
"SPEECH OUTPUT: ' '",
"SPEECH OUTPUT: 'alpha'",
"SPEECH OUTPUT: ' '",
"SPEECH OUTPUT: 'tango'",
"SPEECH OUTPUT: 'echo'",
"SPEECH OUTPUT: 'sierra'",
"SPEECH OUTPUT: 'tango'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: '",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_2"))
sequence.append(utils.AssertPresentationAction(
"11. KP_2 to flat review 'T'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'T'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_2"))
sequence.append(KeyComboAction("KP_2"))
sequence.append(utils.AssertPresentationAction(
"12. KP_2 2X to military spell 'T'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'T'",
"SPEECH OUTPUT: 'tango' voice=uppercase"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_6"))
sequence.append(utils.AssertPresentationAction(
"13. KP_6 to flat review 'is'",
["BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=6",
"SPEECH OUTPUT: 'is'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"14. KP_7 to flat review 'This is a test.' and the scrollbar",
["BRAILLE LINE: 'This is a test. $l'",
" VISIBLE: 'This is a test. $l', cursor=1",
"SPEECH OUTPUT: 'This is a test. ",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"15. KP_7 to flat review toolbar",
["BRAILLE LINE: 'Open & y toggle button Quit panel GTK! $l'",
" VISIBLE: 'Open & y toggle button Quit pane', cursor=1",
"SPEECH OUTPUT: 'Open not pressed toggle button Quit panel GTK!'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_7"))
sequence.append(utils.AssertPresentationAction(
"16. KP_7 to flat review menu",
["BRAILLE LINE: 'File Preferences Help $l'",
" VISIBLE: 'File Preferences Help $l', cursor=1",
"SPEECH OUTPUT: 'File Preferences Help'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_6"))
sequence.append(utils.AssertPresentationAction(
"17. KP_6 to flat review 'Preferences'",
["BRAILLE LINE: 'File Preferences Help $l'",
" VISIBLE: 'File Preferences Help $l', cursor=6",
"SPEECH OUTPUT: 'Preferences'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_6"))
sequence.append(utils.AssertPresentationAction(
"18. KP_6 to flat review 'Help'",
["BRAILLE LINE: 'File Preferences Help $l'",
" VISIBLE: 'File Preferences Help $l', cursor=18",
"SPEECH OUTPUT: 'Help'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_5"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"19. Insert+KP_5 to flat review 'Help' accessible",
["SPEECH OUTPUT: 'Help menu.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_9"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"20. Insert+KP_9 to flat review end",
["BRAILLE LINE: 'Cursor at row 1 column 0 - 60 chars in document $l'",
" VISIBLE: 'ars in document $l', cursor=15",
"SPEECH OUTPUT: 'Cursor at row 1 column 0 - 60 chars in document'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_7"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"21. Insert+KP_7 to flat review home",
["BRAILLE LINE: 'File Preferences Help $l'",
" VISIBLE: 'File Preferences Help $l', cursor=1",
"SPEECH OUTPUT: 'File Preferences Help'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_6"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"22. Insert+KP_6 to flat review below",
["BRAILLE LINE: 'Open & y toggle button Quit panel GTK! $l'",
" VISIBLE: 'Open & y toggle button Quit pane', cursor=1",
"SPEECH OUTPUT: 'Open'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_4"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"23. Insert+KP_4 to flat review above",
["BRAILLE LINE: 'File Preferences Help $l'",
" VISIBLE: 'File Preferences Help $l', cursor=1",
"SPEECH OUTPUT: 'File'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Subtract"))
sequence.append(utils.AssertPresentationAction(
"24. KP_Subtract to exit flat review",
["BRAILLE LINE: 'Leaving flat review.'",
" VISIBLE: 'Leaving flat review.', cursor=0",
"BRAILLE LINE: 'This is only a test. $l'",
" VISIBLE: 'This is only a test. $l', cursor=1",
"SPEECH OUTPUT: 'Leaving flat review.' voice=system"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
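# --- Illustrative note (not part of the original test) ---
# These Orca regression tests are normally driven by the harness rather than
# run directly; with the gtk-demo application available, something like the
# harness's runone.sh wrapper (name assumed from the Orca test harness) is
# used to execute this file and compare the assertions above.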
| lgpl-2.1 | -5,460,343,753,319,863,000 | 37.096491 | 75 | 0.651009 | false |
vrtsystems/pyhaystack | pyhaystack/client/ops/vendor/niagara.py | 1 | 6824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Niagara AX operation implementations.
"""
import fysom
import re
from ....util import state
from ....util.asyncexc import AsynchronousException
from ...http.auth import BasicAuthenticationCredentials
from ...http.exceptions import HTTPStatusError
class NiagaraAXAuthenticateOperation(state.HaystackOperation):
"""
An implementation of the log-in procedure for Niagara AX. The procedure
is as follows:
1. Do a request of the log-in URL, without credentials. This sets session
cookies in the client. Response should be code 200.
2. Pick up the session cookie named 'niagara_session', submit this in
a POST request to the login URL with a number of other parameters.
The response should NOT include the word 'login'.
Future requests should include the basic authentication credentials.
"""
_LOGIN_RE = re.compile('login', re.IGNORECASE)
def __init__(self, session, retries=0):
"""
Attempt to log in to the Niagara AX server.
:param session: Haystack HTTP session object.
:param retries: Number of retries permitted in case of failure.
"""
super(NiagaraAXAuthenticateOperation, self).__init__()
self._retries = retries
self._session = session
self._cookies = {}
self._auth = BasicAuthenticationCredentials(session._username,
session._password)
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('get_new_session', 'init', 'newsession'),
('do_login', 'newsession', 'login'),
('login_done', 'login', 'done'),
('exception', '*', 'failed'),
('retry', 'failed', 'newsession'),
('abort', 'failed', 'done'),
], callbacks={
'onenternewsession': self._do_new_session,
'onenterlogin': self._do_login,
'onenterfailed': self._do_fail_retry,
'onenterdone': self._do_done,
})
def go(self):
"""
Start the request.
"""
# Are we logged in?
try:
self._state_machine.get_new_session()
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_new_session(self, event):
"""
Request the log-in cookie.
"""
try:
self._session._get('login', self._on_new_session,
cookies={}, headers={}, exclude_cookies=True,
exclude_headers=True, api=False)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _on_new_session(self, response):
"""
Retrieve the log-in cookie.
"""
try:
if isinstance(response, AsynchronousException):
try:
response.reraise()
except HTTPStatusError as e:
if e.status == 404:
pass
else:
raise
self._cookies = response.cookies.copy()
self._state_machine.do_login()
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_login(self, event):
try:
# Cover Niagara AX 3.7 where cookies are handled differently...
try:
niagara_session = self._cookies['niagara_session']
except KeyError:
niagara_session = ""
self._session._post('login', self._on_login,
params={
'token':'',
'scheme':'cookieDigest',
'absPathBase':'/',
'content-type':'application/x-niagara-login-support',
'Referer':self._session._client.uri+'login/',
'accept':'text/zinc; charset=utf-8',
'cookiePostfix' : niagara_session,
},
headers={}, cookies=self._cookies,
exclude_cookies=True, exclude_proxies=True,
api=False, auth=self._auth)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _on_login(self, response):
"""
See if the login succeeded.
"""
try:
if isinstance(response, AsynchronousException):
try:
response.reraise()
except HTTPStatusError as e:
if e.status == 404:
pass
else:
raise
else:
if self._LOGIN_RE.match(response.text):
# No good.
raise IOError('Login failed')
self._state_machine.login_done(result=(self._auth, self._cookies))
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_fail_retry(self, event):
"""
Determine whether we retry or fail outright.
"""
if self._retries > 0:
self._retries -= 1
self._state_machine.retry()
else:
self._state_machine.abort(result=event.result)
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
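# --- Illustrative sketch (not part of the original module) ---
# Rough driving code for this operation. `session` is assumed to be a
# pyhaystack HTTP session exposing _username/_password and the _get/_post
# methods used above; the done_sig callback wiring is also an assumption.
#
#     op = NiagaraAXAuthenticateOperation(session, retries=2)
#     op.done_sig.connect(on_done)   # on_done receives the finished operation
#     op.go()
#
# On success the operation result is the (auth, cookies) tuple passed to
# login_done() above; on failure, accessing the result re-raises the error.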
| apache-2.0 | 5,503,633,175,312,445,000 | 37.994286 | 80 | 0.520516 | false |
mjabri/holoviews | holoviews/core/layout.py | 1 | 16803 | """
Supplies Layout, NdLayout and AdjointLayout. NdLayout allows multiple
Views to be presented side-by-side in a grid layout. An
AdjointLayout allows one or two Views to be adjoined to a primary View
to act as supplementary elements.
"""
from functools import reduce
from itertools import chain
import numpy as np
import param
from .dimension import Dimension, Dimensioned, ViewableElement
from .ndmapping import OrderedDict, NdMapping, UniformNdMapping
from .tree import AttrTree
from .util import int_to_roman, sanitize_identifier
from . import traversal
class Composable(object):
"""
Composable is a mix-in class to allow Dimensioned object to be
embedded within Layouts and GridSpaces.
"""
def __add__(self, obj):
return Layout.from_values(self) + Layout.from_values(obj)
def __lshift__(self, other):
if isinstance(other, (ViewableElement, NdMapping)):
return AdjointLayout([self, other])
elif isinstance(other, AdjointLayout):
return AdjointLayout(other.data.values()+[self])
else:
raise TypeError('Cannot append {0} to a AdjointLayout'.format(type(other).__name__))
class Empty(Dimensioned, Composable):
"""
Empty may be used to define an empty placeholder in a Layout. It can be
placed in a Layout just like any regular Element and container
type via the + operator or by passing it to the Layout constructor
as a part of a list.
"""
group = param.String(default='Empty')
def __init__(self):
super(Empty, self).__init__(None)
class AdjointLayout(Dimensioned):
"""
A AdjointLayout provides a convenient container to lay out a primary plot
with some additional supplemental plots, e.g. an image in a
Image annotated with a luminance histogram. AdjointLayout accepts a
list of three ViewableElement elements, which are laid out as follows with
the names 'main', 'top' and 'right':
___________ __
|____ 3_____|__|
| | | 1: main
| | | 2: right
| 1 |2 | 3: top
| | |
|___________|__|
"""
kdims = param.List(default=[Dimension('AdjointLayout')], constant=True)
layout_order = ['main', 'right', 'top']
_deep_indexable = True
_auxiliary_component = False
def __init__(self, data, **params):
self.main_layer = 0 # The index of the main layer if .main is an overlay
if data and len(data) > 3:
raise Exception('AdjointLayout accepts no more than three elements.')
if data is not None and all(isinstance(v, tuple) for v in data):
data = dict(data)
if isinstance(data, dict):
wrong_pos = [k for k in data if k not in self.layout_order]
if wrong_pos:
raise Exception('Wrong AdjointLayout positions provided.')
elif isinstance(data, list):
data = dict(zip(self.layout_order, data))
else:
data = OrderedDict()
super(AdjointLayout, self).__init__(data, **params)
@property
def group(self):
if self.main and self.main.group != type(self.main).__name__:
return self.main.group
else:
return 'AdjointLayout'
@property
def label(self):
return self.main.label if self.main else ''
# Both group and label need empty setters due to param inheritance
@group.setter
def group(self, group): pass
@label.setter
def label(self, label): pass
def relabel(self, label=None, group=None, depth=1):
# Identical to standard relabel method except for default depth of 1
return super(AdjointLayout, self).relabel(label=label, group=group, depth=depth)
def get(self, key, default=None):
return self.data[key] if key in self.data else default
def dimension_values(self, dimension):
dimension = self.get_dimension(dimension).name
if dimension in self.kdims:
return self.layout_order[:len(self.data)]
else:
return self.main.dimension_values(dimension)
def __getitem__(self, key):
if key == ():
return self
data_slice = None
if isinstance(key, tuple):
data_slice = key[1:]
key = key[0]
if isinstance(key, int) and key <= len(self):
if key == 0: data = self.main
if key == 1: data = self.right
if key == 2: data = self.top
if data_slice: data = data[data_slice]
return data
elif isinstance(key, str) and key in self.data:
if data_slice is None:
return self.data[key]
else:
return self.data[key][data_slice]
elif isinstance(key, slice) and key.start is None and key.stop is None:
return self if data_slice is None else self.clone([el[data_slice]
for el in self])
else:
raise KeyError("Key {0} not found in AdjointLayout.".format(key))
def __setitem__(self, key, value):
if key in ['main', 'right', 'top']:
if isinstance(value, (ViewableElement, UniformNdMapping)):
self.data[key] = value
else:
raise ValueError('AdjointLayout only accepts Element types.')
else:
raise Exception('Position %s not valid in AdjointLayout.' % key)
def __lshift__(self, other):
views = [self.data.get(k, None) for k in self.layout_order]
return AdjointLayout([v for v in views if v is not None] + [other])
@property
def ddims(self):
return self.main.dimensions()
@property
def main(self):
return self.data.get('main', None)
@property
def right(self):
return self.data.get('right', None)
@property
def top(self):
return self.data.get('top', None)
@property
def last(self):
items = [(k, v.last) if isinstance(v, NdMapping) else (k, v)
for k, v in self.data.items()]
return self.__class__(dict(items))
def keys(self):
return list(self.data.keys())
def items(self):
return list(self.data.items())
def __iter__(self):
i = 0
while i < len(self):
yield self[i]
i += 1
def __add__(self, obj):
return Layout.from_values(self) + Layout.from_values(obj)
def __len__(self):
return len(self.data)
class NdLayout(UniformNdMapping):
"""
NdLayout is a UniformNdMapping providing an n-dimensional
data structure to display the contained Elements and containers
in a layout. Using the cols method the NdLayout can be rearranged
with the desired number of columns.
"""
data_type = (ViewableElement, AdjointLayout, UniformNdMapping)
def __init__(self, initial_items=None, **params):
self._max_cols = 4
self._style = None
super(NdLayout, self).__init__(initial_items=initial_items, **params)
@property
def uniform(self):
return traversal.uniform(self)
@property
def shape(self):
num = len(self.keys())
if num <= self._max_cols:
return (1, num)
nrows = num // self._max_cols
last_row_cols = num % self._max_cols
return nrows+(1 if last_row_cols else 0), min(num, self._max_cols)
def grid_items(self):
"""
Compute a dict of {(row,column): (key, value)} elements from the
current set of items and specified number of columns.
"""
if list(self.keys()) == []: return {}
cols = self._max_cols
return {(idx // cols, idx % cols): (key, item)
for idx, (key, item) in enumerate(self.data.items())}
def cols(self, n):
self._max_cols = n
return self
def __add__(self, obj):
return Layout.from_values(self) + Layout.from_values(obj)
@property
def last(self):
"""
Returns another NdLayout constituted of the last views of the
individual elements (if they are maps).
"""
last_items = []
for (k, v) in self.items():
if isinstance(v, NdMapping):
item = (k, v.clone((v.last_key, v.last)))
elif isinstance(v, AdjointLayout):
item = (k, v.last)
else:
item = (k, v)
last_items.append(item)
return self.clone(last_items)
# To be removed after 1.3.0
class Warning(param.Parameterized): pass
collate_deprecation = Warning(name='Deprecation Warning')
class Layout(AttrTree, Dimensioned):
"""
A Layout is an AttrTree with ViewableElement objects as leaf
values. Unlike AttrTree, a Layout supports a rich display,
displaying leaf items in a grid style layout. In addition to the
usual AttrTree indexing, Layout supports indexing of items by
their row and column index in the layout.
The maximum number of columns in such a layout may be controlled
with the cols method and the display policy is set with the
display method. A display policy of 'auto' may use the string repr
of the tree for large trees that would otherwise take a long time
to display whereas a policy of 'all' will always display all the
available leaves. The detailed settings for the 'auto' policy may
be set using the max_branches option of the %output magic.
"""
group = param.String(default='Layout', constant=True)
_deep_indexable = True
@classmethod
def collate(cls, data, kdims=None, key_dimensions=None):
kdims = key_dimensions if (kdims is None) else kdims
if kdims is None:
raise Exception("Please specify the key dimensions.")
collate_deprecation.warning("Layout.collate will be deprecated after version 1.3.0."
"\nUse HoloMap.collate instead (see HoloViews homepage for example usage)")
from .element import Collator
layouts = {k:(v if isinstance(v, Layout) else Layout.from_values([v]))
for k,v in data.items()}
return Collator(layouts, kdims=kdims)()
@classmethod
def new_path(cls, path, item, paths, count):
path = tuple(sanitize_identifier(p) for p in path)
while any(path[:i] in paths or path in [p[:i] for p in paths]
for i in range(1,len(path)+1)):
path = path[:2]
pl = len(path)
if (pl == 1 and not item.label) or (pl == 2 and item.label):
new_path = path + (int_to_roman(count-1),)
if path in paths:
paths[paths.index(path)] = new_path
path = path + (int_to_roman(count),)
else:
path = path[:-1] + (int_to_roman(count),)
count += 1
return path, count
@classmethod
def relabel_item_paths(cls, items):
"""
Given a list of path items (list of tuples where each element
is a (path, element) pair), generate a new set of path items that
guarantees that no paths clash. This uses the element labels as
appropriate and automatically generates roman numeral
identifiers if necessary.
"""
paths, path_items = [], []
count = 2
for path, item in items:
new_path, count = cls.new_path(path, item, paths, count)
new_path = tuple(''.join((p[0].upper(), p[1:])) for p in new_path)
path_items.append(item)
paths.append(new_path)
return list(zip(paths, path_items))
@classmethod
def _from_values(cls, val):
return reduce(lambda x,y: x+y, val).display('auto')
@classmethod
def from_values(cls, val):
"""
Returns a Layout given a list (or tuple) of viewable
elements or just a single viewable element.
"""
collection = isinstance(val, (list, tuple))
if type(val) is cls:
return val
elif collection and len(val)>1:
return cls._from_values(val)
elif collection:
val = val[0]
group = sanitize_identifier(val.group)
group = ''.join([group[0].upper(), group[1:]])
label = sanitize_identifier(val.label if val.label else 'I')
label = ''.join([label[0].upper(), label[1:]])
return cls(items=[((group, label), val)])
def __init__(self, items=None, identifier=None, parent=None, **kwargs):
self.__dict__['_display'] = 'auto'
self.__dict__['_max_cols'] = 4
if items and all(isinstance(item, Dimensioned) for item in items):
items = self.from_values(items).data
params = {p: kwargs.pop(p) for p in list(self.params().keys())+['id'] if p in kwargs}
AttrTree.__init__(self, items, identifier, parent, **kwargs)
Dimensioned.__init__(self, self.data, **params)
@property
def uniform(self):
return traversal.uniform(self)
@property
def shape(self):
num = len(self)
if num <= self._max_cols:
return (1, num)
nrows = num // self._max_cols
last_row_cols = num % self._max_cols
return nrows+(1 if last_row_cols else 0), min(num, self._max_cols)
def relabel(self, label=None, group=None, depth=0):
# Standard relabel method except _max_cols and _display transferred
relabelled = super(Layout, self).relabel(label=label, group=group, depth=depth)
relabelled.__dict__['_max_cols'] = self.__dict__['_max_cols']
relabelled.__dict__['_display'] = self.__dict__['_display']
return relabelled
def clone(self, *args, **overrides):
"""
Clone method for Layout matches Dimensioned.clone except the
display mode is also propagated.
"""
clone = super(Layout, self).clone(*args, **overrides)
clone._display = self._display
return clone
def dimension_values(self, dimension):
"Returns the values along the specified dimension."
dimension = self.get_dimension(dimension).name
all_dims = self.traverse(lambda x: [d.name for d in x.dimensions()])
if dimension in chain.from_iterable(all_dims):
values = [el.dimension_values(dimension) for el in self
if dimension in el.dimensions(label=True)]
return np.concatenate(values)
else:
return super(Layout, self).dimension_values(dimension)
def cols(self, ncols):
self._max_cols = ncols
return self
def display(self, option):
"Sets the display policy of the Layout before returning self"
options = ['auto', 'all']
if option not in options:
raise Exception("Display option must be one of %s" %
','.join(repr(el) for el in options))
self._display = option
return self
def select(self, selection_specs=None, **selections):
return super(Layout, self).select(selection_specs, **selections).display(self._display)
def grid_items(self):
return {tuple(np.unravel_index(idx, self.shape)): (path, item)
for idx, (path, item) in enumerate(self.items())}
def regroup(self, group):
"""
Assign a new group string to all the elements and return a new
Layout.
"""
new_items = [el.relabel(group=group) for el in self.data.values()]
return reduce(lambda x,y: x+y, new_items)
def __getitem__(self, key):
if isinstance(key, int):
if key < len(self):
return self.data.values()[key]
raise KeyError("Element out of range.")
if len(key) == 2 and not any([isinstance(k, str) for k in key]):
if key == (slice(None), slice(None)): return self
row, col = key
idx = row * self._max_cols + col
keys = list(self.data.keys())
if idx >= len(keys) or col >= self._max_cols:
raise KeyError('Index %s is outside available item range' % str(key))
key = keys[idx]
return super(Layout, self).__getitem__(key)
def __len__(self):
return len(self.data)
def __add__(self, other):
other = self.from_values(other)
items = list(self.data.items()) + list(other.data.items())
return Layout(items=self.relabel_item_paths(items)).display('all')
__all__ = list(set([_k for _k, _v in locals().items()
if isinstance(_v, type) and (issubclass(_v, Dimensioned)
or issubclass(_v, Layout))]))
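# --- Illustrative sketch (not part of the original module) ---
# Typical composition patterns for the containers above, assuming `img` and
# `hist` are any two ViewableElement instances built elsewhere:
#
#     adjoint = img << hist            # AdjointLayout: hist adjoined to img
#     layout = (img + hist).cols(1)    # Layout arranged in a single column
#     layout.display('all')            # always render every leaf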
| bsd-3-clause | -7,848,132,722,060,934,000 | 32.011788 | 111 | 0.58799 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/express_route_circuit.py | 1 | 5408 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteCircuit(Resource):
"""ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState state of
the resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState state of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2017_08_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, sku=None, allow_classic_operations=None, circuit_provisioning_state=None, service_provider_provisioning_state=None, authorizations=None, peerings=None, service_key=None, service_provider_notes=None, service_provider_properties=None, provisioning_state=None, gateway_manager_etag=None):
super(ExpressRouteCircuit, self).__init__(id=id, location=location, tags=tags)
self.sku = sku
self.allow_classic_operations = allow_classic_operations
self.circuit_provisioning_state = circuit_provisioning_state
self.service_provider_provisioning_state = service_provider_provisioning_state
self.authorizations = authorizations
self.peerings = peerings
self.service_key = service_key
self.service_provider_notes = service_provider_notes
self.service_provider_properties = service_provider_properties
self.provisioning_state = provisioning_state
self.gateway_manager_etag = gateway_manager_etag
self.etag = None
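# --- Illustrative sketch (not part of the original module) ---
# Direct construction of the model; all values below are invented and the
# sku/peerings fields would normally be built from their own model classes.
#
#     circuit = ExpressRouteCircuit(
#         location='westus',
#         service_provider_provisioning_state='NotProvisioned',
#         service_key='00000000-0000-0000-0000-000000000000')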
| mit | -3,854,368,947,551,258,000 | 49.542056 | 343 | 0.673262 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-batchai/azure/mgmt/batchai/models/clusters_list_by_resource_group_options.py | 1 | 1251 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClustersListByResourceGroupOptions(Model):
"""Additional parameters for list_by_resource_group operation.
:param filter: An OData $filter clause. Used to filter results that are
returned in the GET response.
:type filter: str
:param select: An OData $select clause. Used to select the properties to
be returned in the GET response.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 files can be returned. Default value: 1000 .
:type max_results: int
"""
def __init__(self, filter=None, select=None, max_results=1000):
self.filter = filter
self.select = select
self.max_results = max_results
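# --- Illustrative sketch (not part of the original module) ---
# Example construction; the filter and select strings below are invented.
#
#     opts = ClustersListByResourceGroupOptions(
#         filter="name eq 'my-cluster'",
#         select='name,provisioningState',
#         max_results=100)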
| mit | -7,282,982,308,689,821,000 | 38.09375 | 78 | 0.630695 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/pythonwin/pywin/framework/intpydde.py | 1 | 1360 | # DDE support for Pythonwin
#
# Seems to work fine (in the context that IE4 seems to have broken
# DDE on _all_ NT4 machines I have tried, but only when a "Command Prompt" window
# is open. Strange, but true. If you have problems with this, close all Command Prompts!
import win32ui
import win32api, win32con
from pywin.mfc import object
from dde import *
import traceback
import string
class DDESystemTopic(object.Object):
def __init__(self, app):
self.app = app
object.Object.__init__(self, CreateServerSystemTopic())
def Exec(self, data):
try:
# print "Executing", cmd
self.app.OnDDECommand(data)
except:
# The DDE Execution failed.
print "Error executing DDE command."
traceback.print_exc()
return 0
class DDEServer(object.Object):
def __init__(self, app):
self.app = app
object.Object.__init__(self, CreateServer())
self.topic = self.item = None
def CreateSystemTopic(self):
return DDESystemTopic(self.app)
def Shutdown(self):
self._obj_.Shutdown()
self._obj_.Destroy()
if self.topic is not None:
self.topic.Destroy()
self.topic = None
if self.item is not None:
self.item.Destroy()
self.item = None
def OnCreate(self):
return 1
def Status(self, msg):
try:
win32ui.SetStatusText(msg)
except win32ui.error:
pass
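# --- Illustrative sketch (not part of the original module) ---
# Rough wiring of the server into a Pythonwin app object. `app` is assumed
# to implement OnDDECommand(cmd), as required by DDESystemTopic.Exec above.
#
#     server = DDEServer(app)
#     server.Create("Pythonwin")   # register the DDE service name
#     ...
#     server.Shutdown()            # on application exit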
| epl-1.0 | -7,496,053,804,317,189,000 | 22.285714 | 90 | 0.672059 | false |
tysonholub/twilio-python | twilio/rest/voice/v1/dialing_permissions/country/__init__.py | 1 | 18230 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix import HighriskSpecialPrefixList
class CountryList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version):
"""
Initialize the CountryList
:param Version version: Version that contains the resource
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList
"""
super(CountryList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/DialingPermissions/Countries'.format(**self._solution)
def stream(self, iso_code=values.unset, continent=values.unset,
country_code=values.unset, low_risk_numbers_enabled=values.unset,
high_risk_special_numbers_enabled=values.unset,
high_risk_tollfraud_numbers_enabled=values.unset, limit=None,
page_size=None):
"""
Streams CountryInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code
:param unicode continent: Filter to retrieve the country permissions by specifying the continent
:param unicode country_code: Country code filter
:param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled
:param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled
:param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.voice.v1.dialing_permissions.country.CountryInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
iso_code=iso_code,
continent=continent,
country_code=country_code,
low_risk_numbers_enabled=low_risk_numbers_enabled,
high_risk_special_numbers_enabled=high_risk_special_numbers_enabled,
high_risk_tollfraud_numbers_enabled=high_risk_tollfraud_numbers_enabled,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, iso_code=values.unset, continent=values.unset,
country_code=values.unset, low_risk_numbers_enabled=values.unset,
high_risk_special_numbers_enabled=values.unset,
high_risk_tollfraud_numbers_enabled=values.unset, limit=None,
page_size=None):
"""
Lists CountryInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code
:param unicode continent: Filter to retrieve the country permissions by specifying the continent
:param unicode country_code: Country code filter
:param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled
:param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled
:param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.voice.v1.dialing_permissions.country.CountryInstance]
"""
return list(self.stream(
iso_code=iso_code,
continent=continent,
country_code=country_code,
low_risk_numbers_enabled=low_risk_numbers_enabled,
high_risk_special_numbers_enabled=high_risk_special_numbers_enabled,
high_risk_tollfraud_numbers_enabled=high_risk_tollfraud_numbers_enabled,
limit=limit,
page_size=page_size,
))
def page(self, iso_code=values.unset, continent=values.unset,
country_code=values.unset, low_risk_numbers_enabled=values.unset,
high_risk_special_numbers_enabled=values.unset,
high_risk_tollfraud_numbers_enabled=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CountryInstance records from the API.
Request is executed immediately
:param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code
:param unicode continent: Filter to retrieve the country permissions by specifying the continent
:param unicode country_code: Country code filter
:param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled
:param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled
:param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
"""
params = values.of({
'IsoCode': iso_code,
'Continent': continent,
'CountryCode': country_code,
'LowRiskNumbersEnabled': low_risk_numbers_enabled,
'HighRiskSpecialNumbersEnabled': high_risk_special_numbers_enabled,
'HighRiskTollfraudNumbersEnabled': high_risk_tollfraud_numbers_enabled,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CountryPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CountryInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CountryPage(self._version, response, self._solution)
def get(self, iso_code):
"""
Constructs a CountryContext
:param iso_code: The ISO country code
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
return CountryContext(self._version, iso_code=iso_code, )
def __call__(self, iso_code):
"""
Constructs a CountryContext
:param iso_code: The ISO country code
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
return CountryContext(self._version, iso_code=iso_code, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Voice.V1.CountryList>'
class CountryPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the CountryPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
"""
super(CountryPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CountryInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
return CountryInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Voice.V1.CountryPage>'
class CountryContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, iso_code):
"""
Initialize the CountryContext
:param Version version: Version that contains the resource
:param iso_code: The ISO country code
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
super(CountryContext, self).__init__(version)
# Path Solution
self._solution = {'iso_code': iso_code, }
self._uri = '/DialingPermissions/Countries/{iso_code}'.format(**self._solution)
# Dependents
self._highrisk_special_prefixes = None
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CountryInstance(self._version, payload, iso_code=self._solution['iso_code'], )
@property
def highrisk_special_prefixes(self):
"""
Access the highrisk_special_prefixes
:returns: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
"""
if self._highrisk_special_prefixes is None:
self._highrisk_special_prefixes = HighriskSpecialPrefixList(
self._version,
iso_code=self._solution['iso_code'],
)
return self._highrisk_special_prefixes
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Voice.V1.CountryContext {}>'.format(context)
class CountryInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, iso_code=None):
"""
Initialize the CountryInstance
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
super(CountryInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'iso_code': payload.get('iso_code'),
'name': payload.get('name'),
'continent': payload.get('continent'),
'country_codes': payload.get('country_codes'),
'low_risk_numbers_enabled': payload.get('low_risk_numbers_enabled'),
'high_risk_special_numbers_enabled': payload.get('high_risk_special_numbers_enabled'),
'high_risk_tollfraud_numbers_enabled': payload.get('high_risk_tollfraud_numbers_enabled'),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'iso_code': iso_code or self._properties['iso_code'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CountryContext for this CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
if self._context is None:
self._context = CountryContext(self._version, iso_code=self._solution['iso_code'], )
return self._context
@property
def iso_code(self):
"""
:returns: The ISO country code
:rtype: unicode
"""
return self._properties['iso_code']
@property
def name(self):
"""
:returns: Name of the country
:rtype: unicode
"""
return self._properties['name']
@property
def continent(self):
"""
:returns: Name of the continent
:rtype: unicode
"""
return self._properties['continent']
@property
def country_codes(self):
"""
:returns: The E.164 assigned country codes(s)
:rtype: unicode
"""
return self._properties['country_codes']
@property
def low_risk_numbers_enabled(self):
"""
:returns: `true`, if dialing to low-risk numbers is enabled, else `false`
:rtype: bool
"""
return self._properties['low_risk_numbers_enabled']
@property
def high_risk_special_numbers_enabled(self):
"""
:returns: `true`, if dialing to high-risk special services numbers is enabled, else `false`
:rtype: bool
"""
return self._properties['high_risk_special_numbers_enabled']
@property
def high_risk_tollfraud_numbers_enabled(self):
"""
:returns: `true`, if dialing to high-risk toll fraud numbers is enabled, else `false`
:rtype: bool
"""
return self._properties['high_risk_tollfraud_numbers_enabled']
@property
def url(self):
"""
:returns: The absolute URL of this resource
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: A list of URLs related to this resource
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryInstance
"""
return self._proxy.fetch()
@property
def highrisk_special_prefixes(self):
"""
Access the highrisk_special_prefixes
:returns: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.highrisk_special_prefix.HighriskSpecialPrefixList
"""
return self._proxy.highrisk_special_prefixes
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Voice.V1.CountryInstance {}>'.format(context)
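# --- Illustrative sketch (not part of the original module) ---
# Typical access through the twilio REST client; the credentials are
# placeholders and the accessor path is inferred from the resource layout.
#
#     from twilio.rest import Client
#     client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')
#     for country in client.voice.dialing_permissions.countries.list(
#             continent='Europe', low_risk_numbers_enabled=True):
#         print(country.iso_code, country.name)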
| mit | -5,479,682,154,332,818,000 | 38.803493 | 155 | 0.644158 | false |
Logicify/xrandr-conf | profileconf/modules/xrandr/executors.py | 1 | 3876 | import re
from profileconf.executor import Executor
from profileconf.modules.xrandr import context
from tools import run_xrandr_command
__author__ = 'corvis'
class XrandrExecutor(Executor):
name = "xrandr"
def __init__(self, ref_name, definition):
super(XrandrExecutor, self).__init__()
self.cmd_options = definition
def execute(self, configuration, system_state):
super(XrandrExecutor, self).execute(configuration, system_state)
run_xrandr_command(self.cmd_options)
class ConfigureDisplaysExecutor(Executor):
"""
Provides the following context variables.
Expected config:
<device wildcard>:
<device configuration options> - see create_xrandr_screen_for_display docs for details
On display section level:
* $preferredResolution - preferred resolution of the given display
"""
name = "configure-displays"
COORDS_REGEX = re.compile('^\d+x\d+$')
DISPLAY_POSITION_REGEX = re.compile('(?P<location>left\-of|right\-of|below|above)\s+(?P<id>[\w\d]+)')
def __init__(self, ref_name, definition):
super(ConfigureDisplaysExecutor, self).__init__()
self.definition = definition
def create_xrandr_screen_for_display(self, display, config_def):
"""
See example definition for reference:
state: "on"|"off"
primary: true|false
mode: "1920x1080"
position: "0x0" | left-of <ID> | right-of <ID> | below <ID> | above <ID>
rotate: "normal"
auto: true
:type display: domain.Display
:param config_def:
:return:
"""
res = ['--output ' + display.id]
if 'state' in config_def and config_def['state'].lower() == 'off':
res.append('--off')
if 'mode' in config_def:
res.append('--mode ' + config_def['mode'])
if 'position' in config_def:
if self.COORDS_REGEX.match(config_def['position']):
res.append('--pos ' + config_def['position'])
else:
match = self.DISPLAY_POSITION_REGEX.search(config_def['position'])
if match is None:  # the regex defines exactly two groups, so only a failed match is invalid
raise Exception('Invalid display position definition: ' + config_def['position'])
res.append('--{} {}'.format(match.group('location'), match.group('id')))
if 'primary' in config_def and config_def['primary']:
res.append('--primary')
if 'auto' in config_def and config_def['auto']:
res.append('--auto')
return ' '.join(res)
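# --- Illustrative sketch (not part of the original module) ---
# Example of the per-display definition this method consumes and the
# xrandr fragment it produces (the display id is invented):
#
#     {'state': 'on', 'primary': True, 'mode': '1920x1080',
#      'position': 'right-of eDP1'}
#     -> "--output HDMI1 --mode 1920x1080 --right-of eDP1 --primary"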
def initialize_context(self, configuration, system_state):
# this one expects to have "current_display" in context
self.register_context_function(context.predefined_resolution)
def execute(self, configuration, system_state):
"""
:type configuration: domain.Configuration
:type system_state: domain.SystemState
:return:
"""
super(ConfigureDisplaysExecutor, self).execute(configuration, system_state)
display_system_state = system_state.get_section('display')
xrandr_conf = ''
# We expect to get the list of devices
for display_selector, config_def in self.definition.items():
displays = display_system_state.default_screen.get_displays_by_wildcard(display_selector)
for display in displays:
local_context = {
"current_display": display
}
xrandr_conf += self.create_xrandr_screen_for_display(display,
self.preprocess_user_object(config_def,
local_context)) + ' '
print xrandr_conf
run_xrandr_command(xrandr_conf) | gpl-2.0 | -610,918,396,914,249,200 | 40.244681 | 118 | 0.580495 | false |
medit74/DeepLearning | MyTensorflow/MyAdvancedModel/keras_lstm_prediction.py | 1 | 2850 | '''
Created on 2017. 5. 12.
@author: Byoungho Kang
http://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from keras.models import Sequential
from keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler #scikit-learn (http://scikit-learn.org/stable/)
from sklearn.metrics import mean_squared_error
'''
Training Set
'''
rawdata = pd.read_csv("../resources/international-airline-passengers.csv", usecols = [1])
print(rawdata.head())
print(rawdata.values, rawdata.values.dtype)
plt.plot(rawdata)
plt.show()
scaler = MinMaxScaler(feature_range = (0,1))
dataset = scaler.fit_transform(rawdata.values[0:-1])
print(dataset)
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset),:]
print(len(dataset), len(train), len(test))
def create_dataset(dataset, look_back = 1):
"""
- look_back: number of previous time steps
"""
X, Y = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset[i:(i+look_back), 0]
X.append(a)
Y.append(dataset[i+look_back, 0])
return np.array(X), np.array(Y)
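# A quick sketch of what create_dataset returns (illustrative numbers):
# dataset = [[10], [20], [30], [40]] with look_back = 1 yields
# X = [[10], [20]] and Y = [20, 30], i.e. each X row is the previous
# time step and Y holds the value to predict.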
look_back = 1
train_X, train_y = create_dataset(train, look_back)
test_X, test_y = create_dataset(test, look_back)
train_X = np.reshape(train_X, (train_X.shape[0], 1, train_X.shape[1]))
test_X = np.reshape(test_X, (test_X.shape[0], 1, test_X.shape[1]))
print(train_X, train_y)
print(test_X, test_y)
'''
Hyper Parameter
'''
'''
Build a Model
'''
model = Sequential()
model.add(LSTM(4, input_dim = look_back))
model.add(Dense(1))
'''
Compile a Model
'''
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
'''
Training a Model
'''
model.fit(train_X, train_y, nb_epoch = 100, batch_size = 1, verbose = 2)
'''
Predict
'''
train_pred = model.predict(train_X)
test_pred = model.predict(test_X)
train_pred = scaler.inverse_transform(train_pred)
train_y = scaler.inverse_transform([train_y])
test_pred = scaler.inverse_transform(test_pred)
test_y = scaler.inverse_transform([test_y])
'''
Accuracy
'''
train_score = math.sqrt(mean_squared_error(train_y[0], train_pred[:, 0]))
test_score = math.sqrt(mean_squared_error(test_y[0], test_pred[:, 0]))
print('Train Score: %.2f RMSE' % train_score)
print('Test Score: %.2f RMSE' % test_score)
train_pred_plot = np.empty_like(dataset)
train_pred_plot[:,:] = np.nan
train_pred_plot[look_back:len(train_pred)+look_back, :] = train_pred
test_pred_plot = np.empty_like(dataset)
test_pred_plot[:, :] = np.nan
test_pred_plot[len(train_pred)+(look_back*2)+1:len(dataset)-1, :] = test_pred
plt.plot(scaler.inverse_transform(dataset))
plt.plot(train_pred_plot)
plt.plot(test_pred_plot)
plt.show() | apache-2.0 | 1,803,307,074,710,387,700 | 26.808081 | 102 | 0.666667 | false |
vbraun/oxford-strings | lib/icalendar/tests/test_encoding.py | 1 | 3193 | # -*- coding: utf-8 -*-
from icalendar.tests import unittest
import datetime
import icalendar
import os
import pytz
class TestEncoding(unittest.TestCase):
def test_create_from_ical(self):
directory = os.path.dirname(__file__)
data = open(os.path.join(directory, 'encoding.ics'), 'rb').read()
cal = icalendar.Calendar.from_ical(data)
self.assertEqual(cal['prodid'].to_ical().decode('utf-8'),
u"-//Plönë.org//NONSGML plone.app.event//EN")
self.assertEqual(cal['X-WR-CALDESC'].to_ical().decode('utf-8'),
u"test non ascii: äöü ÄÖÜ €")
event = cal.walk('VEVENT')[0]
self.assertEqual(event['SUMMARY'].to_ical().decode('utf-8'),
u'Non-ASCII Test: ÄÖÜ äöü €')
self.assertEqual(
event['DESCRIPTION'].to_ical().decode('utf-8'),
u'icalendar should be able to handle non-ascii: €äüöÄÜÖ.'
)
self.assertEqual(event['LOCATION'].to_ical().decode('utf-8'),
u'Tribstrül')
def test_create_to_ical(self):
cal = icalendar.Calendar()
cal.add('prodid', u"-//Plönë.org//NONSGML plone.app.event//EN")
cal.add('version', u"2.0")
cal.add('x-wr-calname', u"äöü ÄÖÜ €")
cal.add('x-wr-caldesc', u"test non ascii: äöü ÄÖÜ €")
cal.add('x-wr-relcalid', u"12345")
event = icalendar.Event()
event.add(
'dtstart',
datetime.datetime(2010, 10, 10, 10, 00, 00, tzinfo=pytz.utc)
)
event.add(
'dtend',
datetime.datetime(2010, 10, 10, 12, 00, 00, tzinfo=pytz.utc)
)
event.add(
'created',
datetime.datetime(2010, 10, 10, 0, 0, 0, tzinfo=pytz.utc)
)
event.add('uid', u'123456')
event.add('summary', u'Non-ASCII Test: ÄÖÜ äöü €')
event.add(
'description',
u'icalendar should be able to de/serialize non-ascii.'
)
event.add('location', u'Tribstrül')
cal.add_component(event)
ical_lines = cal.to_ical().splitlines()
cmp = b'PRODID:-//Pl\xc3\xb6n\xc3\xab.org//NONSGML plone.app.event//EN'
self.assertTrue(cmp in ical_lines)
def test_create_event_simple(self):
event = icalendar.Event()
event.add(
"dtstart",
datetime.datetime(2010, 10, 10, 0, 0, 0, tzinfo=pytz.utc)
)
event.add("summary", u"åäö")
out = event.to_ical()
summary = b'SUMMARY:\xc3\xa5\xc3\xa4\xc3\xb6'
self.assertTrue(summary in out.splitlines())
def test_unicode_parameter_name(self):
# Test for issue #80
cal = icalendar.Calendar()
event = icalendar.Event()
event.add(u'DESCRIPTION', u'äöüßÄÖÜ')
cal.add_component(event)
c = cal.to_ical()
self.assertEqual(
c,
b'BEGIN:VCALENDAR\r\nBEGIN:VEVENT\r\nDESCRIPTION:'
+ b'\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f\xc3\x84\xc3\x96\xc3\x9c\r\n'
+ b'END:VEVENT\r\nEND:VCALENDAR\r\n'
)
| gpl-2.0 | -4,189,381,782,970,339,000 | 33.766667 | 79 | 0.549696 | false |
guoxu3/oms_backend | oms/handlers/task_status.py | 1 | 1474 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
update task status handlers
"""
import tornado.web
import tornado.escape
from db import db_task, db_machine
import json
import check
# Invoke the update script
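# Expected request body (hypothetical shape; the exact fields are whatever
# db_task.update() consumes):
#   {"data": {"task_id": "...", "status": "..."}}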
class TaskStatusHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def __init__(self, application, request, **kwargs):
super(TaskStatusHandler, self).__init__(application, request, **kwargs)
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with, content-type")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
self.token = self.get_secure_cookie("access_token")
def post(self):
ok, info = check.check_content_type(self.request)
if not ok:
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
task_status = json.loads(self.request.body)['data']
print task_status
if not db_task.update(task_status):
ok = False
info = 'update task status failed'
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
ok = True
info = "info = 'update task status successful"
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
def options(self):
pass
handlers = [
('/api/task_status', TaskStatusHandler),
]
| mit | 387,555,845,934,136,000 | 27.666667 | 89 | 0.618331 | false |
ubuntu-core/snapcraft | tests/integration/store/test_store_close.py | 1 | 2199 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import Equals, FileExists
from tests import integration
class ChannelClosingTestCase(integration.StoreTestCase):
def test_missing_permission(self):
self.addCleanup(self.logout)
self.login()
expected = (
"Make sure the logged in account has upload permissions on "
"'missing' in series '16'."
)
status = self.close("missing", "beta", expected=expected)
self.assertThat(status, Equals(2))
def test_close_channel(self):
self.addCleanup(self.logout)
self.login()
# Change to a random name and version when not on the fake store.
if not self.is_store_fake():
name = self.get_unique_name()
version = self.get_unique_version()
# If not, keep the name that is faked in our fake account.
else:
name = "basic"
version = "1.0"
self.copy_project_to_cwd("basic")
self.update_name_and_version(name, version)
self.run_snapcraft("snap")
# Register the snap
self.register(name)
# Upload the snap
snap_file_path = "{}_{}_{}.snap".format(name, version, "all")
self.assertThat(os.path.join(snap_file_path), FileExists())
self.assertThat(self.push(snap_file_path, release="edge,beta"), Equals(0))
expected = "The beta channel is now closed."
status = self.close(name, "beta", expected=expected)
self.assertThat(status, Equals(0))
| gpl-3.0 | -119,143,101,414,569,840 | 33.359375 | 82 | 0.653024 | false |
Hybrid-Cloud/badam | fs_patches_of_hybrid_cloud/cherry_for_111T/cinder_client/v2/shell.py | 1 | 65497 | # Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import os
import sys
import time
import six
from cinderclient import exceptions
from cinderclient import utils
from cinderclient.openstack.common import strutils
from cinderclient.v2 import availability_zones
def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
poll_period=5, show_progress=True):
"""Blocks while an action occurs. Periodically shows progress."""
def print_progress(progress):
if show_progress:
msg = ('\rInstance %(action)s... %(progress)s%% complete'
% dict(action=action, progress=progress))
else:
msg = '\rInstance %(action)s...' % dict(action=action)
sys.stdout.write(msg)
sys.stdout.flush()
print()
while True:
obj = poll_fn(obj_id)
status = obj.status.lower()
progress = getattr(obj, 'progress', None) or 0
if status in final_ok_states:
print_progress(100)
print("\nFinished")
break
elif status == "error":
print("\nError %(action)s instance" % {'action': action})
break
else:
print_progress(progress)
time.sleep(poll_period)
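# Typical usage of _poll_for_status (an illustrative sketch; `cs` would be an
# authenticated client):
#   _poll_for_status(cs.volumes.get, volume.id, 'creating', ['available'])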
def _find_volume_snapshot(cs, snapshot):
"""Gets a volume snapshot by name or ID."""
return utils.find_resource(cs.volume_snapshots, snapshot)
def _find_backup(cs, backup):
"""Gets a backup by name or ID."""
return utils.find_resource(cs.backups, backup)
def _find_consistencygroup(cs, consistencygroup):
"""Gets a consistencygroup by name or ID."""
return utils.find_resource(cs.consistencygroups, consistencygroup)
def _find_cgsnapshot(cs, cgsnapshot):
"""Gets a cgsnapshot by name or ID."""
return utils.find_resource(cs.cgsnapshots, cgsnapshot)
def _find_transfer(cs, transfer):
"""Gets a transfer by name or ID."""
return utils.find_resource(cs.transfers, transfer)
def _find_qos_specs(cs, qos_specs):
"""Gets a qos specs by ID."""
return utils.find_resource(cs.qos_specs, qos_specs)
def _print_volume_snapshot(snapshot):
utils.print_dict(snapshot._info)
def _print_volume_image(image):
utils.print_dict(image[1]['os-volume_upload_image'])
def _translate_keys(collection, convert):
for item in collection:
keys = item.__dict__
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _translate_volume_keys(collection):
convert = [('volumeType', 'volume_type'),
('os-vol-tenant-attr:tenant_id', 'tenant_id')]
_translate_keys(collection, convert)
def _translate_volume_snapshot_keys(collection):
convert = [('volumeId', 'volume_id')]
_translate_keys(collection, convert)
def _translate_availability_zone_keys(collection):
convert = [('zoneName', 'name'), ('zoneState', 'status')]
_translate_keys(collection, convert)
def _extract_metadata(args):
metadata = {}
for metadatum in args.metadata:
# unset doesn't require a val, so we have the if/else
if '=' in metadatum:
(key, value) = metadatum.split('=', 1)
else:
key = metadatum
value = None
metadata[key] = value
return metadata
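# For example, args.metadata = ['k1=v1', 'k2'] produces {'k1': 'v1', 'k2': None};
# the bare-key form is what the 'unset' actions rely on.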
@utils.arg('--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Shows details for all tenants. Admin only.')
@utils.arg('--all_tenants',
nargs='?',
type=int,
const=1,
help=argparse.SUPPRESS)
@utils.arg('--name',
metavar='<name>',
default=None,
help='Filters results by a name. OPTIONAL: Default=None.')
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.arg('--status',
metavar='<status>',
default=None,
help='Filters results by a status. OPTIONAL: Default=None.')
@utils.arg('--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Filters results by a metadata key and value pair. '
'OPTIONAL: Default=None.',
default=None)
@utils.arg('--marker',
metavar='<marker>',
default=None,
help='Begin returning volumes that appear later in the volume '
'list than that represented by this volume id. '
'OPTIONAL: Default=None.')
@utils.arg('--limit',
metavar='<limit>',
default=None,
help='Maximum number of volumes to return. OPTIONAL: Default=None.')
@utils.arg('--sort_key',
metavar='<sort_key>',
default=None,
help='Key to be sorted, should be (`id`, `status`, `size`, '
'`availability_zone`, `name`, `bootable`, `created_at`). '
'OPTIONAL: Default=None.')
@utils.arg('--sort_dir',
metavar='<sort_dir>',
default=None,
help='Sort direction, should be `desc` or `asc`. '
'OPTIONAL: Default=None.')
@utils.arg('--availability-zone',
metavar='<availability-zone>',
help='Filters list by availability zone.')
@utils.service_type('volumev2')
def do_list(cs, args):
"""Lists all volumes."""
# NOTE(thingee): Backwards-compatibility with v1 args
if args.display_name is not None:
args.name = args.display_name
all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
search_opts = {
'all_tenants': all_tenants,
'name': args.name,
'status': args.status,
'availability-zone': args.availability_zone,
'metadata': _extract_metadata(args) if args.metadata else None,
}
volumes = cs.volumes.list(search_opts=search_opts, marker=args.marker,
limit=args.limit, sort_key=args.sort_key,
sort_dir=args.sort_dir)
_translate_volume_keys(volumes)
# Create a list of servers to which the volume is attached
for vol in volumes:
servers = [s.get('server_id') for s in vol.attachments]
setattr(vol, 'attached_to', ','.join(map(str, servers)))
if all_tenants:
        key_list = ['ID', 'Tenant ID', 'Status', 'Name',
                    'Size', 'Volume Type', 'Bootable', 'Shareable', 'Attached to']
else:
        key_list = ['ID', 'Status', 'Name',
                    'Size', 'Volume Type', 'Bootable', 'Shareable', 'Attached to']
utils.print_list(volumes, key_list)
@utils.arg('volume',
metavar='<volume>',
help='Name or ID of volume.')
@utils.service_type('volumev2')
def do_show(cs, args):
"""Shows volume details."""
info = dict()
volume = utils.find_volume(cs, args.volume)
info.update(volume._info)
info.pop('links', None)
utils.print_dict(info)
class CheckSizeArgForCreate(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if (values or args.snapshot_id or args.source_volid
or args.source_replica) is None:
parser.error('Size is a required parameter if snapshot '
'or source volume is not specified.')
setattr(args, self.dest, values)
@utils.arg('size',
metavar='<size>',
nargs='?',
type=int,
action=CheckSizeArgForCreate,
help='Size of volume, in GBs. (Required unless '
'snapshot-id/source-volid is specified).')
@utils.arg('--consisgroup-id',
metavar='<consistencygroup-id>',
default=None,
help='ID of a consistency group where the new volume belongs to. '
'Default=None.')
@utils.arg('--snapshot-id',
metavar='<snapshot-id>',
default=None,
help='Creates volume from snapshot ID. Default=None.')
@utils.arg('--snapshot_id',
help=argparse.SUPPRESS)
@utils.arg('--source-volid',
metavar='<source-volid>',
default=None,
help='Creates volume from volume ID. Default=None.')
@utils.arg('--source_volid',
help=argparse.SUPPRESS)
@utils.arg('--source-replica',
metavar='<source-replica>',
default=None,
help='Creates volume from replicated volume ID. Default=None.')
@utils.arg('--image-id',
metavar='<image-id>',
default=None,
help='Creates volume from image ID. Default=None.')
@utils.arg('--image_id',
help=argparse.SUPPRESS)
@utils.arg('--name',
metavar='<name>',
default=None,
help='Volume name. Default=None.')
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.arg('--display_name',
help=argparse.SUPPRESS)
@utils.arg('--description',
metavar='<description>',
default=None,
help='Volume description. Default=None.')
@utils.arg('--display-description',
help=argparse.SUPPRESS)
@utils.arg('--display_description',
help=argparse.SUPPRESS)
@utils.arg('--volume-type',
metavar='<volume-type>',
default=None,
help='Volume type. Default=None.')
@utils.arg('--volume_type',
help=argparse.SUPPRESS)
@utils.arg('--availability-zone',
metavar='<availability-zone>',
default=None,
help='Availability zone for volume. Default=None.')
@utils.arg('--availability_zone',
help=argparse.SUPPRESS)
@utils.arg('--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Metadata key and value pairs. Default=None.',
default=None)
@utils.arg('--hint',
metavar='<key=value>',
dest='scheduler_hints',
action='append',
default=[],
help='Scheduler hint, like in nova.')
@utils.arg('--shareable',
metavar="<'T'|'F'>",
           help=('Allow volume to be attached more than once '
                 '(Optional, Default=False)'),
default=False)
@utils.service_type('volumev2')
def do_create(cs, args):
"""Creates a volume."""
# NOTE(thingee): Backwards-compatibility with v1 args
if args.display_name is not None:
args.name = args.display_name
if args.display_description is not None:
args.description = args.display_description
volume_metadata = None
if args.metadata is not None:
volume_metadata = _extract_metadata(args)
#NOTE(N.S.): take this piece from novaclient
hints = {}
if args.scheduler_hints:
for hint in args.scheduler_hints:
key, _sep, value = hint.partition('=')
# NOTE(vish): multiple copies of same hint will
# result in a list of values
if key in hints:
if isinstance(hints[key], six.string_types):
hints[key] = [hints[key]]
hints[key] += [value]
else:
hints[key] = value
#NOTE(N.S.): end of taken piece
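    # Illustrative result: `--hint same_host=a --hint same_host=b` ends up as
    # hints = {'same_host': ['a', 'b']}, while a hint given once stays a plain
    # string value.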
volume = cs.volumes.create(args.size,
args.consisgroup_id,
args.snapshot_id,
args.source_volid,
args.name,
args.description,
args.volume_type,
availability_zone=args.availability_zone,
imageRef=args.image_id,
metadata=volume_metadata,
scheduler_hints=hints,
source_replica=args.source_replica,
shareable=strutils.bool_from_string(args.shareable))
info = dict()
volume = cs.volumes.get(volume.id)
info.update(volume._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('volume',
metavar='<volume>', nargs='+',
help='Name or ID of volume or volumes to delete.')
@utils.service_type('volumev2')
def do_delete(cs, args):
"""Removes one or more volumes."""
failure_count = 0
for volume in args.volume:
try:
utils.find_volume(cs, volume).delete()
except Exception as e:
failure_count += 1
print("Delete for volume %s failed: %s" % (volume, e))
if failure_count == len(args.volume):
raise exceptions.CommandError("Unable to delete any of specified "
"volumes.")
@utils.arg('volume',
metavar='<volume>', nargs='+',
help='Name or ID of volume or volumes to delete.')
@utils.service_type('volumev2')
def do_force_delete(cs, args):
"""Attempts force-delete of volume, regardless of state."""
failure_count = 0
for volume in args.volume:
try:
utils.find_volume(cs, volume).force_delete()
except Exception as e:
failure_count += 1
print("Delete for volume %s failed: %s" % (volume, e))
if failure_count == len(args.volume):
raise exceptions.CommandError("Unable to force delete any of "
"specified volumes.")
@utils.arg('volume', metavar='<volume>', nargs='+',
help='Name or ID of volume to modify.')
@utils.arg('--state', metavar='<state>', default='available',
help=('The state to assign to the volume. Valid values are '
'"available," "error," "creating," "deleting," and '
'"error_deleting." '
'Default=available.'))
@utils.service_type('volumev2')
def do_reset_state(cs, args):
"""Explicitly updates the volume state."""
failure_flag = False
for volume in args.volume:
try:
utils.find_volume(cs, volume).reset_state(args.state)
except Exception as e:
failure_flag = True
msg = "Reset state for volume %s failed: %s" % (volume, e)
print(msg)
if failure_flag:
msg = "Unable to reset the state for the specified volume(s)."
raise exceptions.CommandError(msg)
@utils.arg('volume',
metavar='<volume>',
help='Name or ID of volume to rename.')
@utils.arg('name',
nargs='?',
metavar='<name>',
help='New name for volume.')
@utils.arg('--description', metavar='<description>',
help='Volume description. Default=None.',
default=None)
@utils.arg('--display-description',
help=argparse.SUPPRESS)
@utils.arg('--display_description',
help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_rename(cs, args):
"""Renames a volume."""
kwargs = {}
if args.name is not None:
kwargs['name'] = args.name
if args.display_description is not None:
kwargs['description'] = args.display_description
elif args.description is not None:
kwargs['description'] = args.description
if not any(kwargs):
msg = 'Must supply either name or description.'
raise exceptions.ClientException(code=1, message=msg)
utils.find_volume(cs, args.volume).update(**kwargs)
@utils.arg('volume',
metavar='<volume>',
help='Name or ID of volume for which to update metadata.')
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='Metadata key and value pair to set or unset. '
'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_metadata(cs, args):
"""Sets or deletes volume metadata."""
volume = utils.find_volume(cs, args.volume)
metadata = _extract_metadata(args)
if args.action == 'set':
cs.volumes.set_metadata(volume, metadata)
elif args.action == 'unset':
# NOTE(zul): Make sure py2/py3 sorting is the same
cs.volumes.delete_metadata(volume, sorted(metadata.keys(),
reverse=True))
@utils.arg('--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Shows details for all tenants. Admin only.')
@utils.arg('--all_tenants',
nargs='?',
type=int,
const=1,
help=argparse.SUPPRESS)
@utils.arg('--name',
metavar='<name>',
default=None,
help='Filters results by a name. Default=None.')
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.arg('--display_name',
help=argparse.SUPPRESS)
@utils.arg('--status',
metavar='<status>',
default=None,
help='Filters results by a status. Default=None.')
@utils.arg('--volume-id',
metavar='<volume-id>',
default=None,
help='Filters results by a volume ID. Default=None.')
@utils.arg('--volume_id',
help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_snapshot_list(cs, args):
"""Lists all snapshots."""
all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
if args.display_name is not None:
args.name = args.display_name
search_opts = {
'all_tenants': all_tenants,
'display_name': args.name,
'status': args.status,
'volume_id': args.volume_id,
}
snapshots = cs.volume_snapshots.list(search_opts=search_opts)
_translate_volume_snapshot_keys(snapshots)
utils.print_list(snapshots,
['ID', 'Volume ID', 'Status', 'Name', 'Size'])
@utils.arg('snapshot',
metavar='<snapshot>',
help='Name or ID of snapshot.')
@utils.service_type('volumev2')
def do_snapshot_show(cs, args):
"""Shows snapshot details."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
_print_volume_snapshot(snapshot)
@utils.arg('volume',
metavar='<volume>',
help='Name or ID of volume to snapshot.')
@utils.arg('--force',
metavar='<True|False>',
help='Allows or disallows snapshot of '
'a volume when the volume is attached to an instance. '
'If set to True, ignores the current status of the '
'volume when attempting to snapshot it rather '
'than forcing it to be available. '
'Default=False.',
default=False)
@utils.arg('--name',
metavar='<name>',
default=None,
help='Snapshot name. Default=None.')
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.arg('--display_name',
help=argparse.SUPPRESS)
@utils.arg('--description',
metavar='<description>',
default=None,
help='Snapshot description. Default=None.')
@utils.arg('--display-description',
help=argparse.SUPPRESS)
@utils.arg('--display_description',
help=argparse.SUPPRESS)
@utils.arg('--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Snapshot metadata key and value pairs. Default=None.',
default=None)
@utils.service_type('volumev2')
def do_snapshot_create(cs, args):
"""Creates a snapshot."""
if args.display_name is not None:
args.name = args.display_name
if args.display_description is not None:
args.description = args.display_description
snapshot_metadata = None
if args.metadata is not None:
snapshot_metadata = _extract_metadata(args)
volume = utils.find_volume(cs, args.volume)
snapshot = cs.volume_snapshots.create(volume.id,
args.force,
args.name,
args.description,
metadata=snapshot_metadata)
_print_volume_snapshot(snapshot)
@utils.arg('snapshot',
metavar='<snapshot>', nargs='+',
help='Name or ID of the snapshot(s) to delete.')
@utils.service_type('volumev2')
def do_snapshot_delete(cs, args):
"""Removes one or more snapshots."""
failure_count = 0
for snapshot in args.snapshot:
try:
_find_volume_snapshot(cs, snapshot).delete()
except Exception as e:
failure_count += 1
print("Delete for snapshot %s failed: %s" % (snapshot, e))
if failure_count == len(args.snapshot):
raise exceptions.CommandError("Unable to delete any of the specified "
"snapshots.")
@utils.arg('snapshot', metavar='<snapshot>',
help='Name or ID of snapshot.')
@utils.arg('name', nargs='?', metavar='<name>',
help='New name for snapshot.')
@utils.arg('--description', metavar='<description>',
help='Snapshot description. Default=None.',
default=None)
@utils.arg('--display-description',
help=argparse.SUPPRESS)
@utils.arg('--display_description',
help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_snapshot_rename(cs, args):
"""Renames a snapshot."""
kwargs = {}
if args.name is not None:
kwargs['name'] = args.name
if args.description is not None:
kwargs['description'] = args.description
elif args.display_description is not None:
kwargs['description'] = args.display_description
if not any(kwargs):
msg = 'Must supply either name or description.'
raise exceptions.ClientException(code=1, message=msg)
_find_volume_snapshot(cs, args.snapshot).update(**kwargs)
@utils.arg('snapshot', metavar='<snapshot>', nargs='+',
help='Name or ID of snapshot to modify.')
@utils.arg('--state', metavar='<state>',
default='available',
help=('The state to assign to the snapshot. Valid values are '
'"available," "error," "creating," "deleting," and '
'"error_deleting." '
'Default is "available."'))
@utils.service_type('volumev2')
def do_snapshot_reset_state(cs, args):
"""Explicitly updates the snapshot state."""
failure_count = 0
single = (len(args.snapshot) == 1)
for snapshot in args.snapshot:
try:
_find_volume_snapshot(cs, snapshot).reset_state(args.state)
except Exception as e:
failure_count += 1
msg = "Reset state for snapshot %s failed: %s" % (snapshot, e)
if not single:
print(msg)
if failure_count == len(args.snapshot):
if not single:
msg = ("Unable to reset the state for any of the specified "
"snapshots.")
raise exceptions.CommandError(msg)
def _print_volume_type_list(vtypes):
utils.print_list(vtypes, ['ID', 'Name'])
@utils.service_type('volumev2')
def do_type_list(cs, args):
"""Lists available 'volume types'."""
vtypes = cs.volume_types.list()
_print_volume_type_list(vtypes)
@utils.service_type('volumev2')
def do_extra_specs_list(cs, args):
"""Lists current volume types and extra specs."""
vtypes = cs.volume_types.list()
utils.print_list(vtypes, ['ID', 'Name', 'extra_specs'])
@utils.arg('name',
metavar='<name>',
help="Name of new volume type.")
@utils.service_type('volumev2')
def do_type_create(cs, args):
"""Creates a volume type."""
vtype = cs.volume_types.create(args.name)
_print_volume_type_list([vtype])
@utils.arg('id',
metavar='<id>',
help="ID of volume type to delete.")
@utils.service_type('volumev2')
def do_type_delete(cs, args):
"""Deletes a volume type."""
cs.volume_types.delete(args.id)
@utils.arg('vtype',
metavar='<vtype>',
help="Name or ID of volume type.")
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='The extra specs key and value pair to set or unset. '
'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_type_key(cs, args):
"""Sets or unsets extra_spec for a volume type."""
vtype = _find_volume_type(cs, args.vtype)
keypair = _extract_metadata(args)
if args.action == 'set':
vtype.set_keys(keypair)
elif args.action == 'unset':
vtype.unset_keys(list(keypair))
@utils.service_type('volumev2')
def do_endpoints(cs, args):
"""Discovers endpoints registered by authentication service."""
catalog = cs.client.service_catalog.catalog
for e in catalog['serviceCatalog']:
utils.print_dict(e['endpoints'][0], e['name'])
@utils.service_type('volumev2')
def do_credentials(cs, args):
"""Shows user credentials returned from auth."""
catalog = cs.client.service_catalog.catalog
utils.print_dict(catalog['user'], "User Credentials")
utils.print_dict(catalog['token'], "Token")
_quota_resources = ['volumes', 'snapshots', 'gigabytes']
_quota_infos = ['Type', 'In_use', 'Reserved', 'Limit']
def _quota_show(quotas):
quota_dict = {}
for resource in quotas._info:
good_name = False
for name in _quota_resources:
if resource.startswith(name):
good_name = True
if not good_name:
continue
quota_dict[resource] = getattr(quotas, resource, None)
utils.print_dict(quota_dict)
def _quota_usage_show(quotas):
quota_list = []
for resource in quotas._info.keys():
good_name = False
for name in _quota_resources:
if resource.startswith(name):
good_name = True
if not good_name:
continue
quota_info = getattr(quotas, resource, None)
quota_info['Type'] = resource
quota_info = dict((k.capitalize(), v) for k, v in quota_info.items())
quota_list.append(quota_info)
utils.print_list(quota_list, _quota_infos)
def _quota_update(manager, identifier, args):
updates = {}
for resource in _quota_resources:
val = getattr(args, resource, None)
if val is not None:
if args.volume_type:
resource = resource + '_%s' % args.volume_type
updates[resource] = val
if updates:
_quota_show(manager.update(identifier, **updates))
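    # Illustrative: `--gigabytes 500 --volume-type ssd` sends
    # updates = {'gigabytes_ssd': 500} to the quotas API.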
@utils.arg('tenant',
metavar='<tenant_id>',
help='ID of tenant for which to list quotas.')
@utils.service_type('volumev2')
def do_quota_show(cs, args):
"""Lists quotas for a tenant."""
_quota_show(cs.quotas.get(args.tenant))
@utils.arg('tenant', metavar='<tenant_id>',
help='ID of tenant for which to list quota usage.')
@utils.service_type('volumev2')
def do_quota_usage(cs, args):
"""Lists quota usage for a tenant."""
_quota_usage_show(cs.quotas.get(args.tenant, usage=True))
@utils.arg('tenant',
metavar='<tenant_id>',
help='ID of tenant for which to list quota defaults.')
@utils.service_type('volumev2')
def do_quota_defaults(cs, args):
"""Lists default quotas for a tenant."""
_quota_show(cs.quotas.defaults(args.tenant))
@utils.arg('tenant',
metavar='<tenant_id>',
help='ID of tenant for which to set quotas.')
@utils.arg('--volumes',
metavar='<volumes>',
type=int, default=None,
help='The new "volumes" quota value. Default=None.')
@utils.arg('--snapshots',
metavar='<snapshots>',
type=int, default=None,
help='The new "snapshots" quota value. Default=None.')
@utils.arg('--gigabytes',
metavar='<gigabytes>',
type=int, default=None,
help='The new "gigabytes" quota value. Default=None.')
@utils.arg('--volume-type',
metavar='<volume_type_name>',
default=None,
help='Volume type. Default=None.')
@utils.service_type('volumev2')
def do_quota_update(cs, args):
"""Updates quotas for a tenant."""
_quota_update(cs.quotas, args.tenant, args)
@utils.arg('tenant', metavar='<tenant_id>',
help='UUID of tenant to delete the quotas for.')
@utils.service_type('volume')
def do_quota_delete(cs, args):
"""Delete the quotas for a tenant."""
cs.quotas.delete(args.tenant)
@utils.arg('class_name',
metavar='<class>',
help='Name of quota class for which to list quotas.')
@utils.service_type('volumev2')
def do_quota_class_show(cs, args):
"""Lists quotas for a quota class."""
_quota_show(cs.quota_classes.get(args.class_name))
@utils.arg('class-name',
metavar='<class-name>',
help='Name of quota class for which to set quotas.')
@utils.arg('--volumes',
metavar='<volumes>',
type=int, default=None,
help='The new "volumes" quota value. Default=None.')
@utils.arg('--snapshots',
metavar='<snapshots>',
type=int, default=None,
help='The new "snapshots" quota value. Default=None.')
@utils.arg('--gigabytes',
metavar='<gigabytes>',
type=int, default=None,
help='The new "gigabytes" quota value. Default=None.')
@utils.arg('--volume-type',
metavar='<volume_type_name>',
default=None,
help='Volume type. Default=None.')
@utils.service_type('volumev2')
def do_quota_class_update(cs, args):
"""Updates quotas for a quota class."""
_quota_update(cs.quota_classes, args.class_name, args)
@utils.service_type('volumev2')
def do_absolute_limits(cs, args):
"""Lists absolute limits for a user."""
limits = cs.limits.get().absolute
columns = ['Name', 'Value']
utils.print_list(limits, columns)
@utils.service_type('volumev2')
def do_rate_limits(cs, args):
"""Lists rate limits for a user."""
limits = cs.limits.get().rate
columns = ['Verb', 'URI', 'Value', 'Remain', 'Unit', 'Next_Available']
utils.print_list(limits, columns)
def _find_volume_type(cs, vtype):
"""Gets a volume type by name or ID."""
return utils.find_resource(cs.volume_types, vtype)
@utils.arg('volume',
metavar='<volume>',
help='Name or ID of volume to snapshot.')
@utils.arg('--force',
metavar='<True|False>',
help='Enables or disables upload of '
'a volume that is attached to an instance. '
'Default=False.',
default=False)
@utils.arg('--container-format',
metavar='<container-format>',
help='Container format type. '
'Default is bare.',
default='bare')
@utils.arg('--container_format',
help=argparse.SUPPRESS)
@utils.arg('--disk-format',
metavar='<disk-format>',
help='Disk format type. '
'Default is raw.',
default='raw')
@utils.arg('--disk_format',
help=argparse.SUPPRESS)
@utils.arg('image_name',
metavar='<image-name>',
help='The new image name.')
@utils.arg('--image_name',
help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_upload_to_image(cs, args):
"""Uploads volume to Image Service as an image."""
volume = utils.find_volume(cs, args.volume)
_print_volume_image(volume.upload_to_image(args.force,
args.image_name,
args.container_format,
args.disk_format))
@utils.arg('volume', metavar='<volume>', help='ID of volume to migrate.')
@utils.arg('host', metavar='<host>', help='Destination host.')
@utils.arg('--force-host-copy', metavar='<True|False>',
choices=['True', 'False'], required=False,
help='Enables or disables generic host-based '
'force-migration, which bypasses driver '
'optimizations. Default=False.',
default=False)
@utils.service_type('volumev2')
def do_migrate(cs, args):
"""Migrates volume to a new host."""
volume = utils.find_volume(cs, args.volume)
volume.migrate_volume(args.host, args.force_host_copy)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of volume for which to modify type.')
@utils.arg('new_type', metavar='<volume-type>', help='New volume type.')
@utils.arg('--migration-policy', metavar='<never|on-demand>', required=False,
choices=['never', 'on-demand'], default='never',
help='Migration policy during retype of volume.')
@utils.service_type('volumev2')
def do_retype(cs, args):
"""Changes the volume type for a volume."""
volume = utils.find_volume(cs, args.volume)
volume.retype(args.new_type, args.migration_policy)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of volume to backup.')
@utils.arg('--container', metavar='<container>',
help='Backup container name. Default=None.',
default=None)
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.arg('--name', metavar='<name>',
help='Backup name. Default=None.',
default=None)
@utils.arg('--display-description',
help=argparse.SUPPRESS)
@utils.arg('--description',
metavar='<description>',
default=None,
help='Backup description. Default=None.')
@utils.arg('--force',
metavar='<True|False>',
default=False,
           help='Allows or disallows backup of an in-use volume. Default=False.')
@utils.service_type('volumev2')
def do_backup_create(cs, args):
"""Creates a volume backup."""
if args.display_name is not None:
args.name = args.display_name
if args.display_description is not None:
args.description = args.display_description
volume = utils.find_volume(cs, args.volume)
backup = cs.backups.create(volume.id,
args.container,
args.name,
args.description,
args.force)
info = {"volume_id": volume.id}
info.update(backup._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.arg('backup', metavar='<backup>', help='Name or ID of backup.')
@utils.service_type('volumev2')
def do_backup_show(cs, args):
"""Shows backup details."""
backup = _find_backup(cs, args.backup)
info = dict()
info.update(backup._info)
info.pop('links', None)
utils.print_dict(info)
@utils.service_type('volumev2')
def do_backup_list(cs, args):
"""Lists all backups."""
backups = cs.backups.list()
columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'Object Count',
'Container']
utils.print_list(backups, columns)
@utils.arg('backup', metavar='<backup>',
help='Name or ID of backup to delete.')
@utils.service_type('volumev2')
def do_backup_delete(cs, args):
"""Removes a backup."""
backup = _find_backup(cs, args.backup)
backup.delete()
@utils.arg('backup', metavar='<backup>',
help='ID of backup to restore.')
@utils.arg('--volume-id', metavar='<volume>',
help=argparse.SUPPRESS,
default=None)
@utils.arg('--volume', metavar='<volume>',
help='Name or ID of volume to which to restore. '
'Default=None.',
default=None)
@utils.arg('--availability-zone',
metavar='<availability-zone>',
           help='Availability zone to use for the backup restore. Default=None.')
@utils.arg('--description',
           metavar='<description>',
           help='Description for the backup restore. Default=None.')
@utils.service_type('volumev2')
def do_backup_restore(cs, args):
"""Restores a backup."""
vol = args.volume or args.volume_id
if vol:
volume_id = utils.find_volume(cs, vol).id
else:
volume_id = None
cs.restores.restore(args.backup,
volume_id,
args.availability_zone,
args.description)
@utils.arg('backup', metavar='<backup>',
help='ID of the backup to export.')
@utils.service_type('volumev2')
def do_backup_export(cs, args):
"""Export backup metadata record."""
info = cs.backups.export_record(args.backup)
utils.print_dict(info)
@utils.arg('backup_service', metavar='<backup_service>',
help='Backup service to use for importing the backup.')
@utils.arg('backup_url', metavar='<backup_url>',
help='Backup URL for importing the backup metadata.')
@utils.service_type('volumev2')
def do_backup_import(cs, args):
"""Import backup metadata record."""
info = cs.backups.import_record(args.backup_service, args.backup_url)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of volume to transfer.')
@utils.arg('--name',
metavar='<name>',
default=None,
help='Transfer name. Default=None.')
@utils.arg('--display-name',
help=argparse.SUPPRESS)
@utils.service_type('volumev2')
def do_transfer_create(cs, args):
"""Creates a volume transfer."""
if args.display_name is not None:
args.name = args.display_name
volume = utils.find_volume(cs, args.volume)
transfer = cs.transfers.create(volume.id,
args.name)
info = dict()
info.update(transfer._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('transfer', metavar='<transfer>',
help='Name or ID of transfer to delete.')
@utils.service_type('volumev2')
def do_transfer_delete(cs, args):
"""Undoes a transfer."""
transfer = _find_transfer(cs, args.transfer)
transfer.delete()
@utils.arg('transfer', metavar='<transfer>',
help='ID of transfer to accept.')
@utils.arg('auth_key', metavar='<auth_key>',
help='Authentication key of transfer to accept.')
@utils.service_type('volumev2')
def do_transfer_accept(cs, args):
"""Accepts a volume transfer."""
transfer = cs.transfers.accept(args.transfer, args.auth_key)
info = dict()
info.update(transfer._info)
info.pop('links', None)
utils.print_dict(info)
@utils.service_type('volumev2')
def do_transfer_list(cs, args):
"""Lists all transfers."""
transfers = cs.transfers.list()
columns = ['ID', 'Volume ID', 'Name']
utils.print_list(transfers, columns)
@utils.arg('transfer', metavar='<transfer>',
help='Name or ID of transfer to accept.')
@utils.service_type('volumev2')
def do_transfer_show(cs, args):
"""Shows transfer details."""
transfer = _find_transfer(cs, args.transfer)
info = dict()
info.update(transfer._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of volume to extend.')
@utils.arg('new_size',
metavar='<new_size>',
type=int,
help='New size of volume, in GBs.')
@utils.service_type('volumev2')
def do_extend(cs, args):
"""Attempts to extend size of an existing volume."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.extend(volume, args.new_size)
@utils.arg('--host', metavar='<hostname>', default=None,
help='Host name. Default=None.')
@utils.arg('--binary', metavar='<binary>', default=None,
help='Service binary. Default=None.')
@utils.service_type('volumev2')
def do_service_list(cs, args):
"""Lists all services. Filter by host and service binary."""
result = cs.services.list(host=args.host, binary=args.binary)
columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"]
# NOTE(jay-lau-513): we check if the response has disabled_reason
# so as not to add the column when the extended ext is not enabled.
if result and hasattr(result[0], 'disabled_reason'):
columns.append("Disabled Reason")
utils.print_list(result, columns)
@utils.arg('host', metavar='<hostname>', help='Host name.')
@utils.arg('binary', metavar='<binary>', help='Service binary.')
@utils.service_type('volumev2')
def do_service_enable(cs, args):
"""Enables the service."""
result = cs.services.enable(args.host, args.binary)
columns = ["Host", "Binary", "Status"]
utils.print_list([result], columns)
@utils.arg('host', metavar='<hostname>', help='Host name.')
@utils.arg('binary', metavar='<binary>', help='Service binary.')
@utils.arg('--reason', metavar='<reason>',
help='Reason for disabling service.')
@utils.service_type('volumev2')
def do_service_disable(cs, args):
"""Disables the service."""
columns = ["Host", "Binary", "Status"]
if args.reason:
columns.append('Disabled Reason')
result = cs.services.disable_log_reason(args.host, args.binary,
args.reason)
else:
result = cs.services.disable(args.host, args.binary)
utils.print_list([result], columns)
def _treeizeAvailabilityZone(zone):
"""Builds a tree view for availability zones."""
AvailabilityZone = availability_zones.AvailabilityZone
az = AvailabilityZone(zone.manager,
copy.deepcopy(zone._info), zone._loaded)
result = []
# Zone tree view item
az.zoneName = zone.zoneName
az.zoneState = ('available'
if zone.zoneState['available'] else 'not available')
az._info['zoneName'] = az.zoneName
az._info['zoneState'] = az.zoneState
result.append(az)
if getattr(zone, "hosts", None) and zone.hosts is not None:
for (host, services) in zone.hosts.items():
# Host tree view item
az = AvailabilityZone(zone.manager,
copy.deepcopy(zone._info), zone._loaded)
az.zoneName = '|- %s' % host
az.zoneState = ''
az._info['zoneName'] = az.zoneName
az._info['zoneState'] = az.zoneState
result.append(az)
for (svc, state) in services.items():
# Service tree view item
az = AvailabilityZone(zone.manager,
copy.deepcopy(zone._info), zone._loaded)
az.zoneName = '| |- %s' % svc
az.zoneState = '%s %s %s' % (
'enabled' if state['active'] else 'disabled',
':-)' if state['available'] else 'XXX',
state['updated_at'])
az._info['zoneName'] = az.zoneName
az._info['zoneState'] = az.zoneState
result.append(az)
return result
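# Rows rendered from _treeizeAvailabilityZone look roughly like this
# (illustrative values):
#   nova                  available
#   |- host1
#   | |- cinder-volume    enabled :-) 2015-01-01T00:00:00.000000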
@utils.service_type('volumev2')
def do_availability_zone_list(cs, _args):
"""Lists all availability zones."""
try:
availability_zones = cs.availability_zones.list()
except exceptions.Forbidden as e: # policy doesn't allow probably
try:
availability_zones = cs.availability_zones.list(detailed=False)
except Exception:
raise e
result = []
for zone in availability_zones:
result += _treeizeAvailabilityZone(zone)
_translate_availability_zone_keys(result)
utils.print_list(result, ['Name', 'Status'])
def _print_volume_encryption_type_list(encryption_types):
"""
Lists volume encryption types.
:param encryption_types: a list of :class: VolumeEncryptionType instances
"""
utils.print_list(encryption_types, ['Volume Type ID', 'Provider',
'Cipher', 'Key Size',
'Control Location'])
@utils.service_type('volumev2')
def do_encryption_type_list(cs, args):
"""Shows encryption type details for volume types. Admin only."""
result = cs.volume_encryption_types.list()
utils.print_list(result, ['Volume Type ID', 'Provider', 'Cipher',
'Key Size', 'Control Location'])
@utils.arg('volume_type',
metavar='<volume_type>',
type=str,
help="Name or ID of volume type.")
@utils.service_type('volumev2')
def do_encryption_type_show(cs, args):
"""Shows encryption type details for a volume type. Admin only."""
volume_type = _find_volume_type(cs, args.volume_type)
result = cs.volume_encryption_types.get(volume_type)
# Display result or an empty table if no result
if hasattr(result, 'volume_type_id'):
_print_volume_encryption_type_list([result])
else:
_print_volume_encryption_type_list([])
@utils.arg('volume_type',
metavar='<volume_type>',
type=str,
help="Name or ID of volume type.")
@utils.arg('provider',
metavar='<provider>',
type=str,
help='The class that provides encryption support. '
'For example, LuksEncryptor.')
@utils.arg('--cipher',
metavar='<cipher>',
type=str,
required=False,
default=None,
help='The encryption algorithm or mode. '
'For example, aes-xts-plain64. Default=None.')
@utils.arg('--key_size',
metavar='<key_size>',
type=int,
required=False,
default=None,
help='Size of encryption key, in bits. '
'For example, 128 or 256. Default=None.')
@utils.arg('--control_location',
metavar='<control_location>',
choices=['front-end', 'back-end'],
type=str,
required=False,
default='front-end',
help='Notional service where encryption is performed. '
'Valid values are "front-end" or "back-end." '
'For example, front-end=Nova. Default is "front-end."')
@utils.service_type('volumev2')
def do_encryption_type_create(cs, args):
"""Creates encryption type for a volume type. Admin only."""
volume_type = _find_volume_type(cs, args.volume_type)
body = {}
body['provider'] = args.provider
body['cipher'] = args.cipher
body['key_size'] = args.key_size
body['control_location'] = args.control_location
result = cs.volume_encryption_types.create(volume_type, body)
_print_volume_encryption_type_list([result])
@utils.arg('volume_type',
metavar='<volume_type>',
type=str,
help="Name or ID of volume type.")
@utils.service_type('volumev2')
def do_encryption_type_delete(cs, args):
"""Deletes encryption type for a volume type. Admin only."""
volume_type = _find_volume_type(cs, args.volume_type)
cs.volume_encryption_types.delete(volume_type)
def _print_qos_specs(qos_specs):
utils.print_dict(qos_specs._info)
def _print_qos_specs_list(q_specs):
utils.print_list(q_specs, ['ID', 'Name', 'Consumer', 'specs'])
def _print_qos_specs_and_associations_list(q_specs):
utils.print_list(q_specs, ['ID', 'Name', 'Consumer', 'specs'])
def _print_associations_list(associations):
utils.print_list(associations, ['Association_Type', 'Name', 'ID'])
@utils.arg('name',
metavar='<name>',
help="Name of new QoS specifications.")
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help="QoS specifications.")
@utils.service_type('volumev2')
def do_qos_create(cs, args):
"""Creates a qos specs."""
keypair = None
if args.metadata is not None:
keypair = _extract_metadata(args)
qos_specs = cs.qos_specs.create(args.name, keypair)
_print_qos_specs(qos_specs)
@utils.service_type('volumev2')
def do_qos_list(cs, args):
"""Lists qos specs."""
qos_specs = cs.qos_specs.list()
_print_qos_specs_list(qos_specs)
@utils.arg('qos_specs', metavar='<qos_specs>',
help="ID of QoS specifications to show.")
@utils.service_type('volumev2')
def do_qos_show(cs, args):
"""Shows qos specs details."""
qos_specs = _find_qos_specs(cs, args.qos_specs)
_print_qos_specs(qos_specs)
@utils.arg('qos_specs', metavar='<qos_specs>',
help="ID of QoS specifications to delete.")
@utils.arg('--force',
metavar='<True|False>',
default=False,
help='Enables or disables deletion of in-use '
'QoS specifications. Default=False.')
@utils.service_type('volumev2')
def do_qos_delete(cs, args):
"""Deletes a specified qos specs."""
force = strutils.bool_from_string(args.force)
qos_specs = _find_qos_specs(cs, args.qos_specs)
cs.qos_specs.delete(qos_specs, force)
@utils.arg('qos_specs', metavar='<qos_specs>',
help='ID of QoS specifications.')
@utils.arg('vol_type_id', metavar='<volume_type_id>',
help='ID of volume type with which to associate '
'QoS specifications.')
@utils.service_type('volumev2')
def do_qos_associate(cs, args):
"""Associates qos specs with specified volume type."""
cs.qos_specs.associate(args.qos_specs, args.vol_type_id)
@utils.arg('qos_specs', metavar='<qos_specs>',
help='ID of QoS specifications.')
@utils.arg('vol_type_id', metavar='<volume_type_id>',
help='ID of volume type with which to associate '
'QoS specifications.')
@utils.service_type('volumev2')
def do_qos_disassociate(cs, args):
"""Disassociates qos specs from specified volume type."""
cs.qos_specs.disassociate(args.qos_specs, args.vol_type_id)
@utils.arg('qos_specs', metavar='<qos_specs>',
help='ID of QoS specifications on which to operate.')
@utils.service_type('volumev2')
def do_qos_disassociate_all(cs, args):
"""Disassociates qos specs from all its associations."""
cs.qos_specs.disassociate_all(args.qos_specs)
@utils.arg('qos_specs', metavar='<qos_specs>',
help='ID of QoS specifications.')
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata', metavar='<key=value>',
nargs='+',
default=[],
help='Metadata key and value pair to set or unset. '
'For unset, specify only the key.')
def do_qos_key(cs, args):
"""Sets or unsets specifications for a qos spec."""
keypair = _extract_metadata(args)
if args.action == 'set':
cs.qos_specs.set_keys(args.qos_specs, keypair)
elif args.action == 'unset':
cs.qos_specs.unset_keys(args.qos_specs, list(keypair))
@utils.arg('qos_specs', metavar='<qos_specs>',
help='ID of QoS specifications.')
@utils.service_type('volumev2')
def do_qos_get_association(cs, args):
"""Lists all associations for specified qos specs."""
associations = cs.qos_specs.get_associations(args.qos_specs)
_print_associations_list(associations)
@utils.arg('snapshot',
metavar='<snapshot>',
help='ID of snapshot for which to update metadata.')
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="The action. Valid values are 'set' or 'unset.'")
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='Metadata key and value pair to set or unset. '
'For unset, specify only the key.')
@utils.service_type('volumev2')
def do_snapshot_metadata(cs, args):
"""Sets or deletes snapshot metadata."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
metadata = _extract_metadata(args)
if args.action == 'set':
metadata = snapshot.set_metadata(metadata)
utils.print_dict(metadata._info)
elif args.action == 'unset':
snapshot.delete_metadata(list(metadata.keys()))
@utils.arg('snapshot', metavar='<snapshot>',
help='ID of snapshot.')
@utils.service_type('volumev2')
def do_snapshot_metadata_show(cs, args):
"""Shows snapshot metadata."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
utils.print_dict(snapshot._info['metadata'], 'Metadata-property')
@utils.arg('volume', metavar='<volume>',
help='ID of volume.')
@utils.service_type('volumev2')
def do_metadata_show(cs, args):
"""Shows volume metadata."""
volume = utils.find_volume(cs, args.volume)
utils.print_dict(volume._info['metadata'], 'Metadata-property')
@utils.arg('volume',
metavar='<volume>',
help='ID of volume for which to update metadata.')
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='Metadata key and value pair or pairs to update.')
@utils.service_type('volumev2')
def do_metadata_update_all(cs, args):
"""Updates volume metadata."""
volume = utils.find_volume(cs, args.volume)
metadata = _extract_metadata(args)
metadata = volume.update_all_metadata(metadata)
utils.print_dict(metadata)
@utils.arg('snapshot',
metavar='<snapshot>',
help='ID of snapshot for which to update metadata.')
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='Metadata key and value pair to update.')
@utils.service_type('volumev2')
def do_snapshot_metadata_update_all(cs, args):
"""Updates snapshot metadata."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
metadata = _extract_metadata(args)
metadata = snapshot.update_all_metadata(metadata)
utils.print_dict(metadata)
@utils.arg('volume', metavar='<volume>', help='ID of volume to update.')
@utils.arg('read_only',
metavar='<True|true|False|false>',
choices=['True', 'true', 'False', 'false'],
help='Enables or disables update of volume to '
'read-only access mode.')
@utils.service_type('volumev2')
def do_readonly_mode_update(cs, args):
"""Updates volume read-only access-mode flag."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.update_readonly_flag(volume,
strutils.bool_from_string(args.read_only))
@utils.arg('volume', metavar='<volume>', help='ID of the volume to update.')
@utils.arg('bootable',
metavar='<True|true|False|false>',
choices=['True', 'true', 'False', 'false'],
help='Flag to indicate whether volume is bootable.')
@utils.service_type('volumev2')
def do_set_bootable(cs, args):
"""Update bootable status of a volume."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.set_bootable(volume,
strutils.bool_from_string(args.bootable))
@utils.arg('host',
metavar='<host>',
help='Cinder host on which the existing volume resides; '
'takes the form: host@backend-name#pool')
@utils.arg('identifier',
metavar='<identifier>',
help='Name or other Identifier for existing volume')
@utils.arg('--id-type',
metavar='<id-type>',
default='source-name',
help='Type of backend device identifier provided, '
'typically source-name or source-id (Default=source-name)')
@utils.arg('--name',
metavar='<name>',
help='Volume name (Default=None)')
@utils.arg('--description',
metavar='<description>',
help='Volume description (Default=None)')
@utils.arg('--volume-type',
metavar='<volume-type>',
help='Volume type (Default=None)')
@utils.arg('--availability-zone',
metavar='<availability-zone>',
help='Availability zone for volume (Default=None)')
@utils.arg('--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Metadata key=value pairs (Default=None)')
@utils.arg('--bootable',
action='store_true',
help='Specifies that the newly created volume should be'
' marked as bootable')
@utils.service_type('volumev2')
def do_manage(cs, args):
"""Manage an existing volume."""
volume_metadata = None
if args.metadata is not None:
volume_metadata = _extract_metadata(args)
# Build a dictionary of key/value pairs to pass to the API.
ref_dict = {}
ref_dict[args.id_type] = args.identifier
# The recommended way to specify an existing volume is by ID or name, and
# have the Cinder driver look for 'source-name' or 'source-id' elements in
# the ref structure. To make things easier for the user, we have special
# --source-name and --source-id CLI options that add the appropriate
# element to the ref structure.
#
# Note how argparse converts hyphens to underscores. We use hyphens in the
# dictionary so that it is consistent with what the user specified on the
# CLI.
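    # Illustrative result: ``--id-type source-name`` with identifier
    # 'my-lun' yields ref_dict == {'source-name': 'my-lun'}.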
if hasattr(args, 'source_name') and \
args.source_name is not None:
ref_dict['source-name'] = args.source_name
if hasattr(args, 'source_id') and \
args.source_id is not None:
ref_dict['source-id'] = args.source_id
volume = cs.volumes.manage(host=args.host,
ref=ref_dict,
name=args.name,
description=args.description,
volume_type=args.volume_type,
availability_zone=args.availability_zone,
metadata=volume_metadata,
bootable=args.bootable)
info = {}
volume = cs.volumes.get(volume.id)
info.update(volume._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of the volume to unmanage.')
@utils.service_type('volumev2')
def do_unmanage(cs, args):
"""Stop managing a volume."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.unmanage(volume.id)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of the volume to promote.')
@utils.service_type('volumev2')
def do_replication_promote(cs, args):
"""Promote a secondary volume to primary for a relationship."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.promote(volume.id)
@utils.arg('volume', metavar='<volume>',
help='Name or ID of the volume to reenable replication.')
@utils.service_type('volumev2')
def do_replication_reenable(cs, args):
"""Sync the secondary volume with primary for a relationship."""
volume = utils.find_volume(cs, args.volume)
cs.volumes.reenable(volume.id)
@utils.arg('--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Shows details for all tenants. Admin only.')
@utils.service_type('volumev2')
def do_consisgroup_list(cs, args):
    """Lists all consistencygroups."""
    # Pass --all-tenants through; it was parsed above but never used.
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {'all_tenants': all_tenants}
    consistencygroups = cs.consistencygroups.list(search_opts=search_opts)
    columns = ['ID', 'Status', 'Name']
    utils.print_list(consistencygroups, columns)
@utils.arg('consistencygroup',
metavar='<consistencygroup>',
help='Name or ID of a consistency group.')
@utils.service_type('volumev2')
def do_consisgroup_show(cs, args):
"""Shows details of a consistency group."""
info = dict()
consistencygroup = _find_consistencygroup(cs, args.consistencygroup)
info.update(consistencygroup._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('volumetypes',
metavar='<volume-types>',
help='Volume types.')
@utils.arg('--name',
metavar='<name>',
help='Name of a consistency group.')
@utils.arg('--description',
metavar='<description>',
default=None,
help='Description of a consistency group. Default=None.')
@utils.arg('--availability-zone',
metavar='<availability-zone>',
default=None,
help='Availability zone for volume. Default=None.')
@utils.service_type('volumev2')
def do_consisgroup_create(cs, args):
"""Creates a consistency group."""
consistencygroup = cs.consistencygroups.create(
args.volumetypes,
args.name,
args.description,
availability_zone=args.availability_zone)
info = dict()
consistencygroup = cs.consistencygroups.get(consistencygroup.id)
info.update(consistencygroup._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('consistencygroup',
metavar='<consistencygroup>', nargs='+',
help='Name or ID of one or more consistency groups '
'to be deleted.')
@utils.arg('--force',
action='store_true',
help='Allows or disallows consistency groups '
'to be deleted. If the consistency group is empty, '
'it can be deleted without the force flag. '
'If the consistency group is not empty, the force '
'flag is required for it to be deleted.',
default=False)
@utils.service_type('volumev2')
def do_consisgroup_delete(cs, args):
"""Removes one or more consistency groups."""
failure_count = 0
for consistencygroup in args.consistencygroup:
try:
_find_consistencygroup(cs, consistencygroup).delete(args.force)
except Exception as e:
failure_count += 1
print("Delete for consistency group %s failed: %s" %
(consistencygroup, e))
if failure_count == len(args.consistencygroup):
raise exceptions.CommandError("Unable to delete any of specified "
"consistency groups.")
@utils.arg('--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Shows details for all tenants. Admin only.')
@utils.arg('--status',
metavar='<status>',
default=None,
help='Filters results by a status. Default=None.')
@utils.arg('--consistencygroup-id',
metavar='<consistencygroup_id>',
default=None,
help='Filters results by a consistency group ID. Default=None.')
@utils.service_type('volumev2')
def do_cgsnapshot_list(cs, args):
"""Lists all cgsnapshots."""
all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
search_opts = {
'all_tenants': all_tenants,
'status': args.status,
'consistencygroup_id': args.consistencygroup_id,
}
cgsnapshots = cs.cgsnapshots.list(search_opts=search_opts)
columns = ['ID', 'Status', 'Name']
utils.print_list(cgsnapshots, columns)
@utils.arg('cgsnapshot',
metavar='<cgsnapshot>',
help='Name or ID of cgsnapshot.')
@utils.service_type('volumev2')
def do_cgsnapshot_show(cs, args):
"""Shows cgsnapshot details."""
info = dict()
cgsnapshot = _find_cgsnapshot(cs, args.cgsnapshot)
info.update(cgsnapshot._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('consistencygroup',
metavar='<consistencygroup>',
help='Name or ID of a consistency group.')
@utils.arg('--name',
metavar='<name>',
default=None,
help='Cgsnapshot name. Default=None.')
@utils.arg('--description',
metavar='<description>',
default=None,
help='Cgsnapshot description. Default=None.')
@utils.service_type('volumev2')
def do_cgsnapshot_create(cs, args):
"""Creates a cgsnapshot."""
consistencygroup = _find_consistencygroup(cs, args.consistencygroup)
cgsnapshot = cs.cgsnapshots.create(
consistencygroup.id,
args.name,
args.description)
info = dict()
cgsnapshot = cs.cgsnapshots.get(cgsnapshot.id)
info.update(cgsnapshot._info)
info.pop('links', None)
utils.print_dict(info)
@utils.arg('cgsnapshot',
metavar='<cgsnapshot>', nargs='+',
help='Name or ID of one or more cgsnapshots to be deleted.')
@utils.service_type('volumev2')
def do_cgsnapshot_delete(cs, args):
"""Removes one or more cgsnapshots."""
failure_count = 0
for cgsnapshot in args.cgsnapshot:
try:
_find_cgsnapshot(cs, cgsnapshot).delete()
except Exception as e:
failure_count += 1
print("Delete for cgsnapshot %s failed: %s" % (cgsnapshot, e))
if failure_count == len(args.cgsnapshot):
raise exceptions.CommandError("Unable to delete any of specified "
"cgsnapshots.")
| apache-2.0 | -8,371,715,886,487,103,000 | 32.866081 | 83 | 0.595768 | false |
diN0bot/ProcrasDonate | procrasdonate/applib/sendgrid.py | 1 | 2828 | from procrasdonate.applib import SmtpApiHeader
from django.utils import simplejson as json
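# Demo 1: build an SMTP API header with per-recipient substitution values
# and filter settings, then render it as JSON and as a header string.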
hdr = SmtpApiHeader.SmtpApiHeader()
receiver = ['[email protected]','[email protected]','[email protected]']
times = ['1pm', '2pm', '3pm']
names = ['lucy', 'clay', 'messenger']
hdr.addTo(receiver)
hdr.addSubVal('<time>', times)
hdr.addSubVal('<name>', names)
hdr.addFilterSetting('subscriptiontrack', 'enable', 1)
hdr.addFilterSetting('twitter', 'enable', 1)
hdr.setUniqueArgs({'test':1, 'foo':2})
# Print both representations; the original overwrote ``a`` before use.
print hdr.asJSON()
print hdr.as_string()
#!/usr/bin/python
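# Demo 2: assemble a multipart (plain + HTML) message and send it through
# SendGrid's SMTP endpoint.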
import SmtpApiHeader
import json
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
hdr = SmtpApiHeader.SmtpApiHeader()
# The list of addresses this message will be sent to
receiver = ['isaac@example', 'tim@example', 'jose@example']
# The names of the recipients
times = ['1pm', '2pm', '3pm']
# Another subsitution variable
names = ['Isaac', 'Tim', 'Jose']
# Set all of the above variables
hdr.addTo(receiver)
hdr.addSubVal('<time>', times)
hdr.addSubVal('<name>', names)
# Specify that this is an initial contact message
hdr.setCategory("initial")
# Enable a text footer and set it
hdr.addFilterSetting('footer', 'enable', 1)
hdr.addFilterSetting('footer', "text/plain", "Thank you for your business")
# fromEmail is your email
# toEmail is recipient's email address
# For multiple recipient e-mails, the 'toEmail' address is irrelivant
fromEmail = '[email protected]'
toEmail = '[email protected]'
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = "Contact Response for <name> at <time>"
msg['From'] = fromEmail
msg['To'] = toEmail
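# The X-SMTPAPI header carries the recipient list, substitutions, and
# filter settings to SendGrid as JSON.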
msg["X-SMTPAPI"] = hdr.asJSON()
# Create the body of the message (a plain-text and an HTML version).
# text is your plain-text email
# html is your html version of the email
# if the reciever is able to view html emails then only the html
# email will be displayed
text = "Hi <name>!\nHow are you?\n"
html = """\
<html>
<head></head>
<body>
    <p>Hi! <name><br>
How are you?<br>
</p>
</body>
</html>
"""
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
msg.attach(part1)
msg.attach(part2)
# Login credentials
username = '[email protected]'
password = 'yourpassword'
# Open a connection to the SendGrid mail server
s = smtplib.SMTP('smtp.sendgrid.net')
# Authenticate
s.login(username, password)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(fromEmail, toEmail, msg.as_string())
s.quit()
| agpl-3.0 | 5,409,071,453,438,566,000 | 25.933333 | 76 | 0.716407 | false |
TomasTomecek/osbs | osbs/build/build_request.py | 1 | 12272 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, absolute_import, unicode_literals
import json
import logging
import os
from osbs.build.manipulate import DockJsonManipulator
from osbs.build.spec import CommonSpec, ProdSpec, SimpleSpec, ProdWithoutKojiSpec, CommonProdSpec
from osbs.constants import PROD_BUILD_TYPE, SIMPLE_BUILD_TYPE, PROD_WITHOUT_KOJI_BUILD_TYPE
from osbs.exceptions import OsbsException
build_classes = {}
logger = logging.getLogger(__name__)
def register_build_class(cls):
build_classes[cls.key] = cls
return cls
class BuildRequest(object):
"""
Wraps logic for creating build inputs
"""
key = None
def __init__(self, build_json_store):
"""
:param build_json_store: str, path to directory with JSON build files
"""
self.spec = None
self.build_json_store = build_json_store
self.build_json = None # rendered template
self._template = None # template loaded from filesystem
self._inner_template = None # dock json
self._dj = None
def set_params(self, **kwargs):
"""
set parameters according to specification
:param kwargs:
:return:
"""
raise NotImplementedError()
@staticmethod
def new_by_type(build_name, *args, **kwargs):
"""Find BuildRequest with the given name."""
try:
build_class = build_classes[build_name]
logger.debug("Instantiating: %s(%s, %s)", build_class.__name__, args, kwargs)
return build_class(*args, **kwargs)
except KeyError:
raise RuntimeError("Unknown build type '{0}'".format(build_name))
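    # Illustrative use, with a key from osbs.constants:
    #   BuildRequest.new_by_type(PROD_BUILD_TYPE, build_json_store=path)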
def render(self):
"""
render input parameters into template
:return: dict, build json
"""
raise NotImplementedError()
@property
def build_id(self):
return self.build_json['metadata']['name']
@property
def template(self):
if self._template is None:
path = os.path.join(self.build_json_store, "%s.json" % self.key)
logger.debug("loading template from path %s", path)
try:
with open(path, "r") as fp:
self._template = json.load(fp)
except (IOError, OSError) as ex:
raise OsbsException("Can't open template '%s': %s" %
(path, repr(ex)))
return self._template
@property
def inner_template(self):
if self._inner_template is None:
path = os.path.join(self.build_json_store, "%s_inner.json" % self.key)
logger.debug("loading inner template from path %s", path)
with open(path, "r") as fp:
self._inner_template = json.load(fp)
return self._inner_template
@property
def dj(self):
if self._dj is None:
self._dj = DockJsonManipulator(self.template, self.inner_template)
return self._dj
class CommonBuild(BuildRequest):
def __init__(self, build_json_store):
"""
:param build_json_store: str, path to directory with JSON build files
"""
super(CommonBuild, self).__init__(build_json_store)
self.spec = CommonSpec()
def set_params(self, **kwargs):
"""
set parameters according to specification
these parameters are accepted:
:param git_uri: str, URL of source git repository
:param git_ref: str, what git tree to build (default: master)
:param registry_uri: str, URL of docker registry where built image is pushed
:param user: str, user part of resulting image name
:param component: str, component part of the image name
:param openshift_uri: str, URL of openshift instance for the build
:param yum_repourls: list of str, URLs to yum repo files to include
"""
logger.debug("setting params '%s' for %s", kwargs, self.spec)
self.spec.set_params(**kwargs)
def render(self):
# !IMPORTANT! can't be too long: https://github.com/openshift/origin/issues/733
self.template['metadata']['name'] = self.spec.name.value
self.template['parameters']['source']['git']['uri'] = self.spec.git_uri.value
self.template['parameters']['source']['git']['ref'] = self.spec.git_ref.value
self.template['parameters']['output']['registry'] = self.spec.registry_uri.value
if (self.spec.yum_repourls.value is not None and
self.dj.dock_json_has_plugin_conf('prebuild_plugins', "add_yum_repo_by_url")):
self.dj.dock_json_set_arg('prebuild_plugins', "add_yum_repo_by_url", "repourls",
self.spec.yum_repourls.value)
def validate_input(self):
self.spec.validate()
class CommonProductionBuild(CommonBuild):
def __init__(self, build_json_store, **kwargs):
super(CommonProductionBuild, self).__init__(build_json_store, **kwargs)
self.spec = CommonProdSpec()
def set_params(self, **kwargs):
"""
set parameters according to specification
these parameters are accepted:
:param sources_command: str, command used to fetch dist-git sources
:param architecture: str, architecture we are building for
:param vendor: str, vendor name
:param build_host: str, host the build will run on
:param authoritative_registry: str, the docker registry authoritative for this image
:param metadata_plugin_use_auth: bool, use auth when posting metadata from dock?
"""
logger.debug("setting params '%s' for %s", kwargs, self.spec)
self.spec.set_params(**kwargs)
def render(self, validate=True):
if validate:
self.spec.validate()
super(CommonProductionBuild, self).render()
dj = DockJsonManipulator(self.template, self.inner_template)
dj.dock_json_set_arg('prebuild_plugins', "distgit_fetch_artefacts", "command",
self.spec.sources_command.value)
dj.dock_json_set_arg('prebuild_plugins', "change_source_registry", "registry_uri",
self.spec.registry_uri.value)
dj.dock_json_set_arg('postbuild_plugins', "tag_by_labels", "registry_uri",
self.spec.registry_uri.value)
if self.spec.metadata_plugin_use_auth.value is not None:
dj.dock_json_set_arg('postbuild_plugins', "store_metadata_in_osv3",
"use_auth", self.spec.metadata_plugin_use_auth.value)
implicit_labels = {
'Architecture': self.spec.architecture.value,
'Vendor': self.spec.vendor.value,
'Build_Host': self.spec.build_host.value,
'Authoritative_Registry': self.spec.authoritative_registry.value,
}
dj.dock_json_merge_arg('prebuild_plugins', "add_labels_in_dockerfile", "labels",
implicit_labels)
dj.dock_json_set_arg('postbuild_plugins', "store_metadata_in_osv3", "url",
self.spec.openshift_uri.value)
@register_build_class
class ProductionBuild(CommonProductionBuild):
key = PROD_BUILD_TYPE
def __init__(self, build_json_store, **kwargs):
super(ProductionBuild, self).__init__(build_json_store, **kwargs)
self.spec = ProdSpec()
def set_params(self, **kwargs):
"""
set parameters according to specification
these parameters are accepted:
:param koji_target: str, koji tag with packages used to build the image
:param kojiroot: str, URL from which koji packages are fetched
:param kojihub: str, URL of the koji hub
:param sources_command: str, command used to fetch dist-git sources
:param architecture: str, architecture we are building for
:param vendor: str, vendor name
:param build_host: str, host the build will run on
:param authoritative_registry: str, the docker registry authoritative for this image
:param metadata_plugin_use_auth: bool, use auth when posting metadata from dock?
"""
logger.debug("setting params '%s' for %s", kwargs, self.spec)
self.spec.set_params(**kwargs)
def render(self, validate=True):
if validate:
self.spec.validate()
super(ProductionBuild, self).render()
dj = DockJsonManipulator(self.template, self.inner_template)
self.template['parameters']['output']['imageTag'] = self.spec.image_tag.value
dj.dock_json_set_arg('prebuild_plugins', "koji", "target", self.spec.koji_target.value)
dj.dock_json_set_arg('prebuild_plugins', "koji", "root", self.spec.kojiroot.value)
dj.dock_json_set_arg('prebuild_plugins', "koji", "hub", self.spec.kojihub.value)
dj.write_dock_json()
self.build_json = self.template
logger.debug(self.build_json)
return self.build_json
@register_build_class
class ProductionWithoutKojiBuild(CommonProductionBuild):
key = PROD_WITHOUT_KOJI_BUILD_TYPE
def __init__(self, build_json_store, **kwargs):
super(ProductionWithoutKojiBuild, self).__init__(build_json_store, **kwargs)
self.spec = ProdWithoutKojiSpec()
def set_params(self, **kwargs):
"""
set parameters according to specification
these parameters are accepted:
:param sources_command: str, command used to fetch dist-git sources
:param architecture: str, architecture we are building for
:param vendor: str, vendor name
:param build_host: str, host the build will run on
:param authoritative_registry: str, the docker registry authoritative for this image
:param metadata_plugin_use_auth: bool, use auth when posting metadata from dock?
"""
logger.debug("setting params '%s' for %s", kwargs, self.spec)
self.spec.set_params(**kwargs)
def render(self, validate=True):
if validate:
self.spec.validate()
super(ProductionWithoutKojiBuild, self).render()
dj = DockJsonManipulator(self.template, self.inner_template)
self.template['parameters']['output']['imageTag'] = self.spec.image_tag.value
dj.write_dock_json()
self.build_json = self.template
logger.debug(self.build_json)
return self.build_json
@register_build_class
class SimpleBuild(CommonBuild):
"""
Simple build type for scratch builds - gets sources from git, builds image
according to Dockerfile, pushes it to a registry.
"""
key = SIMPLE_BUILD_TYPE
def __init__(self, build_json_store, **kwargs):
super(SimpleBuild, self).__init__(build_json_store, **kwargs)
self.spec = SimpleSpec()
def set_params(self, **kwargs):
"""
set parameters according to specification
"""
logger.debug("setting params '%s' for %s", kwargs, self.spec)
self.spec.set_params(**kwargs)
def render(self, validate=True):
if validate:
self.spec.validate()
super(SimpleBuild, self).render()
dj = DockJsonManipulator(self.template, self.inner_template)
self.template['parameters']['output']['imageTag'] = self.spec.image_tag.value
dj.dock_json_set_arg('postbuild_plugins', "store_metadata_in_osv3", "url",
self.spec.openshift_uri.value)
dj.write_dock_json()
self.build_json = self.template
logger.debug(self.build_json)
return self.build_json
class BuildManager(object):
def __init__(self, build_json_store):
self.build_json_store = build_json_store
def get_build_request_by_type(self, build_type):
"""
return instance of BuildRequest according to specified build type
:param build_type: str, name of build type
:return: instance of BuildRequest
"""
b = BuildRequest.new_by_type(build_type, build_json_store=self.build_json_store)
return b
| bsd-3-clause | -6,186,058,528,523,075,000 | 36.414634 | 97 | 0.625652 | false |
maestro-hybrid-cloud/heat | heat/tests/generic_resource.py | 1 | 10736 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log as logging
import six
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine.resources import stack_resource
from heat.engine.resources import stack_user
from heat.engine import support
LOG = logging.getLogger(__name__)
class GenericResource(resource.Resource):
"""Dummy resource for use in tests."""
properties_schema = {}
attributes_schema = collections.OrderedDict([
('foo', attributes.Schema('A generic attribute')),
('Foo', attributes.Schema('Another generic attribute'))])
@classmethod
def is_service_available(cls, context):
return True
def handle_create(self):
LOG.warn(_LW('Creating generic resource (Type "%s")'),
self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
LOG.warn(_LW('Updating generic resource (Type "%s")'),
self.type())
def handle_delete(self):
LOG.warn(_LW('Deleting generic resource (Type "%s")'),
self.type())
def _resolve_attribute(self, name):
return self.name
def handle_suspend(self):
LOG.warn(_LW('Suspending generic resource (Type "%s")'),
self.type())
def handle_resume(self):
LOG.warn(_LW('Resuming generic resource (Type "%s")'),
self.type())
class ResWithShowAttr(GenericResource):
def _show_resource(self):
return {'foo': self.name,
'Foo': self.name,
'Another': self.name}
class ResWithComplexPropsAndAttrs(GenericResource):
properties_schema = {
'a_string': properties.Schema(properties.Schema.STRING),
'a_list': properties.Schema(properties.Schema.LIST),
'a_map': properties.Schema(properties.Schema.MAP),
'an_int': properties.Schema(properties.Schema.INTEGER)}
attributes_schema = {'list': attributes.Schema('A list'),
'map': attributes.Schema('A map'),
'string': attributes.Schema('A string')}
update_allowed_properties = ('an_int',)
def _resolve_attribute(self, name):
try:
return self.properties["a_%s" % name]
except KeyError:
return None
class ResourceWithProps(GenericResource):
properties_schema = {
'Foo': properties.Schema(properties.Schema.STRING),
'FooInt': properties.Schema(properties.Schema.INTEGER)}
class ResourceWithPropsRefPropOnDelete(ResourceWithProps):
def check_delete_complete(self, cookie):
return self.properties['FooInt'] is not None
class ResourceWithPropsRefPropOnValidate(ResourceWithProps):
def validate(self):
super(ResourceWithPropsRefPropOnValidate, self).validate()
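        # Intentional no-op comparison: only the property lookup matters --
        # it forces 'FooInt' to resolve while validate() runs.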
self.properties['FooInt'] is not None
class ResourceWithPropsAndAttrs(ResourceWithProps):
attributes_schema = {'Bar': attributes.Schema('Something.')}
class ResourceWithResourceID(GenericResource):
properties_schema = {'ID': properties.Schema(properties.Schema.STRING)}
def handle_create(self):
super(ResourceWithResourceID, self).handle_create()
self.resource_id_set(self.properties.get('ID'))
def handle_delete(self):
self.mox_resource_id(self.resource_id)
def mox_resource_id(self, resource_id):
pass
class ResourceWithComplexAttributes(GenericResource):
attributes_schema = {
'list': attributes.Schema('A list'),
'flat_dict': attributes.Schema('A flat dictionary'),
'nested_dict': attributes.Schema('A nested dictionary'),
'none': attributes.Schema('A None')
}
list = ['foo', 'bar']
flat_dict = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
nested_dict = {'list': [1, 2, 3],
'string': 'abc',
'dict': {'a': 1, 'b': 2, 'c': 3}}
def _resolve_attribute(self, name):
if name == 'list':
return self.list
if name == 'flat_dict':
return self.flat_dict
if name == 'nested_dict':
return self.nested_dict
if name == 'none':
return None
class ResourceWithRequiredProps(GenericResource):
properties_schema = {'Foo': properties.Schema(properties.Schema.STRING,
required=True)}
class ResourceWithMultipleRequiredProps(GenericResource):
properties_schema = {'Foo1': properties.Schema(properties.Schema.STRING,
required=True),
'Foo2': properties.Schema(properties.Schema.STRING,
required=True),
'Foo3': properties.Schema(properties.Schema.STRING,
required=True)}
class ResourceWithRequiredPropsAndEmptyAttrs(GenericResource):
properties_schema = {'Foo': properties.Schema(properties.Schema.STRING,
required=True)}
attributes_schema = {}
class SignalResource(signal_responder.SignalResponder):
SIGNAL_TRANSPORTS = (
CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL,
ZAQAR_SIGNAL
) = (
'CFN_SIGNAL', 'TEMP_URL_SIGNAL', 'HEAT_SIGNAL', 'NO_SIGNAL',
'ZAQAR_SIGNAL'
)
properties_schema = {
'signal_transport': properties.Schema(properties.Schema.STRING,
default='CFN_SIGNAL')}
attributes_schema = {'AlarmUrl': attributes.Schema('Get a signed webhook'),
'signal': attributes.Schema('Get a signal')}
def handle_create(self):
super(SignalResource, self).handle_create()
self.resource_id_set(self._get_user_id())
def handle_signal(self, details=None):
LOG.warn(_LW('Signaled resource (Type "%(type)s") %(details)s'),
{'type': self.type(), 'details': details})
def _resolve_attribute(self, name):
if self.resource_id is not None:
if name == 'AlarmUrl':
return self._get_signal().get('alarm_url')
elif name == 'signal':
return self._get_signal()
class StackUserResource(stack_user.StackUser):
properties_schema = {}
attributes_schema = {}
def handle_create(self):
super(StackUserResource, self).handle_create()
self.resource_id_set(self._get_user_id())
class ResourceWithCustomConstraint(GenericResource):
properties_schema = {
'Foo': properties.Schema(
properties.Schema.STRING,
constraints=[constraints.CustomConstraint('neutron.network')])}
class ResourceWithAttributeType(GenericResource):
attributes_schema = {
'attr1': attributes.Schema('A generic attribute',
type=attributes.Schema.STRING),
'attr2': attributes.Schema('Another generic attribute',
type=attributes.Schema.MAP)
}
def _resolve_attribute(self, name):
if name == 'attr1':
return "valid_sting"
elif name == 'attr2':
return "invalid_type"
class ResourceWithDefaultClientName(resource.Resource):
default_client_name = 'sample'
class ResourceWithFnGetAttType(GenericResource):
def FnGetAtt(self, name):
pass
class ResourceWithFnGetRefIdType(ResourceWithProps):
def FnGetRefId(self):
return 'ID-%s' % self.name
class ResourceWithListProp(ResourceWithFnGetRefIdType):
properties_schema = {"listprop": properties.Schema(properties.Schema.LIST)}
class StackResourceType(stack_resource.StackResource, GenericResource):
def physical_resource_name(self):
return "cb2f2b28-a663-4683-802c-4b40c916e1ff"
def set_template(self, nested_template, params):
self.nested_template = nested_template
self.nested_params = params
def handle_create(self):
return self.create_with_template(self.nested_template,
self.nested_params)
def handle_adopt(self, resource_data):
return self.create_with_template(self.nested_template,
self.nested_params,
adopt_data=resource_data)
def handle_delete(self):
self.delete_nested()
def has_nested(self):
if self.nested() is not None:
return True
return False
class ResourceWithRestoreType(ResWithComplexPropsAndAttrs):
def handle_restore(self, defn, data):
props = dict(
(key, value) for (key, value) in
six.iteritems(defn.properties(self.properties_schema))
if value is not None)
value = data['resource_data']['a_string']
props['a_string'] = value
return defn.freeze(properties=props)
class DynamicSchemaResource(resource.Resource):
"""Resource with an attribute not registered in the attribute schema."""
properties_schema = {}
attributes_schema = {
'stat_attr': attributes.Schema('A generic static attribute',
type=attributes.Schema.STRING),
}
def _init_attributes(self):
# software deployment scheme is not static
# so return dynamic attributes for it
return attributes.DynamicSchemeAttributes(
self.name, self.attributes_schema, self._resolve_attribute)
def _resolve_attribute(self, name):
if name == 'stat_attr':
return "static_attribute"
elif name == 'dynamic_attr':
return "dynamic_attribute"
else:
raise KeyError()
class ResourceTypeUnSupportedLiberty(GenericResource):
support_status = support.SupportStatus(
version='5.0.0',
status=support.UNSUPPORTED)
class ResourceTypeSupportedKilo(GenericResource):
support_status = support.SupportStatus(
version='2015.1')
| apache-2.0 | -8,903,765,915,186,320,000 | 32.23839 | 79 | 0.622113 | false |
lleszczu/PerfKitBenchmarker | perfkitbenchmarker/kubernetes/kubernetes_disk.py | 1 | 3923 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT,\
OUTPUT_STDERR as STDERR, OUTPUT_EXIT_CODE as EXIT_CODE
FLAGS = flags.FLAGS
flags.DEFINE_boolean('use_ceph_volumes', True,
'Use Ceph volumes for scratch disks')
flags.DEFINE_string('ceph_secret', None,
'Name of the Ceph Secret used by Kubernetes in order to '
'authenticate with Ceph.')
flags.DEFINE_string('rbd_pool', 'rbd',
'Name of RBD pool for Ceph volumes.')
flags.DEFINE_list('ceph_monitors', [],
'IP addresses and ports of Ceph Monitors. '
'Must be provided when scratch disk is required. '
'Example: "127.0.0.1:6789,192.168.1.1:6789"')
class CephDisk(disk.BaseDisk):
def __init__(self, disk_num, disk_spec, name):
super(CephDisk, self).__init__(disk_spec)
self.disk_num = disk_num
self.image_name = 'rbd-%s-%s' % (name, self.disk_num)
self.ceph_secret = FLAGS.ceph_secret
def _Create(self):
"""
    Creates a Rados Block Device volume and installs a filesystem on it.

    Flow: create the RBD image, map it to a local /dev/rbd* device, format
    it with ext4, then unmap it; Kubernetes pods later mount the image via
    the rbd volume plugin (see BuildVolumeBody).
"""
cmd = ['rbd', 'create', self.image_name, '--size',
str(1024 * self.disk_size)]
output = vm_util.IssueCommand(cmd)
if output[EXIT_CODE] != 0:
raise Exception("Creating RBD image failed: %s" % output[STDERR])
cmd = ['rbd', 'map', self.image_name]
output = vm_util.IssueCommand(cmd)
if output[EXIT_CODE] != 0:
raise Exception("Mapping RBD image failed: %s" % output[STDERR])
rbd_device = output[STDOUT].rstrip()
if '/dev/rbd' not in rbd_device:
# Sometimes 'rbd map' command doesn't return any output.
# Trying to find device location another way.
cmd = ['rbd', 'showmapped']
output = vm_util.IssueCommand(cmd)
for image_device in output[STDOUT].split('\n'):
if self.image_name in image_device:
pattern = re.compile("/dev/rbd.*")
          # Use a distinct name; reusing ``output`` only worked because
          # OUTPUT_STDOUT happens to equal 0.
          matches = pattern.findall(image_device)
          rbd_device = matches[0].rstrip()
break
cmd = ['mkfs.ext4', rbd_device]
output = vm_util.IssueCommand(cmd)
if output[EXIT_CODE] != 0:
raise Exception("Formatting partition failed: %s" % output[STDERR])
cmd = ['rbd', 'unmap', rbd_device]
output = vm_util.IssueCommand(cmd)
if output[EXIT_CODE] != 0:
raise Exception("Unmapping block device failed: %s" % output[STDERR])
def _Delete(self):
cmd = ['rbd', 'rm', self.image_name]
output = vm_util.IssueCommand(cmd)
if output[EXIT_CODE] != 0:
msg = "Removing RBD image failed. Reattempting."
logging.warning(msg)
raise Exception(msg)
def BuildVolumeBody(self):
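    # Kubernetes 'rbd' volume stanza letting a pod mount this Ceph image.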
ceph_volume = {
"name": self.image_name,
"rbd": {
"monitors": FLAGS.ceph_monitors,
"pool": FLAGS.rbd_pool,
"image": self.image_name,
"secretRef": {
"name": FLAGS.ceph_secret
},
"fsType": "ext4",
"readOnly": False
}
}
return ceph_volume
def GetDevicePath(self):
return self.mount_point
def Attach(self):
pass
def Detach(self):
pass
| apache-2.0 | -9,028,376,455,270,050,000 | 31.966387 | 77 | 0.629875 | false |
rickdberg/mgmodel | bottom_mg_error_estimate.py | 1 | 3824 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 8 16:55:05 2017
@author: rickdberg
Calculate the rmse of modeled bottom water Mg concentration vs actual measurements
"""
import numpy as np
from sqlalchemy import create_engine
import pandas as pd
import matplotlib.pyplot as plot
import seawater
engine = create_engine("mysql://root:neogene227@localhost/iodp_compiled")
# Cl titration data
sql = """
SELECT hole_key, leg, site, hole, sample_depth, Cl
FROM iw_all
where hydrate_affected is null and Cl is not null
and sample_depth > 0.05 and sample_depth < 10
;
"""
cl_data = pd.read_sql(sql, engine)
cl_data = cl_data.fillna(np.nan).sort_values(['hole_key', 'sample_depth'])
cl_cut_data = cl_data.groupby(by='hole_key').head(1)
# cl_avgs = cl_cut_data.groupby(by='hole_key')['Cl'].mean().reset_index()
# cl_avgd = cl_avgs.merge(cl_data['hole_key'], how='inner', on=('hole_key')
# Cl ic data
sql = """
SELECT hole_key, leg, site, hole, sample_depth, Cl_ic
FROM iw_all
where hydrate_affected is null and Cl_ic is not null
and sample_depth > 0.05 and sample_depth < 10
;
"""
cl_ic_data = pd.read_sql(sql, engine)
cl_ic_data = cl_ic_data.fillna(np.nan).sort_values(['hole_key', 'sample_depth'])
cl_ic_cut_data = cl_ic_data.groupby(by='hole_key').head(1)
# cl_avgs = cl_cut_data.groupby(by='hole_key')['Cl'].mean().reset_index()
# cl_avgd = cl_avgs.merge(cl_data, how='inner', on='hole_key')
# Mg measurement directly from bottom water
sql = """
SELECT hole_key, leg, site, hole, sample_depth, Mg, Mg_ic
FROM iw_all
where hydrate_affected is null and Mg is not null
and sample_depth < 0.05
;
"""
mg_bw_data = pd.read_sql(sql, engine)
# Mg calculated from WOA salinity data
sql = """
SELECT hole_key, leg, site, hole, woa_bottom_salinity,
water_depth, woa_bottom_temp, lat, lon
FROM summary_all
;"""
woa_salinity = pd.read_sql(sql, engine)
density = seawater.eos80.dens0(woa_salinity['woa_bottom_salinity'], woa_salinity['woa_bottom_temp'])
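# Knudsen-style relation S = 0.03 + 1.805*Cl solved for chlorinity, then
# converted from g/kg to mmol/L using in-situ density and the molar mass
# of Cl (35.45 g/mol).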
def sal_to_cl(salinity, density):
return (1000*(salinity-0.03)*density/1000)/(1.805*35.45)
woa_cl = sal_to_cl(woa_salinity['woa_bottom_salinity'].rename('woa_mg'), density)
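# Scale chlorinity to Mg assuming the conservative seawater ratio
# Mg:Cl = 54:558 (mM:mM).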
woa_mg = woa_cl / 558 * 54
woa = pd.concat((woa_salinity, woa_mg), axis=1)
all_data = cl_cut_data.merge(cl_ic_cut_data, how='outer', on=(
'hole_key', 'leg', 'site', 'hole','sample_depth'))
all_data = all_data.merge(mg_bw_data.loc[:,(
'hole_key','leg', 'site', 'hole', 'Mg', 'Mg_ic')], how='inner', on=(
'hole_key','leg', 'site', 'hole'))
all_data = all_data.merge(woa.loc[:,(
'hole_key','leg', 'site', 'hole', 'woa_bottom_salinity', 'woa_mg',
'water_depth', 'woa_bottom_temp', 'lat','lon')], how='inner', on=(
'hole_key','leg', 'site', 'hole'))
# all_data = cl_avgs.merge(mg_bw_data, how='inner', on=('hole_key'))
stacked_data = pd.concat([
all_data.loc[:,('hole_key','leg', 'site', 'hole',
'sample_depth', 'Cl', 'Mg', 'Mg_ic')],
all_data.loc[:,('hole_key','leg', 'site', 'hole',
'sample_depth', 'Cl_ic', 'Mg', 'Mg_ic')].rename(columns={'Cl_ic':'Cl'})])
plot.plot(54/558*all_data['Cl'], all_data['Mg'], 'go')
# plot.plot(54/558*all_data['Cl_ic'], all_data['Mg'], 'ro')
# plot.plot(all_data['woa_mg'], all_data['Mg'], 'go')
plot.plot(np.linspace(20, 60, num=50), np.linspace(20, 60, num=50), 'k--')
plot.xlabel('Estimated Mg concentration (mM)', fontsize=20)
plot.ylabel('Measured Mg concentration (mM)', fontsize=20)
plot.tick_params(labelsize=16)
plot.show()
def rmse(model_values, measured_values):
return np.sqrt(((model_values-measured_values)**2).mean())
error = rmse(54/558*all_data['Cl'], all_data['Mg'])/54
error_all = rmse(54/558*stacked_data['Cl'], stacked_data['Mg'])/54
error_woa = rmse(all_data['woa_mg'], all_data['Mg'])/54
# all_err = 54/558*all_data['Cl'] - all_data['Mg']
# plot.hist(all_err[all_err.notnull()], bins=50)
# plot.hist(all_data['woa_mg']- all_data['Mg'], bins=50)
# eof
| mit | -4,699,286,989,374,539,000 | 34.738318 | 100 | 0.665272 | false |
Diacamma2/financial | diacamma/invoice/migrations/0001_initial.py | 1 | 6535 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import deletion
from django.core.validators import MinValueValidator, MaxValueValidator
from django.conf import settings
from lucterios.framework.tools import set_locale_lang
from lucterios.CORE.models import PrintModel
from lucterios.framework.model_fields import LucteriosDecimalField
def initial_values(*args):
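    # Register the two stock bill print models; the first is the default.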
set_locale_lang(settings.LANGUAGE_CODE)
PrintModel().load_model('diacamma.invoice', "Bill_0001", is_default=True)
PrintModel().load_model('diacamma.invoice', "Bill_0002", is_default=False)
class Migration(migrations.Migration):
dependencies = [
('accounting', '0001_initial'),
('payoff', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vat',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(verbose_name='name', max_length=20)),
('rate', models.DecimalField(validators=[MinValueValidator(0.0), MaxValueValidator(
99.9)], decimal_places=2, max_digits=6, verbose_name='rate', default=10.0)),
('isactif', models.BooleanField(
verbose_name='is actif', default=True)),
],
options={
'verbose_name_plural': 'VATs',
'verbose_name': 'VAT'
},
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference', models.CharField(
verbose_name='reference', max_length=30)),
('designation', models.TextField(verbose_name='designation')),
('price', LucteriosDecimalField(validators=[MinValueValidator(0.0), MaxValueValidator(
9999999.999)], decimal_places=3, max_digits=10, verbose_name='price', default=0.0)),
('unit', models.CharField(
verbose_name='unit', null=True, default='', max_length=10)),
('isdisabled', models.BooleanField(
verbose_name='is disabled', default=False)),
('sell_account', models.CharField(
verbose_name='sell account', max_length=50)),
('vat', models.ForeignKey(to='invoice.Vat', null=True,
on_delete=deletion.PROTECT, verbose_name='vat', default=None))
],
options={
'verbose_name_plural': 'articles',
'verbose_name': 'article'
},
),
migrations.CreateModel(
name='Bill',
fields=[
('supporting_ptr', models.OneToOneField(auto_created=True, parent_link=True,
serialize=False, primary_key=True, to='payoff.Supporting', on_delete=models.CASCADE)),
('fiscal_year', models.ForeignKey(on_delete=deletion.PROTECT,
null=True, to='accounting.FiscalYear', default=None, verbose_name='fiscal year')),
('bill_type', models.IntegerField(null=False, default=0, db_index=True, verbose_name='bill type', choices=[
(0, 'quotation'), (1, 'bill'), (2, 'asset'), (3, 'receipt')])),
('num', models.IntegerField(
null=True, verbose_name='numeros')),
('date', models.DateField(null=False, verbose_name='date')),
('comment', models.TextField(
verbose_name='comment', null=False, default='')),
('status', models.IntegerField(verbose_name='status', db_index=True, default=0, choices=[
(0, 'building'), (1, 'valid'), (2, 'cancel'), (3, 'archive')])),
('cost_accounting', models.ForeignKey(to='accounting.CostAccounting', null=True,
on_delete=deletion.PROTECT, verbose_name='cost accounting', default=None)),
('entry', models.ForeignKey(to='accounting.EntryAccount', null=True,
on_delete=deletion.PROTECT, verbose_name='entry', default=None)),
],
options={
'verbose_name_plural': 'bills',
'verbose_name': 'bill'
},
),
migrations.CreateModel(
name='Detail',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('designation', models.TextField(verbose_name='designation')),
('price', LucteriosDecimalField(verbose_name='price', max_digits=10, default=0.0,
decimal_places=3, validators=[MinValueValidator(0.0), MaxValueValidator(9999999.999)])),
('vta_rate', LucteriosDecimalField(default=0.0, verbose_name='vta rate', decimal_places=4,
max_digits=6, validators=[MinValueValidator(0.0), MaxValueValidator(1.0)])),
('unit', models.CharField(
null=True, verbose_name='unit', default='', max_length=10)),
('quantity', LucteriosDecimalField(validators=[MinValueValidator(0.0), MaxValueValidator(
9999999.99)], decimal_places=2, verbose_name='quantity', default=1.0, max_digits=10)),
('reduce', LucteriosDecimalField(validators=[MinValueValidator(0.0), MaxValueValidator(
9999999.999)], decimal_places=3, verbose_name='reduce', default=0.0, max_digits=10)),
('article', models.ForeignKey(null=True, default=None, to='invoice.Article',
on_delete=deletion.PROTECT, verbose_name='article')),
('bill', models.ForeignKey(
to='invoice.Bill', verbose_name='bill', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'details',
'default_permissions': [],
'verbose_name': 'detail'
},
),
migrations.RunPython(initial_values),
]
| gpl-3.0 | -5,824,598,278,623,702,000 | 52.565574 | 142 | 0.542005 | false |
bil-elmoussaoui/Gnome-TwoFactorAuth | Authenticator/models/keyring.py | 1 | 3464 | """
Copyright © 2017 Bilal Elmoussaoui <[email protected]>
This file is part of Authenticator.
Authenticator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Authenticator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Authenticator. If not, see <http://www.gnu.org/licenses/>.
"""
from gi import require_version
require_version('Secret', '1')
from gi.repository import Secret
class Keyring:
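    """libsecret-backed storage for per-account OTP secrets."""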
ID = "com.github.bilelmoussaoui.Authenticator"
instance = None
def __init__(self):
self.schema = Secret.Schema.new(Keyring.ID,
Secret.SchemaFlags.NONE,
{
"id": Secret.SchemaAttributeType.STRING,
"name": Secret.SchemaAttributeType.STRING,
})
@staticmethod
def get_default():
if Keyring.instance is None:
Keyring.instance = Keyring()
return Keyring.instance
@staticmethod
def get_by_id(secret_id):
"""
Return the OTP token based on a secret ID.
:param secret_id: the secret ID associated to an OTP token
:type secret_id: str
:return: the secret OTP token.
"""
schema = Keyring.get_default().schema
password = Secret.password_lookup_sync(
schema, {"id": str(secret_id)}, None)
return password
@staticmethod
def insert(secret_id, provider, username, token):
"""
Save a secret OTP token.
:param secret_id: The secret ID associated to the OTP token
:param provider: the provider name
:param username: the username
:param token: the secret OTP token.
"""
schema = Keyring.get_default().schema
data = {
"id": str(secret_id),
"name": str(username),
}
Secret.password_store_sync(
schema,
data,
Secret.COLLECTION_DEFAULT,
"{provider} OTP ({username})".format(
provider=provider, username=username),
token,
None
)
@staticmethod
def remove(secret_id):
"""
Remove a specific secret OTP token.
:param secret_id: the secret ID associated to the OTP token
:return bool: Either the token was removed successfully or not
"""
schema = Keyring.get_default().schema
success = Secret.password_clear_sync(
schema, {"id": str(secret_id)}, None)
return success
@staticmethod
def clear():
"""
Clear all existing accounts.
:return bool: Either the token was removed successfully or not
"""
schema = Keyring.get_default().schema
success = Secret.password_clear_sync(schema, {}, None)
return success
| gpl-2.0 | 82,455,778,413,601,300 | 30.364486 | 86 | 0.573202 | false |
gmarkall/COFFEE | doc/source/conf.py | 2 | 7963 | # -*- coding: utf-8 -*-
#
# COFFEE documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 30 11:25:59 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'COFFEE'
copyright = u'2014, Fabio Luporini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
execfile("../../coffee/version.py")
version = '%d.%d' % __version_info__[0:2] # noqa: pulled from coffee/version.py
# The full version, including alpha/beta/rc tags.
release = __version__ # noqa: pulled from coffee/version.py
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'COFFEEdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'COFFEE.tex', u'COFFEE Documentation',
u'Fabio Luporini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coffee', u'COFFEE Documentation',
[u'Fabio Luporini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'COFFEE', u'COFFEE Documentation',
u'Fabio Luporini', 'COFFEE', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause | 4,981,702,581,545,656,000 | 31.502041 | 80 | 0.699987 | false |
ityaptin/ceilometer | ceilometer/storage/base.py | 1 | 8967 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import datetime
import inspect
import math
from oslo_utils import timeutils
import six
from six import moves
import ceilometer
def iter_period(start, end, period):
"""Split a time from start to end in periods of a number of seconds.
This function yields the (start, end) time for each period composing the
time passed as argument.
:param start: When the period set start.
:param end: When the period end starts.
:param period: The duration of the period.
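    For example, period=60 over a 90 second span yields (start, start+60s)
    and (start+60s, start+120s); the last period may extend past `end`.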
"""
period_start = start
increment = datetime.timedelta(seconds=period)
for i in moves.xrange(int(math.ceil(
timeutils.delta_seconds(start, end)
/ float(period)))):
next_start = period_start + increment
yield (period_start, next_start)
period_start = next_start
def _handle_sort_key(model_name, sort_key=None):
"""Generate sort keys according to the passed in sort key from user.
:param model_name: Database model name be query.(meter, etc.)
:param sort_key: sort key passed from user.
return: sort keys list
"""
sort_keys_extra = {'meter': ['user_id', 'project_id'],
'resource': ['user_id', 'project_id', 'timestamp'],
}
sort_keys = sort_keys_extra[model_name]
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:
pass
finally:
sort_keys.insert(0, sort_key)
return sort_keys
class Model(object):
"""Base class for storage API models."""
def __init__(self, **kwds):
self.fields = list(kwds)
for k, v in six.iteritems(kwds):
setattr(self, k, v)
def as_dict(self):
d = {}
for f in self.fields:
v = getattr(self, f)
if isinstance(v, Model):
v = v.as_dict()
elif isinstance(v, list) and v and isinstance(v[0], Model):
v = [sub.as_dict() for sub in v]
d[f] = v
return d
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def get_field_names(cls):
fields = inspect.getargspec(cls.__init__)[0]
return set(fields) - set(["self"])
class Connection(object):
"""Base class for storage system connections."""
# A dictionary representing the capabilities of this driver.
CAPABILITIES = {
'meters': {'query': {'simple': False,
'metadata': False}},
'resources': {'query': {'simple': False,
'metadata': False}},
'samples': {'query': {'simple': False,
'metadata': False,
'complex': False}},
'statistics': {'groupby': False,
'query': {'simple': False,
'metadata': False},
'aggregation': {'standard': False,
'selectable': {
'max': False,
'min': False,
'sum': False,
'avg': False,
'count': False,
'stddev': False,
'cardinality': False}}
},
}
STORAGE_CAPABILITIES = {
'storage': {'production_ready': False},
}
def __init__(self, url):
pass
@staticmethod
def upgrade():
"""Migrate the database to `version` or the most recent version."""
def record_metering_data_batch(self, samples):
"""Record the metering data in batch"""
for s in samples:
self.record_metering_data(s)
@staticmethod
def record_metering_data(data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.publisher.utils.meter_message_from_counter
All timestamps must be naive utc datetime object.
"""
raise ceilometer.NotImplementedError(
'Recording metering data is not implemented')
@staticmethod
def clear_expired_metering_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
raise ceilometer.NotImplementedError(
'Clearing samples not implemented')
@staticmethod
def get_resources(user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of models.Resource instances.
Iterable items containing resource information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional timestamp start range operation.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional timestamp end range operation.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Resources not implemented')
@staticmethod
def get_meters(user=None, project=None, resource=None, source=None,
metaquery=None, limit=None, unique=False):
"""Return an iterable of model.Meter instances.
        Each item contains meter information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param limit: Maximum number of results to return.
:param unique: If set to true, return only unique meter information.
"""
raise ceilometer.NotImplementedError('Meters not implemented')
@staticmethod
def get_samples(sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Samples not implemented')
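    # Typical usage from an API layer (a sketch; SampleFilter is assumed
    # to be the filter class from ceilometer.storage):
    #   sample_filter = storage.SampleFilter(meter='cpu_util')
    #   for s in conn.get_samples(sample_filter, limit=100):
    #       ...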
@staticmethod
def get_meter_statistics(sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of model.Statistics instances.
The filter must have a meter value set.
"""
raise ceilometer.NotImplementedError('Statistics not implemented')
@staticmethod
def clear():
"""Clear database."""
@staticmethod
def query_samples(filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Sample objects.
:param filter_expr: Filter expression for query.
:param orderby: List of field name and direction pairs for order by.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Complex query for samples '
'is not implemented.')
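    # Example arguments (a sketch of the complex-query syntax):
    #   filter_expr = {'and': [{'=': {'counter_name': 'cpu_util'}},
    #                          {'>': {'counter_volume': 0.8}}]}
    #   orderby = [{'timestamp': 'desc'}]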
@classmethod
def get_capabilities(cls):
"""Return an dictionary with the capabilities of each driver."""
return cls.CAPABILITIES
@classmethod
def get_storage_capabilities(cls):
"""Return a dictionary representing the performance capabilities.
This is needed to evaluate the performance of each driver.
"""
return cls.STORAGE_CAPABILITIES
| apache-2.0 | 7,802,330,857,842,974,000 | 34.442688 | 76 | 0.597078 | false |
bolkedebruin/airflow | tests/providers/apache/druid/operators/test_hive_to_druid.py | 1 | 5078 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import requests
import requests_mock
from airflow import DAG
from airflow.providers.apache.druid.operators.hive_to_druid import HiveToDruidTransfer
class TestHiveToDruidTransfer(unittest.TestCase):
# To debug the large json diff
maxDiff = None
hook_config = {
'sql': 'SELECT * FROM table',
'druid_datasource': 'our_datasource',
'ts_dim': 'timedimension_column',
'metric_spec': [
{"name": "count", "type": "count"},
{"name": "amountSum", "type": "doubleSum", "fieldName": "amount"}
],
'hive_cli_conn_id': 'hive_cli_custom',
'druid_ingest_conn_id': 'druid_ingest_default',
'metastore_conn_id': 'metastore_default',
'hadoop_dependency_coordinates': 'org.apache.spark:spark-core_2.10:1.5.2-mmx1',
'intervals': '2016-01-01/2017-01-01',
'num_shards': -1,
'target_partition_size': 1925,
'query_granularity': 'month',
'segment_granularity': 'week',
'job_properties': {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false"
}
}
index_spec_config = {
'static_path': '/apps/db/warehouse/hive/',
'columns': ['country', 'segment']
}
def setUp(self):
super().setUp()
args = {
'owner': 'airflow',
'start_date': '2017-01-01'
}
self.dag = DAG('hive_to_druid', default_args=args)
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount('mock', adapter)
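        # The mocked session above is prepared but the test below calls
        # construct_ingest_query() directly, so no HTTP requests are made.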
def test_construct_ingest_query(self):
operator = HiveToDruidTransfer(
task_id='hive_to_druid',
dag=self.dag,
**self.hook_config
)
provided_index_spec = operator.construct_ingest_query(
**self.index_spec_config
)
expected_index_spec = {
"hadoopDependencyCoordinates": self.hook_config['hadoop_dependency_coordinates'],
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.hook_config['metric_spec'],
"granularitySpec": {
"queryGranularity": self.hook_config['query_granularity'],
"intervals": self.hook_config['intervals'],
"type": "uniform",
"segmentGranularity": self.hook_config['segment_granularity'],
},
"parser": {
"type": "string",
"parseSpec": {
"columns": self.index_spec_config['columns'],
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": self.index_spec_config['columns'],
"spatialDimensions": []
},
"timestampSpec": {
"column": self.hook_config['ts_dim'],
"format": "auto"
},
"format": "tsv"
}
},
"dataSource": self.hook_config['druid_datasource']
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": self.hook_config['job_properties'],
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": self.hook_config['target_partition_size'],
"numShards": self.hook_config['num_shards'],
},
},
"ioConfig": {
"inputSpec": {
"paths": self.index_spec_config['static_path'],
"type": "static"
},
"type": "hadoop"
}
}
}
# Make sure it is like we expect it
self.assertEqual(provided_index_spec, expected_index_spec)
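# Convenience entry point so the module can be run directly (an addition,
# assumed compatible with the project's test runner):
if __name__ == '__main__':
    unittest.main()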
| apache-2.0 | 2,081,559,194,940,097,300 | 35.797101 | 93 | 0.51221 | false |