repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kidaa/ffrk-bottle | models/quest.py | 1 | 4427 | from __future__ import absolute_import
import json
import logging
import sys
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import TINYINT
from .base import BetterBase, session_scope
from .drop import Drop
from .log import Log
from .prize import Prize
QUEST_PRIZE_TYPE = 4
class Quest(BetterBase):
__tablename__ = 'quest'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(String(length=64), nullable=False)
description = Column(String(length=1024), nullable=False)
achieve_cond_description = Column(String(length=128), nullable=False)
achieve_type = Column(TINYINT, nullable=False)
achieve_type_name = Column(String(length=16), nullable=False)
hint_title = Column(String(length=128), nullable=False)
hint_msg = Column(String(length=512), nullable=False)
frontend_columns = (
('title', 'Title'),
('description', 'Description'),
)
@property
def name(self):
return self.title
def generate_main_panels(self):
self._main_panels = [
{
'title': self.title,
'body': self.description,
},
{
'title': 'Prizes',
'items': self.prizes,
},
]
def __init__(self, **kwargs):
self.description = kwargs['description'].encode(
sys.stdout.encoding, errors='ignore')
for i in (
'can_challenge',
'disp_number',
'is_achieved',
'is_completed',
'is_new',
'is_tutorial',
'prizes',
'order_cond_description',
'is_special',
'is_challenging',
# This is the recipe for "Create Ability" quests
'ability_recipes',
'description',
):
if i in kwargs:
del(kwargs[i])
super(Quest, self).__init__(**kwargs)
def __repr__(self):
return self.title
def import_quests(data=None, filepath=''):
'''
/dff/quest/list
'''
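# Usage sketch (illustrative, not part of the original module); the file name
# below stands for a hypothetical JSON dump of the /dff/quest/list response:
#   import_quests(filepath='quest_list.json')
# It returns True once all quests and their prizes have been committed.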
logging.debug('{}(filepath="{}") start'.format(
sys._getframe().f_code.co_name, filepath))
if data is None or not isinstance(data, dict):
if not filepath:
raise ValueError('One kwarg of data or filepath is required.')
with open(filepath) as infile:
data = json.load(infile)
if data.get('special_quest_prizes'):
logging.critical('There is a special quest prize!')
success = False
with session_scope() as session:
for quest in data['quests']:
prizes = quest['prizes']
new_quest = session.query(Quest)\
.filter(Quest.id == quest['id']).first()
if new_quest is None:
new_quest = Quest(**quest)
new_log = Log(log='Create {}({})'.format(
type(new_quest).__name__, new_quest))
session.add_all((new_quest, new_log))
session.commit()
for prize in prizes:
id = prize['id']
name = prize['name']
drop = session.query(Drop).filter(
Drop.id == id).first()
if drop is None:
drop = Drop(id=id, name=name)
old_prize = session.query(Prize).filter(
Prize.drop_id == id,
Prize.prize_type == QUEST_PRIZE_TYPE,
Prize.quest == new_quest).first()
if old_prize is not None:
continue
prize['drop_id'] = id
prize['prize_type'] = QUEST_PRIZE_TYPE
new_prize = Prize(**prize)
new_prize.drop = drop
new_prize.quest = new_quest
#session.add(new_prize)
#session.flush()
new_log = Log(log='Create {}({}) from {}({})'.format(
type(new_prize).__name__, new_prize,
type(new_quest).__name__, new_quest))
#session.add(new_log)
session.add_all((new_prize, new_log))
session.commit()
success = True
logging.debug('{}(filepath="{}") end'.format(
sys._getframe().f_code.co_name, filepath))
return success
### EOF ###
| gpl-3.0 | -8,172,674,799,110,996,000 | 30.848921 | 74 | 0.521572 | false | 4.054029 | false | false | false |
Gebesa-Dev/Addons-gebesa | mrp_bom_detail_history/models/mrp_bom_detail_history.py | 1 | 1232 | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import fields, models
class MrpBomDetailHistory(models.Model):
_name = 'mrp.bom.detail.history'
_description = 'Mrp Bom Detail History'
product_master_id = fields.Many2one(
'product.product',
string='Product Master',
)
prev_product_detail_id = fields.Many2one(
'product.product',
string='Previous product detail',
)
upd_product_detail_id = fields.Many2one(
'product.product',
string='Updated product detail',
)
user_id = fields.Many2one(
'res.users',
string='User',
)
action = fields.Selection(
[('create', 'Create'),
('update', 'Update'),
('delete', 'Delete')],
string="Action",
)
action_date = fields.Datetime(
string="Date",
)
prev_qty = fields.Float(
'Previous quantity',
)
upd_qty = fields.Float(
'Updated quantity',
)
prev_cost = fields.Float(
'Previous cost',
)
upd_cost = fields.Float(
'Updated cost',
)
deference = fields.Float(
'Deference',
)
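# Hedged usage sketch (not part of the original addon): history rows are meant
# to be created by whatever BoM-line code tracks changes. Every key below
# mirrors the field definitions above, while the record variables are made up.
#
#   self.env['mrp.bom.detail.history'].create({
#       'product_master_id': master_product.id,
#       'prev_product_detail_id': old_line_product.id,
#       'upd_product_detail_id': new_line_product.id,
#       'user_id': self.env.uid,
#       'action': 'update',
#       'action_date': fields.Datetime.now(),
#       'prev_qty': old_qty,
#       'upd_qty': new_qty,
#   })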
| agpl-3.0 | -8,925,116,920,077,532,000 | 23.137255 | 68 | 0.562145 | false | 3.599415 | false | false | false |
AyoobAli/pyfuzz | pyfuzz.py | 1 | 13022 | #!/usr/bin/env python3
####
### Project: Pyfuzz
### Version: 1.1.0
### Creator: Ayoob Ali ( www.AyoobAli.com )
### License: MIT
###
import http.client
import sys
import os
from optparse import OptionParser
import string
import signal
import ssl
from time import sleep
import random
import subprocess
logFile = ""
def signal_handler(signal, frame):
print("\nScan stopped by user.")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def printMSG(printM):
print(printM)
if logFile != "":
fhandle = open(logFile, "a")
fhandle.write(printM + "\n")
fhandle.close()
def cmd(command = None):
returnArr = {}
returnArr.update({"returnCode": 99})
try:
if command == None or command == "":
return returnArr
stdout = ""
stderr = ""
reCode = subprocess.Popen(command,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stData = reCode.communicate()
returnArr.update({"stdout": stData[0].decode("utf-8")})
returnArr.update({"stderr": stData[1].decode("utf-8")})
returnArr.update({"returnCode": reCode.returncode})
reCode.terminate()
return returnArr
except Exception as ErrMs:
returnArr.update({"error": ErrMs})
return returnArr
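# Illustrative call (not part of the original script): cmd() wraps
# subprocess.Popen and always hands back a dict, e.g.
#   cmd("echo hello") -> {'returnCode': 0, 'stdout': 'hello\n', 'stderr': ''}
# On failure the shell's exit status ends up in 'returnCode'; if an exception
# is raised it is reported under the 'error' key with returnCode left at 99.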
def main():
global logFile
parser = OptionParser(usage="%prog -u http://example.com/en/ -l sharepoint.txt", version="%prog 1.1.0")
parser.add_option("-u", "--url", dest="targetURL", metavar="URL", help="Target URL to scan")
parser.add_option("-l", "--list", dest="listFile", metavar="FILE", help="List of paths to scan")
parser.add_option("-r", "--redirect", action="store_true", dest="showRedirect", help="Show redirect codes (3xx)")
parser.add_option("-e", "--error", action="store_true", dest="showError", help="Show Error codes (5xx)")
parser.add_option("-s", "--sleep", dest="milliseconds", type="int", metavar="NUMBER", help="Sleep for x milliseconds after each request")
parser.add_option("-a", "--header", action="append", dest="headers", help="Add Header to the HTTP request (Ex.: -a User-Agent x)", metavar='HEADER VALUE', nargs=2)
parser.add_option("-b", "--body", dest="requestBody", metavar="Body", help="Request Body (Ex.: name=val&name2=val2)")
parser.add_option("-x", "--method", dest="requestMethod", metavar="[Method]", help="HTTP Request Method (Ex.: GET, POST, PUT, etc...)")
parser.add_option("-i", "--ignore", action="append", dest="ignoreText", metavar="Text", help="Ignore results that contain a specific string")
parser.add_option("-m", "--min-response-size", dest="dataLength", type="int", metavar="NUMBER", help="The minimum response body size in Byte")
parser.add_option("-g", "--log", dest="logFile", metavar="FILE", help="Log scan results to a file")
parser.add_option("-f", "--start-from", dest="startFrom", type="int", metavar="NUMBER", help="Start scanning from path number x in the provided list")
parser.add_option("-t", "--timeout", dest="reqTimeout", type="int", metavar="Seconds", help="Set request timeout")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Show error messages")
parser.add_option("-d", "--define-variable", action="append", dest="variables", help="Define variables to be replaced in URL (Ex.: -d '$varExtension' 'php')", metavar='VARIABLE VALUE', nargs=2)
parser.add_option("--cmd", dest="excCMD", metavar="Command", help="Execute shell command on each found results (Use with caution). Available variables ({#CODE#}, {#URL#}, {#SIZE#}, {#BODY#}, and {#REDIRECT#})")
parser.add_option("-p", "--proxy", dest="httpProxy", metavar="PROXY:PORT", help="HTTP Proxy to pass the connection through (Ex.: localhost:9080)")
startFrom = 0
reqTimeout = 15
(options, args) = parser.parse_args()
if options.requestMethod == None:
options.requestMethod = "GET"
if options.requestBody == None:
options.requestBody = ""
if options.dataLength == None:
options.dataLength = 0
requestHeaders = {}
if options.headers == None:
options.headers = []
for header in options.headers:
requestHeaders.update({header[0]: header[1]})
if options.variables == None:
options.variables = []
if options.listFile == None or options.targetURL == None:
parser.print_help()
sys.exit()
if options.logFile != None:
logFile = options.logFile
if options.startFrom != None:
startFrom = options.startFrom
if options.reqTimeout != None:
if options.reqTimeout > 0:
reqTimeout = int(options.reqTimeout)
excCMD = ""
if options.excCMD != None:
excCMD = str(options.excCMD)
isProxy = False
proxyHost = ""
proxyPort = 0
if options.httpProxy != None:
if str(options.httpProxy).find(':') >= 0:
httpProxy = str(options.httpProxy).split(':')
proxyHost = httpProxy[0]
if httpProxy[1].isnumeric() == True:
proxyPort = int(httpProxy[1])
isProxy = True
if proxyPort < 1 or proxyPort > 65535:
printMSG("Error: Port number should be between 1 and 65535.")
sys.exit()
else:
printMSG("Error: Proxy format should be HOSTNAME:PORT")
sys.exit()
if not os.path.isfile(options.listFile):
printMSG("Error: File (" + options.listFile + ") doesn't exist.")
sys.exit()
if options.targetURL[-1] != "/":
options.targetURL += "/"
targetPro = ""
if options.targetURL[:5].lower() == 'https':
targetDomain = options.targetURL[8:].split("/",1)[0].lower()
targetPath = "/" + options.targetURL[8:].split("/",1)[1]
if isProxy == True:
connection = http.client.HTTPSConnection(proxyHost, proxyPort, timeout=reqTimeout, context=ssl._create_unverified_context())
connection.set_tunnel(targetDomain)
else:
connection = http.client.HTTPSConnection(targetDomain, timeout=reqTimeout, context=ssl._create_unverified_context())
targetPro = "https://"
printMSG("Target : " + targetPro+targetDomain + " (over HTTPS)")
printMSG("Path : " + targetPath)
elif options.targetURL[:5].lower() == 'http:':
targetDomain = options.targetURL[7:].split("/",1)[0].lower()
targetPath = "/"+options.targetURL[7:].split("/",1)[1]
if isProxy == True:
connection = http.client.HTTPConnection(proxyHost, proxyPort, timeout=reqTimeout)
connection.set_tunnel(targetDomain)
else:
connection = http.client.HTTPConnection(targetDomain, timeout=reqTimeout)
targetPro = "http://"
printMSG("Target : " + targetDomain)
printMSG("Path : " + targetPath)
else:
targetDomain = options.targetURL.split("/",1)[0].lower()
targetPath = "/"+options.targetURL.split("/",1)[1]
if isProxy == True:
connection = http.client.HTTPConnection(proxyHost, proxyPort, timeout=reqTimeout)
connection.set_tunnel(targetDomain)
else:
connection = http.client.HTTPConnection(targetDomain, timeout=reqTimeout)
targetPro = "http://"
printMSG("Target : " + targetDomain)
printMSG("Path : " + targetPath)
printMSG("Method : " + options.requestMethod)
printMSG("Header : " + str(requestHeaders))
printMSG("Body : " + options.requestBody)
printMSG("Timeout : " + str(reqTimeout))
printMSG("Proxy : " + str(proxyHost) + ":" + str(proxyPort))
if options.showRedirect != None:
printMSG("Show Redirect: ON")
if options.showError != None:
printMSG("Show Error : ON")
try:
randomPage = ''.join([random.choice(string.ascii_lowercase + string.digits) for n in range(16)])
connection.request(options.requestMethod, targetPath+randomPage+".txt", options.requestBody, requestHeaders)
res = connection.getresponse()
except Exception as ErrMs:
if options.verbose != None:
printMSG("MainError: " + str(ErrMs))
sys.exit(0)
if res.status == 200:
printMSG("NOTE: Looks like the server is returning code 200 for all requests, there might be lots of false positive links.")
if res.status >= 300 and res.status < 400 and options.showRedirect != None:
printMSG("NOTE: Looks like the server is returning code " + str(res.status) + " for all requests, there might be lots of false positive links. try to scan without the option -r")
tpData = res.read()
with open(options.listFile) as lFile:
pathList = lFile.readlines()
totalURLs = len(pathList)
printMSG ("Scanning ( " + str(totalURLs) + " ) files...")
countFound = 0
countAll = 0
strLine = ""
for pathLine in pathList:
try:
countAll = countAll + 1
pathLine = pathLine.strip("\n")
pathLine = pathLine.strip("\r")
if countAll < startFrom:
continue
if pathLine != "":
for variable in options.variables:
pathLine = pathLine.replace(variable[0], variable[1])
if pathLine[:1] == "/":
pathLine = pathLine[1:]
print (' ' * len(strLine), "\r", end="")
strLine = "Checking ["+str(countAll)+"/"+str(totalURLs)+"] "+targetPath+pathLine
print (strLine,"\r", end="")
if options.milliseconds != None:
sleep(options.milliseconds/1000)
connection.request(options.requestMethod, targetPath+pathLine, options.requestBody, requestHeaders)
res = connection.getresponse()
resBody = res.read().decode("utf-8")
resBodySize = len(resBody)
isignored = False
if options.ignoreText != None:
for igText in options.ignoreText:
if igText in resBody:
isignored = True
fURL = str(targetPro+targetDomain+targetPath+pathLine)
redirectHead = ""
exCommand = False
if res.getheader("location") != None:
redirectHead = str(res.getheader("location"))
if res.status >= 200 and res.status < 300:
if isignored == False and resBodySize >= options.dataLength:
exCommand = True
print (' ' * len(strLine), "\r", end="")
printMSG("Code " + str(res.status) + " : " + fURL + " (" + str(resBodySize) + " Byte)")
countFound += 1
if options.showError != None:
if res.status >= 500 and res.status < 600:
if isignored == False and resBodySize >= options.dataLength:
exCommand = True
print (' ' * len(strLine), "\r", end="")
printMSG("Code " + str(res.status) + " : " + fURL)
countFound += 1
if options.showRedirect != None:
if res.status >= 300 and res.status < 400:
if isignored == False and resBodySize >= options.dataLength:
exCommand = True
print (' ' * len(strLine), "\r", end="")
printMSG("Code " + str(res.status) + " : " + fURL + " ( " + redirectHead + " )")
countFound += 1
if str(excCMD) != "" and exCommand == True:
cmdStr = str(excCMD)
cmdStr = cmdStr.replace("{#CODE#}", str(res.status))
cmdStr = cmdStr.replace("{#URL#}", fURL)
cmdStr = cmdStr.replace("{#SIZE#}", str(resBodySize))
cmdStr = cmdStr.replace("{#REDIRECT#}", redirectHead)
cmdStr = cmdStr.replace("{#BODY#}", resBody)
cmdRes = cmd(str(cmdStr))
if options.verbose != None and isinstance(cmdRes, dict) and 'stdout' in cmdRes:
printMSG(cmdRes['stdout'])
except Exception as ErrMs:
if options.verbose != None:
print (' ' * len(strLine), "\r", end="")
printMSG("Error[" + str(countAll) + "]: " + str(ErrMs))
try:
connection.close()
pass
except Exception as e:
if options.verbose != None:
printMSG("Error2:" + str(e))
pass
connection.close()
print (' ' * len(strLine), "\r", end="")
printMSG( "Total Pages found: " + str(countFound) )
if __name__ == "__main__":
main()
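### Example invocations (illustrative, not part of the original script; the
### wordlist names are placeholders):
###   python3 pyfuzz.py -u https://example.com/ -l common-paths.txt -r -s 100
###   python3 pyfuzz.py -u https://example.com/ -l paths.txt -x POST \
###       -b "user=admin&pass=admin" -a User-Agent "Mozilla/5.0" -i "Not Found" -g scan.log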
| mit | -1,045,662,705,322,039,200 | 42.697987 | 214 | 0.568807 | false | 4.059227 | false | false | false |
kazcw/NGCCCBase | coloredcoinlib/builder.py | 1 | 10522 | """ Color data builder objects"""
from logger import log
from explorer import get_spends
from toposort import toposorted
class ColorDataBuilder(object):
pass
class ColorDataBuilderManager(object):
"""Manages multiple color data builders, one per color"""
def __init__(self, colormap, blockchain_state,
cdstore, metastore, builder_class):
self.colormap = colormap
self.metastore = metastore
self.blockchain_state = blockchain_state
self.cdstore = cdstore
self.builders = {}
self.builder_class = builder_class
def get_color_def_map(self, color_id_set):
"""given a set of color_ids <color_id_set>, return
a dict of color_id to color_def.
"""
color_def_map = {}
for color_id in color_id_set:
color_def_map[color_id] = self.colormap.get_color_def(color_id)
return color_def_map
def get_builder(self, color_id):
if color_id in self.builders:
return self.builders[color_id]
colordef = self.colormap.get_color_def(color_id)
builder = self.builder_class(
self.cdstore, self.blockchain_state, colordef, self.metastore)
self.builders[color_id] = builder
return builder
def ensure_scanned_upto(self, color_id_set, blockhash):
""" Ensure color data is available up to a given block"""
for color_id in color_id_set:
if color_id == 0:
continue
builder = self.get_builder(color_id)
builder.ensure_scanned_upto(blockhash)
def scan_txhash(self, color_id_set, txhash):
for color_id in color_id_set:
if color_id == 0:
continue
builder = self.get_builder(color_id)
tx = self.blockchain_state.get_tx(txhash)
builder.scan_tx(tx)
class BasicColorDataBuilder(ColorDataBuilder):
""" Base class for color data builder algorithms"""
def __init__(self, cdstore, blockchain_state, colordef, metastore):
self.cdstore = cdstore
self.blockchain_state = blockchain_state
self.colordef = colordef
self.color_id = colordef.color_id
self.metastore = metastore
def scan_tx(self, tx):
""" Scan transaction to obtain color data for its outputs. """
in_colorvalues = []
empty = True
for inp in tx.inputs:
val = self.cdstore.get(
self.color_id, inp.prevout.hash, inp.prevout.n)
in_colorvalues.append(val)
if val:
empty = False
if empty and not self.colordef.is_special_tx(tx):
return
out_colorvalues = self.colordef.run_kernel(tx, in_colorvalues)
for o_index, val in enumerate(out_colorvalues):
if val:
self.cdstore.add(
self.color_id, tx.hash, o_index, val[0], val[1])
class FullScanColorDataBuilder(BasicColorDataBuilder):
"""Color data builder based on exhaustive blockchain scan,
for one specific color"""
def __init__(self, cdstore, blockchain_state, colordef, metastore):
super(FullScanColorDataBuilder, self).__init__(
cdstore, blockchain_state, colordef, metastore)
self.genesis_blockhash = self.blockchain_state.get_blockhash_at_height(
self.colordef.genesis['height'])
def scan_block(self, blockhash):
log("scan block %s", blockhash)
for tx in self.blockchain_state.iter_block_txs(blockhash):
self.scan_tx(tx)
self.metastore.set_as_scanned(self.color_id, blockhash)
def scan_blockchain(self, blocklist):
with self.cdstore.transaction():
for i, blockhash in enumerate(blocklist):
self.scan_block(blockhash)
if i % 25 == 0: # sync each 25 blocks
self.cdstore.sync()
def ensure_scanned_upto(self, final_blockhash):
if self.metastore.did_scan(self.color_id, final_blockhash):
return
# start from the final_blockhash and go backwards to build up
# the list of blocks to scan
blockhash = final_blockhash
genesis_height = self.blockchain_state.get_block_height(
self.genesis_blockhash)
blocklist = []
while not self.metastore.did_scan(self.color_id, blockhash):
log("recon block %s", blockhash)
blocklist.insert(0, blockhash)
blockhash, height = self.blockchain_state.get_previous_blockinfo(
blockhash)
if blockhash == self.genesis_blockhash:
break
# sanity check
if height < genesis_height:
break
self.scan_blockchain(blocklist)
class AidedColorDataBuilder(FullScanColorDataBuilder):
"""Color data builder based on following output spending transactions
from the color's genesis transaction output, for one specific color"""
def scan_blockchain(self, blocklist):
txo_queue = [self.colordef.genesis]
for blockhash in blocklist:
if self.metastore.did_scan(self.color_id, blockhash):
continue
# remove txs from this block from the queue
block_txo_queue = [txo for txo in txo_queue
if txo['blockhash'] == blockhash]
txo_queue = [txo for txo in txo_queue
if txo['blockhash'] != blockhash]
block_txos = {}
while block_txo_queue:
txo = block_txo_queue.pop()
if txo['txhash'] in block_txos:
# skip the ones we have already visited
continue
block_txos[txo['txhash']] = txo
spends = get_spends(txo['txhash'], self.blockchain_state)
for stxo in spends:
if stxo['blockhash'] == blockhash:
block_txo_queue.append(stxo)
else:
txo_queue.append(stxo)
block_txs = {}
for txhash in block_txos.keys():
block_txs[txhash] = self.blockchain_state.get_tx(txhash)
def get_prev_txs(tx):
"""all transactions from current block this transaction
directly depends on"""
prev_txs = []
for inp in tx.inputs:
if inp.prevout.hash in block_txs:
prev_txs.append(block_txs[inp.prevout.hash])
return prev_txs
sorted_block_txs = toposorted(block_txs.values(), get_prev_txs)
for tx in sorted_block_txs:
self.scan_tx(tx)
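# Note added for exposition (not in the original source): toposorted() orders
# the transactions of a block parent-first, so an input is always colored
# before the transaction that spends it is scanned. Roughly:
#   toposorted([tx_b_spends_a, tx_a], get_prev_txs) -> [tx_a, tx_b_spends_a]
# where tx_a and tx_b_spends_a are hypothetical same-block transactions.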
if __name__ == "__main__":
import blockchain
import store
import colormap as cm
import colordata
import datetime
start = datetime.datetime.now()
blockchain_state = blockchain.BlockchainState.from_url(None, True)
store_conn = store.DataStoreConnection("test-color.db")
cdstore = store.ColorDataStore(store_conn.conn)
metastore = store.ColorMetaStore(store_conn.conn)
colormap = cm.ColorMap(metastore)
cdbuilder = ColorDataBuilderManager(
colormap, blockchain_state, cdstore, metastore, AidedColorDataBuilder)
colordata = colordata.ThickColorData(cdbuilder, blockchain_state, cdstore)
blue_desc = "obc:" \
"b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e:" \
"0:46442"
red_desc = "obc:" \
"8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf:" \
"0:46444"
blue_id = colormap.resolve_color_desc(blue_desc)
red_id = colormap.resolve_color_desc(red_desc)
blue_set = set([blue_id])
red_set = set([red_id])
br_set = blue_set | red_set
print br_set, ("Blue", "Red")
g = colordata.get_colorvalues
print g(
br_set,
"b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e",
0), "== 1000 Blue (blue genesis TX)"
print g(
br_set,
"8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf",
0), "== 1000 Red (red genesis TX)"
print g(
br_set,
"b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e",
1), "== None (blue genesis TX, other output)"
print g(
br_set,
"8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf",
1), "== None (red genesis TX, other output)"
print g(
br_set,
'c1d8d2fb75da30b7b61e109e70599c0187906e7610fe6b12c58eecc3062d1da5',
0), "== Red"
print g(
br_set,
'36af9510f65204ec5532ee62d3785584dc42a964013f4d40cfb8b94d27b30aa1',
0), "== Red"
print g(
br_set,
'3a60b70d425405f3e45f9ed93c30ca62b2a97e692f305836af38a524997dd01d',
0), "== None (Random TX from blockchain)"
print g(
br_set,
'c1d8d2fb75da30b7b61e109e70599c0187906e7610fe6b12c58eecc3062d1da5',
0), "== Red"
print g(
br_set,
'8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf',
0), "== Red"
print g(
br_set,
'f50f29906ce306be3fc06df74cc6a4ee151053c2621af8f449b9f62d86cf0647',
0), "== Blue"
print g(
br_set,
'7e40d2f414558be60481cbb976e78f2589bc6a9f04f38836c18ed3d10510dce5',
0), "== Blue"
print g(
br_set,
'4b60bb49734d6e26d798d685f76a409a5360aeddfddcb48102a7c7ec07243498',
0), "== Red (Two-input merging TX)"
print g(
br_set,
'342f119db7f9989f594d0f27e37bb5d652a3093f170de928b9ab7eed410f0bd1',
0), "== None (Color mixing TX)"
print g(
br_set,
'bd34141daf5138f62723009666b013e2682ac75a4264f088e75dbd6083fa2dba',
0), "== Blue (complex chain TX)"
print g(
br_set,
'bd34141daf5138f62723009666b013e2682ac75a4264f088e75dbd6083fa2dba',
1), "== None (mining fee change output)"
print g(
br_set,
'36af9510f65204ec5532ee62d3785584dc42a964013f4d40cfb8b94d27b30aa1',
0), "== Red (complex chain TX)"
print g(
br_set,
'741a53bf925510b67dc0d69f33eb2ad92e0a284a3172d4e82e2a145707935b3e',
0), "== Red (complex chain TX)"
print g(
br_set,
'741a53bf925510b67dc0d69f33eb2ad92e0a284a3172d4e82e2a145707935b3e',
1), "== Red (complex chain TX)"
print "Finished in", datetime.datetime.now() - start
| mit | -6,262,354,011,642,335,000 | 35.408304 | 79 | 0.608154 | false | 3.314016 | false | false | false |
jamontes/plugin.video.latelelibre_fr | resources/lib/lutil.py | 1 | 8978 | # _*_ coding: utf-8 _*_
'''
lutil: library functions for KODI media add-ons.
Copyright (C) 2017 José Antonio Montes (jamontes)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Description:
These funtions are called from the main plugin module, aimed to ease
and simplify the plugin development process.
Release 0.1.10
'''
# First of all We must import all the libraries used for plugin development.
import re, urllib, urllib2
from datetime import date
debug_enable = False # The debug logs are disabled by default.
def local_log(message):
"""This function logs the messages into the main KODI log file. Called from the libraries module by other functions."""
if debug_enable:
print "%s" % message
log = local_log # Use local log function by default.
def set_debug_mode(debug_flag, func_log=local_log):
"""This function sets the debug_enable var to log everything if debug option is true."""
global debug_enable
global log
debug_enable = debug_flag in ("true", True)
log = func_log
def get_url_decoded(url):
"""This function returns the URL decoded."""
log('get_url_decoded URL: "%s"' % url)
return urllib.unquote_plus(url)
def get_url_encoded(url):
"""This function returns the URL encoded."""
log('get_url_encoded URL: "%s"' % url)
return urllib.quote_plus(url)
def get_parms_encoded(**kwars):
"""This function returns the params encoded to form an URL or data post."""
param_list = urllib.urlencode(kwars)
log('get_parms_encoded params: "%s"' % param_list)
return param_list
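# Illustrative example (not part of the original library):
#   get_parms_encoded(page='2', q='france 24') -> 'q=france+24&page=2'
# (urllib.urlencode chooses the ordering, so the two pairs may be swapped.)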
def carga_web(url):
"""This function loads the html code from a webserver and returns it into a string."""
log('carga_web URL: "%s"' % url)
MiReq = urllib2.Request(url) # We use the Request method because we need to add a header into the HTTP GET to the web site.
# We have to tell the web site we are using a real browser.
MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
MiConex.close() # We close the HTTP connection as we have all the info required.
return MiHTML
def carga_web_cookies(url, headers=''):
"""This function loads the html code from a webserver passsing the headers into the GET message
and returns it into a string along with the cookies collected from the website."""
log('carga_web_cookies URL: "%s"' % url)
MiReq = urllib2.Request(url) # We use the Request method because we need to add a header into the HTTP GET to the web site.
# We have to tell the web site we are using a real browser.
MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
for key in headers:
MiReq.add_header(key, headers[key])
MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
server_info = "%s" % MiConex.info()
my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
my_cookies = ''
pcookie = ''
for lcookie in my_cookie_pattern.findall(server_info):
if (lcookie != pcookie):
my_cookies = "%s %s;" % (my_cookies, lcookie)
pcookie = lcookie
MiConex.close() # We close the HTTP connection as we have all the info required.
log('carga_web Cookie: "%s"' % my_cookies)
return MiHTML, my_cookies
def send_post_data(url, headers='', data=''):
"""This function sends an HTTP POST request with theirr corresponding headers and data to a webserver
and returns the html code into a string along with the cookies collected from the website."""
log('send_post_data URL: "%s"' % url)
MiReq = urllib2.Request(url, data) # We use the Request method because we need to send a HTTP POST to the web site.
# We have to tell the web site we are using a real browser.
MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
for key in headers:
MiReq.add_header(key, headers[key])
MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
server_info = "%s" % MiConex.info()
my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
my_cookies = ''
pcookie = ''
for lcookie in my_cookie_pattern.findall(server_info):
if (lcookie != pcookie):
my_cookies = "%s %s;" % (my_cookies, lcookie)
pcookie = lcookie
MiConex.close() # We close the HTTP connection as we have all the info required.
log('send_post_data Cookie: "%s"' % my_cookies)
return MiHTML, my_cookies
def get_redirect(url):
"""This function returns the redirected URL from a 30X response received from the webserver."""
log('get_redirect URL: "%s"' % url)
MiConex = urllib.urlopen(url) # Opens the http connection to the URL.
MiHTML = MiConex.geturl() # Gets the URL redirect link and stores it into MiHTML.
MiConex.close() # Close the http connection as we get what we need.
return MiHTML
def find_multiple(text, pattern):
"""This function allows us to find multiples matches from a regexp into a string."""
pat_url_par = re.compile(pattern, re.DOTALL)
return pat_url_par.findall(text)
def find_first(text, pattern):
"""This function gets back the first match from a regexp into a string."""
pat_url_par = re.compile(pattern, re.DOTALL)
try:
return pat_url_par.findall(text)[0]
except:
return ""
def get_this_year():
"""This function gets the current year. Useful to fill the Year infolabel whenever it isn't available"""
return date.today().year
def get_clean_title(title):
"""This function returns the title or desc cleaned.
ref: http://www.thesauruslex.com/typo/eng/enghtml.htm"""
return title.\
replace('&aacute;', 'á').\
replace('&agrave;', 'à').\
replace('&eacute;', 'é').\
replace('&egrave;', 'è').\
replace('&iacute;', 'í').\
replace('&oacute;', 'ó').\
replace('&ograve;', 'ò').\
replace('&uacute;', 'ú').\
replace('&auml;', 'ä').\
replace('&iuml;', 'ï').\
replace('&ouml;', 'ö').\
replace('&uuml;', 'ü').\
replace('&szlig;', 'ß').\
replace('&ntilde;', 'ñ').\
replace('&ccedil;', 'ç').\
replace('&Aacute;', 'Á').\
replace('&Agrave;', 'À').\
replace('&Eacute;', 'É').\
replace('&Egrave;', 'È').\
replace('&Iacute;', 'Í').\
replace('&Oacute;', 'Ó').\
replace('&Ograve;', 'Ò').\
replace('&Uacute;', 'Ú').\
replace('&Auml;', 'Ä').\
replace('&Iuml;', 'Ï').\
replace('&Ouml;', 'Ö').\
replace('&Uuml;', 'Ü').\
replace('&Ntilde;', 'Ñ').\
replace('&Ccedil;', 'Ç').\
replace('&quot;', '"').\
replace('&#39;', "´").\
replace('&nbsp;', " ").\
replace('&ndash;', '').\
replace('&rsquo;', "'").\
replace('&ldquo;', '"').\
replace('&rdquo;', '"').\
replace('‟', "'").\
replace('&hellip;', '').\
replace('’', "´").\
replace('&laquo;', '"').\
replace('&raquo;', '"').\
replace('&iexcl;', '¡').\
replace('&iinte;', '¿').\
replace('&amp;', '&').\
replace('&nbsp;', '').\
replace('"', '"').\
replace('&ordf;', 'ª').\
replace('&ordm;', 'º').\
replace('&middot;', '·').\
replace('…', '...').\
replace('<br />', '').\
strip()
def get_clean_html_tags(html_text):
"""This function returns the text or desc cleaned from html tags."""
return re.sub(r'<[^>]*?>', '', html_text, count=0, flags=re.DOTALL)
| gpl-3.0 | -6,179,852,869,048,163,000 | 36.410042 | 139 | 0.608209 | false | 3.503527 | false | false | false |
jinzekid/codehub | python/day7/socket/socket_server.py | 1 | 1384 | # Author: Jason Lu
'''
import socket
server = socket.socket()
server.bind(('localhost', 6969)) # bind the listening port
server.listen(5) # listen with a backlog of 5
'''
'''
print("开始等电话了...")
conn, addr = server.accept() # 等电话打进来
print("电话来了...%s" %(conn))
'''
# English
'''
data = conn.recv(1024)
print("recv: %s" % (data))
'''
# Chinese
'''
data = conn.recv(1024)
print("recv: %s" % (data.decode()))
conn.send(data.upper())
'''
# Improved version
import os
import socket
server = socket.socket()
server.bind(('localhost', 6969)) # bind the listening port
server.listen(5) # listen with a backlog of 5
while True:
print("开始等电话了...")
conn, addr = server.accept() # 等电话打进来
print("电话来了,...来自: %s" %(conn))
while True:
data = conn.recv(1024)
if not data:
print("client已经断开...111")
break
if data.decode() == "":
print("client已经断开...222")
break
print("recv: %s" % (data.decode()))
#conn.send(data.upper())
# Execute a command (run whatever command the client sends over)
'''
str_cmd = data.decode() # convert to a string
res = os.popen(str_cmd).read()
conn.send(res.encode("utf-8"))
'''
# If sending a file instead, for example a video file
conn.close()
server.close()
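# Minimal matching client, as a hedged sketch (not part of the original file).
# The server above only prints what it receives (its conn.send call is
# commented out), so the client just connects and sends:
#
# import socket
# client = socket.socket()
# client.connect(('localhost', 6969))
# client.send("hello".encode("utf-8"))
# client.close()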
| gpl-3.0 | -5,954,584,469,373,423,000 | 15.628571 | 43 | 0.541237 | false | 2.225621 | false | false | false |
dblodgett-usgs/OWSLib | examples/wps-pml-script-1.py | 16 | 2404 | # Example script that performs a set of (small) live requests versus the live PML WPS service
from __future__ import absolute_import
from __future__ import print_function
from owslib.wps import WebProcessingService, monitorExecution
# instantiate WPS client
verbose = False
wps = WebProcessingService('http://rsg.pml.ac.uk/wps/generic.cgi', verbose=verbose, skip_caps=True)
# 1) GetCapabilities
wps.getcapabilities()
print('WPS Identification type: %s' % wps.identification.type)
print('WPS Identification title: %s' % wps.identification.title)
print('WPS Identification abstract: %s' % wps.identification.abstract)
for operation in wps.operations:
print('WPS Operation: %s' % operation.name)
for process in wps.processes:
print('WPS Process: identifier=%s title=%s' % (process.identifier, process.title))
# 2) DescribeProcess
process = wps.describeprocess('reprojectImage')
print('WPS Process: identifier=%s' % process.identifier)
print('WPS Process: title=%s' % process.title)
print('WPS Process: abstract=%s' % process.abstract)
for input in process.dataInputs:
print('Process input: identifier=%s, data type=%s, minOccurs=%d, maxOccurs=%d' % (input.identifier, input.dataType, input.minOccurs, input.maxOccurs))
for output in process.processOutputs:
print('Process output: identifier=%s, data type=%s' % (output.identifier, output.dataType))
# 3a) Execute
# GET request: http://rsg.pml.ac.uk/wps/generic.cgi?request=Execute&service=wps&version=1.0.0&identifier=reprojectImage&datainputs=[inputImage=http://rsg.pml.ac.uk/wps/testdata/elev_srtm_30m.img;outputSRS=EPSG:4326]&responsedocument=outputImage=@asreference=true
processid = "reprojectImage"
inputs = [ ("inputImage","http://rsg.pml.ac.uk/wps/testdata/elev_srtm_30m.img"),
("outputSRS", "EPSG:4326") ]
output = "outputImage"
execution = wps.execute(processid, inputs, output)
monitorExecution(execution)
# 3b) Execute
# GET request: http://rsg.pml.ac.uk/wps/generic.cgi?request=Execute&service=WPS&version=1.0.0&identifier=reprojectCoords&datainputs=[coords=http://rsg.pml.ac.uk/wps/testdata/coords.txt;outputSRS=EPSG:32630;inputSRS=EPSG:4326]
processid = "reprojectCoords"
inputs = [ ("coords","http://rsg.pml.ac.uk/wps/testdata/coords.txt"),
("outputSRS", "EPSG:32630"),
("inputSRS","EPSG:4326") ]
execution = wps.execute(processid, inputs)
monitorExecution(execution)
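# To retrieve results once execution has completed, the outputs can be listed
# and downloaded (a hedged sketch, not part of the original example; the local
# file name below is arbitrary):
# for output in execution.processOutputs:
#     print('Output: identifier=%s, reference=%s' % (output.identifier, output.reference))
# execution.getOutput('reprojected_coords.txt')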
| bsd-3-clause | 6,431,059,856,046,459,000 | 48.061224 | 262 | 0.74376 | false | 3.101935 | false | false | false |
rs2/pandas | pandas/core/arrays/period.py | 1 | 33033 | from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Type, Union
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Timedelta,
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
period as libperiod,
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import Tick, delta_to_tick
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
PeriodMixin,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._typing import AnyArrayLike
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
ensure_object,
is_datetime64_dtype,
is_dtype_equal,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
def _field_accessor(name: str, docstring=None):
def f(self):
base = self.freq._period_dtype_code
result = get_period_field_arr(name, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`period_array` to create new instances.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
period_array : Create a new PeriodArray.
PeriodIndex : Immutable Index for period data.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
# Names others delegate to us
_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"dayofyear",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
# --------------------------------------------------------------------
# Constructors
def __init__(self, values, freq=None, dtype=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._data, values.freq
values = np.array(values, dtype="int64", copy=copy)
self._data = values
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
self._dtype = PeriodDtype(freq)
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray":
# alias for PeriodArray.__init__
assertion_msg = "Should be numpy array of type i8"
assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
return cls(values, freq=freq, **kwargs)
@classmethod
def _from_sequence(
cls: Type["PeriodArray"],
scalars: Union[Sequence[Optional[Period]], AnyArrayLike],
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> "PeriodArray":
if dtype:
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
if copy:
periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(
cls, strings, dtype=None, copy=False
) -> "PeriodArray":
return cls._from_sequence(strings, dtype, copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray":
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
@classmethod
def _rebox_native(cls, value: int) -> np.int64:
return np.int64(value)
def _unbox_scalar(
self, value: Union[Period, NaTType], setitem: bool = False
) -> int:
if value is NaT:
return value.value
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=setitem)
return value.ordinal
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
if self.freqstr != other.freqstr:
raise raise_on_incompatible(self, other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self) -> PeriodDtype:
return self._dtype
# error: Read-only property cannot override read-write property
@property # type: ignore[misc]
def freq(self) -> BaseOffset:
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype=None) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
return ~self._isnan
# This will raise TypeError for non-object dtypes
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._data, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
"""
The week ordinal of the year.
""",
)
week = weekofyear
dayofweek = _field_accessor(
"weekday",
"""
The day of the week with Monday=0, Sunday=6.
""",
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"day_of_year",
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear")
days_in_month = _field_accessor(
"days_in_month",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self) -> np.ndarray:
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how="start")
@property
def end_time(self):
return self.to_timestamp(how="end")
def to_timestamp(self, freq=None, how="start"):
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod.validate_end_alias(how)
end = how == "E"
if end:
if freq == "B" or self.freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
freq = self._get_to_timestamp_base()
base = freq
else:
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
return DatetimeArray(new_data)._with_freq("infer")
# --------------------------------------------------------------------
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasnans:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
def _box_func(self, x) -> Union[Period, NaTType]:
return Period._from_ordinal(ordinal=x, freq=self.freq)
def asfreq(self, freq=None, how: str = "E") -> "PeriodArray":
"""
Convert the Period Array/Index to the specified frequency `freq`.
Parameters
----------
freq : str
A frequency.
how : str {'E', 'S'}
Whether the elements should be aligned to the end
or start within pa period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
Period Array/Index
Constructed with the new frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]', freq='A-DEC')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]', freq='M')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]', freq='M')
"""
how = libperiod.validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1 = self.freq._period_dtype_code
base2 = freq._period_dtype_code
asi8 = self.asi8
# self.freq.n can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + self.freq.n - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasnans:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed: bool = False):
if boxed:
return str
return "'{}'".format
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy: bool = True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self._dtype):
if not copy:
return self
elif copy:
return self.copy()
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
def searchsorted(self, value, side="left", sorter=None):
value = self._validate_searchsorted_value(value).view("M8[ns]")
# Cast to M8 to get datetime-like NaT placement
m8arr = self._ndarray.view("M8[ns]")
return m8arr.searchsorted(value, side=side, sorter=sorter)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasnans:
new_data[self._isnan] = NaT
return new_data
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = algos.checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any]
) -> "PeriodArray":
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
res_values[self._isnan] = iNaT
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other: BaseOffset):
assert not isinstance(other, Tick)
if other.base != self.freq.base:
raise raise_on_incompatible(self, other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
return super()._add_timedeltalike_scalar(other)
def _add_timedelta_arraylike(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise TypeError(
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
)
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
ordinals = self._addsub_int_array(delta, operator.add).asi8
return type(self)(ordinals, dtype=self.dtype)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
base_nanos = self.freq.base.nanos
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != TD64NS_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(TD64NS_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
raise raise_on_incompatible(self, other)
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
other_freq = right.freqstr
else:
other_freq = delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Union[Sequence[Optional[Period]], AnyArrayLike],
freq: Optional[Union[str, Tick]] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
<PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
data_dtype = getattr(data, "dtype", None)
if is_datetime64_dtype(data_dtype):
return PeriodArray._from_datetime64(data, freq)
if is_period_dtype(data_dtype):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
data = list(data)
data = np.asarray(data)
dtype: Optional[PeriodDtype]
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq
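# --- Illustrative example (not part of the original module) ---
# A small, hypothetical sketch of validate_dtype_freq: the dtype's freq is used
# when freq is None, a consistent freq is returned unchanged, and a mismatch
# raises IncompatibleFrequency.
def _example_validate_dtype_freq():
    dtype = PeriodDtype("D")
    inferred = validate_dtype_freq(dtype, None)  # freq taken from the dtype
    validate_dtype_freq(dtype, "D")              # consistent -> returned as an offset
    try:
        validate_dtype_freq(dtype, "M")          # mismatch -> IncompatibleFrequency
    except IncompatibleFrequency:
        pass
    return inferred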
def dt64arr_to_periodarr(data, freq, tz=None):
"""
    Convert a datetime-like array to Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype("M8[ns]"):
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndexClass):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
freq = Period._maybe_convert_freq(freq)
if isinstance(data, (ABCIndexClass, ABCSeries)):
data = data._values
base = freq._period_dtype_code
return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq
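# --- Illustrative example (not part of the original module) ---
# Hypothetical sketch of dt64arr_to_periodarr on a bare datetime64[ns] ndarray:
# with a plain ndarray the freq argument must be supplied, since there is no
# .freq attribute to fall back on.
def _example_dt64arr_to_periodarr():
    stamps = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
    ordinals, freq = dt64arr_to_periodarr(stamps, "D")
    return ordinals, freq  # int64 ordinals plus the Day offset that was used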
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is not None:
freq = to_offset(freq)
mult = freq.n
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
if start is NaT or end is NaT:
raise ValueError("start and end must not be NaT")
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError("Could not infer freq from start/end")
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
)
else:
data = np.arange(
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(
year=None,
month=None,
quarter=None,
day=None,
hour=None,
minute=None,
second=None,
freq=None,
):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = to_offset("Q")
base = FreqGroup.FR_QTR
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
if base != FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = libperiod.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError("Mismatched Period array lengths")
elif length is None:
length = len(x)
arrays = [
np.asarray(x)
if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length)
for x in fields
]
return arrays
| bsd-3-clause | -2,622,719,645,001,006,600 | 29.389144 | 88 | 0.564587 | false | 4.033333 | false | false | false |
atiberghien/makerscience-server | makerscience_admin/middleware.py | 1 | 2620 | from django.conf import settings
from django.http import HttpResponse
from django.core.urlresolvers import resolve
from .models import PageViews
from ipware.ip import get_real_ip, get_ip
import json
class PageViewsMiddleware(object):
def process_response(self, request, response):
if request.method == 'GET':
try:
view, args, kwargs = resolve(request.path)
resource_name = kwargs.get("resource_name", None)
except:
resource_name = None
if resource_name :
if resource_name in settings.PAGEVIEWS_FILTER:
pageviews_counter = 0
try:
content = json.loads(response.content)
except:
return response
if 'objects' in content and len(content['objects']) == 1:
resource_uri = content['objects'][0]['resource_uri']
created = False
                        autorepare = True  # sometimes duplicates can be created, don't know why
while autorepare:
autorepare = False
if request.user.is_authenticated() :
try:
pv, created = PageViews.objects.get_or_create(client=request.user.username, resource_uri=resource_uri)
except PageViews.MultipleObjectsReturned:
pv = PageViews.objects.filter(client=request.user.username, resource_uri=resource_uri)
PageViews.objects.exclude(id=pv[0].id).delete()
autorepare = True
else:
try:
pv, created = PageViews.objects.get_or_create(client=get_ip(request), resource_uri=resource_uri)
except PageViews.MultipleObjectsReturned:
pv = PageViews.objects.filter(client=get_ip(request), resource_uri=resource_uri)
PageViews.objects.exclude(id=pv[0].id).delete()
autorepare = True
pageviews_counter = PageViews.objects.filter(resource_uri=resource_uri).count()
content['objects'][0]['pageviews_counter'] = pageviews_counter
return HttpResponse(json.dumps(content), content_type="application/json")
return response
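# --- Illustrative usage note (not part of the original module) ---
# A hypothetical settings.py fragment for enabling this middleware. The dotted
# path and the resource names below are assumptions; PAGEVIEWS_FILTER must list
# the tastypie resource_name values whose detail responses should be counted.
#
#   MIDDLEWARE_CLASSES = (
#       # ... the usual Django middleware ...
#       'makerscience_admin.middleware.PageViewsMiddleware',
#   )
#   PAGEVIEWS_FILTER = ['makerscienceproject', 'makerscienceresource']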
| agpl-3.0 | 5,691,684,768,779,739,000 | 47.518519 | 138 | 0.509542 | false | 5.282258 | false | false | false |
jeffcharles/Open-Source-at-Laurier-Website | wluopensource/osl_voting/views.py | 1 | 1390 | from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.utils import simplejson
from voting.models import Vote
def get_vote_box_template(request, object_id, model, vote_url_name,
vote_box_url_name):
lookup_kwargs = {}
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
try:
obj = model._default_manager.get(**lookup_kwargs)
except ObjectDoesNotExist:
return HttpResponse(simplejson.dumps(dict(success=False,
error_message='No %s found for %s.' % (model._meta.verbose_name,
lookup_kwargs))))
vote = Vote.objects.get_for_user(obj, request.user)
upvote_url = \
reverse(vote_url_name, kwargs={'object_id': object_id,
'direction': 'up'})
clearvote_url = \
reverse(vote_url_name, kwargs={'object_id': object_id,
'direction': 'clear'})
downvote_url = \
reverse(vote_url_name, kwargs={'object_id': object_id,
'direction': 'down'})
vote_box_url = \
reverse(vote_box_url_name, kwargs={'object_id': object_id})
return render_to_response('voting/default_vote_box.html',
{'vote': vote, 'upvote_url': upvote_url, 'clearvote_url': clearvote_url,
'downvote_url': downvote_url, 'vote_box_ajax_url': vote_box_url})
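# --- Illustrative example (not part of the original module) ---
# A hypothetical wrapper view showing how get_vote_box_template is meant to be
# called: a model plus the names of two URL patterns are passed in, and those
# named patterns must accept the kwargs used in the reverse() calls above
# (object_id and direction for the vote URL, object_id for the vote-box URL).
# "Article", "article_vote" and "article_vote_box" are made-up names.
def article_vote_box(request, object_id):
    from articles.models import Article  # hypothetical model import
    return get_vote_box_template(
        request, object_id, Article,
        vote_url_name='article_vote',
        vote_box_url_name='article_vote_box')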
| bsd-3-clause | -4,559,534,169,786,427,400 | 37.611111 | 80 | 0.645324 | false | 3.518987 | false | false | false |
blabla1337/skf-flask | skf/api/projects/endpoints/project_update.py | 1 | 1283 |
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.projects.business import update_project
from skf.api.projects.serializers import project_update, message
from skf.api.projects.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_alpha, val_alpha_num, val_alpha_num_special
ns = api.namespace('project', description='Operations related to projects')
@ns.route('/update/<int:project_id>')
@api.doc(params={'project_id': 'The project id'})
@api.response(404, 'Validation error', message)
class KBItemUpdate(Resource):
@api.expect(authorization, project_update)
@api.marshal_with(message, 'Success')
@api.response(400, 'No results found', message)
def put(self, project_id):
"""
Update a project item.
* Privileges required: **edit**
"""
data = request.json
val_num(project_id)
val_alpha_num_special(data.get('name'))
val_alpha_num_special(data.get('description'))
val_alpha_num_special(data.get('version'))
validate_privilege(self, 'edit')
result = update_project(project_id, data)
return result, 200, security_headers()
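# --- Illustrative example (not part of the original module) ---
# A hypothetical client-side sketch of calling this endpoint with the requests
# library. The base URL, port, token value and project id are assumptions; the
# JSON keys mirror the fields validated in put() above, and the Authorization
# header matches the `authorization` parser this resource expects.
def _example_update_project_request():
    import requests  # assumed to be available on the client side
    return requests.put(
        "http://localhost:8888/api/project/update/1",   # assumed base URL/port
        headers={"Authorization": "<JWT access token>"},
        json={
            "name": "demo project",
            "description": "updated description",
            "version": "1.1",
        },
    )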
| agpl-3.0 | 8,402,734,135,304,279,000 | 36.735294 | 90 | 0.700701 | false | 3.665714 | false | false | false |
rspavel/spack | var/spack/repos/builtin/packages/singularity/package.py | 1 | 7578 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import llnl.util.tty as tty
import os
import shutil
class Singularity(MakefilePackage):
'''Singularity is a container technology focused on building portable
encapsulated environments to support "Mobility of Compute" For older
versions of Singularity (pre 3.0) you should use singularity-legacy,
which has a different install base (Autotools).
Needs post-install chmod/chown steps to enable full functionality.
See package definition or `spack-build-out.txt` build log for details,
e.g.
tail -15 $(spack location -i singularity)/.spack/spack-build-out.txt
'''
homepage = "https://www.sylabs.io/singularity/"
url = "https://github.com/sylabs/singularity/releases/download/v3.6.1/singularity-3.6.1.tar.gz"
git = "https://github.com/sylabs/singularity.git"
maintainers = ['alalazo']
version('master', branch='master')
version('3.6.1', sha256='6cac56106ee7f209150aaee9f8788d03b58796af1b767245d343f0b8a691121c')
version('3.5.3', sha256='0c76f1e3808bf4c10e92b17150314b2b816be79f8101be448a6e9d7a96c9e486')
version('3.5.2', sha256='f9c21e289377a4c40ed7a78a0c95e1ff416dec202ed49a6c616dd2c37700eab8')
version('3.4.1', sha256='638fd7cc5ab2a20e779b8768f73baf21909148339d6c4edf6ff61349c53a70c2')
version('3.4.0', sha256='eafb27f1ffbed427922ebe2b5b95d1c9c09bfeb897518867444fe230e3e35e41')
version('3.3.0', sha256='070530a472e7e78492f1f142c8d4b77c64de4626c4973b0589f0d18e1fcf5b4f')
version('3.2.1', sha256='d4388fb5f7e0083f0c344354c9ad3b5b823e2f3f27980e56efa7785140c9b616')
version('3.1.1', sha256='7f0df46458d8894ba0c2071b0848895304ae6b1137d3d4630f1600ed8eddf1a4')
variant('suid', default=True, description='install SUID binary')
variant('network', default=True, description='install network plugins')
depends_on('pkgconfig', type='build')
depends_on('go')
depends_on('libuuid')
depends_on('libgpg-error')
depends_on('libseccomp')
depends_on('squashfs', type='run')
depends_on('git', when='@develop') # mconfig uses it for version info
depends_on('shadow', type='run', when='@3.3:')
depends_on('cryptsetup', type=('build', 'run'), when='@3.4:')
patch('singularity_v3.4.0_remove_root_check.patch', level=0, when='@3.4.0:3.4.1')
# Go has novel ideas about how projects should be organized.
# We'll point GOPATH at the stage dir, and move the unpacked src
# tree into the proper subdir in our overridden do_stage below.
@property
def gopath(self):
return self.stage.path
@property
def sylabs_gopath_dir(self):
return join_path(self.gopath, 'src/github.com/sylabs/')
@property
def singularity_gopath_dir(self):
return join_path(self.sylabs_gopath_dir, 'singularity')
# Unpack the tarball as usual, then move the src dir into
# its home within GOPATH.
def do_stage(self, mirror_only=False):
super(Singularity, self).do_stage(mirror_only)
if not os.path.exists(self.singularity_gopath_dir):
# Move the expanded source to its destination
tty.debug("Moving {0} to {1}".format(
self.stage.source_path, self.singularity_gopath_dir))
shutil.move(self.stage.source_path, self.singularity_gopath_dir)
# The build process still needs access to the source path,
# so create a symlink.
force_symlink(self.singularity_gopath_dir, self.stage.source_path)
# MakefilePackage's stages use this via working_dir()
@property
def build_directory(self):
return self.singularity_gopath_dir
# Hijack the edit stage to run mconfig.
def edit(self, spec, prefix):
with working_dir(self.build_directory):
confstring = './mconfig --prefix=%s' % prefix
if '~suid' in spec:
confstring += ' --without-suid'
if '~network' in spec:
confstring += ' --without-network'
configure = Executable(confstring)
configure()
# Set these for use by MakefilePackage's default build/install methods.
build_targets = ['-C', 'builddir', 'parallel=False']
install_targets = ['install', '-C', 'builddir', 'parallel=False']
def setup_build_environment(self, env):
# Point GOPATH at the top of the staging dir for the build step.
env.prepend_path('GOPATH', self.gopath)
# `singularity` has a fixed path where it will look for
# mksquashfs. If it lives somewhere else you need to specify the
# full path in the config file. This bit uses filter_file to edit
# the config file, uncommenting and setting the mksquashfs path.
@run_after('install')
def fix_mksquashfs_path(self):
prefix = self.spec.prefix
squash_path = join_path(self.spec['squashfs'].prefix.bin, 'mksquashfs')
filter_file(r'^# mksquashfs path =',
'mksquashfs path = {0}'.format(squash_path),
join_path(prefix.etc, 'singularity', 'singularity.conf'))
#
# Assemble a script that fixes the ownership and permissions of several
# key files, install it, and tty.warn() the user.
# HEADSUP: https://github.com/spack/spack/pull/10412.
#
def perm_script(self):
return 'spack_perms_fix.sh'
def perm_script_tmpl(self):
return "{0}.j2".format(self.perm_script())
def perm_script_path(self):
return join_path(self.spec.prefix.bin, self.perm_script())
def _build_script(self, filename, variable_data):
with open(filename, 'w') as f:
env = spack.tengine.make_environment(dirs=self.package_dir)
t = env.get_template(self.perm_script_tmpl())
f.write(t.render(variable_data))
@run_after('install')
def build_perms_script(self):
if self.spec.satisfies('+suid'):
script = self.perm_script_path()
chown_files = ['libexec/singularity/bin/starter-suid',
'etc/singularity/singularity.conf',
'etc/singularity/capability.json',
'etc/singularity/ecl.toml']
setuid_files = ['libexec/singularity/bin/starter-suid']
self._build_script(script, {'prefix': self.spec.prefix,
'chown_files': chown_files,
'setuid_files': setuid_files})
chmod = which('chmod')
chmod('555', script)
# Until tty output works better from build steps, this ends up in
# the build log. See https://github.com/spack/spack/pull/10412.
@run_after('install')
def caveats(self):
if self.spec.satisfies('+suid'):
tty.warn("""
For full functionality, you'll need to chown and chmod some files
after installing the package. This has security implications.
For details, see:
https://sylabs.io/guides/2.6/admin-guide/security.html
https://sylabs.io/guides/3.2/admin-guide/admin_quickstart.html#singularity-security
We've installed a script that will make the necessary changes;
read through it and then execute it as root (e.g. via sudo).
The script is named:
{0}
""".format(self.perm_script_path()))
| lgpl-2.1 | -8,852,560,500,763,001,000 | 42.302857 | 104 | 0.650699 | false | 3.347173 | true | false | false |
jclgoodwin/bustimes.org.uk | timetables/txc.py | 1 | 27262 | """Represent TransXChange concepts, and generate a matrix timetable from
TransXChange documents
"""
import re
import xml.etree.cElementTree as ET
import calendar
import datetime
import ciso8601
import logging
from psycopg2.extras import DateRange as PDateRange
from django.utils.text import slugify
from django.utils.dateparse import parse_duration
from chardet.universaldetector import UniversalDetector
from titlecase import titlecase
logger = logging.getLogger(__name__)
NS = {
'txc': 'http://www.transxchange.org.uk/'
}
# A safe date, far from any daylight savings changes or leap seconds
DESCRIPTION_REGEX = re.compile(r'.+,([^ ].+)$')
WEEKDAYS = {day: i for i, day in enumerate(calendar.day_name)}
def sanitize_description_part(part):
"""Given an oddly formatted part like 'Bus Station bay 5,Blyth',
return a shorter, more normal version like 'Blyth'.
"""
sanitized_part = DESCRIPTION_REGEX.match(part.strip())
return sanitized_part.group(1) if sanitized_part is not None else part
def correct_description(description):
"""Given an description, return a version with any typos pedantically corrected."""
for old, new in (
('Stitians', 'Stithians'),
('Kings Lynn', "King's Lynn"),
('Wells - Next - The - Sea', 'Wells-next-the-Sea'),
('Wells next the Sea', 'Wells-next-the-Sea'),
('Baasingstoke', 'Basingstoke'),
('Liskerard', 'Liskeard'),
('Tauton', 'Taunton'),
('City Centre,st Stephens Street', 'Norwich'),
('Charlton Horethore', 'Charlton Horethorne'),
('Camleford', 'Camelford'),
('Greenstead Green', 'Greensted Green'),
('Tinagel', 'Tintagel'),
('Plymouh City Cerntre', 'Plymouth City Centre'),
('Winterbourn ', 'Winterbourne'),
('Exetedr', 'Exeter'),
('- ', ' - '),
(' -', ' - '),
(' ', ' '),
):
description = description.replace(old, new)
return description
class Stop:
"""A TransXChange StopPoint."""
stop = None
locality = None
def __init__(self, element):
if element:
self.atco_code = element.find('txc:StopPointRef', NS)
if self.atco_code is None:
self.atco_code = element.find('txc:AtcoCode', NS)
if self.atco_code is not None:
self.atco_code = self.atco_code.text or ''
self.common_name = element.find('txc:CommonName', NS)
self.locality = element.find('txc:LocalityName', NS)
if self.common_name is not None:
self.common_name = self.common_name.text
if self.locality is not None:
self.locality = self.locality.text
def __str__(self):
if not self.locality or self.locality in self.common_name:
return self.common_name or self.atco_code
return '%s %s' % (self.locality, self.common_name)
class JourneyPattern:
"""A collection of JourneyPatternSections, in order."""
def __init__(self, element, sections):
self.id = element.attrib.get('id')
# self.journeys = []
self.sections = [
sections[section_element.text]
for section_element in element.findall('txc:JourneyPatternSectionRefs', NS)
if section_element.text in sections
]
self.direction = element.find('txc:Direction', NS)
if self.direction is not None:
self.direction = self.direction.text
def get_timinglinks(self):
for section in self.sections:
for timinglink in section.timinglinks:
yield timinglink
class JourneyPatternSection:
"""A collection of JourneyPatternStopUsages, in order."""
def __init__(self, element, stops):
self.id = element.get('id')
self.timinglinks = [
JourneyPatternTimingLink(timinglink_element, stops)
for timinglink_element in element
]
class JourneyPatternStopUsage:
"""Either a 'From' or 'To' element in TransXChange."""
def __init__(self, element, stops):
self.activity = element.find('txc:Activity', NS)
if self.activity is not None:
self.activity = self.activity.text
self.sequencenumber = element.get('SequenceNumber')
if self.sequencenumber is not None:
self.sequencenumber = int(self.sequencenumber)
self.stop = stops.get(element.find('txc:StopPointRef', NS).text)
if self.stop is None:
self.stop = Stop(element)
self.timingstatus = element.find('txc:TimingStatus', NS)
if self.timingstatus is not None:
self.timingstatus = self.timingstatus.text
self.wait_time = element.find('txc:WaitTime', NS)
if self.wait_time is not None:
self.wait_time = parse_duration(self.wait_time.text)
if self.wait_time.total_seconds() > 10000:
# bad data detected
print(self.wait_time)
self.wait_time = None
self.row = None
self.parent = None
class JourneyPatternTimingLink:
def __init__(self, element, stops):
self.origin = JourneyPatternStopUsage(element.find('txc:From', NS), stops)
self.destination = JourneyPatternStopUsage(element.find('txc:To', NS), stops)
self.origin.parent = self.destination.parent = self
self.runtime = parse_duration(element.find('txc:RunTime', NS).text)
self.id = element.get('id')
def get_deadruns(journey_element):
"""Given a VehicleJourney element, return a tuple."""
start_element = journey_element.find('txc:StartDeadRun', NS)
end_element = journey_element.find('txc:EndDeadRun', NS)
return (get_deadrun_ref(start_element), get_deadrun_ref(end_element))
def get_deadrun_ref(deadrun_element):
"""Given a StartDeadRun or EndDeadRun element with a ShortWorking,
    return the ID of a JourneyPatternTimingLink.
"""
if deadrun_element is not None:
element = deadrun_element.find('txc:ShortWorking/txc:JourneyPatternTimingLinkRef', NS)
if element is not None:
return element.text
# ignore PositioningLinks
class VehicleJourneyTimingLink:
def __init__(self, element):
self.id = element.attrib.get('id')
self.journeypatterntiminglinkref = element.find('txc:JourneyPatternTimingLinkRef', NS).text
self.run_time = element.find('txc:RunTime', NS)
if self.run_time is not None:
self.run_time = parse_duration(self.run_time.text)
self.from_wait_time = element.find('txc:From/txc:WaitTime', NS)
if self.from_wait_time is not None:
self.from_wait_time = parse_duration(self.from_wait_time.text)
self.to_wait_time = element.find('txc:To/txc:WaitTime', NS)
if self.to_wait_time is not None:
self.to_wait_time = parse_duration(self.to_wait_time.text)
class VehicleJourney:
"""A journey represents a scheduled journey that happens at most once per
day. A sort of "instance" of a JourneyPattern, made distinct by having its
own start time (and possibly operating profile and dead run).
"""
operating_profile = None
journey_pattern = None
journey_ref = None
def __str__(self):
return str(self.departure_time)
def __init__(self, element, services, serviced_organisations):
self.code = element.find('txc:VehicleJourneyCode', NS).text
self.private_code = element.find('txc:PrivateCode', NS)
if self.private_code is not None:
self.private_code = self.private_code.text
self.service_ref = element.find('txc:ServiceRef', NS).text
self.line_ref = element.find('txc:LineRef', NS).text
journeypatternref_element = element.find('txc:JourneyPatternRef', NS)
if journeypatternref_element is not None:
self.journey_pattern = services[self.service_ref].journey_patterns.get(journeypatternref_element.text)
else:
# Journey has no direct reference to a JourneyPattern.
# Instead, it has a reference to another journey...
self.journey_ref = element.find('txc:VehicleJourneyRef', NS).text
operatingprofile_element = element.find('txc:OperatingProfile', NS)
if operatingprofile_element is not None:
self.operating_profile = OperatingProfile(operatingprofile_element, serviced_organisations)
departure_time = datetime.datetime.strptime(
element.find('txc:DepartureTime', NS).text, '%H:%M:%S'
)
self.departure_time = datetime.timedelta(hours=departure_time.hour,
minutes=departure_time.minute,
seconds=departure_time.second)
self.start_deadrun, self.end_deadrun = get_deadruns(element)
self.operator = element.find('txc:OperatorRef', NS)
if self.operator is not None:
self.operator = self.operator.text
sequencenumber = element.get('SequenceNumber')
self.sequencenumber = sequencenumber and int(sequencenumber)
timing_links = element.findall('txc:VehicleJourneyTimingLink', NS)
self.timing_links = [VehicleJourneyTimingLink(timing_link) for timing_link in timing_links]
note_elements = element.findall('txc:Note', NS)
if note_elements is not None:
self.notes = {
note_element.find('txc:NoteCode', NS).text: note_element.find('txc:NoteText', NS).text
for note_element in note_elements
}
def get_timinglinks(self):
pattern_links = self.journey_pattern.get_timinglinks()
if self.timing_links:
timing_links = iter(self.timing_links)
journey_link = next(timing_links)
for link in pattern_links:
if link.id == journey_link.journeypatterntiminglinkref:
yield link, journey_link
try:
journey_link = next(timing_links)
except StopIteration:
pass
else:
yield link, None
else:
for link in pattern_links:
yield link, None
def get_times(self):
stopusage = None
time = self.departure_time
deadrun = self.start_deadrun is not None
deadrun_next = False
wait_time = None
for timinglink, journey_timinglink in self.get_timinglinks():
stopusage = timinglink.origin
if deadrun and self.start_deadrun == timinglink.id:
deadrun = False # end of dead run
if journey_timinglink and journey_timinglink.from_wait_time is not None:
wait_time = journey_timinglink.from_wait_time
else:
wait_time = stopusage.wait_time or wait_time
if wait_time:
next_time = time + wait_time
if not deadrun:
yield Cell(stopusage, time, next_time)
time = next_time
elif not deadrun:
yield Cell(stopusage, time, time)
if journey_timinglink and journey_timinglink.run_time is not None:
run_time = journey_timinglink.run_time
else:
run_time = timinglink.runtime
if run_time:
time += run_time
if deadrun_next:
deadrun = True
deadrun_next = False
elif self.end_deadrun == timinglink.id:
deadrun_next = True # start of dead run
stopusage = timinglink.destination
if journey_timinglink and journey_timinglink.to_wait_time is not None:
wait_time = journey_timinglink.to_wait_time
else:
wait_time = stopusage.wait_time
if not deadrun:
yield Cell(timinglink.destination, time, time)
class ServicedOrganisation:
def __init__(self, element):
self.code = element.find('txc:OrganisationCode', NS).text
self.name = element.find('txc:Name', NS)
if self.name is not None:
self.name = self.name.text
working_days_element = element.find('txc:WorkingDays', NS)
if working_days_element is not None:
self.working_days = [DateRange(e) for e in working_days_element]
else:
self.working_days = []
holidays_element = element.find('txc:Holidays', NS)
if holidays_element is not None:
self.holidays = [DateRange(e) for e in holidays_element]
else:
self.holidays = []
class ServicedOrganisationDayType:
def __init__(self, element, servicedorgs):
self.nonoperation_holidays = None
self.nonoperation_workingdays = None
self.operation_holidays = None
self.operation_workingdays = None
# Days of non-operation:
noop_element = element.find('txc:DaysOfNonOperation', NS)
if noop_element is not None:
noop_hols_element = noop_element.find('txc:Holidays/txc:ServicedOrganisationRef', NS)
noop_workingdays_element = noop_element.find('txc:WorkingDays/txc:ServicedOrganisationRef', NS)
if noop_hols_element is not None:
self.nonoperation_holidays = servicedorgs[noop_hols_element.text]
if noop_workingdays_element is not None:
self.nonoperation_workingdays = servicedorgs[noop_workingdays_element.text]
# Days of operation:
op_element = element.find('txc:DaysOfOperation', NS)
if op_element is not None:
op_hols_element = op_element.find('txc:Holidays/txc:ServicedOrganisationRef', NS)
op_workingdays_element = op_element.find('txc:WorkingDays/txc:ServicedOrganisationRef', NS)
if op_hols_element is not None:
self.operation_holidays = servicedorgs[op_hols_element.text]
if op_workingdays_element is not None:
self.operation_workingdays = servicedorgs[op_workingdays_element.text]
class DayOfWeek:
def __init__(self, day):
if isinstance(day, int):
self.day = day
else:
self.day = WEEKDAYS[day]
def __eq__(self, other):
if type(other) == int:
return self.day == other
return self.day == other.day
def __repr__(self):
return calendar.day_name[self.day]
class OperatingProfile:
servicedorganisation = None
nonoperation_days = ()
operation_days = ()
def __init__(self, element, servicedorgs):
element = element
week_days_element = element.find('txc:RegularDayType/txc:DaysOfWeek', NS)
self.regular_days = []
if week_days_element is not None:
for day in [e.tag[33:] for e in week_days_element]:
if 'To' in day:
day_range_bounds = [WEEKDAYS[i] for i in day.split('To')]
day_range = range(day_range_bounds[0], day_range_bounds[1] + 1)
self.regular_days += [DayOfWeek(i) for i in day_range]
elif day == 'Weekend':
self.regular_days += [DayOfWeek(5), DayOfWeek(6)]
elif day[:3] == 'Not':
print(day)
else:
self.regular_days.append(DayOfWeek(day))
# Special Days:
special_days_element = element.find('txc:SpecialDaysOperation', NS)
if special_days_element is not None:
nonoperation_days_element = special_days_element.find('txc:DaysOfNonOperation', NS)
if nonoperation_days_element is not None:
self.nonoperation_days = list(map(DateRange, nonoperation_days_element.findall('txc:DateRange', NS)))
operation_days_element = special_days_element.find('txc:DaysOfOperation', NS)
if operation_days_element is not None:
self.operation_days = list(map(DateRange, operation_days_element.findall('txc:DateRange', NS)))
# Serviced Organisation:
servicedorg_days_element = element.find('txc:ServicedOrganisationDayType', NS)
if servicedorg_days_element is not None:
self.servicedorganisation = ServicedOrganisationDayType(servicedorg_days_element, servicedorgs)
# Bank Holidays
bank_holidays_operation_element = element.find('txc:BankHolidayOperation/txc:DaysOfOperation', NS)
bank_holidays_nonoperation_element = element.find('txc:BankHolidayOperation/txc:DaysOfNonOperation', NS)
if bank_holidays_operation_element is not None:
self.operation_bank_holidays = [e.tag[33:] for e in bank_holidays_operation_element]
else:
self.operation_bank_holidays = []
if bank_holidays_nonoperation_element is not None:
self.nonoperation_bank_holidays = [e.tag[33:] for e in bank_holidays_nonoperation_element]
else:
self.nonoperation_bank_holidays = []
class DateRange:
def __init__(self, element):
self.start = ciso8601.parse_datetime(element.find('txc:StartDate', NS).text).date()
self.end = element.find('txc:EndDate', NS)
if self.end is not None:
self.end = self.end.text
if self.end:
self.end = ciso8601.parse_datetime(self.end).date()
def __str__(self):
if self.start == self.end:
return self.start.strftime('%-d %B %Y')
else:
return '%s to %s' % (self.start, self.end)
def contains(self, date):
return self.start <= date and (not self.end or self.end >= date)
def dates(self):
return PDateRange(self.start, self.end, '[]')
class OperatingPeriod(DateRange):
def __str__(self):
if self.start == self.end:
return self.start.strftime('on %-d %B %Y')
today = datetime.date.today()
if self.start > today:
if self.end and (self.end - self.start).days < 14:
start_format = '%-d'
if self.start.month != self.end.month:
start_format += ' %B'
if self.start.year != self.end.year:
start_format += ' %Y'
return 'from {} to {}'.format(
self.start.strftime(start_format),
self.end.strftime('%-d %B %Y')
)
return self.start.strftime('from %-d %B %Y')
# The end date is often bogus,
# but show it if the period seems short enough to be relevant
if self.end and (self.end - self.start).days < 7:
return self.end.strftime('until %-d %B %Y')
return ''
class Service:
description = None
description_parts = None
via = None
def set_description(self, description):
if description.isupper():
description = titlecase(description)
elif ' via ' in description and description[:description.find(' via ')].isupper():
parts = description.split(' via ')
parts[0] = titlecase(parts[0])
description = ' via '.join(parts)
self.description = correct_description(description)
self.via = None
if ' - ' in self.description:
parts = self.description.split(' - ')
elif ' to ' in self.description:
parts = self.description.split(' to ')
else:
parts = [self.description]
self.description_parts = [sanitize_description_part(part) for part in parts]
if ' via ' in self.description_parts[-1]:
self.description_parts[-1], self.via = self.description_parts[-1].split(' via ', 1)
def __init__(self, element, serviced_organisations, journey_pattern_sections):
self.element = element
mode_element = element.find('txc:Mode', NS)
if mode_element is not None:
self.mode = mode_element.text
else:
self.mode = ''
self.operator = element.find('txc:RegisteredOperatorRef', NS)
if self.operator is not None:
self.operator = self.operator.text
operatingprofile_element = element.find('txc:OperatingProfile', NS)
if operatingprofile_element is not None:
self.operating_profile = OperatingProfile(operatingprofile_element, serviced_organisations)
self.operating_period = OperatingPeriod(element.find('txc:OperatingPeriod', NS))
self.service_code = element.find('txc:ServiceCode', NS).text
description_element = element.find('txc:Description', NS)
if description_element is not None:
self.set_description(description_element.text)
self.origin = element.find('txc:StandardService/txc:Origin', NS).text
self.destination = element.find('txc:StandardService/txc:Destination', NS).text
self.vias = element.find('txc:StandardService/txc:Vias', NS)
if self.vias:
self.vias = [via.text for via in self.vias]
self.journey_patterns = {
journey_pattern.id: journey_pattern for journey_pattern in (
JourneyPattern(journey_pattern, journey_pattern_sections)
for journey_pattern in element.findall('txc:StandardService/txc:JourneyPattern', NS)
) if journey_pattern.sections
}
class TransXChange:
def get_journeys(self, service_code, line_id):
return [journey for journey in self.journeys
if journey.service_ref == service_code and journey.line_ref == line_id]
def __get_journeys(self, journeys_element, serviced_organisations):
journeys = {
journey.code: journey for journey in (
VehicleJourney(element, self.services, serviced_organisations)
for element in journeys_element
)
}
# Some Journeys do not have a direct reference to a JourneyPattern,
# but rather a reference to another Journey which has a reference to a JourneyPattern
for journey in iter(journeys.values()):
if journey.journey_ref and not journey.journey_pattern:
journey.journey_pattern = journeys[journey.journey_ref].journey_pattern
return [journey for journey in journeys.values() if journey.journey_pattern]
def __init__(self, open_file):
try:
detector = UniversalDetector()
for line in open_file:
detector.feed(line)
if detector.done:
break
detector.close()
encoding = detector.result['encoding']
if encoding == 'UTF-8-SIG':
encoding = 'utf-8'
parser = ET.XMLParser(encoding=encoding)
except TypeError:
parser = None
open_file.seek(0)
iterator = ET.iterparse(open_file, parser=parser)
self.services = {}
# element = None
serviced_organisations = None
journey_pattern_sections = {}
for _, element in iterator:
tag = element.tag[33:]
if tag == 'StopPoints':
stops = (Stop(stop) for stop in element)
self.stops = {stop.atco_code: stop for stop in stops}
element.clear()
elif tag == 'Routes':
# routes = {
# route.get('id'): route.find('txc:Description', NS).text
# for route in element
# }
element.clear()
elif tag == 'RouteSections':
element.clear()
elif tag == 'Operators':
self.operators = element
elif tag == 'JourneyPatternSections':
for section in element:
section = JourneyPatternSection(section, self.stops)
if section.timinglinks:
journey_pattern_sections[section.id] = section
element.clear()
elif tag == 'ServicedOrganisations':
serviced_organisations = (ServicedOrganisation(child) for child in element)
serviced_organisations = {
organisation.code: organisation for organisation in serviced_organisations
}
elif tag == 'VehicleJourneys':
try:
self.journeys = self.__get_journeys(element, serviced_organisations)
except (AttributeError, KeyError) as e:
logger.error(e, exc_info=True)
return
element.clear()
elif tag == 'Service':
service = Service(element, serviced_organisations, journey_pattern_sections)
self.services[service.service_code] = service
elif tag == 'Garages':
# print(ET.tostring(element).decode())
element.clear()
self.element = element
self.transxchange_date = max(
element.attrib['CreationDateTime'], element.attrib['ModificationDateTime']
)[:10]
class Cell:
last = False
def __init__(self, stopusage, arrival_time, departure_time):
self.stopusage = stopusage
self.arrival_time = arrival_time
self.departure_time = departure_time
self.wait_time = arrival_time and departure_time and arrival_time != departure_time
def stop_is_at(stop, text):
"""Whether a given slugified string, roughly matches either
this stop's locality's name, or this stop's name
(e.g. 'kings-lynn' matches 'kings-lynn-bus-station' and vice versa).
"""
if stop.locality:
name = slugify(stop.locality)
if name in text or text in name:
if name == text:
return 2
return 1
name = slugify(stop.common_name)
if text in name or name in text:
if name == text:
return 2
return 1
return False
class Grouping:
def __init__(self, parent, origin, destination):
self.description_parts = parent.description_parts
self.via = parent.via
self.origin = origin
self.destination = destination
def starts_at(self, text):
return stop_is_at(self.origin, text)
def ends_at(self, text):
return stop_is_at(self.destination, text)
def __str__(self):
parts = self.description_parts
if parts:
start = slugify(parts[0])
end = slugify(parts[-1])
same_score = self.starts_at(start) + self.ends_at(end)
reverse_score = self.starts_at(end) + self.ends_at(start)
if same_score > reverse_score or (reverse_score == 4 and same_score == 4):
description = ' - '.join(parts)
elif same_score < reverse_score:
description = ' - '.join(reversed(parts))
else:
description = None
if description:
if self.via:
description += ' via ' + self.via
return description
return ''
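# --- Illustrative example (not part of the original module) ---
# A minimal, hypothetical sketch of driving the parser above. TransXChange()
# expects a binary file object (raw lines are fed to chardet before seeking back
# to the start); it then exposes .services, .journeys and per-journey timings
# via get_times(). The file path is an assumption.
def example_print_departures(path="example_transxchange.xml"):
    with open(path, "rb") as open_file:
        transxchange = TransXChange(open_file)
    for journey in transxchange.journeys:
        service = transxchange.services[journey.service_ref]
        print(service.service_code, journey.departure_time)
        for cell in journey.get_times():
            print('  ', cell.stopusage.stop, cell.arrival_time, cell.departure_time)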
| mpl-2.0 | -4,024,976,303,751,727,000 | 37.022315 | 117 | 0.597021 | false | 3.869146 | false | false | false |
mesosphere/hackers-at-berkeley | find_hack/find_hack.py | 1 | 1106 | import sys
from flask import Flask, jsonify
import srvlookup
app = Flask(__name__)
mapping = {
"10.0.4.70": "52.89.82.119",
"10.0.4.63": "52.89.78.120",
"10.0.4.66": "52.88.33.238",
"10.0.4.71": "52.89.79.28",
"10.0.4.64": "52.89.79.55",
"10.0.4.68": "52.89.76.194",
"10.0.4.69": "52.89.78.250",
"10.0.4.72": "52.89.88.228",
"10.0.4.67": "52.89.77.0",
"10.0.4.65": "52.89.80.185"
}
def lookup_backends(name):
try:
return srvlookup.lookup(name, 'tcp', 'marathon.mesos')
    except Exception as e:
print >> sys.stderr, e
return []
@app.route('/')
def index():
return "hit /<app_name> to get the host and port"
@app.route('/<name>')
def read(name=""):
results = []
print >> sys.stderr, "looking up %s" % name
for backend in lookup_backends(name):
endpoint = "%s:%s" % (mapping[backend.host], backend.port)
print >> sys.stderr, "got backend %s" % endpoint
results.append(endpoint)
return jsonify({"results":results})
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True, port=8080)
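# --- Illustrative usage note (not part of the original module) ---
# Assumed example: with the app running, GET /<marathon-app-name> returns the
# public host:port endpoints resolved from the Mesos-DNS SRV records, e.g.
#
#   $ curl http://localhost:8080/my-app
#   {"results": ["52.89.82.119:31045"]}
#
# The app name and the port in the response are made up for illustration.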
| apache-2.0 | -1,163,061,589,259,164,000 | 24.72093 | 66 | 0.550633 | false | 2.614657 | false | false | false |
forseti-security/forseti-security | google/cloud/forseti/scanner/audit/firewall_rules_engine.py | 1 | 24417 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for firewall rules."""
from builtins import object
import itertools
import threading
from collections import namedtuple
from google.cloud.forseti.common.gcp_type import firewall_rule
from google.cloud.forseti.common.gcp_type import resource as resource_mod
from google.cloud.forseti.common.gcp_type import resource_util
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import relationship
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import rules as scanner_rules
LOGGER = logger.get_logger(__name__)
class Error(Exception):
"""Base error class for the module."""
class DuplicateFirewallRuleError(Error):
"""Raised if a rule id is reused in the rule definitions, must be unique."""
class DuplicateFirewallGroupError(Error):
"""Raised if group id is reused in the group definitions, must be unique."""
class RuleDoesntExistError(Error):
"""Raised if a rule group tries to add a rule that doesn't exist."""
class GroupDoesntExistError(Error):
"""Raised if an org policy tries to add a group that doesn't exist."""
class InvalidRuleDefinition(Error):
"""Raised if a rule definition is invalid."""
class InvalidGroupDefinition(Error):
"""Raised if a group definition is invalid."""
class InvalidOrgDefinition(Error):
"""Raised if a org definition is invalid."""
class FirewallRulesEngine(bre.BaseRulesEngine):
"""Rules engine for firewall resources."""
def __init__(self, rules_file_path, snapshot_timestamp=None):
"""Initialize.
Args:
rules_file_path (str): File location of rules.
snapshot_timestamp (str): The snapshot to work with.
"""
super(FirewallRulesEngine, self).__init__(
rules_file_path=rules_file_path,
snapshot_timestamp=snapshot_timestamp)
self._repository_lock = threading.RLock()
self.rule_book = None
def build_rule_book(self, global_configs):
"""Build RuleBook from the rule definition file.
Args:
global_configs (dict): Global configurations.
"""
del global_configs # unused.
with self._repository_lock:
rule_file_dict = self._load_rule_definitions()
rule_defs = rule_file_dict.get('rules', [])
group_defs = rule_file_dict.get('rule_groups', [])
org_policy = rule_file_dict.get('org_policy', [])
self.rule_book = RuleBook(
rule_defs=rule_defs,
group_defs=group_defs,
org_policy=org_policy,
snapshot_timestamp=self.snapshot_timestamp)
def find_violations(self, resource, policy, force_rebuild=False):
"""Determine whether policy violates rules.
Args:
resource (Resource): The resource that the policy belongs to.
policy (dict): The policy to compare against the rules.
force_rebuild (bool): If True, rebuilds the rule book.
This will reload the rules definition file and add the rules to the
book.
Returns:
list: A list of the rule violations.
"""
if self.rule_book is None or force_rebuild:
self.build_rule_book(self.full_rules_path)
violations = self.rule_book.find_violations(resource, policy)
return list(violations)
class RuleBook(bre.BaseRuleBook):
"""The RuleBook for firewall auditing.
Rules from the rules definition file are parsed and then the hierarchy and
    enforcement points are parsed. Rules are then assessed at the first
    applicable point in the ancestry tree that has rules.
Sample org structure:
org 1234
/ \
f-1 p-c
/ \
p-a p-b
Rules can be applied at any node above. When a policy is being audited,
    the rule book will start at the lowest level (the project) and will
walk up the hierarchy until it reaches the first instance with rules and
these are the only rules that are checked.
"""
def __init__(self,
rule_defs=None,
snapshot_timestamp=None,
group_defs=None,
org_policy=None):
"""Initialize.
Args:
rule_defs (list): The parsed list of dictionary rules from the YAML
definition file.
snapshot_timestamp (str): The snapshot to work with.
group_defs (list): The parsed list of dictionary group ids to rules.
org_policy (dict): The parsed org policy configuration.
"""
super(RuleBook, self).__init__()
self.rule_indices = {}
self.rules_map = {}
self.rule_groups_map = {}
self.org_policy_rules_map = {}
self.snapshot_timestamp = snapshot_timestamp or None
self._repository_lock = threading.RLock()
if rule_defs:
self.add_rules(rule_defs)
if group_defs:
self.add_rule_groups(group_defs)
if org_policy:
self.add_org_policy(org_policy)
def add_rules(self, rule_defs):
"""Adds rules to rule book.
Args:
rule_defs (list): Rule definition dictionaries from yaml config file.
Raises:
InvalidRuleDefinition: If the rule is missing required fields or the
fields have invalid values.
"""
with self._repository_lock:
for i, rule_def in enumerate(rule_defs):
if rule_def is not None:
self.add_rule(rule_def, i)
def add_rule(self, rule_def, rule_index):
"""Adds a rule to the rule book.
Args:
rule_def (Rule): A Rule used to check for violations.
rule_index (int): Used for logs.
Raises:
DuplicateFirewallRuleError: When the rule by the same name exists.
"""
rule = Rule.from_config(rule_def)
if rule.id in self.rules_map:
raise DuplicateFirewallRuleError(
'Rule id "%s" already in rules (rule %s)' % (
rule.id, rule_index))
self.rule_indices[rule.id] = rule_index
self.rules_map[rule.id] = rule
def add_rule_groups(self, group_defs):
"""Creates group to rule matching.
Args:
            group_defs (list): A list of dicts, each with a group id and a list of rule ids
that will be included by including this group in a policy.
Raises:
DuplicateFirewallGroupError: Raised if the group id already exists.
RuleDoesntExistError: Raised if a rule included in the group does not
exist.
InvalidGroupDefinition: Raised if a group definition is invalid.
"""
for group_def in group_defs:
group_id = group_def.get('group_id')
if not group_id:
raise InvalidGroupDefinition('Group requires a group id')
if group_id in self.rule_groups_map:
raise DuplicateFirewallGroupError(
'Group id already exists: %s' % group_id)
rule_ids = group_def.get('rule_ids')
if not rule_ids:
raise InvalidGroupDefinition(
'Group "%s" does not have any rules' % group_id)
for rule_id in rule_ids:
if rule_id not in self.rules_map:
raise RuleDoesntExistError(
'Rule id "%s" does not exist, cannot be in group' %
rule_id)
self.rule_groups_map[group_id] = rule_ids
def add_org_policy(self, org_def):
"""Creates org policy and rule mapping.
Sample org structure:
org 1234
/ \
f-1 p-c
/ \
p-a p-b
Rules can be applied at any node above. When a policy is being audited,
        the rule book will start at the lowest level (the project) and will
walk up the hierarchy until it reaches the first instance with rules and
these are the only rules that are checked.
Args:
org_def (dict): A dictionary of resource ids and enforced rules.
Raises:
RuleDoesntExistError: Raised if a rule included in the group does not
exist.
GroupDoesntExistError: Raised if a group included in an org policy
does not exist.
InvalidOrgDefinition: Raised if org policy doesn't have resources.
"""
resources = org_def.get('resources', [])
if not resources:
raise InvalidOrgDefinition('Org policy does not have any resources')
for resource in resources:
resource_type = resource_mod.ResourceType.verify(
resource.get('type'))
ids = resource.get('resource_ids', [])
rules = resource.get('rules', {})
groups = rules.get('group_ids', [])
expanded_rules = set()
for group_id in groups:
if group_id not in self.rule_groups_map:
raise GroupDoesntExistError(
'Group "%s" does not exist' % group_id)
expanded_group = self.rule_groups_map.get(group_id, [])
expanded_rules.update(expanded_group)
for rule_id in rules.get('rule_ids', []):
if rule_id not in self.rules_map:
raise RuleDoesntExistError(
'Rule id "%s" does not exist' % rule_id)
expanded_rules.add(rule_id)
for resource_id in ids:
gcp_resource = resource_util.create_resource(
resource_id=resource_id,
resource_type=resource_type)
self.org_policy_rules_map[gcp_resource] = sorted(expanded_rules)
def find_violations(self, resource, policies):
"""Find policy binding violations in the rule book.
Args:
resource (Resource): The GCP resource associated with the
policy binding.
This is where we start looking for rule violations and
we move up the resource hierarchy (if permitted by the
resource's "inherit_from_parents" property).
policies(list): A list of FirewallRule policies.
Returns:
iterable: A generator of the rule violations.
"""
violations = itertools.chain()
resource_ancestors = (
relationship.find_ancestors(resource, policies[0].full_name))
for curr_resource in resource_ancestors:
if curr_resource in self.org_policy_rules_map:
org_policy_rules = self.org_policy_rules_map.get(
curr_resource, [])
for rule_id in org_policy_rules:
rule = self.rules_map[rule_id]
violations = itertools.chain(
violations,
rule.find_violations(policies))
break # Only the first rules found in the ancestry are applied
return violations
class Rule(object):
"""Rule properties from the firewall rules definitions file.
Also finds violations.
"""
VALID_RULE_MODES = frozenset([
scanner_rules.RuleMode.WHITELIST,
scanner_rules.RuleMode.BLACKLIST,
scanner_rules.RuleMode.REQUIRED,
scanner_rules.RuleMode.MATCHES,
])
def __init__(self,
rule_id=None,
match_policies=None,
verify_policies=None,
mode=scanner_rules.RuleMode.WHITELIST,
exact_match=True):
"""Initialize.
Args:
rule_id (str): The id of the rule.
match_policies (list): A list of policy dictionaries.
verify_policies (list): A list of policy dictionaries.
mode (RuleMode): The RuleMode for this rule.
exact_match (bool): Whether to exactly match required rules.
"""
self.id = rule_id
self._match_policies = match_policies
self._match_rules = None
self._exact_match = exact_match
self.mode = mode
self._verify_policies = verify_policies
self._verify_rules = None
def __hash__(self):
"""Makes a hash of the rule id.
Returns:
int: The hash of the rule id.
"""
return hash(self.id)
@classmethod
def from_config(cls, rule_def):
"""Creates a Rule from a config file.
Args:
rule_def (dict): A dictionary rule definition parsed from YAML config.
Returns:
Rule: A rule created from the rule definition.
Raises:
InvalidRuleDefinition: If rule is missing required fields.
"""
rule_id = rule_def.get('rule_id')
if not rule_id:
raise InvalidRuleDefinition('Rule requires rule_id')
mode = rule_def.get('mode')
if not mode:
raise InvalidRuleDefinition('Rule requires mode')
mode = mode.lower()
if mode not in cls.VALID_RULE_MODES:
raise InvalidRuleDefinition('Mode %s is not in valid modes: %s'
% (mode, cls.VALID_RULE_MODES))
match_policies = rule_def.get('match_policies', [])
verify_policies = rule_def.get('verify_policies', [])
if mode in ['whitelist', 'blacklist']:
if not match_policies or not verify_policies:
raise InvalidRuleDefinition(
'Whitelist and blacklist rules require match and verify '
'policies')
if mode in ['required', 'matches']:
if not match_policies:
raise InvalidRuleDefinition(
'Required and matches rules require match policies')
if verify_policies:
raise InvalidRuleDefinition(
'Required and matches rules cannot have verify policies')
return Rule(
rule_id=rule_id,
match_policies=match_policies,
verify_policies=verify_policies,
mode=mode,
exact_match=rule_def.get('exact_match', True),
)
@staticmethod
def create_rules(policies, validate=False):
"""Creates FirewallRules from policies.
Args:
policies (list): A list of policy dictionaries.
validate (bool): Whether to validate that this is a valid firewall
rule (one that can be passed to the API).
Returns:
list: A list of FirewallRule.
"""
match_rules = []
for policy in policies:
rule = firewall_rule.FirewallRule.from_dict(
policy, validate=validate)
match_rules.append(rule)
return match_rules
@property
def match_rules(self):
"""The FirewallRules used to filter policies.
Returns:
list: A list of FirewallRule.
"""
if not self._match_rules:
validate = self.mode in {
scanner_rules.RuleMode.REQUIRED,
scanner_rules.RuleMode.MATCHES
}
self._match_rules = self.create_rules(
self._match_policies, validate=validate)
return self._match_rules
@property
def verify_rules(self):
"""The FirewallRules used to check policies.
Returns:
list: A list of FirewallRule.
"""
if not self._verify_rules:
self._verify_rules = self.create_rules(self._verify_policies)
return self._verify_rules
def find_violations(self, firewall_policies):
"""Finds policy violations in a list of firewall policies.
Args:
firewall_policies (list): A list of FirewallRule.
Returns:
iterable: A generator of RuleViolations.
"""
if self.mode == scanner_rules.RuleMode.MATCHES:
violations = self._yield_match_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.REQUIRED:
violations = self._yield_required_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.WHITELIST:
violations = self._yield_whitelist_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.BLACKLIST:
violations = self._yield_blacklist_violations(firewall_policies)
return violations
def _yield_match_violations(self, firewall_policies):
"""Finds policies that don't match the required policy.
Args:
firewall_policies (list): A list of FirewallRules to check.
Yields:
iterable: A generator of RuleViolations.
"""
inserts = set([])
deletes = set([])
for i, rule in enumerate(self.match_rules):
if is_rule_exists_violation(rule, firewall_policies,
self._exact_match):
inserts.add('%s: rule %s' % (self.id, i))
for policy in firewall_policies:
if is_rule_exists_violation(policy, self.match_rules,
self._exact_match):
deletes.add(policy.name)
updates = inserts & deletes
inserts, deletes = (inserts - updates, deletes - updates)
if inserts or deletes or updates:
yield self._create_violation(
firewall_policies, 'FIREWALL_MATCHES_VIOLATION',
recommended_actions={
'INSERT_FIREWALL_RULES': sorted(inserts),
'DELETE_FIREWALL_RULES': sorted(deletes),
'UPDATE_FIREWALL_RULES': sorted(updates),
})
def _yield_required_violations(self, firewall_policies):
"""Finds missing policies that are required.
Args:
firewall_policies (list): A list of FirewallRules to check.
Yields:
iterable: A generator of RuleViolations.
"""
for i, rule in enumerate(self.match_rules):
if is_rule_exists_violation(rule, firewall_policies,
self._exact_match):
yield self._create_violation(
firewall_policies, 'FIREWALL_REQUIRED_VIOLATION',
recommended_actions={
'INSERT_FIREWALL_RULES': [
'%s: rule %s' % (self.id, i)
],
})
def _yield_whitelist_violations(self, firewall_policies):
"""Finds policies that aren't whitelisted.
Args:
firewall_policies (list): A list of FirewallRules to check.
Yields:
iterable: A generator of RuleViolations.
"""
for policy in firewall_policies:
if not any([policy > rule for rule in self.match_rules]):
continue
if is_whitelist_violation(self.verify_rules, policy):
yield self._create_violation(
[policy], 'FIREWALL_WHITELIST_VIOLATION',
recommended_actions={
'DELETE_FIREWALL_RULES': [policy.name],
})
def _yield_blacklist_violations(self, firewall_policies):
"""Finds blacklisted policies.
Args:
firewall_policies (list): A list of FirewallRules to check.
Yields:
iterable: A generator of RuleViolations.
"""
for policy in firewall_policies:
if not any([policy > rule for rule in self.match_rules]):
continue
if is_blacklist_violation(self.verify_rules, policy):
yield self._create_violation(
[policy], 'FIREWALL_BLACKLIST_VIOLATION',
recommended_actions={
'DELETE_FIREWALL_RULES': [policy.name],
})
def _create_violation(self, policies, violation_type,
recommended_actions=None):
"""Creates a RuleViolation.
Args:
policies (list): A list of FirewallRule that violate the policy.
violation_type (str): The type of violation.
recommended_actions (list): The list of actions to take.
Returns:
RuleViolation: A RuleViolation for the given policies.
Raises:
ValueError: If no policies are passed in.
"""
if not policies:
raise ValueError('No policies in violation')
inventory_data = []
for policy in policies:
inventory_data.append(policy.as_json())
return RuleViolation(
resource_name=','.join([p.name for p in policies]),
resource_type=resource_mod.ResourceType.FIREWALL_RULE,
resource_id=policies[0].project_id,
full_name=policies[0].full_name,
rule_id=self.id,
violation_type=violation_type,
policy_names=[p.name for p in policies],
recommended_actions=recommended_actions,
resource_data=inventory_data
)
# Rule violation.
# resource_type: string
# resource_id: string
# rule_name: string
# violation_type: FIREWALL_VIOLATION
# policy_names: string
# recommeded_action: string
RuleViolation = namedtuple('RuleViolation',
['resource_type', 'resource_id', 'full_name',
'rule_id', 'violation_type', 'policy_names',
'recommended_actions', 'resource_data',
'resource_name'])
def is_whitelist_violation(rules, policy):
"""Checks if the policy is not a subset of those allowed by the rules.
Args:
rules (list): A list of FirewallRule that the policy must be a subset of.
        policy (FirewallRule): A FirewallRule.
Returns:
        bool: True if the policy is not a subset of any allowed rule (a whitelist violation).
"""
policy_subset_check = []
for rule in rules:
if policy < rule:
policy_subset_check.append(True)
else:
policy_subset_check.append(False)
result = not any(policy_subset_check)
return result
def is_blacklist_violation(rules, policy):
    """Checks if the policy is a superset of any rule disallowed by the rules.
    Args:
        rules (list): A list of blacklisted FirewallRule.
        policy (FirewallRule): A FirewallRule.
    Returns:
        bool: True if the policy is a superset of at least one blacklisted rule.
    """
    return any(policy > rule for rule in rules)
def is_rule_exists_violation(rule, policies, exact_match=True):
    """Checks if a required rule is missing from the policies.
    Args:
        rule (FirewallRule): A required FirewallRule.
        policies (list): A list of FirewallRule that must contain the rule.
        exact_match (bool): Whether to match the rule exactly.
    Returns:
        bool: True if the required rule is not found in the policies.
    """
    if exact_match:
        return not any(policy == rule for policy in policies)
    return not any(policy.is_equilvalent(rule) for policy in policies)
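# Illustrative sketch (not part of the scanner): the helpers above only rely on
# FirewallRule supporting ``<`` (strict subset), ``>`` (strict superset) and
# ``==``. The stand-in class and demo below are hypothetical, added purely to
# show the expected behaviour of the helpers.
class _ToyPortRangeRule(object):
    """Toy rule: a closed port range [lo, hi]."""
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi
    def __eq__(self, other):
        return (self.lo, self.hi) == (other.lo, other.hi)
    def __lt__(self, other):
        # self is a strict subset of other
        return other.lo <= self.lo and self.hi <= other.hi and not self == other
    def __gt__(self, other):
        # self is a strict superset of other
        return other < self
def _demo_violation_helpers():
    allowed = [_ToyPortRangeRule(80, 90)]
    assert not is_whitelist_violation(allowed, _ToyPortRangeRule(81, 85))
    assert is_whitelist_violation(allowed, _ToyPortRangeRule(70, 95))
    assert is_blacklist_violation([_ToyPortRangeRule(22, 22)], _ToyPortRangeRule(20, 30))
    assert is_rule_exists_violation(_ToyPortRangeRule(80, 90), [_ToyPortRangeRule(70, 95)])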
| apache-2.0 | 592,752,568,076,237,700 | 35.119822 | 80 | 0.588361 | false | 4.428183 | false | false | false |
openstack/zaqar | zaqar/conf/storage.py | 1 | 2136 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from zaqar.i18n import _
queue_pipeline = cfg.ListOpt(
'queue_pipeline', default=[],
help=_('Pipeline to use for processing queue operations. This pipeline '
'will be consumed before calling the storage driver\'s controller '
'methods.'))
message_pipeline = cfg.ListOpt(
'message_pipeline', default=[],
help=_('Pipeline to use for processing message operations. This pipeline '
'will be consumed before calling the storage driver\'s controller '
'methods.'))
claim_pipeline = cfg.ListOpt(
'claim_pipeline', default=[],
help=_('Pipeline to use for processing claim operations. This pipeline '
'will be consumed before calling the storage driver\'s controller '
'methods.'))
subscription_pipeline = cfg.ListOpt(
'subscription_pipeline', default=[],
help=_('Pipeline to use for processing subscription operations. This '
'pipeline will be consumed before calling the storage driver\'s '
'controller methods.'))
topic_pipeline = cfg.ListOpt(
'topic_pipeline', default=[],
help=_('Pipeline to use for processing topic operations. This '
'pipeline will be consumed before calling the storage driver\'s '
'controller methods.'))
GROUP_NAME = 'storage'
ALL_OPTS = [
queue_pipeline,
message_pipeline,
claim_pipeline,
subscription_pipeline,
topic_pipeline
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
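# Illustrative usage sketch (assumption: a standalone ConfigOpts object; Zaqar
# itself registers these options through its bootstrap code). Not called by the
# module; shown only to clarify how the [storage] pipeline options are read.
def _example_usage():
    conf = cfg.ConfigOpts()
    register_opts(conf)
    # Each pipeline defaults to [] until overridden in zaqar.conf under the
    # [storage] section, e.g. message_pipeline = zaqar.notification.notifier
    return conf.storage.message_pipeline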
| apache-2.0 | 415,385,300,332,636,350 | 29.956522 | 78 | 0.691011 | false | 4.350305 | false | false | false |
magicicada-bot/magicicada-server | magicicada/server/tests/test_volumes.py | 1 | 18529 | # -*- coding: utf-8 -*-
# Copyright 2008-2015 Canonical
# Copyright 2015-2018 Chicharreros (https://launchpad.net/~chicharreros)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/magicicada-server
"""Test volume operations."""
from magicicadaprotocol import request
from magicicadaprotocol.volumes import RootVolume, UDFVolume, ShareVolume
from twisted.internet import defer
from magicicada.filesync.models import Share
from magicicada.filesync.services import get_storage_user
from magicicada.server.testing.testcase import TestWithDatabase
class TestListVolumes(TestWithDatabase):
"""Test list_volumes command."""
def test_root_only(self):
"""Users have one volume by default: root."""
@defer.inlineCallbacks
def auth(client):
"""Authenticate and test."""
yield client.dummy_authenticate("open sesame")
root_node_id = yield client.get_root()
req = yield client.list_volumes()
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), root_node_id)
self.assertEqual(root.generation, 0)
self.assertEqual(root.free_bytes, self.usr0.free_bytes)
return self.callback_test(auth, add_default_callbacks=True)
def test_root_only_with_generation(self):
"""Test that the Root volume gets it generation."""
@defer.inlineCallbacks
def auth(client):
"""Authenticate and test."""
# create a file in order to get a generation > 0
self.usr0.root.make_file(u"filename_1")
yield client.dummy_authenticate("open sesame")
root_node_id = yield client.get_root()
req = yield client.list_volumes()
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), root_node_id)
self.assertEqual(root.generation, 1)
self.assertEqual(root.free_bytes, self.usr0.free_bytes)
return self.callback_test(auth, add_default_callbacks=True)
def test_one_share_offered(self):
"""Offered shares are not shown in volumes."""
def check(req):
"""Check volumes response."""
# root should be here only
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), self._state.root)
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda r: client.get_root())
d.addCallback(self.save_req, "root")
d.addCallback(lambda r: client.create_share(r, self.usr1.username,
u"n1", Share.VIEW))
d.addCallback(lambda _: client.list_volumes())
d.addCallback(check)
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
def _create_share(self, _, accept=False, dead=False, from_id=None):
"""Create the share to me."""
if from_id is None:
from_id = self.usr1.id
fromusr = get_storage_user(from_id)
node = fromusr.root.load()
share = node.share(self.usr0.id, u"name", readonly=True)
self._state.subtree_id = node.id
if accept:
self.usr0.get_share(share.id).accept()
self._state.share_id = share.id
if dead:
share.delete()
return share
def test_share_to_me_no_accept(self):
"""A share offered to me should not be in the list if not accepted."""
def check(req):
"""Check volumes response."""
# root should be here only
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), self._state.root)
def auth(client):
"""Authenticate and run the test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda r: client.get_root())
d.addCallback(self.save_req, "root")
# create the share
d.addCallback(self._create_share)
# list the volumes and check
d.addCallback(lambda _: client.list_volumes())
d.addCallback(check)
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
def test_share_to_me_accepted(self):
"""A share offered to me should be in the volumes list if accepted."""
@defer.inlineCallbacks
def auth(client):
"""Authenticate and run the test."""
yield client.dummy_authenticate("open sesame")
client_root = yield client.get_root()
# create the share
_share = self._create_share(client_root, accept=True)
# create a file in order to get a generation > 0
self.usr0.root.make_file(u"filename_1")
# list the volumes and check
req = yield client.list_volumes()
# check volumes response.
self.assertEqual(len(req.volumes), 2)
# test root
root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), client_root)
self.assertEqual(root.generation, 1)
self.assertEqual(root.free_bytes, self.usr0.free_bytes)
# test share
share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
self.assertEqual(share.volume_id, _share.id)
self.assertEqual(share.node_id, _share.root_id)
self.assertEqual(share.direction, "to_me")
self.assertEqual(share.share_name, "name")
self.assertEqual(share.other_username, self.usr1.username)
self.assertEqual(share.accepted, True)
self.assertEqual(share.access_level, Share.VIEW)
self.assertEqual(share.free_bytes, self.usr1.free_bytes)
return self.callback_test(auth, add_default_callbacks=True)
def test_share_to_me_accepted_with_generation(self):
"""A share offered to me should be in the volumes list if accepted."""
@defer.inlineCallbacks
def auth(client):
"""Authenticate and run the test."""
yield client.dummy_authenticate("open sesame")
client_root = yield client.get_root()
# create the share
_share = self._create_share(client_root, accept=True)
            # increase the generation of the share
self.usr1.root.make_file(u"filename_1")
# create a file in order to get a generation > 0
self.usr0.root.make_file(u"filename_1")
# list the volumes and check
req = yield client.list_volumes()
# check volumes response.
self.assertEqual(len(req.volumes), 2)
# test root
root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), client_root)
self.assertEqual(root.generation, 1)
self.assertEqual(root.free_bytes, self.usr0.free_bytes)
# test share
share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
self.assertEqual(share.volume_id, _share.id)
self.assertEqual(share.node_id, _share.root_id)
self.assertEqual(share.direction, "to_me")
self.assertEqual(share.share_name, "name")
self.assertEqual(share.other_username, self.usr1.username)
self.assertEqual(share.accepted, True)
self.assertEqual(share.access_level, Share.VIEW)
self.assertEqual(share.free_bytes, self.usr1.free_bytes)
self.assertEqual(share.generation, 1)
return self.callback_test(auth, add_default_callbacks=True)
def test_udf(self):
"""An UDF should be in the volume list."""
@defer.inlineCallbacks
def auth(client):
"""Authenticate and test."""
# increase the generation in the root
self.usr0.root.make_file(u"filename_1")
yield client.dummy_authenticate("open sesame")
client_root = yield client.get_root()
# create the udf
client_udf = yield client.create_udf(u"~/ñ", u"foo")
# increase the generation in the udf
self.usr0.volume(client_udf.volume_id).root.make_file(u"file_1")
# list the volumes and check
req = yield client.list_volumes()
# check
self.assertEqual(len(req.volumes), 2)
# test root
root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), client_root)
self.assertEqual(root.generation, 1)
self.assertEqual(root.free_bytes, self.usr0.free_bytes)
# test udf
udf = [v for v in req.volumes if isinstance(v, UDFVolume)][0]
self.assertEqual(str(udf.volume_id), client_udf.volume_id)
self.assertEqual(str(udf.node_id), client_udf.node_id)
self.assertEqual(udf.suggested_path, u"~/ñ/foo")
self.assertEqual(udf.generation, 1)
self.assertEqual(udf.free_bytes, self.usr0.free_bytes)
return self.callback_test(auth, add_default_callbacks=True)
def test_shares_to_me_accepted_dead(self):
"""A dead share offered to me should not be in the list."""
def check(req):
"""Check volumes response."""
# root should be here only
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), self._state.root)
def auth(client):
"""Authenticate and run the test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda r: client.get_root())
d.addCallback(self.save_req, "root")
# create the share
d.addCallback(self._create_share, accept=True, dead=True)
# list the volumes and check
d.addCallback(lambda _: client.list_volumes())
d.addCallback(check)
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
def test_udf_dead(self):
"""A dead UDF should not be in the volume list."""
def check(req):
"""Check volumes response."""
# root should be here only
self.assertEqual(len(req.volumes), 1)
root = req.volumes[0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), self._state.root)
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda r: client.get_root())
d.addCallback(self.save_req, "root")
# create the udf
d.addCallback(lambda _: client.create_udf(u"~/ñ", u"foo"))
d.addCallback(lambda r: client.delete_volume(r.volume_id))
# list the volumes and check
d.addCallback(lambda _: client.list_volumes())
d.addCallback(check)
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
def test_mixed(self):
"""Mix of UDFs and shares, dead and alive."""
def check(req):
"""Check volumes response."""
self.assertEqual(len(req.volumes), 3)
# test root
root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
self.assertEqual(root.volume_id, None)
self.assertEqual(str(root.node_id), self._state.root)
# test udf
udf = [v for v in req.volumes if isinstance(v, UDFVolume)][0]
self.assertEqual(str(udf.volume_id), self._state.udf.volume_id)
self.assertEqual(str(udf.node_id), self._state.udf.node_id)
self.assertEqual(udf.suggested_path, u"~/ñ/foo")
self.assertEqual(udf.free_bytes, self.usr0.free_bytes)
# test share
share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
self.assertEqual(share.volume_id, self._state.share_id)
self.assertEqual(share.node_id, self._state.subtree_id)
self.assertEqual(share.direction, "to_me")
self.assertEqual(share.share_name, "name")
self.assertEqual(share.other_username, self.usr2.username)
self.assertEqual(share.accepted, True)
self.assertEqual(share.access_level, Share.VIEW)
self.assertEqual(share.free_bytes, self.usr1.free_bytes)
@defer.inlineCallbacks
def auth(client):
"""Authenticate and test."""
client.dummy_authenticate("open sesame")
root = yield client.get_root()
self.save_req(root, "root")
# create two udfs, kill one
udf = yield client.create_udf(u"~/ñ", u"foo")
self.save_req(udf, "udf")
result = yield client.create_udf(u"~/moño", u"groovy")
yield client.delete_volume(result.volume_id)
# create two shares, one dead (the second one should be the live
# one because the helper function stores data for comparison)
self._create_share(None, accept=True, dead=True)
self._create_share(None, accept=True, from_id=self.usr2.id)
# list the volumes and check
req = yield client.list_volumes()
check(req)
return self.callback_test(auth, add_default_callbacks=True)
class TestDataWithVolumes(TestWithDatabase):
"""Tests data handling in the context of several volumes."""
def test_same_names(self):
"""Be able to have same names in different roots."""
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda _: client.get_root())
# create a subdir in root
d.addCallback(lambda root: client.make_dir(request.ROOT,
root, "subdir"))
# create the udf, with a dir of same name
d.addCallback(lambda _: client.create_udf(u"~", u"myudf"))
d.addCallback(lambda r: client.make_dir(r.volume_id,
r.node_id, "subdir"))
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
def test_unlink_same_path(self):
"""Unlink with similar paths, should work ok."""
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda _: client.get_root())
# create a subdir in root
d.addCallback(lambda root: client.make_dir(request.ROOT,
root, "tdir1"))
d.addCallback(self.save_req, "dir_del")
# create the udf, with two subdirs
d.addCallback(lambda _: client.create_udf(u"~", u"myudf"))
d.addCallback(self.save_req, "udf")
d.addCallback(lambda r: client.make_dir(r.volume_id,
r.node_id, "tdir1"))
d.addCallback(lambda r: client.make_dir(self._state.udf.volume_id,
r.new_id, "tdir2"))
# delete one dir in one volume
d.addCallback(lambda _: client.unlink(request.ROOT,
self._state.dir_del.new_id))
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
class TestVolumesBasic(TestWithDatabase):
"""Test basic operations on volumes."""
def test_delete_root(self):
"""Test deletion of root volume."""
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda _: client.delete_volume(request.ROOT))
def check(failure):
"""Checks the error returned."""
self.assertIsInstance(failure.value,
request.StorageRequestError)
self.assertEqual(str(failure.value), 'NO_PERMISSION')
client.test_done(True)
d.addCallbacks(client.test_fail, check)
return self.callback_test(auth)
def test_delete_bad_volume_id(self):
"""Test deletion of bad volume id."""
def auth(client):
"""Authenticate and test."""
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda _: client.delete_volume('foo bar'))
def check(failure):
"""Checks the error returned."""
self.assertIsInstance(failure.value,
request.StorageRequestError)
self.assertEqual(str(failure.value), 'DOES_NOT_EXIST')
client.test_done(True)
d.addCallbacks(client.test_fail, check)
return self.callback_test(auth)
| agpl-3.0 | -5,594,257,917,132,242,000 | 41.877315 | 78 | 0.587432 | false | 4.088962 | true | false | false |
NeuralProsthesisLab/unlock | unlock/state/diagnostic_state.py | 1 | 9523 | # Copyright (c) Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.state.state import UnlockState, TrialState
class FacialEmgDiagnosticState(UnlockState):
Up = 'UP'
Down = 'Down'
Left = 'Left'
Right = 'Right'
Selection = 'Selection'
def __init__(self, timer, classifier):
super(FacialEmgDiagnosticState, self).__init__()
self.timer = timer
self.classifier = classifier
self.text = ''
def start(self):
self.timer.begin_timer()
self.classifier.reset()
self.state = True
self.text = 'Detecting'
def stop(self):
self.timer.reset()
self.state = False
def process_command(self, command):
if command.keyboard_selection:
self.start()
return
if not self.state:
return
self.timer.update_timer(command.delta)
if command.decision is not None:
self.handle_decision(command.decision)
elif command.selection is not None:
self.text = FacialEmgDiagnosticState.Selection
if self.timer.is_complete():
self.stop()
def handle_decision(self, decision):
if decision == FacialEMGDetector.UpDecision:
self.text = FacialEmgDiagnosticState.Up
elif decision == FacialEMGDetector.DownDecision:
self.text = FacialEmgDiagnosticState.Down
elif decision == FacialEMGDetector.LeftDecision:
self.text = FacialEmgDiagnosticState.Left
elif decision == FacialEMGDetector.RightDecision:
self.text = FacialEmgDiagnosticState.Right
class VepDiagnosticState(UnlockState):
"""
The diagnostic model supports two modes of operation: continuous and
discrete. In the continuous mode, the stimuli is always flickering and the
scope is always updating. In the discrete mode, the stimulus presents
itself for a fixed amount of time, then the scope and/or decoder metrics
are printed. The discrete mode trials are triggered by a selection event
e.g. space bar press.
"""
FrequencyUp = 1
FrequencyDown = 2
ChannelDown = 3
ChannelUp = 4
def __init__(self, scope, stimuli, decoders, frequencies):
super(VepDiagnosticState, self).__init__()
self.scope = scope
self.stimuli = stimuli
self.frequencies = frequencies
self.cursor = 0
rate = 1 / (self.frequencies[self.cursor] * 2)
self.stimuli.model.stimuli[0].time_state.set_duration(rate)
self.decoders = decoders
if decoders is None:
self.decoders = list()
for decoder in self.decoders:
# this should be pushed into the decoder as an object reference
# so changing it doesn't require a push-style update list this
decoder.target_label = self.cursor
self.trial_count = 0
self.feedback_change = False
self.feedback_results = list()
def trial_start(self):
print ("TRIAL START ")
self.stimuli.model.start()
for decoder in self.decoders:
decoder.start()
self.feedback_change = True
self.feedback_results = list()
def trial_stop(self):
print ("TRIAL STop")
self.stimuli.model.stop()
for decoder in self.decoders:
decoder.stop()
self.feedback_change = True
def process_command(self, command):
raise Exception("Base class")
return None
def handle_decision(self, decision):
print ("HANDLE DECISION")
        if decision == VepDiagnosticState.FrequencyUp:
            self.cursor += 1
            if self.cursor >= len(self.frequencies):
                self.cursor = len(self.frequencies) - 1
            rate = 1 / (self.frequencies[self.cursor] * 2)
            self.stimuli.model.stimuli[0].time_state.set_duration(rate)
            for decoder in self.decoders:
                decoder.target_label = self.cursor
        elif decision == VepDiagnosticState.FrequencyDown:
            self.cursor -= 1
            if self.cursor < 0:
                self.cursor = 0
            rate = 1 / (self.frequencies[self.cursor] * 2)
            self.stimuli.model.stimuli[0].time_state.set_duration(rate)
            for decoder in self.decoders:
                decoder.target_label = self.cursor
        elif decision == VepDiagnosticState.ChannelDown:
            if self.scope is not None:
                self.scope.model.change_display_channel(-1)
        elif decision == VepDiagnosticState.ChannelUp:
            if self.scope is not None:
                self.scope.model.change_display_channel(1)
def update_decoders(self, command):
print ("UPDATE DECODERS")
for decoder in self.decoders:
result = decoder.classify(command)
if result is not None:
self.feedback_results.append(result)
def get_state(self):
print("GET STATE")
if self.feedback_change:
text = ','.join(self.feedback_results)
if text != '':
text = '[%.1f Hz] - %s' % (self.frequencies[self.cursor], text)
self.feedback_change = False
return True, text
else:
return False, ''
class ContinuousVepDiagnosticState(VepDiagnosticState):
def __init__(self, scope, stimuli, frequencies, decoders):
super(ContinuousVepDiagnosticState, self).__init__(scope, stimuli, decoders, frequencies)
def process_command(self, command):
"""
For continuous usage, allow changes to the scope and stimuli
frequencies at any event. The stimuli can also be started and stopped
by the user.
"""
if command.selection:
if self.stimuli.model.state.is_stopped():
self.trial_start()
else:
self.trial_stop()
if command.decision is not None:
self.handle_decision(command.decision)
self.update_decoders(command)
#return True
class DiscreteVepDiagnosticState(VepDiagnosticState):
def __init__(self, scope, stimuli, decoders, frequencies):
super(DiscreteVepDiagnosticState, self).__init__(scope, stimuli, decoders, frequencies)
def process_command(self, command):
"""
For discrete usage, only allow changes when a trial is not underway.
Handle the transition between trial and output.
"""
print("PROCESS COMMAND")
if not self.stimuli.model.state.is_stopped():
print("HACK STIMULLI state is stopped")
if self.trial_count == 0:
print(" trial count == 0")
# this is a hack to get around the current setup where the
# stimuli starts immediately
self.trial_stop()
elif self.stimuli.model.state.last_change == TrialState.TrialExpiry:
print (" Trial expiry ")
# there is an occasional delay apparently that can happen when
# using actual devices which causes this state to be missed
# i.e. it goes to rest, then the next rest state, resulting in
# an Unchanged response, before this check happens. A better
# method would preserve the value until it was queried.
self.trial_stop()
self.update_decoders(command)
else:
print (" ELSE UPDATE DECODERS ")
self.update_decoders(command)
return #True
if command.selection:
print ("Command selection ")
self.trial_count += 1
self.trial_start()
if command.decision is not None:
print("Command . decision not none")
self.handle_decision(command.decision)
#return True
| bsd-3-clause | -16,656,330,169,797,434 | 37.873469 | 97 | 0.620183 | false | 4.402681 | false | false | false |
amolkahat/pandas | pandas/tests/extension/test_common.py | 1 | 2329 | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes import dtypes
class DummyDtype(dtypes.ExtensionDtype):
pass
class DummyArray(ExtensionArray):
def __init__(self, data):
self.data = data
def __array__(self, dtype):
return self.data
@property
def dtype(self):
return DummyDtype()
def astype(self, dtype, copy=True):
# we don't support anything but a single dtype
if isinstance(dtype, DummyDtype):
if copy:
return type(self)(self.data)
return self
return np.array(self, dtype=dtype, copy=copy)
class TestExtensionArrayDtype(object):
@pytest.mark.parametrize('values', [
pd.Categorical([]),
pd.Categorical([]).dtype,
pd.Series(pd.Categorical([])),
DummyDtype(),
DummyArray(np.array([1, 2])),
])
def test_is_extension_array_dtype(self, values):
assert is_extension_array_dtype(values)
@pytest.mark.parametrize('values', [
np.array([]),
pd.Series(np.array([])),
])
def test_is_not_extension_array_dtype(self, values):
assert not is_extension_array_dtype(values)
def test_astype():
arr = DummyArray(np.array([1, 2, 3]))
expected = np.array([1, 2, 3], dtype=object)
result = arr.astype(object)
tm.assert_numpy_array_equal(result, expected)
result = arr.astype('object')
tm.assert_numpy_array_equal(result, expected)
def test_astype_no_copy():
arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
result = arr.astype(arr.dtype, copy=False)
assert arr is result
result = arr.astype(arr.dtype)
assert arr is not result
@pytest.mark.parametrize('dtype', [
dtypes.DatetimeTZDtype('ns', 'US/Central'),
])
def test_is_not_extension_array_dtype(dtype):
assert not isinstance(dtype, dtypes.ExtensionDtype)
assert not is_extension_array_dtype(dtype)
@pytest.mark.parametrize('dtype', [
dtypes.CategoricalDtype(),
dtypes.IntervalDtype(),
])
def test_is_extension_array_dtype(dtype):
assert isinstance(dtype, dtypes.ExtensionDtype)
assert is_extension_array_dtype(dtype)
| bsd-3-clause | -8,029,631,026,453,329,000 | 24.043011 | 62 | 0.657364 | false | 3.633385 | true | false | false |
cwgreene/Nanostructure-Simulator | meshes/triangle/mesh_creator.py | 1 | 1081 | from dolfin import *
import triangulate as trig
from itertools import *
def mesh_creator(triangles):
#flatten triangle list
print "triangles",triangles[0:1]
points = trig.flat(triangles)
print "points",points[0:3]
#create mesh and editor
mesh = Mesh()
editor = MeshEditor()
editor.open(mesh,"triangle",2,2)
point_ids = {}
#put points into hashtable,add them as vertices
for point in points:
try:
point_ids[tuple(point)] = 0
except:
print point,type(point)
raise
print len(points),len(point_ids)
#Init Points, now that we know how many
#editor.initCells(len(triangles))
editor.init_cells(len(triangles))
#editor.initVertices(len(point_ids))
editor.init_vertices(len(point_ids))
for point,id in izip(point_ids,count()):
point_ids[point] = id
editor.add_vertex(id,*point)
#editor.addVertex(id,*point)
#now add cells
for tri,id in izip(triangles,count()):
tri_id = map(lambda p: point_ids[tuple(p)],tri)
editor.add_cell(id,*tri_id)
editor.close()
print "Mesh triangles:points",len(triangles),":",len(mesh.coordinates())
return mesh
| mit | -1,139,904,979,769,238,100 | 24.139535 | 73 | 0.712303 | false | 2.793282 | false | false | false |
kalleastrom/LocoPositioningSystemSlam | python/data/converter.py | 1 | 1346 | """
Converts a .mat file to the appropriate JSON format
"""
import scipy.io as sio
import os
import json
def convert_data_set(lfile):
"""
Converts a .mat file to the appropriate JSON format
"""
directory = os.path.dirname(os.path.abspath(__file__))
try:
data = sio.loadmat(lfile)
except:
raise Exception(("Could not load file, check check that %s exists in"+
"the directory %s.") % (lfile,directory))
    output = {
'info':'%s - %s' %(data['__header__'],data['data'][0][0][10][0]),
'dimensions':{
'numberOfAnchors': data['data'][0][0][0].shape[0],
'numberOfMeasurements':data['data'][0][0][0].shape[1]
},
'ranges':data['data'][0][0][0].tolist(),
'acceleration': None,
'rates': None,
'thrust': None,
'torques': None,
'times': None
}
try:
        # Save the data as JSON alongside the loaded file, using the same name with a .json extension
path = os.path.join(directory, lfile[:-4] + '.json')
with open(path, 'w') as outfile:
            json.dump(output, outfile, separators=(',', ':'), sort_keys=True, indent=4)
except:
raise Exception('Could not save the file')
if __name__ == "__main__":
loadFileName = 'data4.mat'
    convert_data_set(loadFileName)
| mit | 4,789,105,622,986,190,000 | 28.282609 | 87 | 0.547548 | false | 3.677596 | false | false | false |
PnX-SI/GeoNature | install/deploy_gn/run.py | 1 | 1403 | import sys
from config import *
from connection import connect
def deploy():
conn = connect()
URL_SETTING = f"https://raw.githubusercontent.com/PnX-SI/GeoNature/{GN_VERSION}/install/install_all/install_all.ini"
URL_SCRIPT = f"https://raw.githubusercontent.com/PnX-SI/GeoNature/{GN_VERSION}/install/install_all/install_all.sh"
conn.run(f"wget {URL_SETTING}")
conn.run(f"wget {URL_SCRIPT}")
# sed the settings.ini
conn.run(f"sed -i 's/my_url=.*$/my_url={DOMAIN}/g' install_all.ini")
conn.run(f"sed -i 's/geonature_release=.*$/geonature_release={GN_VERSION}/g' install_all.ini")
conn.run(f"sed -i 's/install_default_dem=.*$/install_default_dem=false/g' install_all.ini")
conn.run(f"sed -i 's/drop_geonaturedb=.*$/drop_geonaturedb={DROP_DB}/g' install_all.ini")
conn.run("touch install_all.log")
conn.run("chmod +x install_all.sh")
conn.run("./install_all.sh 2>&1 | tee install_all.log")
def clean():
conn = connect()
conn.run("sudo rm -r geonature taxhub usershub install_all.*")
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Pass 'deploy' or 'clean' argument to the script")
elif len(sys.argv) > 1:
arg1 = sys.argv[1]
if arg1 == "deploy":
deploy()
elif arg1 == "clean":
clean()
else:
print("Pass 'deploy' or 'clean' argument to the script")
| gpl-3.0 | 2,093,802,083,464,185,600 | 34.974359 | 120 | 0.626515 | false | 2.904762 | false | false | false |
protonyx/labtronyx-gui | labtronyxgui/common/rpc/jsonrpc.py | 1 | 8281 |
import json
"""
JSON RPC Python class
Follows the JSON RPC 2.0 Spec (http://www.jsonrpc.org/specification)
This class can either be instantiated with a JSON encoded string or used as
a utility helper class
"""
#===============================================================================
# Error Type
#===============================================================================
class JsonRpc_Error(RuntimeError):
code = None
message = None
data = None
def __init__(self, **rpc_dict):
RuntimeError.__init__(self)
self.id = rpc_dict.get('id', None)
if 'error' in rpc_dict:
error = rpc_dict.get('error', {})
self.code = error.get('code', None)
self.message = error.get('message', None)
def __str__(self):
return repr(str(self.message))
def export(self):
return {'id': self.id,
'error': {'code': self.code, 'message': self.message}}
class JsonRpc_ParseError(JsonRpc_Error):
code = -32700
message = 'Invalid JSON was received by the server.'
class JsonRpc_InvalidRequest(JsonRpc_Error):
code = -32600
message = 'The JSON sent is not a valid Request object.'
class JsonRpc_MethodNotFound(JsonRpc_Error):
code = -32601
message = 'The method does not exist / is not available.'
class JsonRpc_InvalidParams(JsonRpc_Error):
code = -32602
message = 'Invalid method parameter(s).'
class JsonRpc_InternalError(JsonRpc_Error):
code = -32603
message = 'Internal JSON-RPC error.'
class JsonRpc_ServerException(JsonRpc_Error):
code = -32000
message = 'An unhandled server exception occurred'
JsonRpcErrors = { -32700: JsonRpc_ParseError,
-32600: JsonRpc_InvalidRequest,
-32601: JsonRpc_MethodNotFound,
-32602: JsonRpc_InvalidParams,
-32603: JsonRpc_InternalError,
-32000: JsonRpc_ServerException }
# -32000 to -32099 are reserved server-errors
#===============================================================================
# Request Type
#===============================================================================
class JsonRpc_Request(object):
def __init__(self, **rpc_dict):
self.id = rpc_dict.get('id', None)
self.method = rpc_dict.get('method', '')
self.params = rpc_dict.get('params', [])
self.kwargs = rpc_dict.get('kwargs', {})
def getID(self):
return self.id
def getMethod(self):
return self.method
def export(self):
# Slight modification of the JSON RPC 2.0 specification to allow
# both positional and named parameters
# Adds kwargs variable to object only when both are present
out = {'id': self.id, 'method': self.method }
if len(self.params) > 0:
out['params'] = self.params
if len(self.kwargs) > 0:
out['kwargs'] = self.kwargs
elif len(self.params) == 0:
out['params'] = self.kwargs
return out
def call(self, target):
# Invoke target method with stored arguments
# Don't attempt to catch exceptions here, let them bubble up
if type(self.params) == dict and len(self.kwargs) == 0:
# Only keyword parameters
return target(**self.params)
else:
return target(*self.params, **self.kwargs)
#===============================================================================
# Response Type
#===============================================================================
class JsonRpc_Response(object):
def __init__(self, **rpc_dict):
self.id = rpc_dict.get('id', None)
self.result = rpc_dict.get('result', None)
def getID(self):
return self.id
def getResult(self):
return self.result
def export(self):
ret = {'id': self.id,
'result': self.result}
return ret
#===============================================================================
# JSON RPC Handlers
#===============================================================================
class JsonRpcPacket(object):
def __init__(self, str_req=None):
self.requests = []
self.responses = []
self.errors = []
if str_req is not None:
try:
req = json.loads(str_req)
if type(req) == list:
# Batch request
for sub_req in req:
try:
self._parseJsonObject(sub_req)
except:
self.errors.append(JsonRpc_InvalidRequest())
if len(req) == 0:
self.errors.append(JsonRpc_InvalidRequest())
elif type(req) == dict:
# Single request
self._parseJsonObject(req)
else:
self.errors.append(JsonRpc_ParseError())
except:
# No JSON object could be decoded
self.errors.append(JsonRpc_ParseError())
def _parseJsonObject(self, rpc_dict):
"""
Takes a dictionary and determines if it is an RPC request or response
"""
if rpc_dict.get('jsonrpc') == '2.0':
if 'method' in rpc_dict.keys() and type(rpc_dict.get('method')) is unicode:
# Request object
self.requests.append(JsonRpc_Request(**rpc_dict))
elif 'id' in rpc_dict.keys() and 'result' in rpc_dict.keys():
# Result response object
self.responses.append(JsonRpc_Response(**rpc_dict))
elif 'id' in rpc_dict.keys() and 'error' in rpc_dict.keys():
# Error response object
error_code = rpc_dict['error'].get('code', -32700)
err_obj = JsonRpcErrors.get(error_code, JsonRpc_ParseError)
self.errors.append(err_obj(**rpc_dict))
else:
self.errors.append(JsonRpc_InvalidRequest(**rpc_dict))
else:
self.errors.append(JsonRpc_InvalidRequest())
def addRequest(self, id, method, *args, **kwargs):
self.requests.append(JsonRpc_Request(id=id,
method=method,
params=args,
kwargs=kwargs))
def clearRequests(self):
self.requests = []
def getRequests(self):
return self.requests
def addResponse(self, id, result):
self.responses.append(JsonRpc_Response(id=id,
result=result))
def clearResponses(self):
self.responses = []
def getResponses(self):
return self.responses
def addError_InvalidParams(self, id):
if id is not None:
self.errors.append(JsonRpc_InvalidParams(id=id))
def addError_ServerException(self, id, msg=None):
if id is not None:
self.errors.append(JsonRpc_ServerException(id=id,
message=msg))
def addError_MethodNotFound(self, id):
if id is not None:
self.errors.append(JsonRpc_MethodNotFound(id=id))
def getErrors(self):
return self.errors
def export(self):
ret = []
for rpc_obj in self.requests + self.responses + self.errors:
rpc_dict = rpc_obj.export()
rpc_dict['jsonrpc'] = '2.0'
ret.append(rpc_dict)
if len(ret) == 1:
return str(json.dumps(ret[0]))
elif len(ret) > 1:
return str(json.dumps(ret))
else:
return ''
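# Hedged usage sketch (not part of the shipped module): the id, method name and
# parameters below are made up for illustration; only classes defined above are
# used. Build an outgoing request, then parse a response string.
def _jsonrpc_example():
    out = JsonRpcPacket()
    out.addRequest(1, 'getTemperature', 'channel0', units='C')
    wire = out.export()  # JSON string ready to send to the server
    reply = JsonRpcPacket('{"jsonrpc": "2.0", "id": 1, "result": 21.5}')
    results = dict((r.getID(), r.getResult()) for r in reply.getResponses())
    return wire, results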
| mit | 6,244,627,912,885,074,000 | 32.257028 | 87 | 0.473735 | false | 4.699773 | false | false | false |
danielparton/TargetExplorer | targetexplorer/oncotator.py | 3 | 1547 | import urllib2
import json
import StringIO
def build_oncotator_search_string(
chromosome_number,
chromosome_start_pos,
chromosome_end_pos,
ref_allele,
var_allele
):
search_string = '{0}_{1}_{2}_{3}_{4}'.format(
chromosome_number,
chromosome_start_pos,
chromosome_end_pos,
ref_allele,
var_allele
)
return search_string
def retrieve_oncotator_mutation_data_as_json(
chromosome_number=None,
chromosome_start_pos=None,
chromosome_end_pos=None,
ref_allele=None,
var_allele=None,
search_string=None
):
"""
Parameters
----------
chromosome_number: int
chromosome_start_pos: int
chromosome_end_pos: int
ref_allele: str
    var_allele: str
    search_string: str, optional
        If provided, it is used directly instead of being built from the
        coordinate and allele arguments.
Returns
-------
oncotator_data: dict
"""
if search_string is None:
search_string = build_oncotator_search_string(
chromosome_number,
chromosome_start_pos,
chromosome_end_pos,
ref_allele,
var_allele
)
page = retrieve_oncotator_mutation_data(search_string)
return json.load(StringIO.StringIO(page))
def retrieve_oncotator_mutation_data(search_string_query, maxreadlength=100000000):
base_url = 'http://www.broadinstitute.org/oncotator/mutation/{0}/'
url_request_string = base_url.format(search_string_query)
response = urllib2.urlopen(url_request_string)
page = response.read(maxreadlength)
return page
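# Hedged example (the coordinates and alleles are illustrative only, and the
# public Oncotator web service may no longer be reachable):
def _example_query():
    query = build_oncotator_search_string(7, 140453136, 140453136, 'A', 'T')
    return retrieve_oncotator_mutation_data_as_json(search_string=query)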
| gpl-2.0 | -6,292,946,117,440,557,000 | 24.360656 | 83 | 0.614092 | false | 3.540046 | false | false | false |
arborh/tensorflow | tensorflow/python/autograph/pyct/error_utils.py | 3 | 6081 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.autograph.pyct import origin_info
class FrameInfo(
collections.namedtuple(
'FrameInfo',
('filename', 'lineno', 'function_name', 'code', 'converted'))):
pass
def _stack_trace_inside_mapped_code(tb, source_map):
"""Summarizes inner traceback frames up to the call to a given function.
This functions locates the innermost (i.e. most recent) frame that corresponds
to code that can be mapped by source_map originated from, and returns a
translated stack trace ending at that frame. If no such frame is found, the
entire stack trace is summarized.
For example, the following code:
def f():
for i in tf.range(1):
z = y + i # z only defined here
Would generate this traceback:
<converted code>
ag__.for_stmt(...)
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Which is then processed into:
<f>
for i in tf.range(1):
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Args:
tb: List[Tuple], the traceback corresponding to an error; typically,
the output of traceback.extract_tb.
source_map: Dict[LineLocation, OriginInfo], a source map as created by
origin_info.create_source_map.
Returns:
List[FrameInfo]
"""
result_frames = []
for filename, line_number, function_name, text in reversed(tb):
loc = origin_info.LineLocation(filename=filename, lineno=line_number)
if loc in source_map:
origin = source_map[loc]
origin_frame_info = FrameInfo(
filename=origin.loc.filename,
lineno=origin.loc.lineno,
function_name=origin.function_name,
code=origin.source_code_line,
converted=True)
result_frames.append(origin_frame_info)
break
fi = FrameInfo(
filename=filename,
lineno=line_number,
function_name=function_name,
code=text,
converted=False)
result_frames.append(fi)
return tuple(result_frames)
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
AssertionError,
AttributeError,
NameError,
NotImplementedError,
RuntimeError,
StopIteration,
TypeError,
ValueError,
)
# KeyError escapes newlines in strings. We create a special subclass
# that doesn't do that. Overriding the name for display purposes; hopefully
# that won't create too many surprises.
class MultilineMessageKeyError(KeyError):
def __init__(self, message, original_key):
super(MultilineMessageKeyError, self).__init__(original_key)
self.__message = message
def __str__(self):
return self.__message
MultilineMessageKeyError.__name__ = KeyError.__name__
class ErrorMetadataBase(object):
"""Container objects attached to exceptions in converted code.
This metadata allows re-raising exceptions that occur in generated code, with
a custom error message that includes a stack trace relative to user-readable
code from which the generated code originated.
"""
def __init__(self, callsite_tb, cause_metadata, cause_message, source_map):
translated_stack = _stack_trace_inside_mapped_code(callsite_tb, source_map)
if cause_metadata is None:
self.translated_stack = translated_stack
self.cause_message = cause_message
else:
# Daisy chain the translated stacks.
self.translated_stack = (
cause_metadata.translated_stack + (translated_stack[-1],))
self.cause_message = cause_metadata.cause_message
def get_message(self):
"""Returns the message for the underlying exception."""
lines = []
lines.append('in converted code:')
lines.append('')
for frame_info in reversed(self.translated_stack):
lines.append(' {}:{} {}{}'.format(
frame_info.filename,
frame_info.lineno,
frame_info.function_name,
' *' if frame_info.converted else '',
))
if frame_info.code is None:
code_snippet = '<source unavailable>'
else:
code_snippet = frame_info.code.strip()
lines.append(' {}'.format(code_snippet))
lines.append('')
message_lines = self.cause_message.split('\n')
for i in range(len(message_lines)):
message_lines[i] = ' ' + message_lines[i]
lines.extend(message_lines)
lines.append('')
return '\n'.join(lines)
def create_exception(self, source_error):
preferred_type = type(source_error)
if preferred_type.__init__ is Exception.__init__:
return preferred_type(self.get_message())
if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:
return preferred_type(self.get_message())
elif preferred_type is KeyError:
return MultilineMessageKeyError(self.get_message(), self.cause_message)
return None
def to_exception(self, source_error):
exc = self.create_exception(source_error)
exc.__suppress_context__ = True
exc.ag_error_metadata = self
return exc
| apache-2.0 | 8,587,418,798,658,513,000 | 30.02551 | 80 | 0.667818 | false | 3.964146 | false | false | false |
simmetria/sentry | src/sentry/pool/redis.py | 1 | 2551 | """
sentry.pool.redis
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import random
from nydus.db import create_cluster
class RedisCappedPool(object):
"""
    Implements a capped queue based on Reservoir Sampling
"""
key_expire = 60 * 60 # 1 hour
    def __init__(self, keyspace, size=1000, hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter', **options):
        if hosts is None:
            hosts = {
                0: {}  # localhost / default
            }
        self.keyspace = keyspace
        self.size = size
        self.conn = create_cluster({
            'engine': 'nydus.db.backends.redis.Redis',
            'router': router,
            'hosts': hosts,
        })
        # We could set this to the maximum value of random.random() (1.0) if we knew this pool class
        # could stay instantiated. Unfortunately we'll need an offset per project, which could grow
        # indefinitely and would require us to have an LRU.
self.offset = None
def put(self, *items):
"""
Efficiently samples ``items`` onto the pool's keyspace.
"""
if self.offset is None:
self.offset = self.conn.zrange(self.keyspace, self.size, self.size, withscores=True)
for item in items:
val = random.random()
if val < self.offset:
with self.conn.map() as conn:
                    # store the item under its random score (redis-py 2.x kwargs form)
                    conn.zadd(self.keyspace, **{item: val})
                    conn.zremrangebyrank(self.keyspace, self.size, -1)
result = self.conn.zrange(self.keyspace, self.size, self.size, withscores=True)
self.offset = result[-1][-1]
def get(self):
"""
Pops a random item off the sample set.
"""
val = random.random()
with self.conn.map() as conn:
# we have to fetch both values as we dont know which one is actually set
item_a = conn.zrange(self.keyspace, val, 1, withscores=True)
item_b = conn.zrevrange(self.keyspace, val, 1, withscores=True)
# pick either item, doesnt matter
item, score = (item_a or item_b)
# remove matching scored item (even if its not the same item)
self.conn.zremrangebyscore(self.keyspace, val, 1)
def values(self):
"""
Returns all samples and clears the pool.
"""
with self.conn.map() as conn:
results = conn.zrange(self.keyspace, 0, self.size)
conn.delete(self.keyspace)
return results
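# Hedged illustration (independent of Redis/Nydus): the "bottom-k by random
# score" reservoir idea the pool relies on, in plain Python. Every item gets a
# uniform random score and only the ``size`` smallest scores are kept, which
# yields a uniform sample of the stream. The names here are made up.
def _reservoir_sketch(stream, size=1000):
    import heapq
    kept = []  # heap of (-score, item); the root holds the largest kept score
    for item in stream:
        score = random.random()
        if len(kept) < size:
            heapq.heappush(kept, (-score, item))
        elif -kept[0][0] > score:
            heapq.heapreplace(kept, (-score, item))
    return [item for _, item in kept]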
| bsd-3-clause | 2,098,426,676,678,218,000 | 33.472973 | 119 | 0.578989 | false | 3.894656 | false | false | false |
fluxer/spm | nuitka/nuitka/codegen/ExpressionCodes.py | 1 | 2219 | # Copyright 2016, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Expression codes, side effects, or statements that are an unused expression.
When you write "f()", i.e. you don't use the return value, that is an expression
only statement.
"""
from .ErrorCodes import getReleaseCode
from .Helpers import generateExpressionCode
def generateExpressionOnlyCode(statement, emit, context):
return getStatementOnlyCode(
value = statement.getExpression(),
emit = emit,
context = context
)
def getStatementOnlyCode(value, emit, context):
tmp_name = context.allocateTempName(
base_name = "unused",
type_code = "NUITKA_MAY_BE_UNUSED PyObject *",
unique = True
)
# An error of the expression is dealt inside of this, not necessary here.
generateExpressionCode(
expression = value,
to_name = tmp_name,
emit = emit,
context = context
)
getReleaseCode(
release_name = tmp_name,
emit = emit,
context = context
)
def generateSideEffectsCode(to_name, expression, emit, context):
for side_effect in expression.getSideEffects():
getStatementOnlyCode(
value = side_effect,
emit = emit,
context = context
)
generateExpressionCode(
to_name = to_name,
expression = expression.getExpression(),
emit = emit,
context = context
)
| gpl-2.0 | 1,210,095,960,699,402,000 | 29.819444 | 80 | 0.646237 | false | 4.094096 | false | false | false |
carlgao/lenga | images/lenny64-peon/usr/share/python-support/pyrite-publisher/PyritePublisher/plugin_CSV.py | 1 | 6822 | #
# $Id: plugin_CSV.py,v 1.1 2002/03/26 12:56:06 rob Exp $
#
# Copyright 2001 Rob Tillotson <[email protected]>
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee or royalty is
# hereby granted, provided that the above copyright notice appear in
# all copies and that both the copyright notice and this permission
# notice appear in supporting documentation or portions thereof,
# including modifications, that you you make.
#
# THE AUTHOR ROB TILLOTSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE!
#
"""
"""
__version__ = '$Id: plugin_CSV.py,v 1.1 2002/03/26 12:56:06 rob Exp $'
__author__ = 'Rob Tillotson <[email protected]>'
__copyright__ = 'Copyright 2001 Rob Tillotson <[email protected]>'
from dtkplugins import ParserPlugin, LOG_ERROR, LOG_NORMAL, LOG_DEBUG, LOG_WARNING
from dbprotocol import *
import string
class CSVError(Exception): pass
class CSVParser:
def __init__(self):
self.fields = []
self.buf = ''
self.comment_marker = '#'
self.field_separator = ','
self.escape_double_quote = 1
self.skip_blank_lines = 1
self._state = 'start-field'
self._accum = ''
def flush(self):
self._state = 'start-field'
self._accum = ''
self.buf = ''
self.fields = []
def __parse(self):
x = 0
done = 0
while x < len(self.buf):
c = self.buf[x]
# start-field state: looking for beginning of field
# skip whitespace, separator means field was empty
if self._state == 'start-field':
if c == ' ' or c == '\t':
x = x + 1
continue
elif c == '\n':
done = 1
x = x + 1
break
elif c == '"':
self._state = 'quoted-string'
elif c == self.field_separator:
self.fields.append('')
else:
self._accum = self._accum + c
self._state = 'in-field'
elif self._state == 'in-field':
if c == self.field_separator:
self.fields.append(self._accum.strip())
self._accum = ''
self._state = 'start-field'
elif c == '\n':
self.fields.append(self._accum.strip())
self._accum = ''
self._state = 'start-field'
done = 1
x = x + 1
break
elif c == '"' and self.escape_double_quote and x < len(self.buf)-1 \
and self.buf[x+1] == '"':
x = x + 1 # eat second quote
self._accum = self._accum + '"'
else:
self._accum = self._accum + c
elif self._state == 'quoted-string':
if c == '"':
if self.escape_double_quote and x < len(self.buf)-1 and self.buf[x+1] == '"':
x = x + 1
self._accum = self._accum + '"'
else:
self.fields.append(self._accum)
self._accum = ''
self._state = 'after-quoted-string'
else:
self._accum = self._accum + c
elif self._state == 'after-quoted-string':
if c == '\n':
done = 1
x = x + 1
self._state = 'start-field'
break
elif c == ' ' or c == '\t':
x = x + 1
continue
elif c == self.field_separator:
self._state = 'start-field'
else:
self.flush()
raise CSVError, "text after quote"
x = x + 1
self.buf = self.buf[x:]
if done:
f = self.fields
self.fields = []
return f
def feed(self, text=''):
self.buf = self.buf + text
f = self.__parse()
while f is not None:
if f or not self.skip_blank_lines: self.handle_line(f)
f = self.__parse()
def eof(self):
self.feed('\n')
def handle_line(self, fields):
print fields
class PPCSVParser(CSVParser):
def __init__(self, next):
CSVParser.__init__(self)
self.next = next
self.field_specs = 0
def handle_line(self, fields):
if self.use_field_names and not self.field_specs:
self.field_specs = [FieldSpec(x, FLD_STRING) for x in fields]
self.next.define_fields(self.field_specs)
else:
if not self.field_specs:
self.field_specs = [FieldSpec("Field%d" % x, FLD_STRING) \
for x in range(len(fields))]
self.next.define_fields(self.field_specs)
self.next.feed_record(fields)
def eof(self):
CSVParser.eof(self)
class Plugin(ParserPlugin, CSVParser):
name = 'CSV'
description = 'Comma-separated-values parser.'
links = [ (0, 'text/comma-separated-values', 'columnar-database'),
(-100, 'text/plain', 'columnar-database'),
(-100, 'application/octet-stream', 'columnar-database'),
]
def __init__(self, *a, **kw):
ParserPlugin.__init__(self, *a, **kw)
self._add_property('use_field_names', 'Get field names from first line', boolean=1)
self._add_cli_option('use_field_names', None, 'use-field-names',
'Get field names from first line',
boolean=1)
self.use_field_names = 0
def open(self, chain, next, *a, **kw):
ParserPlugin.open(self, chain, next, *a, **kw)
self.parser = PPCSVParser(next)
self.copyProperties(self.parser)
self.ttbl = string.maketrans('','')
return self
def feed(self, data):
l = string.translate(data, self.ttbl, '\r')
self.parser.feed(l)
def eof(self):
self.parser.eof()
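# Hedged usage sketch (not wired into Pyrite Publisher): feeding CSV text to the
# standalone CSVParser defined above; its default handle_line() simply prints
# each parsed row, including the escaped double quote in the second field.
def _csv_parser_demo():
    p = CSVParser()
    p.feed('name,comment\n')
    p.feed('widget,"3"" bolt, stainless"\n')
    p.eof()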
| mit | 4,477,478,368,812,505,600 | 32.772277 | 97 | 0.49355 | false | 4.124547 | false | false | false |
jpoppe/puppetmodules | modules/shared/reprepro/files/usr/local/bin/rep.py | 1 | 7804 | #!/usr/bin/env python
"""
rep.py (c) 2010-2011 eBay - written by Jasper Poppe <[email protected]>
"""
import optparse
import os
import pwd
import subprocess
def get_repos(path):
"""create a dictionary with all the repositories"""
repos = os.listdir(path)
repos = [repo for repo in repos if os.path.isfile(os.path.join(path, repo, 'conf/distributions'))]
confs = [os.path.join(path, directory, 'conf/distributions') for directory in repos]
repositories = {}
for conf in confs:
updates_file = os.path.join(conf.rsplit('/', 1)[0], 'updates')
repo = conf.replace(path, '')[1:].split('/', 1)[0]
if os.path.isfile(updates_file):
type = 'mirror'
else:
type = 'custom'
repositories[repo] = {'type': type, 'path': os.path.join(path, repo)}
return repositories
def get_codenames(repositories):
"""add the codename for each repository to the repositories dictionary"""
for repo in repositories:
file = os.path.join(repositories[repo]['path'], 'conf/distributions')
data = open(file, 'r').read()
for line in data.split('\n'):
line = line.split(': ')
if line[0] == 'Codename':
if not repositories[repo].has_key('codenames'):
repositories[repo]['codenames'] = [line[1]]
else:
repositories[repo]['codenames'].append(line[1])
return repositories
def add(repositories, add, packages, user='repo'):
"""add a package to a reprepro repository"""
repo, codename = add
repo_dir = repositories[repo]['path']
for package in packages:
if os.path.isfile(package):
print ('info: adding "%s" package "%s" to "%s"' % (codename, package, repo))
#subprocess.call(['sudo', '-u', user, '/usr/bin/reprepro', '-V', '-b', repo_dir, 'includedeb', codename, package])
subprocess.call(['/usr/bin/reprepro', '-V', '-b', repo_dir, 'includedeb', codename, package])
else:
print ('error: package "%s" not found' % package)
def delete(repositories, delete, packages, user='repo'):
"""delete a package from a reprepro repository"""
repo, codename = delete
repo_dir = repositories[repo]['path']
for package in packages:
print ('info: removing package "%s" from "%s" (%s)' % (package, repo, codename))
#subprocess.call(['sudo', '-u', user, '/usr/bin/reprepro', '-V', '-b', repo_dir, 'remove', codename, package])
subprocess.call(['/usr/bin/reprepro', '-V', '-b', repo_dir, 'remove', codename, package])
def contents(name, repo_dir, codename):
"""list the packages in the specified repository"""
print ('info: listing contents for codename "%s" in repository "%s"' % (codename, name))
subprocess.call(['/usr/bin/reprepro', '-V', '-b', repo_dir, 'list', codename])
def update(repo, path, user='repo'):
"""sync a mirror"""
print ('info: fetching updates for repository "%s"' % repo)
#subprocess.call(['sudo', '-u', user, '/usr/bin/reprepro', '-V', '-b', path, '--noskipold', 'update'])
subprocess.call(['/usr/bin/reprepro', '-V', '-b', path, '--noskipold', 'update'])
print ('')
def list_repos(repositories, repo_type):
"""list all available repositories"""
for key, values in repositories.items():
if values['type'] == repo_type:
print ('%s (%s)' % (key, ', '.join(values['codenames'])))
def main():
"""main application, this function won't called when used as a module"""
parser = optparse.OptionParser(prog='rep.py', version='0.1')
parser.set_description('rep.py - reprepro manager (c) 2010-2011 eBay - Jasper Poppe <[email protected]>')
parser.set_usage('%prog -l | [-a|-d <repository> <codename> <package>... | [-c <repository> <codename>] | [-u <repository>]')
parser.add_option('-l', dest='list_all', help='list available repositories', action='store_true')
parser.add_option('-a', dest='add', help='add package(s) to a custom repository', nargs=2)
parser.add_option('-d', dest='delete', help='remove package(s) from a custom repository', nargs=2)
parser.add_option('-c', dest='contents', help='list the contents of a repository', nargs=2)
parser.add_option('-u', dest='update', help='update mirror', action='append')
parser.add_option('-U', dest='user', help='override repository owner (default: repo) (DO NOT USE IN A PRODUCTION ENVIRONMENT)', default='repo')
parser.add_option('-p', dest='repo_path', help='repository path (default: /opt/repositories/debian)', default='/opt/repositories/debian')
(opts, args) = parser.parse_args()
if opts.add or opts.delete or opts.contents or opts.update:
#if not os.geteuid() == 0:
if not pwd.getpwuid(os.getuid())[0] == opts.user:
parser.error('only the "%s" user can modify repositories' % opts.user)
#parser.error('only a user with root permissions can modify repositories')
if opts:
#if opts.user == 'root':
# os.environ['HOME'] = '/root'
#else:
# os.environ['HOME'] = os.path.join('/home', opts.user)
repositories = get_repos(opts.repo_path)
repositories = get_codenames(repositories)
if opts.list_all:
print ('Custom repositories (you can add debian packages here):')
list_repos(repositories, 'custom')
print ('')
print ('Mirrors:')
list_repos(repositories, 'mirror')
elif opts.add:
if not args:
parser.error('need to specify at least one package')
if repositories.has_key(opts.add[0]):
if repositories[opts.add[0]]['type'] == 'custom':
#add(repositories, opts.add, args, opts.user)
add(repositories, opts.add, args)
else:
parser.error('"%s" is not a valid and or a custom repository (hint: try -l)' % opts.add)
else:
parser.error('repository "%s" not found (hint: try -l)' % opts.add[0])
elif opts.delete:
if not args:
parser.error('need to specify at least one package')
if repositories.has_key(opts.delete[0]):
if repositories[opts.delete[0]]['type'] == 'custom':
#delete(repositories, opts.delete, args, opts.user)
delete(repositories, opts.delete, args)
else:
parser.error('"%s" is not a valid and or a custom repository (hint: try -l)' % opts.delete)
elif opts.update:
            if len(opts.update) == 1 and opts.update[0] == 'ALL':
for key, value in repositories.items():
if value['type'] == 'mirror':
update(key, value['path'])
#update(key, value['path'], opts.user)
else:
for repo in opts.update:
if repositories.has_key(repo):
if repositories[repo]['type'] == 'mirror':
#update(repo, repositories[repo]['path'], opts.user)
update(repo, repositories[repo]['path'])
else:
parser.error('"%s" is not a mirror, refusing to update (hint: try -l)' % repo)
else:
parser.error('"%s" is not a valid repository (hint: try -l)' % repo)
elif opts.contents:
if args:
parser.error('the contents option takes no arguments')
else:
try:
contents(opts.contents[0], repositories[opts.contents[0]]['path'], opts.contents[1])
except KeyError:
parser.error('%s is not a valid repository, type -l to list all available repositories' % opts.contents[0])
else:
parser.print_help()
if __name__ == '__main__':
main()
| apache-2.0 | 5,650,508,856,775,417,000 | 44.905882 | 147 | 0.587007 | false | 3.806829 | false | false | false |
lightalchemist/ML-algorithms | dim_reduction/test_spectral_embedding.py | 1 | 1960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import spectral_embedding
import sys
sys.path.insert(0, '../cluster')
import kmeans
def test():
k = 2
X, y_true = make_moons(n_samples=500, random_state=0, noise=0.01)
Y = spectral_embedding.transform(X, k, n_neighbors=7, sigma=0.1)
n = np.linalg.norm(Y, axis=1)
n = n.reshape(-1, 1)
Y = Y / n
# Apply K-Means to cluster Y
y_pred, _, _ = kmeans.kmeans(Y, k)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.scatter(np.arange(len(Y)), Y[:, 0])
ax.set_title("Eigenvector 1")
ax = fig.add_subplot(122)
ax.scatter(np.arange(len(Y)), Y[:, 1])
ax.set_title("Eigenvector 2")
# Plot the data
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X[y_true==0, 0], X[y_true==0, 1], c='b', alpha=0.5, label="Class 1")
ax.scatter(X[y_true==1, 0], X[y_true==1, 1], c='g', alpha=0.5, label="Class 2")
ax.set_title("Original data")
ax.legend()
# Plot the predictions
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X[y_pred==0, 0], X[y_pred==0, 1], c='r', alpha=0.5, label="Class 1")
ax.scatter(X[y_pred==1, 0], X[y_pred==1, 1], c='y', alpha=0.5, label="Class 2")
ax.set_title("Result of clustering")
ax.legend()
# Plot the transformed data
fig = plt.figure()
ax = fig.add_subplot(111)
idx_class0 = np.argwhere(y_true==0)
idx_class1 = np.argwhere(y_true==1)
ax.scatter(Y[idx_class0, 0], Y[idx_class0, 1], c='b', alpha=0.5, label="Class 1")
ax.scatter(Y[idx_class1, 0], Y[idx_class1, 1], c='g', alpha=0.5, label="Class 2")
ax.set_title("Original data after spectral embedding")
ax.legend()
print("Number in class 0: {}".format(np.sum(y_pred==0)))
print("Number in class 1: {}".format(np.sum(y_pred==1)))
plt.show()
if __name__ == '__main__':
test()
| mit | -4,237,771,139,865,878,000 | 28.69697 | 85 | 0.593367 | false | 2.707182 | false | false | false |
milara/recipe-project-master-2016 | autograder.py | 1 | 5651 | '''Version 0.33'''
import json
import csv
import glob
import sys
import importlib
from pprint import pprint
from collections import Counter
# init is an optional flag to indicate you're starting
# over; old autograder results are written over and column
# headers are printed to the file.
team = "0"
init = False
for arg in sys.argv:
if arg == "init":
init = True
else:
team = arg
api = importlib.import_module("Team%s.recipe_api" % team)
def check_tools(answer, stud):
score = 0
    # First pass: score exact matches. Iterate over a copy so that removing
    # items from stud does not skip elements.
    for s in list(stud):
        if s in answer:
            print s
            score += 1
            answer.remove(s)
            stud.remove(s)
expans = dict([[a, {'words': a.split(), 'matches': Counter()}] for a in answer])
expstud = dict([[a, a.split()] for a in stud])
for s in expstud:
tmpscore = -1
for word in expans:
complement = set(expstud[s]) ^ set(expans[word]['words'])
intersection = set(expstud[s]) & set(expans[word]['words'])
newscore = float(len(intersection))/(len(intersection)+len(complement))
print "%s, %s, %d, %d, %f" % (s, word, len(intersection), len(complement), newscore)
if newscore > tmpscore:
tmpscore = newscore
tmpmatch = word
if tmpscore > 0:
expans[tmpmatch]['matches'][s] = tmpscore
stud.remove(s)
for word in expans:
match = expans[word]['matches'].most_common(1)
if len(match) > 0:
score += expans[word]['matches'].most_common(1)[0][1]
return score
def check_ingredients(answer, stud):
scores = []
score = 0
for x in range(min([len(answer), len(stud)])):
for ind in ['name', 'measurement', 'quantity', 'descriptor', 'preparation', 'prep-description']:
if ind in stud[x]:
print "\nYour answer: %s"%str(stud[x][ind])
print "Valid answers: %s"%str(answer[x][ind])
if ind == 'quantity':
flag = False
for val in answer[x][ind]:
if type(stud[x][ind]) is str:
if val == stud[x][ind]:
flag = True
elif val == stud[x][ind]:
flag = True
elif float('%.2f'%stud[x][ind]) == val:
flag = True
if flag:
score += 1
else:
print "No match!"
elif stud[x][ind] in answer[x][ind]:
score += 1
scores.append(min([score, answer[x]['max']]))
print "Score: %s\n---"%str(scores[-1])
score = 0
return sum(scores)
def get_file(fn):
with open(fn, 'r') as f:
answer = json.load(f)
return answer
def main(team, init=False):
"""Pass 'init' as a command line variable if this is your
first time running the program and you want it to print the
column headers to the file."""
keys = ['ingredients', 'primary cooking method', 'cooking methods', 'implied cooking methods', 'cooking tools', 'implied cooking tools']
if init:
with open('parsegrades.csv', 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter='\t')
csvwriter.writerow(keys)
scores = Counter(dict(zip(keys, [0]*len(keys))))
cnt = 1
for answer in (get_file(fn) for fn in glob.iglob('Recipes/*.json')):
stud = getattr(api, "autograder")(answer['url'])
temp = Counter(dict(zip(keys, [0]*len(keys))))
if type(stud) == str:
stud = json.loads(stud)
if type(stud) == dict:
temp['cooking tools'] = min([check_tools(answer['cooking tools'], stud['cooking tools']), answer['max']['cooking tools']])/float(answer['max']['cooking tools'])
temp['cooking methods'] = min([check_tools(answer['cooking methods'], stud['cooking methods']), answer['max']['cooking methods']])/float(answer['max']['cooking methods'])
temp['implied cooking tools'] = min([check_tools(answer['implied cooking tools'], stud['cooking tools']), answer['max']['implied cooking tools']])/float(answer['max']['implied cooking tools'])
temp['implied cooking methods'] = min([check_tools(answer['implied cooking methods'], stud['cooking methods']), answer['max']['implied cooking methods']])/float(answer['max']['implied cooking methods'])
if stud['primary cooking method'] == answer['primary cooking method']:
temp['primary cooking method'] = 1
stud = stud['ingredients']
temp['ingredients'] = check_ingredients(answer['ingredients'], stud)/float(answer['max']['ingredients'])
scores += temp
print "%s\t%s\t%s\t%s\t%s\t%s\t%s" % ("Recipe", 'Ingredients', 'Primary Method', 'Methods', 'Implied Methods', 'Tools', 'Implied Tools')
print "Recipe %d:\t%.3f\t%d\t%.3f\t%.3f\t%.3f\t%.3f" % (cnt, temp['ingredients'], temp['primary cooking method'], temp['cooking methods'], temp['implied cooking methods'], temp['cooking tools'], temp['implied cooking tools'])
cnt += 1
else:
print "student answer formatting error"
row = ["Team %s" % team]
row.extend([scores[k] for k in keys])
with open('parsegrades.csv', 'ab') as csvfile:
csvwriter = csv.writer(csvfile, delimiter='\t')
csvwriter.writerow(row)
if __name__ == '__main__':
main(team, init)
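
# Worked example of the check_tools() fuzzy matching above (hypothetical data,
# not taken from any real recipe file): with answer = ['large pot', 'knife'] and
# stud = ['knife', 'pot'], 'knife' matches exactly and scores 1.0, while 'pot'
# shares one word with 'large pot' (intersection 1, symmetric difference 1), so
# it scores 1 / (1 + 1) = 0.5, giving a total score of 1.5.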
| mit | 7,319,128,202,753,511,000 | 37.442177 | 237 | 0.551584 | false | 3.727573 | false | false | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_chart_layout05.py | 1 | 2626 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_layout05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with user defined layout."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'area'})
chart.axis_ids = [43495808, 43497728]
data = [
[1, 2, 3, 4, 5],
[8, 7, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.set_x_axis({
'name': 'XXX',
'name_layout': {
'x': 0.34620319335083105,
'y': 0.85090259550889469,
}
})
chart.set_y_axis({
'name': 'YYY',
'name_layout': {
'x': 0.21388888888888888,
'y': 0.26349919801691457,
}
})
worksheet.insert_chart('E9', chart)
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,320,063,357,677,295,000 | 25 | 79 | 0.464966 | false | 3.85044 | true | false | false |
tkliuxing/bookspider | bookspider/bookspider/spiders/qidianrank_spider.py | 1 | 3343 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# import re
import urlparse
import urllib
import time
# import redis
from pyquery import PyQuery as PQ
from scrapy.spiders import Spider
# from scrapy.selector import Selector
from scrapy.http import Request
from bookspider.items import QidianRankItem
# RC = redis.Redis()
class QidianRankSpider(Spider):
name = "qidianrank"
allowed_domains = ["top.qidian.com"]
start_urls = [
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=1",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=10",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=20",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=30",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=40",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=1",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=10",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=20",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=30",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=40",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=1",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=10",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=20",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=30",
"http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=40",
]
# def is_pass_url(self, url):
# for i in PASS_URL:
# if i in url:
# return True
# return False
def parse(self, response):
url = response.url
# sel = Selector(response)
jQ = PQ(response.body_as_unicode())
for i in jQ("#list1 tr"):
elem = jQ(i)
title = elem.find("td").eq(2).find("a").eq(0).text()
if title:
try:
click = int(elem.find("td").eq(3).text())
except:
continue
else:
item = QidianRankItem()
item["time_type"] = QidianRankItem.get_time_type(url)
item["title"] = title
item["vip_click"] = click
yield item
url_obj = urlparse.urlparse(url)
page_num = str(
int(urlparse.parse_qs(url_obj.query).get("PageIndex", ['0'])[0]) + 1
)
time_num = urlparse.parse_qs(url_obj.query).get("Time", ['3'])[0]
if page_num == "50":
yield Request(url, callback=self.parse)
else:
new_qs = urllib.urlencode({
"PageIndex": page_num,
"Time": time_num,
"TopType": '1',
})
new_url = urlparse.urlunparse([
url_obj.scheme,
url_obj.netloc,
url_obj.path,
url_obj.params,
new_qs,
url_obj.fragment
])
time.sleep(0.5)
yield Request(new_url, callback=self.parse)
| apache-2.0 | 310,982,364,915,513,540 | 36.561798 | 82 | 0.56566 | false | 3.264648 | false | false | false |
scizen9/kpy | SEDMrph/transformations.py | 1 | 3245 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 15:25:46 2015
@author: nadiablago
"""
import numpy as np
def johnson2sdss(U, B, V, R, I):
'''global transformations between UBVRI and ugriz'''
'''
#Color Color Term Zeropoint Range
"gV": (0.630 ± 0.002) (B − V) −(0.124 ± 0.002)
"ri": (1.007 ± 0.005) (R − I) −(0.236 ± 0.003)
"rz": (1.584 ± 0.008) (R − I) −(0.386 ± 0.005)
"rR": (0.267 ± 0.005) (V − R) +(0.088 ± 0.003) V − R ≤ 0.93
"rR": (0.77 ± 0.04) (V − R) −(0.37 ± 0.04) V − R > 0.93
"ug": (0.750 ± 0.050) (U − B) + (0.770 ± 0.070) (B − V) +(0.720 ± 0.040)
"gB": −(0.370 ± 0.002) (B − V) −(0.124 ± 0.002)
"gr": (1.646 ± 0.008) (V − R) −(0.139 ± 0.004)
"iI": [0.247, 0.329]'''
def sdss2johnson(ref_sdss, savefile=None):
'''
Jordi et. al 2006
ugriz -> UBVRcIc
================
Transformation
U-B = (0.79 ± 0.02)*(u-g) - (0.93 ± 0.02)
U-B = (0.52 ± 0.06)*(u-g) + (0.53 ± 0.09)*(g-r) - (0.82 ± 0.04)
B-g = (0.175 ± 0.002)*(u-g) + (0.150 ± 0.003)
B-g = (0.313 ± 0.003)*(g-r) + (0.219 ± 0.002)
V-g = (-0.565 ± 0.001)*(g-r) - (0.016 ± 0.001)
V-I = (0.675 ± 0.002)*(g-i) + (0.364 ± 0.002) if g-i <= 2.1
V-I = (1.11 ± 0.02)*(g-i) - (0.52 ± 0.05) if g-i > 2.1
R-r = (-0.153 ± 0.003)*(r-i) - (0.117 ± 0.003)
R-I = (0.930 ± 0.005)*(r-i) + (0.259 ± 0.002)
I-i = (-0.386 ± 0.004)*(i-z) - (0.397 ± 0.001)
'''
ref_sdss = np.genfromtxt(ref_sdss, dtype=None, names=True, delimiter=',')
bands = "BVRI"
john = np.zeros(len(ref_sdss), dtype=[('id', '<i8'), ('ra', '<f8'), ('dec', '<f8'), \
('U', '<f4'), ('B', '<f4'), ('V', '<f4'), ('R', '<f4'), ('I', '<f4'),\
('dU', '<f4'), ('dB', '<f4'), ('dV', '<f4'), ('dR', '<f4'), ('dI', '<f4')])
band_dic = {"B":"g", "V":"g", "R":"r", "I":"i"}
coldic = {"U":"ug", "B":"gr", "V":"gr", "R":"ri", "I":"iz"}
coefs = {"U": [np.array([0.79, 0.93]), np.array([0.02, 0.02])],
"B": [np.array([0.313, 0.219]), np.array([0.003, 0.002])],
"V": [np.array([-0.565, 0.016]), np.array([0.001, 0.001])],
"R": [np.array([-0.153, 0.117]), np.array([0.003, 0.003])],
"I": [np.array([-0.386, 0.397]), np.array([0.004, 0.001])] }
for b in bands:
col = ref_sdss[coldic[b][0]] - ref_sdss[coldic[b][1]]
john[b] = np.sum(np.array([col, 1]) * coefs[b][0]) + ref_sdss[band_dic[b]]
john["d"+b] = np.sum(np.array([col, 1]) * coefs[b][1])
#U band a bit different
b = "U"
col = ref_sdss[coldic[b][0]] - ref_sdss[coldic[b][1]]
john[b] = np.sum(np.array([col, 1]) * coefs[b][0]) + john["B"]
john["d"+b] = np.sum( np.array([col, 1]) * coefs[b][1] )
john["ra"] = ref_sdss["ra"]
john["dec"] = ref_sdss["dec"]
john["id"] = ref_sdss["objid"]
if (not savefile is None):
np.savetxt(savefile, john, header="id,ra,dec,U,B,V,R,I,dU,dB,dV,dR,dI", fmt="%d,%.5f,%.5f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f")
return john | gpl-2.0 | -1,532,624,609,444,865,500 | 39.641026 | 149 | 0.422215 | false | 2.060468 | false | false | false |
waipu/bakawipe | lib/beon/exc.py | 1 | 1273 | # -*- coding: utf-8 -*-
# -*- mode: python -*-
from exceptions import *
class InvalidLogin(PermanentError, AnswerException): pass
class GuestDeny(PermanentError, AnswerException): pass
class UserDeny(PermanentError, AnswerException): pass
class Bumplimit(PermanentError, AnswerException): pass
class PermClosed(PermanentError, AnswerException): pass
class TopicDoesNotExist(PermanentError, AnswerException): pass
class BadGateway(TemporaryError, AnswerException): pass
class EmptyAnswer(TemporaryError, AnswerException): pass
class Antispam(TemporaryError, AnswerException): pass
class Redir(TemporaryError, AnswerException): pass
class Wait5Min(TemporaryError, AnswerException): pass
class Closed(TemporaryError, AnswerException): pass
class UnknownAnswer(TemporaryError, AnswerException): pass
class RegRetryLimit(PermanentError): pass
class DosVersionError(PermanentError, NotImplementedError): pass
class Captcha(AnswerException):
def __init__(self, page, errstr=None, target=None, postdata=None, catry=1):
'''
page: raw page with captcha
catry: see Beon.addtopicfin()
'''
super(Captcha, self).__init__(errstr, target, page, postdata)
self.page, self.catry = page, catry
class Success(AnswerException): pass
| gpl-3.0 | 673,661,353,908,967,800 | 38.78125 | 79 | 0.769049 | false | 4.05414 | false | false | false |
mfisk/smacq | pysmacq/smacq.py | 1 | 8900 | """System for Modular Analysis and Continuous Queries.
See http://smacq.sourceforge.net/
"""
import libpysmacq
import time, sys
# TODO:
# Change all instances of raise Exception to raise More_Appropriate_Exception
class SmacqQuery: # {{{
"""Executes one or more queries in the SMACQ (System for Modular Analysis and Continous Queries) API."""
graph = None
dts = None
scheduler = None
running = False
def __init__(self, query_str = None, run_now = False):
self.__running = False
self.scheduler = libpysmacq.SmacqScheduler()
self.dts = libpysmacq.DTS()
self.graph = libpysmacq.SmacqGraph()
if query_str:
self.graph.addQuery(self.dts, self.scheduler, query_str)
if run_now:
self.run()
def run(self, ignoreDups = False):
"""Adds this query to the main SmacqGraph and runs it. If the scheduler hasn't already been
started, then it is started."""
if self.__running:
if not ignoreDups:
print "This query is already running."
else:
self.graph.init(self.dts, self.scheduler)
self.scheduler.seed_produce(self.graph)
self.scheduler.start_threads(0)
self.__running = True
return
def is_running(self):
return self.__running
# Fetching Methods {{{
def fetchone(self):
"""Fetch the next result object and return it, or None when no more data is available"""
return self.fetchmany(1)
def fetchmany(self, num_results = 1): # {{{
"""Returns num_results DtsObject objects in a list. This will wait for results if it
needs to. If the number of results returned is less than requested, then the
query has been completed."""
self.run(True)
query_results = []
for i in range(num_results):
result = self.scheduler.get()
if result:
query_results.append( result )
else:
break
return query_results
def fetch_nb(self, num_results = 1): # {{{
"""Performs a non-blocking fetch of num_results data items.
To test if the query is done, check the value of done(). If done is True, then a fetchall
performed afterwards should return the remaining results without blocking."""
self.run(True)
query_results = []
for i in range(num_results):
result = self.scheduler.element()
if result:
query_results.append( result )
else:
break
return query_results
def fetchall(self, result_limit = None, time_limit = 0): # {{{
"""Fetches all results produced by the current query. Note that querying an unbounded
data source will cause the function to never return or run out of memory. Returns a tuple
containing the list of results and the reason it returned.
Two limiting parameters are provided: result_limit and time_limit (in seconds).
If either limit is reached, the query will return it's results immediately. If either limit
is set to None (default) or zero, it has no effect. """
self.run(True)
if result_limit == 0:
result_limit = None
time_done = time.time() + time_limit
num_results = 0
results = []
while (True):
if result_limit is not None:
if num_results >= result_limit:
stop_reason = "max_results"
break
else:
num_results += 1
            if time_limit != 0 and time.time() >= time_done:
stop_reason = "time"
break
result = self.scheduler.get()
if not result:
stop_reason = "done"
break
results.append( result )
return (results, stop_reason)
def busy_loop(self):
"""Runs the query until it is done, but throws out any results"""
self.run(True)
self.scheduler.busy_loop()
def __done(self):
# While this isn't currently needed, it will be if non-blocking fetches are implemented.
"""Returns True if the query is done processing, False otherwise"""
return self.scheduler.done()
#end Fetching methods }}}
# Iterator methods {{{
def __iter__(self):
"""Return self in compliane with iterator protocol."""
self.run(True)
return self
def next(self):
"""Return the next DtsObject returned by the query. Raise StopIteration when complete."""
x = self.scheduler.get()
if x:
return x
else:
raise StopIteration
# }}}
# Join methods {{{
def append(self, query):
"""Joins this query with the other_query.
other_query can be either a string or a SmacqGraph object"""
if type(query) == str:
newg = libpysmacq.SmacqGraph()
newg.addQuery(self.dts, self.scheduler, query)
self.graph.join(newg)
elif type(query) == libpysmacq.SmacqGraph:
self.graph.join(query)
else:
print type(query)
raise TypeError
def __iadd__(self, query):
"""This is the += operator."""
self += query
return self
def add(self, query):
"""Adds the query on the righthand side to the query on the left.
If the right hand side is a query string, it is used to create a new query object first."""
if type(query) == str:
self.graph.addQuery(self.dts, self.scheduler, query)
else:
self.graph.addQuery(query)
def __rshift__(self, query):
"""This is the >>= operator. Joins the query on the righthand side with the query on the left.
If the right hand side is a query string, it is used to create a new query object first."""
self.append(query)
return self
def __add__(self, query):
"""Adds two queries together, and returns a new query as a result"""
newQuery = self.clone()
newQuery += (query)
return newQuery
# end join methods }}}
def __str__(self): # {{{
return self.graph.print_query()
# }}}
# end SmacqQuery }}}
def DtsObject_getdata(self):
if len(self):
return self.get()._getdata()
else:
return None
def DtsObject_getitem(self, index):
    index = str(index)  # Convert numeric indices to strings
x = self.get().getfield(index, True)
if not x.get() and self.has_key(index):
return None
if not x.get():
raise KeyError, "DtsObject instance does not contain field " + index
return x
libpysmacq.DtsObject.__len__ = lambda self: self.get().__len__()
libpysmacq.DtsObject.has_key = lambda self, name: (self.get().getfield(str(name), True).get() != None)
libpysmacq.DtsObject.__getattr__ = lambda self, name: self.get().__getattribute__(name)
libpysmacq.DtsObject.__nonzero__ = lambda self: (self.get() != None)
def DtsObject_dict(self):
"""Construct a dictionary of all possible fields"""
d = {}
for f in self.keys():
d[f] = self[f]
return d
def DtsObject_str(self):
"""Return human-readable version of DtsObject by showing all of its fields"""
return str(self.dict())
def DtsObject_repr(self):
"""Return string representation of DtsObject"""
if self.has_key("string"):
s = self["string"]
if s:
# SMACQ strings are NULL-terminated, so ignore final byte
return s.getdata()[:-1]
return repr(self.getdata())
def DtsObject_keys(self, field_refs = False):
"""Returns a list of field names for this object.
If field_refs is True, DtsField objects are returned instead. DtsField objects can be used instead
    of field name strings for DtsObject field lookups, and are significantly faster. The DtsField
objects will be returned in the same order as the field names."""
self.prime_all_fields()
fields = self.fieldcache()
field_names = []
field_nums = []
for i in range( len(fields) ):
if fields[i].get() is not None:
field_nums.append(i)
if field_refs:
# Make a list of DtsField objects
for i in field_nums:
field_names.append( libpysmacq.DtsField(i) )
else:
# Make a list of field names
field_getname = self.getfieldname
for i in field_nums:
field_names.append( field_getname( i ) )
return field_names
libpysmacq.DtsObject.dict = DtsObject_dict
libpysmacq.DtsObject.keys = DtsObject_keys
libpysmacq.DtsObject.getdata = DtsObject_getdata
libpysmacq.DtsObject.__str__ = DtsObject_str
libpysmacq.DtsObject.__repr__ = DtsObject_repr
libpysmacq.DtsObject.__getitem__ = DtsObject_getitem
del DtsObject_keys, DtsObject_str, DtsObject_dict, DtsObject_getitem, DtsObject_repr, DtsObject_getdata
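
# Minimal usage sketch (the query string below is a placeholder -- any valid
# SMACQ query string would do; this is not a tested pipeline):
#
#   q = SmacqQuery('<some SMACQ query string>')
#   batch = q.fetchmany(5)          # blocking fetch of up to 5 DtsObjects
#   for obj in q:                   # or iterate over the remaining results
#       print obj.keys()
#
# fetchall() drains the query and also reports why it stopped ('done', 'time'
# or 'max_results'); busy_loop() runs the query to completion, discarding output.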
| gpl-2.0 | 8,746,293,703,037,373,000 | 29.902778 | 108 | 0.611798 | false | 3.806672 | false | false | false |
jadarve/optical-flow-filter | python/flowfilter/propagation.py | 1 | 9419 | """
flowfilter.propagation
----------------------
Module containing propagation methods.
:copyright: 2015, Juan David Adarve, ANU. See AUTHORS for more details
:license: 3-clause BSD, see LICENSE for more details
"""
import numpy as np
import scipy.ndimage as nd
__all__ = ['dominantFlowX', 'dominantFlowY',
'propagate', 'propagationStep']
###########################################################
# GLOBAL VARIABLES
###########################################################
"""forward difference operator in X (column)"""
_dxp_k = np.array([[1.0, -1.0, 0.0]], dtype=np.float32)
"""backward difference operator in X (column)"""
_dxm_k = np.array([[0.0, 1.0, -1.0]], dtype=np.float32)
"""central difference in X (column)"""
_dxc_k = np.array([[1.0, 0.0, -1.0]], dtype=np.float32)
"""forward difference operator in Y (row)"""
_dyp_k = np.copy(_dxp_k.T)
"""backward difference operator in Y (row)"""
_dym_k = np.copy(_dxm_k.T)
"""central difference in Y (row)"""
_dyc_k = np.copy(_dxc_k.T)
"""+1 shift operator in X (column)"""
_sxp_k = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
"""-1 shift operator in X (column)"""
_sxm_k = np.array([[0.0, 0.0, 1.0]], dtype=np.float32)
"""+1 shift operator in Y (row)"""
_syp_k = np.copy(_sxp_k.T)
"""-1 shift operator in Y (row)"""
_sym_k = np.copy(_sxm_k.T)
def dominantFlowX(flow_x):
"""Computes dominant flow in X (column) direction.
Parameters
----------
flow_x : ndarray
Optical X flow component.
Returns
-------
flow_x_dom : ndarray
Dominant flow in X (column) direction
Raises
------
ValueError : if flow_x.ndim != 2
See Also
--------
dominantFlowY : Computes dominant flow in Y (row) direction
"""
if flow_x.ndim != 2:
raise ValueError('flow_x should be a 2D ndarray')
flow_x_dom = np.zeros_like(flow_x)
# central difference of absolute value of flow
flow_x_abs = nd.convolve(np.abs(flow_x), _dxc_k)
# pixel masks for positive and negative absolute differences
dabs_p = flow_x_abs >= 0
dabs_m = flow_x_abs < 0
flow_x_dom[dabs_p] = nd.convolve(flow_x, _sxp_k)[dabs_p]
flow_x_dom[dabs_m] = nd.convolve(flow_x, _sxm_k)[dabs_m]
return flow_x_dom
def dominantFlowY(flow_y):
"""Computes dominant flow in Y (row) direction
Parameters
----------
flow_y : ndarray
Optical flow Y component.
Returns
-------
flow_y_dom : ndarray
Dominant flow in Y (row) directions.
Raises
------
ValueError : if flow_y.ndim != 2
See Also
--------
dominantFlowX : Computes dominant flow in X (column) direction.
"""
if flow_y.ndim != 2:
raise ValueError('flow_y should be a 2D ndarray')
flow_y_dom = np.zeros_like(flow_y)
# central difference of absolute value of flow
flow_y_abs = nd.convolve(np.abs(flow_y), _dyc_k)
# pixel masks for positive and negative absolute differences
dabs_p = flow_y_abs >= 0
dabs_m = flow_y_abs < 0
    # assign positive or negative shifted flow depending on the gradient sign
flow_y_dom[dabs_p] = nd.convolve(flow_y, _syp_k)[dabs_p]
flow_y_dom[dabs_m] = nd.convolve(flow_y, _sym_k)[dabs_m]
return flow_y_dom
def propagate(flow, iterations=1, dx=1.0, payload=None, border=3):
"""Propagate an optical flow field and attached payloads
Parameters
----------
flow : ndarray
Optical flow field. Each pixel (i, j) contains the (u, v)
components of optical flow.
iterations : integer, optional
Number of iterations the numerical scheme is run.
Defaults to 1
dx : float, optional
Pixel size. Defaults to 1.0.
payload : list, optional
List of scalar fields to be propagated alongside the
flow. Each element of the list must be a 2D ndarray.
        Defaults to None
border: integer, optional
Border width in which the propagation does not take place.
The returned propagated flow with have the same values as
the input in the border regions. Defaults to 3.
Returns
-------
flowPropagated : ndarray
Propagated flow field.
payloadPropagated: list
Propagated payloads or None if payload parameters is None
Raises
------
ValueError : if iterations <= 0
See Also
--------
propagationStep : Performs one iteration of the propagation numerical scheme.
"""
if iterations <= 0: raise ValueError('iterations must be greater than zero')
# time step
dt = 1.0 / float(iterations)
# run the numerical scheme
for _ in range(iterations):
flow, payload = propagationStep(flow, dt, dx, payload, border)
# return the propagated flow and payload
return flow, payload
def propagationStep(flow, dt=1.0, dx=1.0, payload=None, border=3):
"""Performs one iteration of the propagation numerical scheme.
Parameters
----------
flow : ndarray
Optical flow field. Each pixel (i, j) contains the (u, v)
components of optical flow.
dt : float, optional
Time step. Defaults to 1.0.
dx : float, optional
Pixel size. Defaults to 1.0.
payload : list, optional
List of scalar fields to be propagated alongside the
optical flow. Each element of the list must be a 2D ndarray.
        Defaults to None
border: integer, optional
Border width in which the propagation does not take place.
The returned propagated flow with have the same values as
the input in the border regions. Defaults to 3.
Returns
-------
flowPropagated : ndarray
Propagated flow field.
payloadPropagated: list
Propagated payloads or None if payload parameters is None
Raises
------
ValueError : if flow.ndim != 3
ValueError : if border < 0
ValueError : if dx <= 0.0
ValueError : if dt <= 0.0
See Also
--------
propagate : Propagate an optical flow field and attached payloads
"""
# Parameters check
if flow.ndim != 3: raise ValueError('flow field must be a 3D ndarray')
if border < 0: raise ValueError('border should be greater or equal zero')
if dx <= 0.0: raise ValueError('dx should be greater than zero')
if dt <= 0.0: raise ValueError('dt should be greater than zero')
# U V flow components
U = np.copy(flow[:,:,0])
V = np.copy(flow[:,:,1])
# ratio between time and pixel size
R = dt/dx
#############################################
# PROPAGATION IN X (column) DIRECTION
#
# Uh = U - R*U*dx(U)
# Vh = V - R*U*dx(V)
#############################################
Ud = dominantFlowX(U)
# sign of dominant flow
Up = Ud >= 0
Um = Ud < 0
Uh = np.copy(U)
Vh = np.copy(V)
# propagation with upwind difference operators
Uh[Up] -= R*(Ud*nd.convolve(U, _dxm_k))[Up]
Uh[Um] -= R*(Ud*nd.convolve(U, _dxp_k))[Um]
Vh[Up] -= R*(Ud*nd.convolve(V, _dxm_k))[Up]
Vh[Um] -= R*(Ud*nd.convolve(V, _dxp_k))[Um]
# payload propagation
    if payload is not None:
payloadPropX = list()
# for each field in the payload list
for field in payload:
fieldPropX = np.copy(field)
fieldPropX[Up] -= R*(Ud*nd.convolve(field, _dxm_k))[Up]
fieldPropX[Um] -= R*(Ud*nd.convolve(field, _dxp_k))[Um]
payloadPropX.append(fieldPropX)
#############################################
# PROPAGATION IN Y DIRECTION
#
# U1 = Uh - R*Uh*dy(U)
# V1 = Vh - R*Vh*dy(V)
#############################################
Vd = dominantFlowY(Vh)
# sign of dominant flow
Vp = Vd >= 0
Vm = Vd < 0
U1 = np.copy(Uh)
V1 = np.copy(Vh)
# propagation with upwind difference operators
U1[Vp] -= R*(Vd*nd.convolve(Uh, _dym_k))[Vp]
U1[Vm] -= R*(Vd*nd.convolve(Uh, _dyp_k))[Vm]
V1[Vp] -= R*(Vd*nd.convolve(Vh, _dym_k))[Vp]
V1[Vm] -= R*(Vd*nd.convolve(Vh, _dyp_k))[Vm]
# payload propagation
payloadPropagated = None
    if payload is not None:
payloadPropagated = list()
# for each scalar field in the payload
for i in range(len(payloadPropX)):
field = payloadPropX[i]
fieldPropY = np.copy(field)
fieldPropY[Vp] -= R*(Vd*nd.convolve(field, _dym_k))[Vp]
fieldPropY[Vm] -= R*(Vd*nd.convolve(field, _dyp_k))[Vm]
payloadPropagated.append(fieldPropY)
##############################################
# PACK THE PROPAGATED FLOW WITH BORDER REMOVAL
##############################################
if border == 0:
flowPropagated = np.concatenate([p[...,np.newaxis] for p in [U1, V1]], axis=2)
else:
flowPropagated = np.copy(flow)
# assign the propagated flow to the interior region of the field
flowPropagated[border:-border, border:-border, 0] = U1[border:-border, border:-border]
flowPropagated[border:-border, border:-border, 1] = V1[border:-border, border:-border]
# sanity check
if np.isnan(flowPropagated).any() or np.isinf(flowPropagated).any():
print('propagationStep(): NaN or Inf detected in propagated flow')
return flowPropagated, payloadPropagated
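

if __name__ == '__main__':
    # Minimal self-contained sketch of the propagation scheme above: a small,
    # constant flow field (2 pixels/frame along the columns) is propagated for
    # a few iterations together with one scalar payload.  Sizes and values are
    # arbitrary and only meant as an illustration.
    h, w = 32, 32
    flow = np.zeros((h, w, 2), dtype=np.float32)
    flow[..., 0] = 2.0  # u component (columns)

    payload = [np.ones((h, w), dtype=np.float32)]

    flowProp, payloadProp = propagate(flow, iterations=4, payload=payload)
    print('propagated flow shape: %s' % (flowProp.shape,))
    print('propagated payload shape: %s' % (payloadProp[0].shape,))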
| bsd-3-clause | -5,081,180,791,804,882,000 | 26.144092 | 94 | 0.573097 | false | 3.476929 | false | false | false |
stefanklug/mapnik | scons/scons-local-2.3.6/SCons/Tool/ipkg.py | 4 | 2475 | """SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls the ipkg-build. Its only argument should be the
packages fake_root.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os
import SCons.Builder
def generate(env):
"""Add Builders and construction variables for ipkg to an Environment."""
try:
bld = env['BUILDERS']['Ipkg']
except KeyError:
bld = SCons.Builder.Builder( action = '$IPKGCOM',
suffix = '$IPKGSUFFIX',
source_scanner = None,
target_scanner = None)
env['BUILDERS']['Ipkg'] = bld
env['IPKG'] = 'ipkg-build'
env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
env['IPKGUSER'] = os.popen('id -un').read().strip()
env['IPKGGROUP'] = os.popen('id -gn').read().strip()
env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
env['IPKGSUFFIX'] = '.ipk'
def exists(env):
return env.Detect('ipkg-build')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | -683,296,968,136,176,600 | 35.940299 | 103 | 0.684848 | false | 3.761398 | false | false | false |
dkodnik/arp | addons/website_sale_delivery/models/sale_order.py | 39 | 5989 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.addons import decimal_precision
class delivery_carrier(orm.Model):
_inherit = 'delivery.carrier'
_columns = {
'website_published': fields.boolean('Available in the website'),
'website_description': fields.text('Description for the website'),
}
_defaults = {
'website_published': True
}
class SaleOrder(orm.Model):
_inherit = 'sale.order'
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = super(SaleOrder, self)._amount_all(cr, uid, ids, field_name, arg, context=context)
currency_pool = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
line_amount = sum([line.price_subtotal for line in order.order_line if line.is_delivery])
currency = order.pricelist_id.currency_id
res[order.id]['amount_delivery'] = currency_pool.round(cr, uid, currency, line_amount)
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'amount_delivery': fields.function(
_amount_all_wrapper, type='float', digits_compute=decimal_precision.get_precision('Account'),
string='Delivery Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'
),
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
domain=[('is_delivery', '=', False)],
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
}
def _check_carrier_quotation(self, cr, uid, order, force_carrier_id=None, context=None):
carrier_obj = self.pool.get('delivery.carrier')
# check to add or remove carrier_id
if not order:
return False
if all(line.product_id.type == "service" for line in order.website_order_line):
order.write({'carrier_id': None}, context=context)
self.pool['sale.order']._delivery_unset(cr, SUPERUSER_ID, [order.id], context=context)
return True
else:
carrier_id = force_carrier_id or order.carrier_id.id
carrier_ids = self._get_delivery_methods(cr, uid, order, context=context)
if carrier_id:
if carrier_id not in carrier_ids:
carrier_id = False
else:
carrier_ids.remove(carrier_id)
carrier_ids.insert(0, carrier_id)
            if force_carrier_id or not carrier_id or carrier_id not in carrier_ids:
for delivery_id in carrier_ids:
grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
if grid_id:
carrier_id = delivery_id
break
order.write({'carrier_id': carrier_id}, context=context)
if carrier_id:
order.delivery_set(context=context)
else:
order._delivery_unset(context=context)
return bool(carrier_id)
def _get_delivery_methods(self, cr, uid, order, context=None):
carrier_obj = self.pool.get('delivery.carrier')
delivery_ids = carrier_obj.search(cr, uid, [('website_published','=',True)], context=context)
# Following loop is done to avoid displaying delivery methods who are not available for this order
# This can surely be done in a more efficient way, but at the moment, it mimics the way it's
# done in delivery_set method of sale.py, from delivery module
for delivery_id in list(delivery_ids):
grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
if not grid_id:
delivery_ids.remove(delivery_id)
return delivery_ids
def _get_errors(self, cr, uid, order, context=None):
errors = super(SaleOrder, self)._get_errors(cr, uid, order, context=context)
if not self._get_delivery_methods(cr, uid, order, context=context):
errors.append(('No delivery method available', 'There is no available delivery method for your order'))
return errors
def _get_website_data(self, cr, uid, order, context=None):
""" Override to add delivery-related website data. """
values = super(SaleOrder, self)._get_website_data(cr, uid, order, context=context)
# We need a delivery only if we have stockable products
has_stockable_products = False
for line in order.order_line:
if line.product_id.type in ('consu', 'product'):
has_stockable_products = True
if not has_stockable_products:
return values
delivery_ctx = dict(context, order_id=order.id)
DeliveryCarrier = self.pool.get('delivery.carrier')
delivery_ids = self._get_delivery_methods(cr, uid, order, context=context)
values['deliveries'] = DeliveryCarrier.browse(cr, SUPERUSER_ID, delivery_ids, context=delivery_ctx)
return values
| agpl-3.0 | -115,469,809,943,804,400 | 46.531746 | 127 | 0.605944 | false | 3.934954 | false | false | false |
mfm24/uidecorators | qt_framework.py | 1 | 7615 | # -*- coding: utf-8 -*-
"""
Created on Sat May 4 22:07:54 2013
@author: matt
Qt-specific code for creating UI elements from objects
decorated with ui_decorators
"""
from ui_decorators import *
#UI creation from properties:
import PySide.QtCore
import PySide.QtGui
import types
from Queue import Queue
_bool_checkstate_map = {True: PySide.QtCore.Qt.CheckState.Checked,
False: PySide.QtCore.Qt.CheckState.Unchecked,
None: PySide.QtCore.Qt.CheckState.PartiallyChecked}
def _bool_to_checkstate(b):
"""
Convert a python object into a Qt CheckState.
Returns Checked for True, Unchecked for False,
and PartiallyChceked for anything else
>>> _bool_to_checkstate(True)
PySide.QtCore.Qt.CheckState.Checked
>>> _bool_to_checkstate(False)
PySide.QtCore.Qt.CheckState.Unchecked
>>> _bool_to_checkstate(None)
PySide.QtCore.Qt.CheckState.PartiallyChecked
>>> _bool_to_checkstate(34)
PySide.QtCore.Qt.CheckState.PartiallyChecked
"""
return _bool_checkstate_map.get(b, _bool_checkstate_map[None])
def _checkstate_to_bool(cs):
"""
Convert a Qt CheckState int a python bool or None.
Returns True for Checked, False for Unchecked or None.
>>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.Checked)
True
>>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.Unchecked)
False
>>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.PartiallyChecked) is None
True
"""
for key, val in _bool_checkstate_map.iteritems():
if val==cs:
return key
class Framework(FrameworkBase, PySide.QtCore.QObject):
"""
Qt Framework class
We derive from QObject to use signals, allowing us to implement
the run_on_ui_thread function by adding the function to a Queue,
emitting a signal and having ourselves connected to that signal.
The code for the received signal is in the UI thread, so we then
call any functions in the queue.
"""
_queue_updated = PySide.QtCore.Signal()
def __init__(self):
PySide.QtCore.QObject.__init__(self)
self.q = Queue()
self._queue_updated.connect(self.on_queue_updated)
self.app = PySide.QtGui.QApplication("Qt")
self.main = PySide.QtGui.QMainWindow()
self.main.setDockOptions(self.main.AllowNestedDocks | self.main.AllowTabbedDocks)
self.changing_widgets = []
def close(self):
self.app.quit()
def on_queue_updated(self):
while not self.q.empty():
f = self.q.get()
f()
def run_on_ui_thread(self, f):
self.q.put(f)
self._queue_updated.emit()
def get_main_window(self):
return self.main
def get_filename(self, mode):
if mode=="load":
return PySide.QtGui.QFileDialog.getOpenFileName()
else:
return PySide.QtGui.QFileDialog.getSaveFileName()
def get_widgets_for_method(self, method):
"""
Return a list of (text, widget) tuples
"""
ret = []
listenerfunc = getattr(method, "listeners", None)
method_name = method.__func__.__name__
def add_widget(name, widget, found_attr, update_widget=None):
if name is None:
name = method_name
if update_widget:
# we wrap the update function in a check to make sure we're
#not in the middle of changing the control
update_widget = updating_widget(widget, update_widget)
# we subscribe to changes from any listeners
if listenerfunc:
listenerfunc(method.im_self).append(update_widget)
# if a get func is supplied we use it to initialize the widget
if found_attr.get("getfunc"):
curval=found_attr.get("getfunc")(method.im_self)
update_widget(curval)
ret.append((name, widget))
def widget_changing(widget, func):
# we wrap change func so that we know which UI elements are changing
# NB we can change a textedit which can change a slider. We want to
# ignore the slider value and the text value, so we need a list of
# changing widgets
def setter(*args):
self.changing_widgets.append(widget)
try:
ret = func(*args)
finally:
self.changing_widgets.remove(widget)
return ret
return setter
def updating_widget(widget, func):
def updater(*args):
if widget not in self.changing_widgets:
return func(*args)
return updater
if hasattr(method, "_slider"):
widget = PySide.QtGui.QSlider(PySide.QtCore.Qt.Orientation.Horizontal)
widget.setMaximum(method._slider["maximum"])
widget.setMinimum(method._slider["minimum"])
widget.valueChanged.connect(widget_changing(widget,
lambda x, method=method: method(x / method._slider["scale"])))
update_widget = lambda newv, method=method, widget=widget: widget.setValue(newv * method._slider["scale"])
add_widget(None, widget, method._slider, update_widget)
if hasattr(method, "_button"):
widget = PySide.QtGui.QPushButton(method_name)
widget.clicked.connect(lambda method=method: method())
add_widget("", widget, method._button)
if hasattr(method, "_combobox"):
widget = PySide.QtGui.QComboBox()
widget.addItems(map(str, method._combobox["options"]))
widget.currentIndexChanged.connect(widget_changing(widget,
lambda x, method=method: method(method._combobox["options"][x])))
update_widget = lambda newv, method=method, widget=widget: widget.setCurrentIndex(method._combobox["options"].index(newv))
add_widget(None, widget, method._combobox, update_widget)
if hasattr(method, "_textbox"):
widget = PySide.QtGui.QLineEdit()
widget.textEdited.connect(widget_changing(widget,
lambda x, method=method: method(x)))
update_widget = lambda newv, widget=widget: widget.setText(str(newv))
add_widget(None, widget, method._textbox, update_widget)
if hasattr(method, "_checkbox"):
widget = PySide.QtGui.QCheckBox()
widget.stateChanged.connect(widget_changing(widget,
lambda x, method=method: method(_checkstate_to_bool(x))))
update_widget = lambda newv, widget=widget: widget.setCheckState(_bool_to_checkstate(newv))
add_widget(None, widget, method._checkbox, update_widget)
return ret
def get_obj_widget(self, obj):
layout = PySide.QtGui.QFormLayout()
for p in dir(obj):
v = getattr(obj, p)
if not isinstance(v, types.MethodType):
continue
widgets = self.get_widgets_for_method(v)
for name, widget in widgets:
layout.addRow(name, widget)
d=PySide.QtGui.QDockWidget(obj.__class__.__name__)
d.setWidget(PySide.QtGui.QWidget())
d.widget().setLayout(layout)
return d
def display_widgets(self, ws):
for w in ws:
self.main.addDockWidget(PySide.QtCore.Qt.LeftDockWidgetArea, w)
self.main.show()
self.app.exec_()
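
# Rough usage sketch (hedged: the decorator name and signature below are
# assumptions about the ui_decorators module, and the Counter class is purely
# hypothetical):
#
#   class Counter(object):
#       @slider(minimum=0, maximum=100)
#       def set_value(self, v):
#           self._value = v
#
#   fw = Framework()
#   fw.display_widgets([fw.get_obj_widget(Counter())])
#
# get_obj_widget() returns a single dock widget with one form row per decorated
# method; display_widgets() docks them in the main window and enters the Qt loop.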
| apache-2.0 | 341,605,952,793,275,140 | 39.084211 | 140 | 0.603808 | false | 4.074371 | false | false | false |
corinnelhh/chatbot | brains.py | 1 | 2729 | import random
from nltk.tokenize import wordpunct_tokenize
from collections import OrderedDict
from trainbot import Trainbot
import chatbot_brain
brain_dict = OrderedDict({})
def add_func_to_dict(name=None):
def wrapper(func):
function_name = name
if function_name is None:
function_name = func.__name__
brain_dict[function_name] = func, func.__doc__
return func
return wrapper
@add_func_to_dict("Bigram Brain")
def _create_bi_chains(chatbot_brain, seeds, size=200):
u"""Return list of Markov-Chain generated strings where each word
added onto the sentence is selected solely from the probability
of it following the given last word in the training data."""
print "the seeds are: " + str(seeds)
candidates = []
while len(candidates) < size:
seed = str(chatbot_brain.i_filter_random(seeds))
candidate = [seed]
done = False
count = 0
while not done:
count += 1
try:
next_word = random.choice(chatbot_brain.bi_lexicon[seed])
candidate.append(next_word)
seed = next_word
except KeyError:
candidates.append(" ".join(candidate))
done = True
if next_word in chatbot_brain.stop_puncts:
candidates.append(" ".join(candidate))
done = True
if count > 75:
done = True
return candidates
@add_func_to_dict("Trigram Brain")
def _create_chains(chatbot_brain, seeds, size=200):
u"""Return list of Markov-Chain generated strings where each word
added onto the sentence is selected solely from the probability
of it following the given last two words in the training data."""
print "the seeds are: " + str(seeds)
candidates = []
while len(candidates) < size:
seed = str(chatbot_brain.i_filter_random(seeds))
        pair = chatbot_brain._pair_seed(seed)
w_1 = pair[0]
w_2 = pair[1]
next_word = ""
word_1, word_2 = w_1, w_2
candidate = [word_1, word_2]
pair = "{} {}".format(word_1, word_2)
done = False
while not done:
try:
next_word = random.choice(chatbot_brain.tri_lexicon[pair])
candidate.append(next_word)
word_1, word_2 = word_2, next_word
pair = "{} {}".format(word_1, word_2)
except KeyError:
candidates.append(" ".join(candidate))
done = True
if next_word in chatbot_brain.stop_puncts:
candidates.append(" ".join(candidate))
done = True
return candidates
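

if __name__ == '__main__':
    # Tiny self-contained demo of the bigram chain walk above.  _FakeBrain is a
    # minimal stand-in for the real chatbot brain object (an assumption -- only
    # the attributes used by _create_bi_chains are provided).
    class _FakeBrain(object):
        bi_lexicon = {"the": ["cat", "dog"], "cat": ["sat", "."],
                      "dog": ["ran", "."], "sat": ["."], "ran": ["."]}
        stop_puncts = [".", "!", "?"]

        def i_filter_random(self, seeds):
            return random.choice(list(seeds))

    for sentence in _create_bi_chains(_FakeBrain(), ["the"], size=3):
        print(sentence)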
| mit | 7,755,319,220,106,287,000 | 33.987179 | 74 | 0.580799 | false | 4.007342 | false | false | false |
dherrendoerfer/My-3D-Things | Malyan-M180-docs/settings-tools/m180_sanify.py | 1 | 1090 | #!/usr/bin/python
import sys, getopt
def main(argv):
inputfile = ''
outputfile = ''
count = 0
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
      print 'm180_sanify.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'm180sanify.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
if inputfile == '' or outputfile == '':
sys.exit()
print 'Input file is ', inputfile
print 'Output file is ', outputfile
f1 = open(inputfile, 'r')
f2 = open(outputfile, 'w')
for line in f1:
if line in ("T0\n", "T1\n"):
f2.write(line.replace('T0', 'M108 T0').replace('T1', 'M108 T1'))
count += 1
else:
f2.write(line)
f1.close()
f2.close()
print 'Replaced ',count,' tool change calls.'
if __name__ == "__main__":
main(sys.argv[1:])
| cc0-1.0 | -7,753,466,669,175,833,000 | 24.585366 | 73 | 0.511927 | false | 3.273273 | false | false | false |
mcocdawc/chemcoord | src/chemcoord/cartesian_coordinates/_cartesian_class_symmetry.py | 1 | 4797 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
from chemcoord.cartesian_coordinates._cartesian_class_core import CartesianCore
from chemcoord.cartesian_coordinates.point_group import PointGroupOperations
class CartesianSymmetry(CartesianCore):
def _get_point_group_analyzer(self, tolerance=0.3):
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
return PointGroupAnalyzer(self.get_pymatgen_molecule(),
tolerance=tolerance)
def _convert_eq(self, eq):
"""WORKS INPLACE on eq
"""
rename = dict(enumerate(self.index))
eq['eq_sets'] = {rename[k]: {rename[x] for x in v}
for k, v in eq['eq_sets'].items()}
eq['sym_ops'] = {rename[k]: {rename[x]: v[x] for x in v}
for k, v in eq['sym_ops'].items()}
try:
sym_mol = self.from_pymatgen_molecule(eq['sym_mol'])
sym_mol.index = self.index
eq['sym_mol'] = sym_mol._to_numeric()
except KeyError:
pass
def get_pointgroup(self, tolerance=0.3):
"""Returns a PointGroup object for the molecule.
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
:class:`~PointGroupOperations`
"""
PA = self._get_point_group_analyzer(tolerance=tolerance)
return PointGroupOperations(PA.sch_symbol, PA.symmops)
def get_equivalent_atoms(self, tolerance=0.3):
"""Returns sets of equivalent atoms with symmetry operations
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
"""
PA = self._get_point_group_analyzer(tolerance=tolerance)
eq = PA.get_equivalent_atoms()
self._convert_eq(eq)
return eq
def symmetrize(self, max_n=10, tolerance=0.3, epsilon=1e-3):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~Cartesian.get_equivalent_atoms`
are rotated, mirrored... unto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
symmetrized molecule.
This operation is repeated iteratively ``max_n`` times at maximum
until the difference between subsequently symmetrized structures is
smaller than ``epsilon``.
Args:
max_n (int): Maximum number of iterations.
tolerance (float): Tolerance for detecting symmetry.
Gets passed as Argument into
:class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
epsilon (float): If the elementwise absolute difference of two
subsequently symmetrized structures is smaller epsilon,
the iteration stops before ``max_n`` is reached.
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule :class:`~Cartesian`
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not symmetry-equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
"""
from pymatgen.symmetry.analyzer import iterative_symmetrize
mg_mol = self.get_pymatgen_molecule()
eq = iterative_symmetrize(mg_mol, max_n=max_n, tolerance=tolerance,
epsilon=epsilon)
self._convert_eq(eq)
return eq
def get_asymmetric_unit(self, eq=None):
eq = self.get_equivalent_atoms() if (eq is None) else eq
new_frame = self.loc[eq['eq_sets'].keys(), :]._frame
from chemcoord.cartesian_coordinates.asymmetric_unit_cartesian_class \
import AsymmetricUnitCartesian
return AsymmetricUnitCartesian(new_frame, _metadata={'eq': eq})
| lgpl-3.0 | -520,179,848,877,947,600 | 38.644628 | 79 | 0.603294 | false | 4.388838 | false | false | false |
replica-con-k/retropi3d_engine | tests/26_puppet_inherit.py | 1 | 1591 | #!/usr/bin/env python
#
import glob
import random
import test
import replika
import replika.assets
from replika.ingame import action
background = replika.assets.image('../assets/background.jpg')
woman = replika.assets.Loop(
replika.assets.images(sorted(glob.glob('../assets/walk_*.png'))))
class Woman(replika.ingame.Puppet):
def __init__(self, puppet_asset, layer, name, position=None,
distance=5.0):
super(Woman, self).__init__(puppet_asset, layer, name,
position, distance)
game = replika.new_game()
scene = game.new_scene(auto_switch=True)
scene.add_asset(background)
woman_graphics = replika.assets.Puppet({
'initial': replika.assets.Loop(
replika.assets.images(sorted(glob.glob('../assets/walk_*.png')))),
'move_right': replika.assets.Loop(
replika.assets.images(sorted(glob.glob('../assets/walk_*.png')))),
'move_left': replika.assets.Loop(
replika.assets.images(sorted(glob.glob('../assets/walk_*.png')),
horizontal_flip=True))
})
woman_graphics.behaviour = Woman
test.start('Puppet() inherit test')
while game.is_running:
position = (random.randint(-512, 512), random.randint(-384, 384))
try:
new_woman = scene.add_asset(woman_graphics, position=position)
if not isinstance(new_woman, Woman):
test.failed('Invalid type of InGame() object')
except:
test.failed('Cannot inherit from Puppet() objects')
if game.frame >= 50:
game.quit()
game.update()
test.ok()
| gpl-3.0 | 4,187,544,860,923,314,700 | 27.410714 | 74 | 0.634821 | false | 3.342437 | true | false | false |
chasing0819/Sample_CPP_Cocos2dx | tools/cocos2d-console/plugins/plugin_update.py | 33 | 1759 | # ----------------------------------------------------------------------------
# cocos2d "update" plugin
#
# Author: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"update" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import re
import os
import cocos2d
import httplib
class CCPluginUpdate(cocos2d.CCPlugin):
@staticmethod
def plugin_name():
return "update"
@staticmethod
def brief_description():
return "checks if there's an update available"
def _check_versions(self):
latest_version = self._get_latest_version()
current_version = '2.1.0'
#FIXME check version numbers with verlib? https://wiki.python.org/moin/Distutils/VersionComparison
def _get_latest_version(self):
cocos2d.Logging.info("obtaining latest version number")
conn = httplib.HTTPConnection('cocos2d-x.org', timeout=10)
try:
conn.request('GET', '/download')
res = conn.getresponse()
if res.status != httplib.OK:
raise cocos2d.CCPluginError("Unexpected response status (%d)" % res.status)
data = res.read()
#FIXME: quick and dirty (and error prone) way to extract the latest version
#from the html page
match = re.search('href="http://cdn.cocos2d-x.org/cocos2d-x-(.*?).zip"', data)
if match is None:
raise cocos2d.CCPluginError("Couldn't extract latest version from site")
return match.group(1)
finally:
conn.close()
def run(self, argv, dependencies):
self.parse_args(argv)
self._check_versions()
| mit | -1,075,677,219,558,098,000 | 27.836066 | 106 | 0.558272 | false | 4.148585 | true | false | false |
akelux-github/typendraw | typendraw.py | 1 | 5213 | """
A canvas class with type and draw feature.
Author: Rong Xiao <[email protected]>
LICENSE: GPL 2.0
"""
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
from font_chooser import askChooseFont
from tkColorChooser import askcolor
import tkFileDialog, tkMessageBox
class TypeDraw(tk.Canvas):
"""
A Canvas variant with predefined bindings for typing and drawing.
"""
def __init__(self, master=None, cnf={}, **kw):
tk.Canvas.__init__(self, master=master, cnf=cnf, **kw)
self.mx = -1
self.my = -1
self.draw_color = 'black'
self.color = 'white'
self.font = ('Consolas', 16)
self.line_width = 2
self.em = 12
self.saved = True
self.cursor = None
self.blink = False
self.stack = [] # history for redo
self.bind('<Button-1>', self.catch_mouse)
self.bind_all('<Key>', self.key_pressed) # have to use bind all
self.bind('<B1-Motion>', self.draw)
# self.bind('<B3-Motion>', self.draw)
def catch_mouse(self, event = None):
self.mx = event.x
self.my = event.y
self.start_blinking()
# self.root.update()
def key_pressed(self, event=None):
# print 'event.char:', event.char
# print "key symbol:", event.keysym
if len(event.char) != 1: # process combined control keys
sym = event.keysym
# if sym == 'Escape':
# self.blink = False
if sym == 'Right':
self.mx += 1
elif sym == 'Left':
self.mx -= 1
elif sym == 'Up':
self.my -= 1
elif sym == 'Down':
self.my += 1
return
o = ord(event.char)
# print "ord:", o
widget = None
if o == 32: # don't draw space
self.mx = self.mx+3*self.em/4
elif o == 27: # escape
self.blink = False
elif o>32 and o<127:
widget = self.create_text(self.mx, self.my, text = event.char, font=self.font, fill=self.draw_color)
self.saved = False
self.stack.append(widget) # put to stack for undo
self.mx += self.em # shift after draw a character
self.start_blinking()
elif o == 127 or o == 8:
self.blink = False
if self.stack:
widget = self.stack.pop()
self.delete(widget)
# self.root.update()
def draw(self, event=None):
# self.stop_blinking()
self.blink = False
mx = event.x
my = event.y
if self.mx >= 0:
w = self.create_line(self.mx, self.my, mx, my, width=self.line_width, fill=self.draw_color)
self.saved = False
self.stack.append(w)
self.mx=mx
self.my=my
def clear(self, event=None):
self.delete(tk.ALL)
def change_color(self,color):
self.draw_color = color
def change_linewidth(self,width):
self.line_width = width
def blinking(self):
if self.cursor == None: # draw cursor
h = 5*self.em/4
w = (self.line_width+1)/2
self.cursor = self.create_rectangle(self.mx-w, self.my-h/2, self.mx + w,self.my + h/2,outline = self.draw_color, fill=self.draw_color)
else: # hide cursor
self.delete(self.cursor)
self.cursor = None
if self.blink:
self.after(500, self.blinking)
elif self.cursor:
self.delete(self.cursor)
self.cursor = None
def start_blinking(self):
if not self.blink:
self.blink = True
self.after(500, self.blinking)
def choose_font(self):
self.font, self.em = askChooseFont(self)
def set_bgcolor(self):
self.color = askcolor(parent=self,
title='Choose a background color')
self.config(bg=self.color[1])
def set_drawcolor(self):
self.draw_color = askcolor(parent=self,
title='Choose a drawing color')[1]
def save(self):
if not self.saved:
f = tkFileDialog.asksaveasfilename(parent=self)
if f:
if f[-4:] != '.eps':
f+='.eps'
self.postscript(file=f, colormode='color')
self.saved = True
return self.saved
def load(self): # T.B.D.
f = tkFileDialog.askopenfilename(parent=self)
photo = tk.PhotoImage(file=f)
self.delete(tk.ALL)
self.create_image(image=photo)
def close(self): # ask for saving before closing
if not self.saved:
ok = tkMessageBox.askyesnocancel(parent=self,
message="Your scratch has unsaved modifications. Do you want to save the scratch?",
title="Save scratch")
if ok == True:
return self.save()
elif ok == None: # cancel
return False # close without saving
else: # no
return True
return True
| gpl-2.0 | -3,260,505,024,188,491,300 | 29.664706 | 146 | 0.525417 | false | 3.852919 | false | false | false |
t3dev/odoo | addons/point_of_sale/wizard/pos_payment.py | 4 | 2994 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.tools import float_is_zero
class PosMakePayment(models.TransientModel):
_name = 'pos.make.payment'
_description = 'Point of Sale Payment'
def _default_session(self):
active_id = self.env.context.get('active_id')
if active_id:
return self.env['pos.order'].browse(active_id).session_id
return False
def _default_journal(self):
active_id = self.env.context.get('active_id')
if active_id:
session = self.env['pos.order'].browse(active_id).session_id
return session.config_id.journal_ids and session.config_id.journal_ids.ids[0] or False
return False
def _default_amount(self):
active_id = self.env.context.get('active_id')
if active_id:
order = self.env['pos.order'].browse(active_id)
return (order.amount_total - order.amount_paid)
return False
session_id = fields.Many2one('pos.session', required=True, default=_default_session)
journal_id = fields.Many2one('account.journal', string='Payment Mode', required=True, default=_default_journal)
amount = fields.Float(digits=0, required=True, default=_default_amount)
payment_name = fields.Char(string='Payment Reference')
payment_date = fields.Date(string='Payment Date', required=True, default=lambda *a: fields.Date.today())
@api.onchange('session_id')
def _on_change_session(self):
if self.session_id:
return {
'domain': {'journal_id': [('id', 'in', self.session_id.config_id.journal_ids.ids)]}
}
@api.multi
def check(self):
"""Check the order:
if the order is not paid: continue payment,
if the order is paid print ticket.
"""
self.ensure_one()
order = self.env['pos.order'].browse(self.env.context.get('active_id', False))
currency = order.pricelist_id.currency_id
amount = order.amount_total - order.amount_paid
data = self.read()[0]
# add_payment expect a journal key
data['journal'] = data['journal_id'][0]
data['amount'] = currency.round(data['amount']) if currency else data['amount']
if not float_is_zero(amount, precision_rounding=currency.rounding or 0.01):
order.add_payment(data)
if order.test_paid():
order.action_pos_order_paid()
return {'type': 'ir.actions.act_window_close'}
return self.launch_payment()
def launch_payment(self):
return {
'name': _('Payment'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.make.payment',
'view_id': False,
'target': 'new',
'views': False,
'type': 'ir.actions.act_window',
'context': self.env.context,
}
| gpl-3.0 | -5,949,387,394,674,377,000 | 37.384615 | 115 | 0.601202 | false | 3.766038 | false | false | false |
d2emon/generator-pack | src/genesys/generator/fng/description/character/__init__.py | 1 | 3300 | from genesys.generator._unknown.character import race
import random
class Mark():
types = [] # 12
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
memory_types = [] # 16
memory_ofs = [] # 17
def __init__(self, name_id):
self.name_id = name_id
class Scar(Mark):
pass
class Birthmark(Mark):
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
class Moles(Mark):
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
class Frecles(Mark):
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
memory_types = [] # 16
memory_ofs = [] # 17
class SmoothSkin(Mark):
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
memory_types = [] # 16
memory_ofs = [] # 17
class SoftSkin(Mark):
places_from = [] # 13
places_through = [] # 14
places_to = [] # 15
memory_types = [] # 16
memory_ofs = [] # 17
def charGen():
marks = [Scar(i) for i in range(6)] + \
[Birthmark(i) for i in range(6, 9)] + \
[Moles(9), Frecles(10), SmoothSkin(11), ] + \
[SoftSkin(12 + i) for i in range(10)]
names20 = []
races = [race.Human(i) for i in range(3)] + \
[race.Elf(i) for i in range(3, 9)] + \
[race.Gnome(10), ] + \
[race.Troll(11), race.Orc(12), race.Goblin(13)] + \
[race.Dwarf(14), race.Giant(15)] + \
[race.Race(15 + i) for i in range(10)]
names22 = []
names23 = []
names24 = []
names25 = []
names26 = []
names27 = []
names28 = []
srace = random.choice(races)
mark = random.choice(marks)
mark_from = random.choice(mark.places_form)
mark_through = random.choice(mark.places_through)
mark_to = random.choice(mark.places_to)
memory_type = random.choice(mark.memory_types)
memory_of = random.choice(mark.memory_ofs)
first_name = random.choice(srace.first_name)
last_name = random.choice(srace.last_name)
random20 = random.choice(names20)
random22 = random.choice(names22)
random23 = random.choice(names23)
random24 = random.choice(names24)
random25 = random.choice(names25)
random26 = random.choice(names26)
while random26 == random25:
random26 = random.choice(names26)
random27 = random.choice(names27)
random28 = random.choice(names28)
head = "%s a %s. %s over %s" % (
srace.hair,
srace.face,
srace.eyes,
srace.promise,
)
name2 = "%s %s %s %s leaves %s of %s." % (
mark,
mark_from,
mark_through,
mark_to,
memory_type,
memory_of,
)
name3 = "This is the face of %s %s, a true %s among %s. He stands %s others, despite his %s frame." % (
first_name,
last_name,
random20,
srace,
random22,
random23,
)
name4 = "There's something %s about him, perhaps it's %s or perhaps it's simply %s. But nonetheless, people tend to %s, while %s." % (
random24,
random25,
random26,
random27,
random28,
)
return "\n".join([
head,
name2,
name3,
name4,
])
| gpl-3.0 | 2,039,169,166,083,165,700 | 22.913043 | 138 | 0.534848 | false | 3.095685 | false | false | false |
grit-engine/grit-engine | dependencies/quex-0.34.1/quex/core_engine/generator/base.py | 2 | 8715 | from quex.frs_py.file_in import error_msg
import quex.core_engine.state_machine.parallelize as parallelize
from quex.core_engine.generator.action_info import PatternActionInfo
import quex.core_engine.state_machine.nfa_to_dfa as nfa_to_dfa
import quex.core_engine.state_machine.hopcroft_minimization as hopcroft
class GeneratorBase:
def __init__(self, PatternActionPair_List, StateMachineName):
assert type(PatternActionPair_List) == list
assert map(lambda elm: elm.__class__ == PatternActionInfo, PatternActionPair_List) \
== [ True ] * len(PatternActionPair_List)
self.state_machine_name = StateMachineName
# -- setup of state machine lists and id lists
self.__extract_special_lists(PatternActionPair_List)
# (*) create state (combined) state machines
# -- core state machine
self.sm = self.__create_core_state_machine()
# -- pre conditions
self.pre_context_sm = self.__create_pre_context_state_machine()
# -- backward detectors for state machines with forward ambiguous
# post-conditions.
self.papc_backward_detector_state_machine_list = \
self.__create_backward_input_position_detectors()
def __extract_special_lists(self, PatternActionPair_List):
# (0) extract data structures:
# -- state machine list: simply a list of all state machines
# (the original state machine id is marked as 'origin' inside
# 'get_state_machine')
# -- a map from state machine id to related action (i.e. the code fragment)
self.state_machine_list = []
self.action_db = {}
# -- extract:
# -- state machines that are post-conditioned
self.post_contexted_sm_id_list = []
# -- state machines that nore non-trivially pre-conditioned,
# i.e. they need a reverse state machine to be verified.
self.pre_context_sm_id_list = []
self.pre_context_sm_list = []
# -- pre-conditions that are trivial, i.e. it is only checked for
# the last character, if it was a particular one or not.
self.begin_of_line_condition_f = False
# [NOT IMPLEMENTED YET]
# # trivial_pre_context_dict = {} # map: state machine id --> character code(s)
for action_info in PatternActionPair_List:
sm = action_info.pattern_state_machine()
sm_id = sm.get_id()
self.state_machine_list.append(sm)
# -- register action information under the state machine id, where it belongs.
self.action_db[sm_id] = action_info
# -- collect all pre-conditions and make one single state machine out of it
pre_sm = sm.core().pre_context_sm()
if pre_sm != None:
self.pre_context_sm_list.append(pre_sm)
self.pre_context_sm_id_list.append(pre_sm.get_id())
if sm.core().pre_context_begin_of_line_f():
self.begin_of_line_condition_f = True
# [NOT IMPLEMENTED YET]
# # -- collect information about trivial (char code) pre-conditions
# # if sm.get_trivial_pre_context_character_codes() != []:
# # trivial_pre_context_dict[sm.get_id()] = sm.get_trivial_pre_context_character_codes()
# -- collect all ids of post conditioned state machines
if sm.core().post_context_id() != -1L:
self.post_contexted_sm_id_list.append(sm_id)
def __create_core_state_machine(self):
# (1) transform all given patterns into a single state machine
# (the index of the patterns remain as 'origins' inside the states)
return self.__get_combined_state_machine(self.state_machine_list)
def __create_pre_context_state_machine(self):
if self.pre_context_sm_list == []: return None
# -- add empty actions for the pre-condition terminal states
for pre_sm in self.pre_context_sm_list:
self.action_db[pre_sm.get_id()] = PatternActionInfo(pre_sm, "")
return self.__get_combined_state_machine(self.pre_context_sm_list,
FilterDominatedOriginsF=False)
def __create_backward_input_position_detectors(self):
# -- find state machines that contain a state flagged with
# 'pseudo-ambiguous-post-condition'.
# -- collect all backward detector state machines in a list
papc_sm_list = []
for sm in self.state_machine_list:
papc_sm = sm.core().post_context_backward_input_position_detector_sm()
if sm.core().post_context_backward_input_position_detector_sm() == None: continue
papc_sm_list.append(papc_sm)
# -- code generation 'looks' for origins, so mark them.
papc_sm.mark_state_origins()
return papc_sm_list
def __get_combined_state_machine(self, StateMachine_List, FilterDominatedOriginsF=True):
"""Creates a DFA state machine that incorporates the paralell
process of all pattern passed as state machines in
the StateMachine_List. Each origins of each state machine
are kept in the final state, if it is not dominated.
Performs: -- parallelization
-- translation from NFA to DFA
-- Frank Schaefers Adapted Hopcroft optimization.
Again: The state machine ids of the original state machines
are traced through the whole process.
FilterDominatedOriginsF, if set to False, can disable the filtering
of dominated origins. This is important for pre-conditions, because,
all successful patterns need to be reported!
"""
def __check(Place, sm):
__check_on_orphan_states(Place, sm)
__check_on_init_state_not_acceptance(Place, sm)
def __check_on_orphan_states(Place, sm):
orphan_state_list = sm.get_orphaned_state_index_list()
if orphan_state_list == []: return
error_msg("After '%s'" % Place + "\n" + \
"Orphaned state(s) detected in regular expression (optimization lack).\n" + \
"Please, log a defect at the projects website quex.sourceforge.net.\n" + \
"Orphan state(s) = " + repr(orphan_state_list) + "\n",
fh, DontExitF=True)
def __check_on_init_state_not_acceptance(Place, sm):
init_state = sm.get_init_state()
if init_state.core().is_acceptance():
error_msg("After '%s'" % Place + "\n" + \
"The initial state is 'acceptance'. This should never appear.\n" + \
"Please, log a defect at the projects website quex.sourceforge.net.\n")
if filter(lambda origin: origin.is_acceptance(), init_state.origins().get_list()) != []:
error_msg("After '%s'" % Place + "\n" + \
"Initial state contains an origin that is 'acceptance'. This should never appear.\n" + \
"Please, log a defect at the projects website quex.sourceforge.net.\n")
# (1) mark at each state machine the machine and states as 'original'.
#
# This is necessary to trace in the combined state machine the
# pattern that actually matched. Note, that a state machine in
# the StateMachine_List represents one possible pattern that can
# match the current input.
#
map(lambda x: x.mark_state_origins(), StateMachine_List)
# (2) setup all patterns in paralell
sm = parallelize.do(StateMachine_List)
__check("Parallelization", sm)
# (3) convert the state machine to an DFA (paralellization created an NFA)
sm = nfa_to_dfa.do(sm)
__check("NFA to DFA", sm)
# (4) determine for each state in the DFA what is the dominating original state
if FilterDominatedOriginsF: sm.filter_dominated_origins()
__check("Filter Dominated Origins", sm)
# (5) perform hopcroft optimization
# Note, that hopcroft optimization does consider the original acceptance
# states when deciding if two state sets are equivalent.
sm = hopcroft.do(sm)
__check("Hopcroft Minimization", sm)
return sm
| mit | -2,361,270,941,677,922,300 | 49.375723 | 114 | 0.590476 | false | 4.029126 | false | false | false |
Ehco1996/django-sspanel | apps/sspanel/migrations/0004_auto_20201108_0400.py | 1 | 10904 | # Generated by Django 3.1.2 on 2020-11-07 20:00
from decimal import Decimal
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sspanel", "0003_auto_20200729_0733"),
]
operations = [
migrations.CreateModel(
name="TrojanNode",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("node_id", models.IntegerField(unique=True)),
("level", models.PositiveIntegerField(default=0)),
("name", models.CharField(max_length=32, verbose_name="名字")),
("info", models.CharField(max_length=1024, verbose_name="节点说明")),
(
"country",
models.CharField(
choices=[
("US", "美国"),
("CN", "中国"),
("GB", "英国"),
("SG", "新加坡"),
("TW", "台湾"),
("HK", "香港"),
("JP", "日本"),
("FR", "法国"),
("DE", "德国"),
("KR", "韩国"),
("JE", "泽西岛"),
("NZ", "新西兰"),
("MX", "墨西哥"),
("CA", "加拿大"),
("BR", "巴西"),
("CU", "古巴"),
("CZ", "捷克"),
("EG", "埃及"),
("FI", "芬兰"),
("GR", "希腊"),
("GU", "关岛"),
("IS", "冰岛"),
("MO", "澳门"),
("NL", "荷兰"),
("NO", "挪威"),
("PL", "波兰"),
("IT", "意大利"),
("IE", "爱尔兰"),
("AR", "阿根廷"),
("PT", "葡萄牙"),
("AU", "澳大利亚"),
("RU", "俄罗斯联邦"),
("CF", "中非共和国"),
],
default="CN",
max_length=5,
verbose_name="国家",
),
),
(
"used_traffic",
models.BigIntegerField(default=0, verbose_name="已用流量"),
),
(
"total_traffic",
models.BigIntegerField(default=1073741824, verbose_name="总流量"),
),
(
"enable",
models.BooleanField(
db_index=True, default=True, verbose_name="是否开启"
),
),
(
"enlarge_scale",
models.DecimalField(
decimal_places=2,
default=Decimal("1.0"),
max_digits=10,
verbose_name="倍率",
),
),
(
"ehco_listen_host",
models.CharField(
blank=True, max_length=64, null=True, verbose_name="隧道监听地址"
),
),
(
"ehco_listen_port",
models.CharField(
blank=True, max_length=64, null=True, verbose_name="隧道监听端口"
),
),
(
"ehco_listen_type",
models.CharField(
choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
default="raw",
max_length=64,
verbose_name="隧道监听类型",
),
),
(
"ehco_transport_type",
models.CharField(
choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
default="raw",
max_length=64,
verbose_name="隧道传输类型",
),
),
(
"enable_ehco_lb",
models.BooleanField(
db_index=True, default=True, verbose_name="是否负载均衡"
),
),
("server", models.CharField(max_length=128, verbose_name="服务器地址")),
(
"inbound_tag",
models.CharField(default="proxy", max_length=64, verbose_name="标签"),
),
(
"service_port",
models.IntegerField(default=443, verbose_name="服务端端口"),
),
("client_port", models.IntegerField(default=443, verbose_name="客户端端口")),
(
"listen_host",
models.CharField(
default="0.0.0.0", max_length=64, verbose_name="本地监听地址"
),
),
(
"grpc_host",
models.CharField(
default="0.0.0.0", max_length=64, verbose_name="grpc地址"
),
),
(
"grpc_port",
models.CharField(
default="8080", max_length=64, verbose_name="grpc端口"
),
),
(
"network",
models.CharField(default="tcp", max_length=64, verbose_name="连接方式"),
),
(
"security",
models.CharField(
blank=True,
default="tls",
max_length=64,
null=True,
verbose_name="加密方式",
),
),
(
"alpn",
models.CharField(
blank=True, max_length=64, null=True, verbose_name="alpn"
),
),
(
"certificateFile",
models.CharField(
blank=True, max_length=64, null=True, verbose_name="crt地址"
),
),
(
"keyFile",
models.CharField(
blank=True, max_length=64, null=True, verbose_name="key地址"
),
),
],
options={
"verbose_name_plural": "Trojan节点",
},
),
migrations.AddField(
model_name="relaynode",
name="ehco_trojan_lb_port",
field=models.IntegerField(
blank=True,
help_text="trojan负载均衡端口",
null=True,
verbose_name="trojan负载均衡端口",
),
),
migrations.AlterField(
model_name="nodeonlinelog",
name="node_type",
field=models.CharField(
choices=[("ss", "ss"), ("vmess", "vmess"), ("trojan", "trojan")],
default="ss",
max_length=32,
verbose_name="节点类型",
),
),
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
migrations.AlterField(
model_name="usertrafficlog",
name="node_type",
field=models.CharField(
choices=[("ss", "ss"), ("vmess", "vmess"), ("trojan", "trojan")],
default="ss",
max_length=32,
verbose_name="节点类型",
),
),
migrations.CreateModel(
name="TrojanRelayRule",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("relay_port", models.CharField(max_length=64, verbose_name="中转端口")),
(
"listen_type",
models.CharField(
choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
default="raw",
max_length=64,
verbose_name="监听类型",
),
),
(
"transport_type",
models.CharField(
choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
default="raw",
max_length=64,
verbose_name="传输类型",
),
),
(
"relay_node",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="trojan_relay_rules",
to="sspanel.relaynode",
verbose_name="中转节点",
),
),
(
"trojan_node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="relay_rules",
to="sspanel.trojannode",
verbose_name="Trojan节点",
),
),
],
options={
"verbose_name_plural": "Trojan转发规则",
},
),
]
| gpl-3.0 | 804,888,003,165,539,600 | 34.856164 | 88 | 0.316141 | false | 4.655402 | false | false | false |
wavefrontHQ/python-client | wavefront_api_client/api/search_api.py | 1 | 285331 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class SearchApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def search_account_entities(self, **kwargs): # noqa: E501
"""Search over a customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_account_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_account_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_account_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_account_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/account', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedAccount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
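    # --- Illustrative usage sketch (not part of the generated client) -------
    # Minimal example of calling search_account_entities(). The top-level
    # re-exports (wavefront_api_client.Configuration, .SearchApi,
    # .SortableSearchRequest), the 'X-AUTH-TOKEN' api_key name and the
    # limit/offset fields are assumptions based on typical swagger-codegen
    # output and the Wavefront REST API docs, not guarantees of this module.
    #
    #   import wavefront_api_client
    #   config = wavefront_api_client.Configuration()
    #   config.host = 'https://YOUR_INSTANCE.wavefront.com'
    #   config.api_key['X-AUTH-TOKEN'] = '<<API-TOKEN>>'
    #   api = wavefront_api_client.SearchApi(wavefront_api_client.ApiClient(config))
    #   body = wavefront_api_client.SortableSearchRequest(limit=100, offset=0)
    #   accounts = api.search_account_entities(body=body)
    #   print(accounts.response)  # ResponseContainerPagedAccount.response
    # -------------------------------------------------------------------------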
def search_account_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_account_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_account_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_account_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_account_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_account_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/account/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
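    # --- Illustrative usage sketch (not part of the generated client) -------
    # Listing the values of one facet over accounts, reusing the `api` object
    # from the sketch above. The facet name and the FacetSearchRequestContainer
    # 'limit' field are illustrative assumptions.
    #
    #   body = wavefront_api_client.FacetSearchRequestContainer(limit=10)
    #   facet_values = api.search_account_for_facet('type', body=body)
    #   print(facet_values.response)  # ResponseContainerFacetResponse.response
    # -------------------------------------------------------------------------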
def search_account_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_account_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_account_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_account_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_account_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_account_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/account/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
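    # --- Illustrative usage sketch (not part of the generated client) -------
    # Listing several facets in a single call, reusing the `api` object from
    # the first sketch. The 'facets' field of FacetsSearchRequestContainer is
    # an assumption for illustration.
    #
    #   body = wavefront_api_client.FacetsSearchRequestContainer(facets=['type'], limit=10)
    #   result = api.search_account_for_facets(body=body)
    #   print(result.response)  # ResponseContainerFacetsResponseContainer.response
    # -------------------------------------------------------------------------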
def search_alert_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_alert_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_alert_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedAlert', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
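    # --- Illustrative usage sketch (not part of the generated client) -------
    # Calling a *_with_http_info variant directly returns the deserialized
    # data together with the HTTP status code and response headers (standard
    # swagger-codegen behaviour when _return_http_data_only is not set).
    # Reuses the `api` object from the first sketch.
    #
    #   req = wavefront_api_client.SortableSearchRequest(limit=10)
    #   data, status, headers = api.search_alert_deleted_entities_with_http_info(body=req)
    #   print(status, data.response)
    # -------------------------------------------------------------------------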
def search_alert_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_alert_deleted_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_deleted_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_alert_deleted_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert/deleted/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_alert_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_alert_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_alert_deleted_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_deleted_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_alert_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlertWithStats
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_alert_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_alert_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedAlertWithStats
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedAlertWithStats', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
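    # --- Illustrative usage sketch (not part of the generated client) -------
    # Filtering non-deleted alerts with a query and issuing the call
    # asynchronously. The SearchQuery fields (key, value, matching_method) are
    # assumptions based on the Wavefront REST API docs.
    #
    #   query = [wavefront_api_client.SearchQuery(key='status', value='firing',
    #                                             matching_method='EXACT')]
    #   body = wavefront_api_client.SortableSearchRequest(query=query, limit=50)
    #   thread = api.search_alert_entities(body=body, async_req=True)
    #   alerts = thread.get()  # ResponseContainerPagedAlertWithStats
    #   print(alerts.response)
    # -------------------------------------------------------------------------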
def search_alert_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_alert_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_alert_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_alert_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_alert_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_alert_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_alert_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_alert_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted alerts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_alert_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_alert_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/alert/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_cloud_integration_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_cloud_integration_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
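    # --- Illustrative usage sketch (not part of the generated client) -------
    # The */deleted endpoints search trashed entities only. A minimal example
    # with error handling; ApiException living in wavefront_api_client.rest is
    # standard swagger-codegen layout and is assumed here.
    #
    #   from wavefront_api_client.rest import ApiException
    #   try:
    #       deleted = api.search_cloud_integration_deleted_entities(
    #           body=wavefront_api_client.SortableSearchRequest(limit=25))
    #       print(deleted.response)
    #   except ApiException as e:
    #       print('Search failed: %s' % e)
    # -------------------------------------------------------------------------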
def search_cloud_integration_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_cloud_integration_deleted_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_deleted_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_cloud_integration_deleted_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration/deleted/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
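# Usage sketch (assumption): the single-facet variants take the facet name as a
# required path parameter plus an optional FacetSearchRequestContainer body.
# "TAGS" below is a hypothetical facet name, used only for illustration:
#
#     container = FacetSearchRequestContainer()  # optional filter/paging body
#     resp = api.search_cloud_integration_deleted_for_facet("TAGS", body=container)
#     # omitting `facet` (or passing None) raises ValueError before any HTTP call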
def search_cloud_integration_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_cloud_integration_deleted_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
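# Usage sketch (assumption): the plural `_for_facets` variants take a single
# FacetsSearchRequestContainer describing several facets at once and return a
# ResponseContainerFacetsResponseContainer with one entry per requested facet:
#
#     body = FacetsSearchRequestContainer()  # facet names/queries are set on this container
#     facets = api.search_cloud_integration_deleted_for_facets(body=body)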
def search_cloud_integration_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_cloud_integration_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedCloudIntegration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
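# Usage sketch (assumption): the public wrappers force `_return_http_data_only`
# to True, so callers who also need the HTTP status code and response headers
# can call the `_with_http_info` variant directly; in swagger-generated clients
# this typically returns a (data, status, headers) tuple:
#
#     data, status, headers = api.search_cloud_integration_entities_with_http_info(
#         body=SortableSearchRequest(), _return_http_data_only=False)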
def search_cloud_integration_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_cloud_integration_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_cloud_integration_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_cloud_integration_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_cloud_integration_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_cloud_integration_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/cloudintegration/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_dashboard_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_dashboard_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedDashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_dashboard_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_dashboard_deleted_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_deleted_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_dashboard_deleted_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard/deleted/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_dashboard_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_dashboard_deleted_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_deleted_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_dashboard_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_dashboard_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDashboard
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedDashboard', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_dashboard_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_dashboard_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_dashboard_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_dashboard_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_dashboard_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_dashboard_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_dashboard_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted dashboards # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_dashboard_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_dashboard_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/dashboard/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_external_link_entities(self, **kwargs): # noqa: E501
"""Search over a customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_link_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedExternalLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_external_link_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_external_link_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_external_link_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_link_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedExternalLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_external_link_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/extlink', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedExternalLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_external_links_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_links_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_external_links_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_external_links_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_external_links_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_links_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_external_links_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_external_links_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/extlink/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_external_links_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_links_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_external_links_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_external_links_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_external_links_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's external links # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_external_links_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_external_links_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/extlink/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_ingestion_policy_entities(self, **kwargs): # noqa: E501
"""Search over a customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedIngestionPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_ingestion_policy_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_ingestion_policy_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_ingestion_policy_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedIngestionPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_ingestion_policy_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/ingestionpolicy', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedIngestionPolicy', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_ingestion_policy_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_ingestion_policy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_ingestion_policy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_ingestion_policy_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_ingestion_policy_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_ingestion_policy_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/ingestionpolicy/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_ingestion_policy_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_ingestion_policy_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_ingestion_policy_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_ingestion_policy_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's ingestion policies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_ingestion_policy_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_ingestion_policy_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/ingestionpolicy/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_maintenance_window_entities(self, **kwargs): # noqa: E501
"""Search over a customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_maintenance_window_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_maintenance_window_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_maintenance_window_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_maintenance_window_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/maintenancewindow', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMaintenanceWindow', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_maintenance_window_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_maintenance_window_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_maintenance_window_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_maintenance_window_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_maintenance_window_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_maintenance_window_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/maintenancewindow/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_maintenance_window_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_maintenance_window_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_maintenance_window_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_maintenance_window_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's maintenance windows # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_maintenance_window_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_maintenance_window_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/maintenancewindow/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
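    # Multi-facet variant, reusing the `wave` / `api` objects from the sketch above
    # (the field names `facets` and `limit` on FacetsSearchRequestContainer are
    # assumptions about the generated model and may differ in your client version):
    #
    #     body = wave.FacetsSearchRequestContainer(facets=["TAGS", "CREATORID"],
    #                                              limit=10)
    #     result = api.search_maintenance_window_for_facets(body=body)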
def search_monitored_application_entities(self, **kwargs): # noqa: E501
"""Search over all the customer's non-deleted monitored applications # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMonitoredApplicationDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_application_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_monitored_application_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_monitored_application_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over all the customer's non-deleted monitored applications # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMonitoredApplicationDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_application_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredapplication', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredApplicationDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_monitored_application_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted monitored application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_application_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_monitored_application_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_monitored_application_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted monitored application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_application_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_monitored_application_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredapplication/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_monitored_application_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted monitored clusters # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_application_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_monitored_application_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_monitored_application_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted monitored clusters # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_application_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_application_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredapplication/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_monitored_service_entities(self, **kwargs): # noqa: E501
"""Search over all the customer's non-deleted monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_service_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_monitored_service_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_monitored_service_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over all the customer's non-deleted monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_service_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredservice', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_monitored_service_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted monitored application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_service_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_monitored_service_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_monitored_service_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted monitored application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_service_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_monitored_service_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredservice/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_monitored_service_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted monitored clusters # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_monitored_service_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_monitored_service_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_monitored_service_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted monitored clusters # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_monitored_service_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_monitored_service_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/monitoredservice/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_notficant_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notficant_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_notficant_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_notficant_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_notficant_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notficant_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_notficant_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/notificant/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_notificant_entities(self, **kwargs): # noqa: E501
"""Search over a customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notificant_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedNotificant
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_notificant_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_notificant_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_notificant_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notificant_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedNotificant
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_notificant_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/notificant', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedNotificant', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
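    # Paged entity search, reusing the `wave` / `api` objects from the earlier sketch
    # (the SortableSearchRequest fields `limit` and `offset`, and the
    # `response.items` attribute on the returned container, are assumptions about
    # the generated models and may differ in your client version):
    #
    #     body = wave.SortableSearchRequest(limit=50, offset=0)
    #     page = api.search_notificant_entities(body=body)
    #     for notificant in page.response.items:
    #         print(notificant.id)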
def search_notificant_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notificant_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_notificant_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_notificant_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_notificant_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's notificants # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_notificant_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_notificant_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_notificant_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/notificant/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedProxy', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_proxy_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_proxy_deleted_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_deleted_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_proxy_deleted_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/deleted/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_deleted_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_deleted_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedProxy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedProxy', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_proxy_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_proxy_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_proxy_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_proxy_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_proxy_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_proxy_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_proxy_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted proxies # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_proxy_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_proxy_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/proxy/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_deleted_entities(self, **kwargs): # noqa: E501
"""Search over a customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_registered_query_deleted_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_deleted_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/deleted', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedDerivedMetricDefinition', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_registered_query_deleted_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_deleted_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_registered_query_deleted_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/deleted/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_registered_query_deleted_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_deleted_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/deleted/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_registered_query_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinitionWithStats
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_registered_query_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_registered_query_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedDerivedMetricDefinitionWithStats
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedDerivedMetricDefinitionWithStats', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
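    # --- Illustrative usage (sketch only, not generated code) ---------------
    # The wrapper above delegates to search_registered_query_entities_with_http_info().
    # Package, class and model/field names outside this file (wavefront_api_client,
    # SearchApi, SortableSearchRequest and its limit/offset fields, the
    # X-AUTH-TOKEN api_key name) are assumptions based on the usual layout of
    # this generated client, not facts confirmed here.
    #
    #   import wavefront_api_client
    #   config = wavefront_api_client.Configuration()
    #   config.host = 'https://YOUR_INSTANCE.wavefront.com'
    #   config.api_key['X-AUTH-TOKEN'] = 'YOUR_API_TOKEN'
    #   api = wavefront_api_client.SearchApi(wavefront_api_client.ApiClient(config))
    #
    #   body = wavefront_api_client.SortableSearchRequest(limit=10, offset=0)
    #   container = api.search_registered_query_entities(body=body)
    #   print(container.response)   # ResponseContainer* models expose response/status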
def search_registered_query_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_registered_query_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_registered_query_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's non-deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_registered_query_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
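    # --- Illustrative sketch: single-facet variant. `facet` is a required path
    # parameter; the facet name and the FacetSearchRequestContainer fields below
    # are placeholders/assumptions, and `api` is configured as in the sketch
    # further up.
    #
    #   facet_body = wavefront_api_client.FacetSearchRequestContainer(limit=25)
    #   resp = api.search_registered_query_for_facet('tags', body=facet_body)
    #   print(resp.response)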
def search_registered_query_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_registered_query_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_registered_query_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's non-deleted derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_registered_query_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/derivedmetric/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
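    # --- Illustrative sketch: several facets in one request. The
    # FacetsSearchRequestContainer field names below are assumptions.
    #
    #   facets_body = wavefront_api_client.FacetsSearchRequestContainer(
    #       facets=['tags', 'status'],   # facet names are hypothetical
    #       limit=25)
    #   resp = api.search_registered_query_for_facets(body=facets_body)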
def search_related_report_event_anomaly_entities(self, event_id, **kwargs): # noqa: E501
"""List the related events and anomalies over a firing event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_related_report_event_anomaly_entities(event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event_id: (required)
:param EventSearchRequest body:
:return: ResponseContainerPagedReportEventAnomalyDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_related_report_event_anomaly_entities_with_http_info(event_id, **kwargs) # noqa: E501
else:
(data) = self.search_related_report_event_anomaly_entities_with_http_info(event_id, **kwargs) # noqa: E501
return data
def search_related_report_event_anomaly_entities_with_http_info(self, event_id, **kwargs): # noqa: E501
"""List the related events and anomalies over a firing event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_related_report_event_anomaly_entities_with_http_info(event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event_id: (required)
:param EventSearchRequest body:
:return: ResponseContainerPagedReportEventAnomalyDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['event_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_related_report_event_anomaly_entities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'event_id' is set
if ('event_id' not in params or
params['event_id'] is None):
raise ValueError("Missing the required parameter `event_id` when calling `search_related_report_event_anomaly_entities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'event_id' in params:
path_params['eventId'] = params['event_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/event/related/{eventId}/withAnomalies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedReportEventAnomalyDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
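    # --- Illustrative sketch: the related-event endpoints take the firing
    # event's id as a required path parameter. The id value and the
    # EventSearchRequest fields below are placeholders, not values confirmed
    # by this file.
    #
    #   related = api.search_related_report_event_anomaly_entities(
    #       '1533779586956:sampleEvent',
    #       body=wavefront_api_client.EventSearchRequest(limit=10))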
def search_related_report_event_entities(self, event_id, **kwargs): # noqa: E501
"""List the related events over a firing event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_related_report_event_entities(event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event_id: (required)
:param EventSearchRequest body:
:return: ResponseContainerPagedRelatedEvent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_related_report_event_entities_with_http_info(event_id, **kwargs) # noqa: E501
else:
(data) = self.search_related_report_event_entities_with_http_info(event_id, **kwargs) # noqa: E501
return data
def search_related_report_event_entities_with_http_info(self, event_id, **kwargs): # noqa: E501
"""List the related events over a firing event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_related_report_event_entities_with_http_info(event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event_id: (required)
:param EventSearchRequest body:
:return: ResponseContainerPagedRelatedEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['event_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_related_report_event_entities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'event_id' is set
if ('event_id' not in params or
params['event_id'] is None):
raise ValueError("Missing the required parameter `event_id` when calling `search_related_report_event_entities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'event_id' in params:
path_params['eventId'] = params['event_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/event/related/{eventId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedRelatedEvent', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_report_event_entities(self, **kwargs): # noqa: E501
"""Search over a customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EventSearchRequest body:
:return: ResponseContainerPagedEvent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_report_event_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_report_event_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_report_event_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EventSearchRequest body:
:return: ResponseContainerPagedEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_report_event_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/event', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedEvent', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
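    # --- Illustrative sketch: plain event search. Paged response containers
    # are assumed to expose an `items` list on `.response`; EventSearchRequest
    # field names are likewise assumptions.
    #
    #   events_body = wavefront_api_client.EventSearchRequest(limit=100, offset=0)
    #   events = api.search_report_event_entities(body=events_body)
    #   for event in events.response.items:
    #       print(event.name)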
def search_report_event_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_report_event_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_report_event_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_report_event_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_report_event_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_report_event_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/event/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_report_event_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_report_event_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_report_event_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_report_event_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's events # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_report_event_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_report_event_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/event/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_role_entities(self, **kwargs): # noqa: E501
"""Search over a customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedRoleDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_role_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_role_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_role_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedRoleDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_role_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/role', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedRoleDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_role_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_role_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_role_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_role_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_role_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_role_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/role/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_role_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_role_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_role_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_role_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's roles # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_role_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_role_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/role/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_service_account_entities(self, **kwargs): # noqa: E501
"""Search over a customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedServiceAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_service_account_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_service_account_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_service_account_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedServiceAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_service_account_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/serviceaccount', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedServiceAccount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_service_account_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_service_account_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_service_account_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_service_account_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_service_account_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_service_account_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/serviceaccount/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_service_account_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_service_account_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_service_account_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_service_account_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's service accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_service_account_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_service_account_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/serviceaccount/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_tagged_source_entities(self, **kwargs): # noqa: E501
"""Search over a customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SourceSearchRequestContainer body:
:return: ResponseContainerPagedSource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_tagged_source_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_tagged_source_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_tagged_source_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SourceSearchRequestContainer body:
:return: ResponseContainerPagedSource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_tagged_source_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/source', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedSource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
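    # --- Illustrative sketch: source search uses its own request model,
    # SourceSearchRequestContainer, rather than SortableSearchRequest; the
    # field shown is an assumption.
    #
    #   src_body = wavefront_api_client.SourceSearchRequestContainer(limit=100)
    #   sources = api.search_tagged_source_entities(body=src_body)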
def search_tagged_source_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_tagged_source_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_tagged_source_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_tagged_source_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_tagged_source_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_tagged_source_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/source/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_tagged_source_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_tagged_source_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_tagged_source_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_tagged_source_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's sources # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_tagged_source_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_tagged_source_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/source/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_user_entities(self, **kwargs): # noqa: E501
"""Search over a customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCustomerFacingUserObject
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_user_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_user_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCustomerFacingUserObject
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/user', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedCustomerFacingUserObject', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
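    # --- Illustrative sketch of the calling conventions shared by every method
    # in this class: async_req=True returns a thread-like object whose get()
    # yields the same value as the synchronous call, and calling the
    # *_with_http_info variant directly returns data plus HTTP status and
    # headers in the stock swagger-codegen runtime.
    #
    #   thread = api.search_user_entities(async_req=True)
    #   users = thread.get()
    #
    #   users, status, headers = api.search_user_entities_with_http_info()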
def search_user_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_user_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_user_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_user_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/user/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_user_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_user_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_user_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's users # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/user/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_user_group_entities(self, **kwargs): # noqa: E501
"""Search over a customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedUserGroupModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_group_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_user_group_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_user_group_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedUserGroupModel
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_group_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/usergroup', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedUserGroupModel', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_user_group_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_group_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_user_group_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_user_group_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_group_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_user_group_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/usergroup/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_user_group_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_user_group_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's user groups # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_user_group_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_user_group_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/usergroup/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_web_hook_entities(self, **kwargs): # noqa: E501
"""Search over a customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_web_hook_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedNotificant
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_web_hook_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_web_hook_entities_with_http_info(**kwargs) # noqa: E501
return data
def search_web_hook_entities_with_http_info(self, **kwargs): # noqa: E501
"""Search over a customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_web_hook_entities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedNotificant
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_web_hook_entities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/webhook', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedNotificant', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_web_hook_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_web_hook_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_web_hook_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_web_hook_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data
def search_web_hook_for_facet_with_http_info(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_web_hook_for_facet_with_http_info(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['facet', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_web_hook_for_facet" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'facet' is set
if ('facet' not in params or
params['facet'] is None):
raise ValueError("Missing the required parameter `facet` when calling `search_web_hook_for_facet`") # noqa: E501
collection_formats = {}
path_params = {}
if 'facet' in params:
path_params['facet'] = params['facet'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/webhook/{facet}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_webhook_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_webhook_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_webhook_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_webhook_for_facets_with_http_info(**kwargs) # noqa: E501
return data
def search_webhook_for_facets_with_http_info(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's webhooks # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_webhook_for_facets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_webhook_for_facets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/search/webhook/facets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerFacetsResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| apache-2.0 | -6,381,698,075,157,138,000 | 38.236936 | 409 | 0.586333 | false | 4.206623 | false | false | false |
jordij/menorkayak | config/settings/local.py | 1 | 2241 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='=SuS&AJ-F7KxV^lYdC^l%Uxn`CV{5<RHMAgwCA>J)xa:6O_Q@c')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
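    # assume the Docker gateway (i.e. the host where the browser runs) is the
    # container's own IP with the last digit swapped for 1, and whitelist it
    # so the debug toolbar is displayed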
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + '1']
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| mit | -3,946,521,920,245,016,600 | 29.283784 | 99 | 0.484159 | false | 4.15 | false | false | false |
joeedh/webblender | tools/extjs_cc/js_smcat.py | 1 | 1399 | #!/usr/bin/env python3
import sys, os.path, os, time, stat, struct, ctypes, io, subprocess, math, random, difflib
import ply, re, traceback
import argparse, base64, json
from js_global import AbstractGlob
from js_cc import concat_smaps
# source map concatenator, for browsers that don't support index maps
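# The --infile argument is expected to list one source-map path per line;
# concat_smaps() (from js_cc) merges them and the result is written to
# --outfile as JSON.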
class LocalGlob(AbstractGlob):
g_file = ""
g_outfile = ""
glob = LocalGlob()
def main():
cparse = argparse.ArgumentParser(add_help=False)
glob.add_args(cparse)
cparse.add_argument("--help", action="help", help="Print this message")
args = cparse.parse_args()
glob.parse_args(cparse, args)
glob.g_outfile = args.outfile
#test_regexpr()
#return 1
glob.g_file = args.infile
if args.infile == None:
print("js_smcat.py: no input files")
return -1
f = open(args.infile, "r")
files = f.readlines()
f.close()
for i in range(len(files)):
files[i] = os.path.abspath(os.path.normpath(files[i].strip()))
ret = json.dumps(concat_smaps(files))
f = open(args.outfile, "w")
f.write(ret)
f.close()
return 0
if __name__ == "__main__":
import io, traceback
try:
ret = main()
except SystemExit:
ret = -1
except:
traceback.print_stack()
traceback.print_exc()
ret = -1
sys.exit(ret)
| apache-2.0 | 542,645,221,419,791,800 | 20.19697 | 90 | 0.596855 | false | 3.420538 | false | false | false |
looprock/Megaphone | sample_service2.py | 1 | 1843 | #!/usr/bin/env python
import json
import sys
import os
from bottle import route, run, get
import time
import httplib
server = "127.0.0.1"
statport = "18082"
host = "%s:18001" % server
staturl = "http://%s:%s/status" % (server,statport)
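# Register this check with the Megaphone server (assumed to be listening on
# port 18001) by POSTing our id and /status URL to its /checks endpoint.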
blob = {"id": "foo", "url": staturl}
data = json.dumps(blob)
connection = httplib.HTTPConnection(host)
connection.request('POST', '/checks', data)
result = connection.getresponse()
print "RESULT: %s - %s" % (result.status, result.reason)
def usage():
print "%s [status: OK,Unknown,Warning,Critical]" % (sys.argv[0])
msgs = {
"OK": "Everything is groovy!",
"Unknown": "Unknown error!",
"Warning": "Houston, I think we have a problem.",
"Critical": "Danger Will Rogers! Danger!"
}
t = len(sys.argv)
if t < 2:
usage()
sys.exit(1)
else:
statusm = sys.argv[1]
t = time.localtime()
ts = time.strftime('%Y-%m-%dT%H:%M:%S%Z', t)
rootdir = "./"
# Put this script's own directory on sys.path so local imports resolve
# regardless of the caller's working directory
root = os.path.join(os.path.dirname(__file__))
sys.path.insert(0, root)
# generate nested python dictionaries, copied from here:
# http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
@get('/status')
def status():
data = AutoVivification()
data['id'] = "bar"
data['status'] = statusm
data['date'] = ts
data['message'] = msgs[statusm]
data['version'] = "1.0.0"
return data
run(host='localhost', port=statport, debug=True)
| isc | 8,763,208,669,548,261,000 | 25.623188 | 107 | 0.623843 | false | 3.183709 | false | false | false |
fluxcapacitor/pipeline | libs/pipeline_model/tensorflow/core/framework/tensor_description_pb2.py | 1 | 4799 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor_description.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import allocation_description_pb2 as tensorflow_dot_core_dot_framework_dot_allocation__description__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor_description.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n2tensorflow/core/framework/tensor_description.proto\x12\ntensorflow\x1a%tensorflow/core/framework/types.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a\x36tensorflow/core/framework/allocation_description.proto\"\xa8\x01\n\x11TensorDescription\x12#\n\x05\x64type\x18\x01 \x01(\x0e\x32\x14.tensorflow.DataType\x12+\n\x05shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x41\n\x16\x61llocation_description\x18\x04 \x01(\x0b\x32!.tensorflow.AllocationDescriptionB8\n\x18org.tensorflow.frameworkB\x17TensorDescriptionProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_allocation__description__pb2.DESCRIPTOR,])
_TENSORDESCRIPTION = _descriptor.Descriptor(
name='TensorDescription',
full_name='tensorflow.TensorDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorDescription.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shape', full_name='tensorflow.TensorDescription.shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocation_description', full_name='tensorflow.TensorDescription.allocation_description', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=208,
serialized_end=376,
)
_TENSORDESCRIPTION.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORDESCRIPTION.fields_by_name['shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORDESCRIPTION.fields_by_name['allocation_description'].message_type = tensorflow_dot_core_dot_framework_dot_allocation__description__pb2._ALLOCATIONDESCRIPTION
DESCRIPTOR.message_types_by_name['TensorDescription'] = _TENSORDESCRIPTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorDescription = _reflection.GeneratedProtocolMessageType('TensorDescription', (_message.Message,), dict(
DESCRIPTOR = _TENSORDESCRIPTION,
__module__ = 'tensorflow.core.framework.tensor_description_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorDescription)
))
_sym_db.RegisterMessage(TensorDescription)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\027TensorDescriptionProtosP\001\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 | -6,591,478,038,082,507,000 | 46.04902 | 598 | 0.770994 | false | 3.450036 | false | true | false |
MattPerron/esper | esper/query/management/commands/face_gender.py | 1 | 1653 | from django.core.management.base import BaseCommand
from faceDB.face_db import FaceDB
from faceDB.face import FaceCluster
from faceDB.util import * # only required for saving cluster images
from carnie_helper import RudeCarnie
from query.models import *
import random
import json
class Command(BaseCommand):
help = 'Find genders for all the detected faces'
def add_arguments(self, parser):
parser.add_argument('path')
def handle_video(self, path, rude_carnie):
print path
video = Video.objects.filter(path=path).get()
labelset = video.detected_labelset()
faces = Face.objects.filter(frame__labelset=labelset).all()
faces = [f for f in faces if f.gender == '0']
print(len(faces))
imgs = ['./assets/thumbnails/{}_{}.png'.format(labelset.id, f.id) for f in faces]
male_ids = []
female_ids = []
if len(imgs) == 0:
return
genders = rude_carnie.get_gender_batch(imgs)
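        # each prediction is assumed to be a sequence whose first element is
        # the gender label, e.g. ('M', 0.93) or ('F', 0.87)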
for face, gender in zip(faces, genders):
if gender[0] == 'M':
male_ids.append(face.id)
elif gender[0] == 'F':
female_ids.append(face.id)
Face.objects.filter(id__in=male_ids).update(gender='M')
Face.objects.filter(id__in=female_ids).update(gender='F')
def handle(self, *args, **options):
with open(options['path']) as f:
paths = [s.strip() for s in f.readlines()]
model_dir = '/app/deps/rude-carnie/inception_gender_checkpoint'
rc = RudeCarnie(model_dir=model_dir)
for path in paths:
self.handle_video(path, rc)
| apache-2.0 | 5,525,166,055,548,253,000 | 32.734694 | 89 | 0.611615 | false | 3.487342 | false | false | false |
antoinecarme/pyaf | pyaf/TS/PredictionIntervals.py | 1 | 3711 | # Copyright (C) 2016 Antoine Carme <[email protected]>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import SignalDecomposition as sigdec
from . import Perf as tsperf
from . import Utils as tsutil
class cPredictionIntervalsEstimator:
def __init__(self):
self.mModel = None;
self.mSignalFrame = pd.DataFrame()
self.mHorizon = -1;
self.mFitPerformances = {}
self.mForecastPerformances = {}
self.mTestPerformances = {}
def computePerformances(self):
self.mTime = self.mModel.mTime;
self.mSignal = self.mModel.mOriginalSignal;
self.mHorizon = self.mModel.mTimeInfo.mHorizon;
lTimeColumn = self.mTime;
lSignalColumn = self.mSignal;
lForecastColumn = str(self.mSignal) + "_Forecast";
df = self.mModel.mTrend.mSignalFrame.reset_index();
N = df.shape[0];
(lOriginalFit, lOriginalForecast, lOriginalTest) = self.mModel.mTimeInfo.mSplit.cutFrame(df);
df1 = df;
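        # Roll the model forward one step at a time: the horizon-h forecast is
        # fed back in as the signal for horizon h+1, and fit/forecast/test
        # performance is recorded separately for every horizon.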
for h in range(0 , self.mHorizon):
df2 = None;
df2 = self.mModel.forecastOneStepAhead(df1, horizon_index = h+1, perf_mode = True);
df2 = df2.head(N);
lHorizonName = lForecastColumn + "_" + str(h + 1);
(lFrameFit, lFrameForecast, lFrameTest) = self.mModel.mTimeInfo.mSplit.cutFrame(df2);
self.mFitPerformances[lHorizonName] = tsperf.cPerf();
self.mFitPerformances[lHorizonName].compute(lOriginalFit[lSignalColumn], lFrameFit[lForecastColumn], lHorizonName);
self.mForecastPerformances[lHorizonName] = tsperf.cPerf();
self.mForecastPerformances[lHorizonName].compute(lOriginalForecast[lSignalColumn], lFrameForecast[lForecastColumn], lHorizonName);
self.mTestPerformances[lHorizonName] = tsperf.cPerf();
if(lOriginalTest.shape[0] > 0):
self.mTestPerformances[lHorizonName].compute(lOriginalTest[lSignalColumn], lFrameTest[lForecastColumn], lHorizonName);
df1 = df2[[lTimeColumn , lForecastColumn,
self.mModel.mTimeInfo.mRowNumberColumn,
self.mModel.mTimeInfo.mNormalizedTimeColumn]];
df1.columns = [lTimeColumn , lSignalColumn, self.mModel.mTimeInfo.mRowNumberColumn,
self.mModel.mTimeInfo.mNormalizedTimeColumn]
# self.dump_detailed();
def dump_detailed(self):
logger = tsutil.get_pyaf_logger();
lForecastColumn = str(self.mSignal) + "_Forecast";
for h in range(0 , self.mHorizon):
lHorizonName = lForecastColumn + "_" + str(h + 1);
hn = lHorizonName;
logger.info("CONFIDENCE_INTERVAL_DUMP_FIT " +str(hn) + " " + str(self.mFitPerformances[hn].mL2) + " " + str(self.mFitPerformances[hn].mMAPE));
logger.info("CONFIDENCE_INTERVAL_DUMP_FORECAST " +str(hn) + " " + str(self.mForecastPerformances[hn].mL2) + " " + str(self.mForecastPerformances[hn].mMAPE));
logger.info("CONFIDENCE_INTERVAL_DUMP_TEST " +str(hn) + " " + str(self.mTestPerformances[hn].mL2) + " " + str(self.mTestPerformances[hn].mMAPE));
def dump(self):
logger = tsutil.get_pyaf_logger();
lForecastColumn = str(self.mSignal) + "_Forecast";
for h in range(0 , self.mHorizon):
lHorizonName = lForecastColumn + "_" + str(h + 1);
hn = lHorizonName;
logger.info("CONFIDENCE_INTERVAL_DUMP_FORECAST " + str(hn) + " " + str(self.mForecastPerformances[hn].mL2));
| bsd-3-clause | 1,306,167,367,611,973,600 | 49.148649 | 169 | 0.642684 | false | 3.289894 | true | false | false |
lxylinki/medCC | src/main/resources/output/evalresults2014/avgImpNew/overIndex/impoverss.py | 1 | 1586 | #!/usr/bin/python3
# generate a 20x20 matrix
import os
def collectimps(Mods, Edges, maxbudlevel):
filedir = './'
outfilename = 'impoverss.dat'
outfilename = os.path.join('./', outfilename)
outfile = open(outfilename, 'w')
    # for each budget level
for x in range(1, maxbudlevel+1):
impoverssallindex = []
# collect imp across each index on this level
for y in range(0,20):
filename = '{}_{}_AvgOverIndex.txt'.format(Mods[y], Edges[y])
filename = os.path.join(filedir, filename)
imps = open(filename, 'r')
for line in imps:
items = line.split()
if (items[0].isdigit()==False):
continue
budlevel = int(items[0])
if (budlevel != x):
continue
# find corresponding imp
if (budlevel == x):
impoverss = float(items[2])
# record to list
impoverssallindex.append(impoverss)
imps.close()
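        # write one (budget level, index, improvement) triple per line; the
        # blank line after each budget level separates the blocks (presumably
        # for gnuplot's splot, matching the "20x20 matrix" comment above)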
for y in range(0,len(impoverssallindex)):
outfile.write('%d\t%d\t%.2f\n'% (x, y, impoverssallindex[y]))
outfile.write('\n')
outfile.close()
if __name__=='__main__':
Mods = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50,
55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
Edges = [6, 15, 60, 80, 200, 300, 500, 500, 580, 500,
800, 900, 950, 950, 1000, 1200, 1200, 1600, 1600, 2000]
# 20 bud levels
collectimps(Mods, Edges, 20)
| gpl-3.0 | 6,885,915,757,102,359,000 | 28.924528 | 73 | 0.504414 | false | 3.388889 | false | false | false |
kholia/ReproducibleBuilds | koji/ssl/SSLCommon.py | 1 | 5017 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <[email protected]> and Red Hat, Inc.
import os, sys
from OpenSSL import SSL
import SSLConnection
import httplib
import socket
import SocketServer
def our_verify(connection, x509, errNum, errDepth, preverifyOK):
# print "Verify: errNum = %s, errDepth = %s, preverifyOK = %s" % (errNum, errDepth, preverifyOK)
# preverifyOK should tell us whether or not the client's certificate
# correctly authenticates against the CA chain
return preverifyOK
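# certs is a dict of PEM file paths; a typical (hypothetical) layout:
#   certs = {'key_and_cert': '/etc/pki/host.pem',    # combined key + certificate
#            'ca_cert': '/etc/pki/ca.crt',           # CA advertised to clients
#            'peer_ca_cert': '/etc/pki/ca.crt'}      # CA used to verify the peer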
def CreateSSLContext(certs):
key_and_cert = certs['key_and_cert']
ca_cert = certs['ca_cert']
peer_ca_cert = certs['peer_ca_cert']
for f in key_and_cert, ca_cert, peer_ca_cert:
if f and not os.access(f, os.R_OK):
raise StandardError, "%s does not exist or is not readable" % f
ctx = SSL.Context(SSL.SSLv23_METHOD) # Use best possible TLS Method
ctx.use_certificate_file(key_and_cert)
ctx.use_privatekey_file(key_and_cert)
ctx.load_client_ca(ca_cert)
ctx.load_verify_locations(peer_ca_cert)
verify = SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT
ctx.set_verify(verify, our_verify)
ctx.set_verify_depth(10)
ctx.set_options(SSL.OP_NO_SSLv3 | SSL.OP_NO_SSLv2) # disable SSLv2 and SSLv3
return ctx
class PlgBaseServer(SocketServer.ThreadingTCPServer):
allow_reuse_address = 1
def __init__(self, server_addr, req_handler):
self._quit = False
self.allow_reuse_address = 1
SocketServer.ThreadingTCPServer.__init__(self, server_addr, req_handler)
def stop(self):
self._quit = True
def serve_forever(self):
while not self._quit:
self.handle_request()
self.server_close()
class PlgBaseSSLServer(PlgBaseServer):
""" SSL-enabled variant """
def __init__(self, server_address, req_handler, certs, timeout=None):
self._timeout = timeout
self.ssl_ctx = CreateSSLContext(certs)
PlgBaseServer.__init__(self, server_address, req_handler)
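        # replace the plain TCP socket created by the base class with an
        # SSL-wrapped one, then bind and activate the server again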
sock = socket.socket(self.address_family, self.socket_type)
con = SSL.Connection(self.ssl_ctx, sock)
self.socket = SSLConnection.SSLConnection(con)
if sys.version_info[:3] >= (2, 3, 0):
self.socket.settimeout(self._timeout)
self.server_bind()
self.server_activate()
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class PlgHTTPSConnection(httplib.HTTPConnection):
"This class allows communication via SSL."
response_class = httplib.HTTPResponse
def __init__(self, host, port=None, ssl_context=None, strict=None, timeout=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.ssl_ctx = ssl_context
self._timeout = timeout
def connect(self):
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
con = SSL.Connection(self.ssl_ctx, sock)
self.sock = SSLConnection.SSLConnection(con)
if sys.version_info[:3] >= (2, 3, 0):
self.sock.settimeout(self._timeout)
self.sock.connect(sa)
if self.debuglevel > 0:
print "connect: (%s, %s) [ssl]" % (self.host, self.port)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
else:
raise socket.error, "failed to connect"
class PlgHTTPS(httplib.HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
_connection_class = PlgHTTPSConnection
def __init__(self, host='', port=None, ssl_context=None, strict=None, timeout=None):
self._setup(self._connection_class(host, port, ssl_context, strict, timeout))
| gpl-2.0 | -5,407,672,898,626,829,000 | 34.58156 | 100 | 0.644608 | false | 3.732887 | false | false | false |
ainterr/scoring_engine | scoring_engine/settings.py | 1 | 4139 | import os
import logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#l*-2%5rhe+p5@=%!kq*)n7$2sho*yx$$9)c1e(l0%(ohmrxsc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'engine',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scoring_engine.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scoring_engine.wsgi.application'
# Supress noisy request logger
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Logging config
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(module)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S",
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'debug.log',
'formatter': 'verbose',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
}
},
'loggers': {
'engine': {
'handlers': ['console', 'file'],
'filters': ['require_debug_true'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Override the default User model
AUTH_USER_MODEL = 'engine.User'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Authentication
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
| mit | -1,615,866,596,781,920,500 | 25.196203 | 91 | 0.624547 | false | 3.637083 | false | false | false |
crooks/mimix | mimix/webcgi.py | 1 | 1333 | #!/usr/bin/python
#
# vim: tabstop=4 expandtab shiftwidth=4 noautoindent
#
# webcgi.py - CGI file for storing Mimix messages in a Pool
#
# Copyright (C) 2014 Steve Crook <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import cgi
import cgitb
import sys
import os
from Crypto import Random
cgitb.enable()
print "Content-type:text/html\r\n\r\n"
form = cgi.FieldStorage()
content = form.getvalue('mimix')
if content is None:
sys.exit(0)
if '-----BEGIN MIMIX MESSAGE-----' in content:
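    # pick a collision-free name in the inbound pool: 'm' plus 4 random bytes
    # hex-encoded, retried until no existing file matches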
while True:
fn = os.path.join('/home/crooks/mimix/inbound_pool',
'm' + Random.new().read(4).encode('hex'))
if not os.path.isfile(fn):
break
with open(fn, 'w') as f:
f.write(content)
| gpl-3.0 | -14,758,339,898,345,144 | 30 | 78 | 0.694674 | false | 3.426735 | false | false | false |
polarise/RP-python | name_change.py | 1 | 1132 | #!/home/paulk/software/bin/python
from sys import argv,stderr,stdout
import argparse
parser = argparse.ArgumentParser(description="Script to replace a column in a file with another identifier given a map between identifiers.")
parser.add_argument('infile',help="the file whose column is to be swapped")
parser.add_argument('-m','--map',help="the map of current to required identifiers")
parser.add_argument('-c','--column',type=int,default=0,help="the 0-based index of the column to be swapped")
parser.add_argument('-o','--outfile',help="outfile; optional [default: stdout]")
args = parser.parse_args()
mapfile = args.map
colno = args.column
infile = args.infile
outfile = args.outfile
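# Build the identifier map; each line of the map file is expected to contain
# two tab-separated columns: current_id<TAB>replacement_id.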
names = dict()
f = open(mapfile)
for row in f:
l = row.strip().split('\t')
names[l[0]] = l[1]
f.close()
count = 0
f = open(infile)
if outfile: g = open(outfile,'w')
else: g = stdout
for row in f:
l = row.strip().split('\t')
try:
# print names[l[0]]+"\t"+"\t".join(l[1:])
print >> g,"\t".join(l[:colno]+[names[l[colno]]]+l[colno+1:])
except KeyError:
count += 1
f.close()
if outfile: g.close()
print >> stderr,"missing %s" % count
| gpl-2.0 | 8,682,498,210,810,467,000 | 28.789474 | 141 | 0.688163 | false | 3.026738 | false | false | false |
sebrandon1/nova | nova/tests/unit/scheduler/test_scheduler_utils.py | 4 | 17048 | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import mock
import six
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import exception
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests import uuidsentinel as uuids
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
def test_build_request_spec_without_image(self):
instance = {'uuid': uuids.instance}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
mock_extract.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_extract.assert_called_once_with({'uuid': uuids.instance})
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
instance_type = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
mock_get.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_get.assert_called_once_with()
self.assertIsInstance(request_spec['instance_properties'], dict)
@mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(objects.Instance, 'save')
def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get):
expected_uuid = uuids.instance
request_spec = dict(instance_properties=dict(uuid='other-uuid'))
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=expected_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
scheduler_utils.set_vm_state_and_notify(self.context,
expected_uuid,
service,
method,
updates,
exc_info,
request_spec)
mock_save.assert_called_once_with()
mock_add.assert_called_once_with(self.context, mock.ANY,
exc_info, mock.ANY)
self.assertIsInstance(mock_add.call_args[0][1], objects.Instance)
self.assertIsInstance(mock_add.call_args[0][3], tuple)
mock_get.return_value.error.assert_called_once_with(self.context,
event_type,
payload)
def test_build_filter_properties(self):
sched_hints = {'hint': ['over-there']}
forced_host = 'forced-host1'
forced_node = 'forced-node1'
instance_type = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual([forced_host], filt_props['force_hosts'])
self.assertEqual([forced_node], filt_props['force_nodes'])
self.assertEqual(instance_type, filt_props['instance_type'])
def test_build_filter_properties_no_forced_host_no_force_node(self):
sched_hints = {'hint': ['over-there']}
forced_host = None
forced_node = None
instance_type = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual(instance_type, filt_props['instance_type'])
self.assertNotIn('forced_host', filt_props)
self.assertNotIn('forced_node', filt_props)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
elif len(force_hosts) > 1 or len(force_nodes) > 1:
filter_properties = dict(retry=dict(hosts=[]),
force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
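        # retry info is only recorded when a retry is actually possible:
        # no forced host/node, or more than one forced value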
if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if (with_retry and enable_retry_force_hosts
and enable_retry_force_nodes):
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
def test_populate_filter_props_multi_force_hosts_with_retry(self):
self._test_populate_filter_props(force_hosts=['force-host1',
'force-host2'])
def test_populate_filter_props_multi_force_nodes_with_retry(self):
self._test_populate_filter_props(force_nodes=['force-node1',
'force-node2'])
def test_populate_retry_exception_at_max_attempts(self):
self.flags(max_attempts=2, group='scheduler')
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc_reason=[msg]))
nvh = self.assertRaises(exception.MaxRetriesExceeded,
scheduler_utils.populate_retry,
filter_properties, uuids.instance)
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, six.text_type(nvh))
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(enabled_filters='FakeFilter1,FakeFilter2',
group='filter_scheduler')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def test_validate_weighers_configured(self):
self.flags(weight_classes=[
'ServerGroupSoftAntiAffinityWeigher', 'FakeFilter1'],
group='filter_scheduler')
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAntiAffinityWeigher'))
self.assertTrue(scheduler_utils.validate_weigher('FakeFilter1'))
self.assertFalse(scheduler_utils.validate_weigher(
'ServerGroupSoftAffinityWeigher'))
def test_validate_weighers_configured_all_weighers(self):
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAffinityWeigher'))
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAntiAffinityWeigher'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = uuids.fake
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with test.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, 'fake_uuid', group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy], group.members),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity',
'soft-affinity', 'soft-anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_instance_uuid(self):
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
self.flags(enabled_filters=['fake'], group='filter_scheduler')
self.flags(weight_classes=['fake'], group='filter_scheduler')
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = uuids.fake
group.members = [instance.uuid]
group.policies = [policy]
with test.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
) as (get_group,):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
scheduler_utils._SUPPORTS_SOFT_AFFINITY = None
scheduler_utils._SUPPORTS_SOFT_ANTI_AFFINITY = None
self.assertRaises(exception.UnsupportedPolicyException,
scheduler_utils._get_group_details,
self.context, uuids.instance)
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity',
'soft-affinity', 'soft-anti-affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'],
members=['instance1'])
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, uuids.instance,
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy'],
'group_members': ['instance1']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, uuids.instance,
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
| apache-2.0 | 2,927,628,251,592,277,500 | 44.340426 | 78 | 0.558013 | false | 4.36345 | true | false | false |
redhat-openstack/python-neutronclient | quantumclient/tests/unit/test_cli20_subnet.py | 1 | 15395 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
from quantumclient.quantum.v2_0.subnet import CreateSubnet
from quantumclient.quantum.v2_0.subnet import DeleteSubnet
from quantumclient.quantum.v2_0.subnet import ListSubnet
from quantumclient.quantum.v2_0.subnet import ShowSubnet
from quantumclient.quantum.v2_0.subnet import UpdateSubnet
from quantumclient.tests.unit.test_cli20 import CLITestV20Base
from quantumclient.tests.unit.test_cli20 import MyApp
class CLITestV20Subnet(CLITestV20Base):
def test_create_subnet(self):
"""Create subnet: --gateway gateway netid cidr."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'cidrvalue'
gateway = 'gatewayvalue'
args = ['--gateway', gateway, netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
position_values = [4, netid, cidr, gateway]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_subnet_with_no_gateway(self):
"""Create subnet: --no-gateway netid cidr"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'cidrvalue'
args = ['--no-gateway', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
position_values = [4, netid, cidr, None]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
    def test_create_subnet_with_bad_gateway_option(self):
        """Create subnet: --gateway gateway --no-gateway netid cidr (conflicting options)."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'cidrvalue'
gateway = 'gatewayvalue'
args = ['--gateway', gateway, '--no-gateway', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr', 'gateway_ip']
position_values = [4, netid, cidr, None]
try:
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
except:
return
self.fail('No exception for bad gateway option')
def test_create_subnet_tenant(self):
"""Create subnet: --tenant_id tenantid netid cidr."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid', netid, cidr]
position_names = ['ip_version', 'network_id', 'cidr']
position_values = [4, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_tags(self):
"""Create subnet: netid cidr --tags a b."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = [netid, cidr, '--tags', 'a', 'b']
position_names = ['ip_version', 'network_id', 'cidr']
position_values = [4, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_subnet_allocation_pool(self):
"""Create subnet: --tenant_id tenantid <allocation_pool> netid cidr.
The <allocation_pool> is --allocation_pool start=1.1.1.10,end=1.1.1.20
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation_pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr]
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pool = [{'start': '1.1.1.10', 'end': '1.1.1.20'}]
position_values = [4, pool, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_allocation_pools(self):
"""Create subnet: --tenant-id tenantid <pools> netid cidr.
The <pools> are --allocation_pool start=1.1.1.10,end=1.1.1.20 and
--allocation_pool start=1.1.1.30,end=1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation_pool', 'start=1.1.1.10,end=1.1.1.20',
'--allocation_pool', 'start=1.1.1.30,end=1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_host_route(self):
"""Create subnet: --tenant_id tenantid <host_route> netid cidr.
The <host_route> is
--host-route destination=172.16.1.0/24,nexthop=1.1.1.20
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--host-route', 'destination=172.16.1.0/24,nexthop=1.1.1.20',
netid, cidr]
position_names = ['ip_version', 'host_routes', 'network_id',
'cidr']
route = [{'destination': '172.16.1.0/24', 'nexthop': '1.1.1.20'}]
position_values = [4, route, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_host_routes(self):
"""Create subnet: --tenant-id tenantid <host_routes> netid cidr.
The <host_routes> are
--host-route destination=172.16.1.0/24,nexthop=1.1.1.20 and
--host-route destination=172.17.7.0/24,nexthop=1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--host-route', 'destination=172.16.1.0/24,nexthop=1.1.1.20',
'--host-route', 'destination=172.17.7.0/24,nexthop=1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'host_routes', 'network_id',
'cidr']
routes = [{'destination': '172.16.1.0/24', 'nexthop': '1.1.1.20'},
{'destination': '172.17.7.0/24', 'nexthop': '1.1.1.40'}]
position_values = [4, routes, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_dns_nameservers(self):
"""Create subnet: --tenant-id tenantid <dns-nameservers> netid cidr.
The <dns-nameservers> are
--dns-nameserver 1.1.1.20 and --dns-nameserver 1.1.1.40
"""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--dns-nameserver', '1.1.1.20',
'--dns-nameserver', '1.1.1.40',
netid, cidr]
position_names = ['ip_version', 'dns_nameservers', 'network_id',
'cidr']
nameservers = ['1.1.1.20', '1.1.1.40']
position_values = [4, nameservers, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_with_disable_dhcp(self):
"""Create subnet: --tenant-id tenantid --disable-dhcp netid cidr."""
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--disable-dhcp',
netid, cidr]
position_names = ['ip_version', 'enable_dhcp', 'network_id',
'cidr']
position_values = [4, False, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
    def test_create_subnet_merge_single_plural(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation-pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr,
'--allocation-pools', 'list=true', 'type=dict',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
    def test_create_subnet_merge_plural(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
netid, cidr,
'--allocation-pools', 'list=true', 'type=dict',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_subnet_merge_single_single(self):
resource = 'subnet'
cmd = CreateSubnet(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
cidr = 'prefixvalue'
args = ['--tenant_id', 'tenantid',
'--allocation-pool', 'start=1.1.1.10,end=1.1.1.20',
netid, cidr,
'--allocation-pool',
'start=1.1.1.30,end=1.1.1.40']
position_names = ['ip_version', 'allocation_pools', 'network_id',
'cidr']
pools = [{'start': '1.1.1.10', 'end': '1.1.1.20'},
{'start': '1.1.1.30', 'end': '1.1.1.40'}]
position_values = [4, pools, netid, cidr]
_str = self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_list_subnets_detail(self):
"""List subnets: -D."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_subnets_tags(self):
"""List subnets: -- --tags a b."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_subnets_detail_tags(self):
"""List subnets: -D -- --tags a b."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_subnets_fields(self):
"""List subnets: --fields a --fields b -- --fields c d."""
resources = "subnets"
cmd = ListSubnet(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def test_update_subnet(self):
"""Update subnet: myid --name myname --tags a b."""
resource = 'subnet'
cmd = UpdateSubnet(MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_show_subnet(self):
"""Show subnet: --fields id --fields name myid."""
resource = 'subnet'
cmd = ShowSubnet(MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_subnet(self):
"""Delete subnet: subnetid."""
resource = 'subnet'
cmd = DeleteSubnet(MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
| apache-2.0 | -4,125,228,170,321,448,400 | 42.735795 | 79 | 0.517895 | false | 3.63004 | true | false | false |
hankcs/HanLP | hanlp/components/parsers/constituency/crf_constituency_model.py | 1 | 9086 | # -*- coding:utf-8 -*-
# Adopted from https://github.com/yzhangcs/parser
# MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from torch import nn
from hanlp.components.parsers.constituency.treecrf import CRFConstituency
from hanlp.components.parsers.alg import cky
from hanlp.components.parsers.biaffine.biaffine import Biaffine
from hanlp.components.parsers.biaffine.mlp import MLP
class CRFConstituencyDecoder(nn.Module):
r"""
The implementation of CRF Constituency Parser,
also called FANCY (abbr. of Fast and Accurate Neural Crf constituencY) Parser.
References:
- Yu Zhang, Houquan Zhou and Zhenghua Li. 2020.
`Fast and Accurate Neural CRF Constituency Parsing`_.
Args:
n_words (int):
The size of the word vocabulary.
n_feats (int):
The size of the feat vocabulary.
n_labels (int):
The number of labels.
feat (str):
Specifies which type of additional feature to use: ``'char'`` | ``'bert'`` | ``'tag'``.
``'char'``: Character-level representations extracted by CharLSTM.
            ``'bert'``: BERT representations, other pretrained language models like XLNet are also feasible.
``'tag'``: POS tag embeddings.
Default: 'char'.
n_embed (int):
The size of word embeddings. Default: 100.
n_feat_embed (int):
The size of feature representations. Default: 100.
n_char_embed (int):
The size of character embeddings serving as inputs of CharLSTM, required if ``feat='char'``. Default: 50.
bert (str):
Specifies which kind of language model to use, e.g., ``'bert-base-cased'`` and ``'xlnet-base-cased'``.
This is required if ``feat='bert'``. The full list can be found in `transformers`.
Default: ``None``.
n_bert_layers (int):
Specifies how many last layers to use. Required if ``feat='bert'``.
The final outputs would be the weight sum of the hidden states of these layers.
Default: 4.
mix_dropout (float):
The dropout ratio of BERT layers. Required if ``feat='bert'``. Default: .0.
embed_dropout (float):
The dropout ratio of input embeddings. Default: .33.
n_hidden (int):
The size of LSTM hidden states. Default: 400.
n_lstm_layers (int):
The number of LSTM layers. Default: 3.
lstm_dropout (float):
The dropout ratio of LSTM. Default: .33.
n_mlp_span (int):
Span MLP size. Default: 500.
n_mlp_label (int):
Label MLP size. Default: 100.
mlp_dropout (float):
The dropout ratio of MLP layers. Default: .33.
feat_pad_index (int):
The index of the padding token in the feat vocabulary. Default: 0.
pad_index (int):
The index of the padding token in the word vocabulary. Default: 0.
unk_index (int):
The index of the unknown token in the word vocabulary. Default: 1.
.. _Fast and Accurate Neural CRF Constituency Parsing:
https://www.ijcai.org/Proceedings/2020/560/
.. _transformers:
https://github.com/huggingface/transformers
"""
def __init__(self,
n_labels,
n_hidden=400,
n_mlp_span=500,
n_mlp_label=100,
mlp_dropout=.33,
**kwargs
):
super().__init__()
# the MLP layers
self.mlp_span_l = MLP(n_in=n_hidden, n_out=n_mlp_span, dropout=mlp_dropout)
self.mlp_span_r = MLP(n_in=n_hidden, n_out=n_mlp_span, dropout=mlp_dropout)
self.mlp_label_l = MLP(n_in=n_hidden, n_out=n_mlp_label, dropout=mlp_dropout)
self.mlp_label_r = MLP(n_in=n_hidden, n_out=n_mlp_label, dropout=mlp_dropout)
# the Biaffine layers
self.span_attn = Biaffine(n_in=n_mlp_span, bias_x=True, bias_y=False)
self.label_attn = Biaffine(n_in=n_mlp_label, n_out=n_labels, bias_x=True, bias_y=True)
self.crf = CRFConstituency()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, **kwargs):
r"""
Args:
x (~torch.FloatTensor): ``[batch_size, seq_len, hidden_dim]``.
Hidden states from encoder.
Returns:
~torch.Tensor, ~torch.Tensor:
The first tensor of shape ``[batch_size, seq_len, seq_len]`` holds scores of all possible spans.
The second of shape ``[batch_size, seq_len, seq_len, n_labels]`` holds
scores of all possible labels on each span.
"""
x_f, x_b = x.chunk(2, -1)
x = torch.cat((x_f[:, :-1], x_b[:, 1:]), -1)
# apply MLPs to the BiLSTM output states
span_l = self.mlp_span_l(x)
span_r = self.mlp_span_r(x)
label_l = self.mlp_label_l(x)
label_r = self.mlp_label_r(x)
# [batch_size, seq_len, seq_len]
s_span = self.span_attn(span_l, span_r)
# [batch_size, seq_len, seq_len, n_labels]
s_label = self.label_attn(label_l, label_r).permute(0, 2, 3, 1)
return s_span, s_label
def loss(self, s_span, s_label, charts, mask, mbr=True):
r"""
Args:
s_span (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all spans
s_label (~torch.Tensor): ``[batch_size, seq_len, seq_len, n_labels]``.
Scores of all labels on each span.
charts (~torch.LongTensor): ``[batch_size, seq_len, seq_len]``.
The tensor of gold-standard labels, in which positions without labels are filled with -1.
mask (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``.
The mask for covering the unpadded tokens in each chart.
mbr (bool):
If ``True``, returns marginals for MBR decoding. Default: ``True``.
Returns:
~torch.Tensor, ~torch.Tensor:
The training loss and
original span scores of shape ``[batch_size, seq_len, seq_len]`` if ``mbr=False``, or marginals otherwise.
"""
span_mask = charts.ge(0) & mask
span_loss, span_probs = self.crf(s_span, mask, span_mask, mbr)
label_loss = self.criterion(s_label[span_mask], charts[span_mask])
loss = span_loss + label_loss
return loss, span_probs
def decode(self, s_span, s_label, mask):
r"""
Args:
s_span (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all spans.
s_label (~torch.Tensor): ``[batch_size, seq_len, seq_len, n_labels]``.
Scores of all labels on each span.
mask (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``.
The mask for covering the unpadded tokens in each chart.
Returns:
list[list[tuple]]:
Sequences of factorized labeled trees traversed in pre-order.
"""
span_preds = cky(s_span, mask)
label_preds = s_label.argmax(-1).tolist()
return [[(i, j, labels[i][j]) for i, j in spans] for spans, labels in zip(span_preds, label_preds)]
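# Illustrative usage sketch (not part of the original file; the encoder output
# ``x``, ``charts`` and ``mask`` are assumed to come from preprocessing that is
# not shown, and n_labels=30 is an arbitrary example value):
#
#   decoder = CRFConstituencyDecoder(n_labels=30, n_hidden=400)
#   s_span, s_label = decoder(x)  # x: [batch_size, seq_len, hidden_dim]
#   loss, span_probs = decoder.loss(s_span, s_label, charts, mask, mbr=True)
#   trees = decoder.decode(s_span, s_label, mask)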
class CRFConstituencyModel(nn.Module):
def __init__(self, encoder, decoder: CRFConstituencyDecoder) -> None:
super().__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, batch):
r"""
Args:
batch (~dict):
Batch of input data.
Returns:
~torch.Tensor, ~torch.Tensor:
The first tensor of shape ``[batch_size, seq_len, seq_len]`` holds scores of all possible spans.
The second of shape ``[batch_size, seq_len, seq_len, n_labels]`` holds
scores of all possible labels on each span.
"""
x = self.encoder(batch)
return self.decoder(x)
| apache-2.0 | 4,345,479,136,631,748,600 | 41.457944 | 122 | 0.596412 | false | 3.698006 | false | false | false |
shiny-fortnight/auto_id | OpenCNAMAPI.py | 1 | 1598 | #!/bin/python
# coding: utf-8
"""
Python API to retrieve Caller ID information for a phone number using the OpenCNAM API.
"""
import requests
from bs4 import BeautifulSoup
import json
import phonenumbers
class OpenCNAMAPI(object):
"""
OpenCNAMAPI Main Handler
"""
_instance = None
_verbose = False
def __init__(self, arg=None):
pass
def __new__(cls, *args, **kwargs):
"""
__new__ builtin
"""
if not cls._instance:
            cls._instance = super(OpenCNAMAPI, cls).__new__(cls)
if (args and args[0] and args[0]['verbose']):
cls._verbose = True
return cls._instance
def display_message(self, s):
if (self._verbose):
print('[verbose] %s' % s)
def format_number(self, phone_number):
parsed_number = phonenumbers.parse(phone_number, 'US')
return phonenumbers.format_number(parsed_number, phonenumbers.PhoneNumberFormat.E164)
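    # Illustrative example (assumed input, not from the original source):
    # format_number('415-555-2671') would return '+14155552671', the E.164
    # form produced with the 'US' default region used above.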
def get(self, phone_number):
formatted_number = self.format_number(phone_number)
s = requests.Session()
req = s.get('https://ACc8aa48a044604425ba66940a2f6bdb54:[email protected]/v2/phone/%s?format=json' % formatted_number)
soup = BeautifulSoup(req.content)
json_result = json.loads(str(soup))
dataJson = json.dumps(json_result)
full_name = json_result['name']
phone_number = json_result['number']
return {'dataJson': dataJson, 'full_name': full_name, 'phone_number': phone_number}
| apache-2.0 | -447,218,251,182,167,500 | 28.054545 | 160 | 0.611389 | false | 3.519824 | false | false | false |
csala/zato | code/zato-server/test/zato/server/service/internal/outgoing/test_ftp.py | 6 | 7296 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Bunch
from bunch import Bunch
# Zato
from zato.common import zato_namespace
from zato.common.test import rand_bool, rand_int, rand_string, ServiceTestCase
from zato.server.service import Boolean
from zato.server.service.internal.outgoing.ftp import GetList, Create, Edit, Delete, ChangePassword
##############################################################################
class GetListTestCase(ServiceTestCase):
def setUp(self):
self.service_class = GetList
self.sio = self.service_class.SimpleIO
def get_request_data(self):
return {'cluster_id': rand_int()}
def get_response_data(self):
return Bunch(
{'id':rand_int(), 'name':rand_string(), 'is_active':rand_bool(), 'host':rand_string(),
'port':rand_int(), 'user':rand_string(), 'acct':rand_string(),
'timeout':rand_int(), 'dircache':rand_bool()}
)
def test_sio(self):
self.assertEquals(self.sio.request_elem, 'zato_outgoing_ftp_get_list_request')
self.assertEquals(self.sio.response_elem, 'zato_outgoing_ftp_get_list_response')
self.assertEquals(self.sio.input_required, ('cluster_id',))
self.assertEquals(self.sio.output_required, ('id', 'name', 'is_active', 'host', 'port'))
self.assertEquals(self.sio.output_optional, ('user', 'acct', 'timeout', self.wrap_force_type(Boolean('dircache'))))
self.assertEquals(self.sio.namespace, zato_namespace)
self.assertRaises(AttributeError, getattr, self.sio, 'input_optional')
def test_impl(self):
self.assertEquals(self.service_class.get_name(), 'zato.outgoing.ftp.get-list')
##############################################################################
class CreateTestCase(ServiceTestCase):
def setUp(self):
self.service_class = Create
self.sio = self.service_class.SimpleIO
def get_request_data(self):
return {'cluster_id':rand_int(), 'name':rand_string(), 'is_active':rand_bool(), 'host':rand_string(),
'port':rand_int(),'dircache':rand_bool(), 'user':rand_string(), 'acct':rand_string(), 'timeout':rand_int()}
def get_response_data(self):
return Bunch({'id':self.id, 'name':self.name})
def test_sio(self):
self.assertEquals(self.sio.request_elem, 'zato_outgoing_ftp_create_request')
self.assertEquals(self.sio.response_elem, 'zato_outgoing_ftp_create_response')
self.assertEquals(self.sio.input_required, ('cluster_id', 'name', 'is_active', 'host', 'port', self.wrap_force_type(Boolean('dircache'))))
self.assertEquals(self.sio.input_optional, ('user', 'acct', 'timeout'))
self.assertEquals(self.sio.output_required, ('id', 'name'))
self.assertEquals(self.sio.namespace, zato_namespace)
self.assertRaises(AttributeError, getattr, self.sio, 'output_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_repeated')
def test_impl(self):
self.assertEquals(self.service_class.get_name(), 'zato.outgoing.ftp.create')
##############################################################################
class EditTestCase(ServiceTestCase):
def setUp(self):
self.service_class = Edit
self.sio = self.service_class.SimpleIO
def get_request_data(self):
return {'id':rand_int(), 'cluster_id':rand_int(), 'name':rand_string(), 'is_active':rand_bool(),
'host':rand_string(),'port':rand_int(), 'dircache':rand_bool(), 'user':rand_string(), 'acct':rand_string(),
'timeout':rand_int()}
def get_response_data(self):
return Bunch({'id':rand_int(), 'name':rand_string()})
def test_sio(self):
self.assertEquals(self.sio.request_elem, 'zato_outgoing_ftp_edit_request')
self.assertEquals(self.sio.response_elem, 'zato_outgoing_ftp_edit_response')
self.assertEquals(self.sio.input_required, ('id', 'cluster_id', 'name', 'is_active', 'host', 'port',
self.wrap_force_type(Boolean('dircache'))))
self.assertEquals(self.sio.input_optional, ('user', 'acct', 'timeout'))
self.assertEquals(self.sio.output_required, ('id', 'name'))
self.assertEquals(self.sio.namespace, zato_namespace)
self.assertRaises(AttributeError, getattr, self.sio, 'output_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_repeated')
def test_impl(self):
self.assertEquals(self.service_class.get_name(), 'zato.outgoing.ftp.edit')
##############################################################################
class DeleteTestCase(ServiceTestCase):
def setUp(self):
self.service_class = Delete
self.sio = self.service_class.SimpleIO
def get_request_data(self):
return {'id':rand_int()}
def get_response_data(self):
return Bunch()
def test_sio(self):
self.assertEquals(self.sio.request_elem, 'zato_outgoing_ftp_delete_request')
self.assertEquals(self.sio.response_elem, 'zato_outgoing_ftp_delete_response')
self.assertEquals(self.sio.input_required, ('id',))
self.assertEquals(self.sio.namespace, zato_namespace)
self.assertRaises(AttributeError, getattr, self.sio, 'input_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_required')
self.assertRaises(AttributeError, getattr, self.sio, 'output_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_repeated')
def test_impl(self):
self.assertEquals(self.service_class.get_name(), 'zato.outgoing.ftp.delete')
##############################################################################
class ChangePasswordTestCase(ServiceTestCase):
def setUp(self):
self.service_class = ChangePassword
self.sio = self.service_class.SimpleIO
def get_request_data(self):
return {'id':rand_int(), 'password1':rand_string(), 'password2':rand_string()}
def get_response_data(self):
return Bunch()
def test_sio(self):
self.assertEquals(self.sio.request_elem, 'zato_outgoing_ftp_change_password_request')
self.assertEquals(self.sio.response_elem, 'zato_outgoing_ftp_change_password_response')
self.assertEquals(self.sio.input_required, ('id', 'password1', 'password2'))
self.assertEquals(self.sio.namespace, zato_namespace)
self.assertRaises(AttributeError, getattr, self.sio, 'input_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_required')
self.assertRaises(AttributeError, getattr, self.sio, 'output_optional')
self.assertRaises(AttributeError, getattr, self.sio, 'output_repeated')
def test_impl(self):
self.assertEquals(self.service_class.get_name(), 'zato.outgoing.ftp.change-password')
| gpl-3.0 | -9,125,045,859,334,039,000 | 44.31677 | 146 | 0.611979 | false | 3.920473 | true | false | false |
geoffkilpin/pombola | pombola/settings/base.py | 1 | 20414 | import os
import re
import yaml
from .apps import *
from django.template.defaultfilters import slugify
from pombola.core.logging_filters import skip_unreadable_post
from pombola.hansard.constants import NAME_SUBSTRING_MATCH, NAME_SET_INTERSECTION_MATCH
IN_TEST_MODE = False
# Work out where we are to set up the paths correctly and load config
base_dir = os.path.abspath( os.path.join( os.path.split(__file__)[0], '..', '..' ) )
root_dir = os.path.abspath( os.path.join( base_dir, '..' ) )
# load the mySociety config
config_file = os.path.join( base_dir, 'conf', 'general.yml' )
config = yaml.load( open(config_file, 'r') )
if int(config.get('STAGING')):
STAGING = True
else:
STAGING = False
# switch on all debug when staging
DEBUG = STAGING
TEMPLATE_DEBUG = STAGING
ADMINS = (
(config.get('ERRORS_NAME'), config.get('ERRORS_EMAIL')),
)
SLUGGABLE_SLUGIFY_FUNCTION = slugify
DEFAULT_FROM_EMAIL = config.get('FROM_EMAIL')
# This is the From: address used for error emails to ADMINS
SERVER_EMAIL = DEFAULT_FROM_EMAIL
MANAGERS = (
(config.get('MANAGERS_NAME'), config.get('MANAGERS_EMAIL')),
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': config.get('POMBOLA_DB_NAME'),
'USER': config.get('POMBOLA_DB_USER'),
'PASSWORD': config.get('POMBOLA_DB_PASS'),
'HOST': config.get('POMBOLA_DB_HOST'),
'PORT': config.get('POMBOLA_DB_PORT'),
}
}
# Number of seconds to keep a database connection open for
# in case it can be reused
CONN_MAX_AGE = 0 if STAGING else 300
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = config.get('ALLOWED_HOSTS', [])
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = config.get('TIME_ZONE')
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-GB'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.normpath( os.path.join( root_dir, "media_root/") )
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media_root/'
# Use django-pipeline for handling static files
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.normpath( os.path.join( root_dir, "collected_static/") )
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( base_dir, "web/static/" ),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'pipeline.finders.FileSystemFinder',
# 'pipeline.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
'pipeline.finders.CachedFileFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = config.get('DJANGO_SECRET_KEY')
CACHES = {
# by default use memcached locally. This is what get used by
# django.core.cache.cache
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': config.get('POMBOLA_DB_NAME'),
},
# we also have a dummy cache that is used for all the page requests - we want
# the cache framework to auto-add all the caching headers, but we don't actually
# want to do the caching ourselves - rather we leave that to Varnish on the
# servers.
'dummy': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
CACHE_MIDDLEWARE_ALIAS='dummy'
if DEBUG:
CACHE_MIDDLEWARE_SECONDS = 0
else:
CACHE_MIDDLEWARE_SECONDS = 60 * 20 # twenty minutes
CACHE_MIDDLEWARE_KEY_PREFIX = config.get('POMBOLA_DB_NAME')
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# Always use the TemporaryFileUploadHandler as it allows us to access the
# uploaded file on disk more easily. Currently used by the CSV upload in
# scorecards admin.
FILE_UPLOAD_HANDLERS = (
# "django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware', # first in list so it is able to act last on response
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'pagination.middleware.PaginationMiddleware',
)
if config.get('DEBUG_TOOLBAR', True):
MIDDLEWARE_CLASSES += ( 'debug_toolbar.middleware.DebugToolbarMiddleware', )
ROOT_URLCONF = 'pombola.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( base_dir, "pombola/templates" ),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pombola.core.context_processors.add_settings",
)
MAPIT_AREA_SRID = 4326
MAPIT_RATE_LIMIT = ['127.0.0.1']
# MAPIT_COUNTRY should be set in the country-specific file
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'skip_unreadable_posts': {
'()': 'django.utils.log.CallbackFilter',
'callback': skip_unreadable_post,
},
},
'handlers': {
'mail_admins': {
'filters': ['require_debug_false', 'skip_unreadable_posts'],
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'stream_to_stderr': {
'level': 'WARN',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['stream_to_stderr'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'elasticsearch': {
'handlers': ['stream_to_stderr'],
'level': 'INFO',
'propagate': True,
},
'elasticsearch.trace': {
'handlers': ['stream_to_stderr'],
'level': 'INFO',
'propagate': True,
},
}
}
# Configure the Hansard app
HANSARD_CACHE = os.path.join( base_dir, "../hansard_cache" )
KENYA_PARSER_PDF_TO_HTML_HOST = config.get('KENYA_PARSER_PDF_TO_HTML_HOST')
# The name of a Twitter account related to this website. This will be used to
# pull in the latest tweets on the homepage and in the share on twitter links.
TWITTER_USERNAME = config.get('TWITTER_USERNAME')
# The widget ID is used for displaying tweets on the homepage.
TWITTER_WIDGET_ID = config.get('TWITTER_WIDGET_ID')
# pagination related settings
PAGINATION_DEFAULT_PAGINATION = 10
PAGINATION_DEFAULT_WINDOW = 2
PAGINATION_DEFAULT_ORPHANS = 2
PAGINATION_INVALID_PAGE_RAISES_404 = True
# haystack config - interface to search engine
HAYSTACK_CONNECTIONS = {
#'default': {
# 'ENGINE': 'xapian_backend.XapianEngine',
# 'PATH': os.path.join( root_dir, "pombola_xapian" ),
#'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index'),
#},
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'localhost:9200',
'INDEX_NAME': config.get('POMBOLA_DB_NAME'),
'EXCLUDED_INDEXES': [],
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# Admin autocomplete
AJAX_LOOKUP_CHANNELS = {
'person_name' : dict(model='core.person', search_field='legal_name'),
'organisation_name' : dict(model='core.organisation', search_field='name'),
'place_name' : dict(model='core.place', search_field='name'),
'title_name' : dict(model='core.positiontitle', search_field='name'),
}
# misc settings
HTTPLIB2_CACHE_DIR = os.path.join( root_dir, 'httplib2_cache' )
GOOGLE_ANALYTICS_ACCOUNT = config.get('GOOGLE_ANALYTICS_ACCOUNT')
COUNTY_PERFORMANCE_EXPERIMENT_KEY = config.get('COUNTY_PERFORMANCE_EXPERIMENT_KEY')
YOUTH_EMPLOYMENT_BILL_EXPERIMENT_KEY = config.get('YOUTH_EMPLOYMENT_BILL_EXPERIMENT_KEY')
IEBC_API_ID = config.get('IEBC_API_ID')
IEBC_API_SECRET = config.get('IEBC_API_SECRET')
# Markitup settings
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True, 'extensions':['tables']})
MARKITUP_SET = 'markitup/sets/markdown'
# There are some models that are just for testing, so they are not included in
# the South migrations.
SOUTH_TESTS_MIGRATE = False
# Use nose as the test runner
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--with-doctest', '--with-yanc']
# For the disqus comments
DISQUS_SHORTNAME = config.get( 'DISQUS_SHORTNAME', None )
# At some point we should deprecate this. For now it defaults to true so that
# no entry in the config does the right thing.
DISQUS_USE_IDENTIFIERS = config.get( 'DISQUS_USE_IDENTIFIERS', True )
FACEBOOK_APP_ID = config.get('FACEBOOK_APP_ID')
# Polldaddy widget ID - from http://polldaddy.com/
# Use the widget rather than embedding a poll direct as it will allow the poll
# to be changed without having to alter the settings or HTML. If left blank
# then no poll will be shown.
POLLDADDY_WIDGET_ID = config.get( 'POLLDADDY_WIDGET_ID', None );
# RSS feed to the blog related to this site. If present will cause the 'Latest
# News' to appear on the homepage.
BLOG_RSS_FEED = config.get( 'BLOG_RSS_FEED', None )
THUMBNAIL_DEBUG = True
# ZA Hansard settings
HANSARD_CACHE = os.path.join( root_dir, 'hansard_cache' )
COMMITTEE_CACHE = os.path.join( HANSARD_CACHE, 'committee' )
ANSWER_CACHE = os.path.join( HANSARD_CACHE, 'answers' )
QUESTION_CACHE = os.path.join( HANSARD_CACHE, 'questions' )
ANSWER_JSON_CACHE = os.path.join( HANSARD_CACHE, 'answers_json' )
QUESTION_JSON_CACHE = os.path.join( HANSARD_CACHE, 'questions_json' )
PMG_COMMITTEE_USER = config.get('PMG_COMMITTEE_USER', '')
PMG_COMMITTEE_PASS = config.get('PMG_COMMITTEE_PASS', '')
PMG_API_KEY = config.get('PMG_API_KEY', '')
# Algorithm to use for matching names when scraping hansard
# NAME_SUBSTRING_MATCH
# - strips the title from the name and then searches for current politicians
# with names containing that string (used by Kenya).
# NAME_SET_INTERSECTION_MATCH
# - splits the name, including title, into words, and then compares the
# set of these words with similar sets from current politicians,
# looking for the largest intersection.
HANSARD_NAME_MATCHING_ALGORITHM = NAME_SET_INTERSECTION_MATCH
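# Illustrative sketch of the set-intersection match (hypothetical name, for
# clarity only):
#   set("Hon. Jane A. Doe".lower().split()) & set("jane doe".split())
#   -> {'jane', 'doe'}
# The politician whose name shares the largest such intersection with the
# scraped name is taken as the match.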
# Which popit instance to use
POPIT_API_URL = config.get('POPIT_API_URL')
BREADCRUMB_URL_NAME_MAPPINGS = {
'info' : ('Information', '/info/'),
'organisation' : ('Organisations', '/organisation/all/'),
'person' : ('Politicians', '/person/all/'),
'place' : ('Places', '/place/all/'),
'search' : ('Search', '/search/')
}
# Info page settings
INFO_POSTS_PER_LIST_PAGE = 10
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.gis',
'pombola.admin_additions',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'ajax_select',
'autocomplete_light',
'markitup',
'pipeline',
'mapit',
'pombola.images',
'sorl.thumbnail',
'haystack',
'pombola.slug_helpers',
'pombola.info',
'pombola.tasks',
'pombola.core',
'pombola.feedback',
'pombola.scorecards',
'pombola.search',
'pombola.file_archive',
'pombola.map',
'pombola.experiments',
'pombola.budgets',
'pagination',
'django_nose',
)
if config.get('DEBUG_TOOLBAR', True):
INSTALLED_APPS += ('debug_toolbar',)
def insert_after(sequence, existing_item, item_to_put_after):
"""A helper method for inserting an item after another in a sequence
This returns a new list with 'item_to_put_after' inserted after
'existing_item' in 'sequence'; this is useful for putting items
into the expected position in INSTALLED_APPS. Note that this will
return a list even if sequence is a tuple, but Django doesn't mind
if INSTALLED_APPS is a list."""
l = list(sequence)
i = l.index(existing_item)
l.insert(i + 1, item_to_put_after)
return l
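# Illustrative example (hypothetical app name):
#   insert_after(INSTALLED_APPS, 'pombola.core', 'pombola.example_app')
# returns a list identical to INSTALLED_APPS except that 'pombola.example_app'
# is placed immediately after 'pombola.core'.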
def make_enabled_features(installed_apps, all_optional_apps):
result = {}
for key in all_optional_apps:
key = re.sub(r'^pombola\.', '', key)
result[key] = ('pombola.' + key in installed_apps) or (key in installed_apps)
return result
# Set up the core CSS and JS files:
PIPELINE_CSS = {
'core': {
'source_filenames': (
# .css files from core:
'css/jquery-ui-1.8.17.custom.css',
),
'output_filename': 'css/core.css',
},
'countdown': {
'source_filenames': (
'css/jquery.countdown-v1.6.0.css',
'sass/countdown.scss',
),
'output_filename': 'css/countdown.css',
},
'admin': {
'source_filenames': (
# .scss files from core:
'sass/admin.scss',
),
'output_filename': 'css/admin.css',
},
}
# The packages in DYNAMICALLY_LOADED_PIPELINE_JS will all be loaded
# dynamically, and the only way we can do that without making changes
# to django-pipeline is to render the URLs that django-pipeline
# generates as Javascript array elements. So, keep these separate so
# that we can set a template that does that on each when including
# them in PIPELINE_JS.
DYNAMICALLY_LOADED_PIPELINE_JS = {
'desktop_only': {
'source_filenames': (
'js/libs/jquery-ui-1.8.17.custom.min.js',
'js/libs/jquery.ui.autocomplete.html.2010-10-25.js',
'js/libs/jquery.form-v2.94.js',
'js/desktop-functions.js',
),
'output_filename': 'js/desktop_only.js',
'template_name': 'pipeline/js-array.html',
},
'mobile_only': {
'source_filenames': (
'js/mobile-functions.js',
),
'output_filename': 'js/mobile_only.js',
'template_name': 'pipeline/js-array.html',
},
'desktop_and_mobile': {
'source_filenames': (
'js/twitter-embed.js',
),
'output_filename': 'js/desktop_and_mobile.js',
'template_name': 'pipeline/js-array.html',
},
'analytics': {
'source_filenames': (
'js/analytics.js',
),
'output_filename': 'js/analytics.js',
'template_name': 'pipeline/js-array.html',
},
'load-appearances': {
'source_filenames': (
'js/load-appearances.html',
),
'output_filename': 'js/load-appearances.js',
'template_name': 'pipeline/js-array.html',
},
'feeds': {
'source_filenames': (
'js/feeds.js',
),
'output_filename': 'js/feeds.js',
'template_name': 'pipeline/js-array.html',
},
'countdown': {
'source_filenames': (
'js/libs/jquery.countdown-v1.6.0.js',
),
'output_filename': 'js/countdown.js',
'template_name': 'pipeline/js-array.html',
},
'responsive-carousel': {
'source_filenames': (
'js/libs/responsive-carousel.js',
),
'output_filename': 'js/responsive-carousel.js',
'template_name': 'pipeline/js-array.html',
},
'map': {
'source_filenames': (
'js/map-drilldown.js',
),
'output_filename': 'js/map.js',
'template_name': 'pipeline/js-array.html',
},
}
PIPELINE_JS = {
'google-map': {
'source_filenames': (
'js/map.js',
),
'output_filename': 'js/google-map.js',
},
'modernizr_and_loader': {
'source_filenames': (
'js/libs/modernizr.js',
'js/loader.js',
),
'output_filename': 'js/modernizr_and_loader.js',
},
'hide-reveal': {
'source_filenames': (
'js/hide-reveal.js',
),
'output_filename': 'js/hide-reveal.js',
'template_name': 'pipeline/js-array.html',
},
}
for package_name, package in DYNAMICALLY_LOADED_PIPELINE_JS.items():
package['template_name'] = 'pipeline/js-array.html'
PIPELINE_JS[package_name] = package
# Only for debugging compression (the default is: 'not DEBUG' which is
# fine when not experimenting with compression)
# PIPELINE_ENABLED = True
PIPELINE_COMPILERS = (
'pipeline_compass.compass.CompassCompiler',
)
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
PIPELINE_YUI_BINARY = '/usr/bin/env yui-compressor'
PIPELINE_DISABLE_WRAPPER = True
EXCLUDE_FROM_SEARCH = ()
# Settings for bleach, used by sayit to determine what html is allowed
BLEACH_ALLOWED_TAGS = [
'a', 'abbr', 'b', 'i', 'u', 'span', 'sub', 'sup', 'br',
'p',
'ol', 'ul', 'li',
'table', 'caption', 'tr', 'th', 'td',
]
BLEACH_ALLOWED_ATTRIBUTES = {
'*': [ 'id', 'title' ], # class, style
'a': [ 'href' ],
'li': [ 'value' ],
}
BLEACH_STRIP_TAGS = True
INFO_PAGES_ALLOW_RAW_HTML = False
if config.get('EMAIL_SETTINGS', None):
EMAIL_HOST = config.get('EMAIL_HOST', '')
EMAIL_HOST_USER = config.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = config.get('EMAIL_HOST_PASSWORD', '')
port = config.get('EMAIL_PORT', None)
if port:
EMAIL_PORT = port
EMAIL_USE_TLS = config.get('EMAIL_USE_TLS', False)
| agpl-3.0 | 4,693,962,837,680,868,000 | 31.6624 | 98 | 0.654845 | false | 3.346009 | true | false | false |
gbanegas/KissECC | attack_ecc/old/window_attack.py | 1 | 4626 | import random
import math
from itertools import product
from itertools import chain
from thread_sum import ThreadSum
q = 2**252 + 27742317777372353535851937790883648493
r = []
v = []
alpha = []
def int_to_bin(number):
return [int(x) for x in bin(number)[2:]]
def bin_to_int(bit_list):
output = 0
for bit in bit_list:
output = output * 2 + bit
return output
def groupsof(n, xs):
if len(xs) < n:
return [xs]
else:
return chain([xs[0:n]], groupsof(n, xs[n:]))
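# Illustrative examples of the helpers above (not part of the original script):
#   int_to_bin(11)                     -> [1, 0, 1, 1]
#   bin_to_int([1, 0, 1, 1])           -> 11
#   list(groupsof(2, [1, 2, 3, 4, 5])) -> [[1, 2], [3, 4], [5]]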
class WindowAttack(object):
def generate_v_values(self, d, N):
for i in xrange(0, N):
value = d + (alpha[i]*q)
v.append(value)
def generate_alpha_js(self, N):
for i in xrange(1, N+1):
al = r[i] - r[0]
alpha.append(int(math.fabs(al)))
def generate_r_js(self, n, N):
for i in xrange(0, N+1):
a = random.getrandbits(n)
r.append(int(math.fabs(a)))
def bit_flip_random(self, bit_list, randomized_bits):
bit_list_t = bit_list[:]
pos_list = []
if len(bit_list) < randomized_bits:
raise Exception("Randomized bigger then d+(a*r)")
        print "Length: ", len(bit_list)
for i in xrange(0, randomized_bits):
pos_bit_to_flip = random.randint(0, len(bit_list)-1)
while(pos_bit_to_flip in pos_list):
pos_bit_to_flip = random.randint(0, len(bit_list)-1)
pos_list.append(pos_bit_to_flip)
if bit_list_t[pos_bit_to_flip] == 1:
bit_list_t[pos_bit_to_flip] = 0
else:
bit_list_t[pos_bit_to_flip] = 1
return bit_list_t
def generate_v_values_with_bit_flip(self, d, N, randomized_bits):
for i in xrange(0, N):
value = d + (alpha[i]*q)
bit_list = int_to_bin(value)
#print len(bit_list)
bit_list_flipped = self.bit_flip_random(bit_list, randomized_bits)
value_flipped = bin_to_int(bit_list_flipped)
v.append(value_flipped)
def sum_all_ds(self, d_candidates, interval, mod_value, N):
pairs = {}
number_of_threads = 4
ds = list(groupsof(len(d_candidates)/number_of_threads, d_candidates))
#ds = zip(*[iter(d_candidates)]*number_of_threads)
threads = []
#print "DS: ", len(ds)
for i in xrange(0, number_of_threads):
threads.append(ThreadSum(i, ds[i], v, alpha, N, mod_value, interval))
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
key, d = t.return_result()
try:
                if pairs[key] is not None:
val = pairs[key]
if val.count(1) > d.count(1):
pairs[key] = d
except Exception as e:
pairs[key] = d
#print pairs
#print key
#print pairs.keys()
return min(pairs.keys()) , pairs
    def test_d(self, d, to_test):
        """Test a candidate for d. Here it is a direct comparison with the
        original d; in a real attack it could instead compare a ciphertext
        produced with the original key against one produced with the candidate."""
return (d==to_test)
def wide_widow_attack(self, d, window_size = 10, n = 512, N = 200, randomized_bits = 30):
self.generate_r_js(n, N)
self.generate_alpha_js(N)
self.generate_v_values_with_bit_flip(d, N, randomized_bits)
print "d = ", int_to_bin(d), " len: ", len(int_to_bin(d))
print "Starting...."
w_prime = 0
w = window_size
d_prime = 0
variations = []
for i in product([0,1], repeat=window_size):
variations.append(list(i))
while(w < (n + window_size + window_size)):
print "w: ", w
print "w_prime: ", w_prime
mod_value = 2**w
d_prime = d_prime % mod_value
d_prime_bin = int_to_bin(d_prime)
to_iterate = []
for variation in variations:
to_iterate.append(variation+d_prime_bin)
sum_d , d_candidate = self.sum_all_ds(to_iterate, w, mod_value, N)
d_prime = bin_to_int(d_candidate[sum_d])
print "sum: ", sum_d, " d_candidate = ", int_to_bin(d_prime)
w_prime = w
w = w + window_size
if self.test_d(d, d_prime):
w = w+n
if (d == d_prime):
print "FOUND KEY."
else:
print "SORRY"
print "Finished."
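# Minimal illustrative driver (assumed; the original entry point is not part of
# this file, and the secret scalar below is just a random example):
#   attack = WindowAttack()
#   attack.wide_widow_attack(d=random.getrandbits(252), window_size=10,
#                            n=512, N=200, randomized_bits=30)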
| apache-2.0 | 1,589,489,777,004,982,000 | 30.684932 | 97 | 0.521401 | false | 3.292527 | false | false | false |
cloudsigma/cloud-init | cloudinit/handlers/boot_hook.py | 6 | 2676 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
LOG = logging.getLogger(__name__)
BOOTHOOK_PREFIX = "#cloud-boothook"
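# A boothook part is user-data whose first line is the prefix above; e.g.
# (illustrative content only):
#
#   #cloud-boothook
#   #!/bin/sh
#   echo "instance booted" >> /var/log/example-boothook.log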
class BootHookPartHandler(handlers.Handler):
def __init__(self, paths, datasource, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
self.boothook_dir = paths.get_ipath("boothooks")
self.instance_id = None
if datasource:
self.instance_id = datasource.get_instance_id()
def list_types(self):
return [
handlers.type_from_starts_with(BOOTHOOK_PREFIX),
]
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
filepath = os.path.join(self.boothook_dir, filename)
contents = util.strip_prefix_suffix(util.dos2unix(payload),
prefix=BOOTHOOK_PREFIX)
util.write_file(filepath, contents.lstrip(), 0700)
return filepath
def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
payload, frequency): # pylint: disable=W0613
if ctype in handlers.CONTENT_SIGNALS:
return
filepath = self._write_part(payload, filename)
try:
env = os.environ.copy()
if self.instance_id is not None:
env['INSTANCE_ID'] = str(self.instance_id)
util.subp([filepath], env=env)
except util.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
util.logexc(LOG, "Boothooks unknown error when running %s",
filepath)
| gpl-3.0 | 7,836,307,426,370,143,000 | 36.690141 | 77 | 0.649851 | false | 3.77433 | false | false | false |
apenwarr/bup | cmd/tag-cmd.py | 8 | 1981 | #!/usr/bin/env python
"""Tag a commit in the bup repository.
Creating a tag on a commit can be used for avoiding automatic cleanup from
removing this commit due to old age.
"""
import sys
import os
from bup import git, options
from bup.helpers import *
handle_ctrl_c()
optspec = """
bup tag
bup tag <tag name> <commit>
bup tag -d <tag name>
--
d,delete= Delete a tag
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
git.check_repo_or_die()
if opt.delete:
tag_file = git.repo('refs/tags/%s' % opt.delete)
debug1("tag file: %s\n" % tag_file)
if not os.path.exists(tag_file):
log("bup: error: tag '%s' not found.\n" % opt.delete)
sys.exit(1)
try:
os.unlink(tag_file)
except OSError, e:
log("bup: error: unable to delete tag '%s': %s" % (opt.delete, e))
sys.exit(1)
sys.exit(0)
tags = [t for sublist in git.tags().values() for t in sublist]
if not extra:
for t in tags:
print t
sys.exit(0)
elif len(extra) < 2:
o.fatal('no commit ref or hash given.')
(tag_name, commit) = extra[:2]
if not tag_name:
o.fatal("tag name must not be empty.")
debug1("args: tag name = %s; commit = %s\n" % (tag_name, commit))
if tag_name in tags:
log("bup: error: tag '%s' already exists\n" % tag_name)
sys.exit(1)
if tag_name.startswith('.'):
o.fatal("'%s' is not a valid tag name." % tag_name)
try:
hash = git.rev_parse(commit)
except git.GitError, e:
log("bup: error: %s" % e)
sys.exit(2)
if not hash:
log("bup: error: commit %s not found.\n" % commit)
sys.exit(2)
pL = git.PackIdxList(git.repo('objects/pack'))
if not pL.exists(hash):
log("bup: error: commit %s not found.\n" % commit)
sys.exit(2)
tag_file = git.repo('refs/tags/%s' % tag_name)
try:
tag = file(tag_file, 'w')
except OSError, e:
log("bup: error: could not create tag '%s': %s" % (tag_name, e))
sys.exit(3)
tag.write(hash.encode('hex'))
tag.close()
| lgpl-2.1 | -7,389,819,531,498,158,000 | 21.770115 | 74 | 0.614841 | false | 2.747573 | false | false | false |
ricardaw/pismdev | examples/ross/prognostic/preprocess_prog.py | 5 | 7797 | #!/usr/bin/env python
# Import all necessary modules here so that if it fails, it fails early.
try:
import netCDF4 as NC
except:
import netCDF3 as NC
import subprocess
import numpy as np
import os
smb_name = "climatic_mass_balance"
temp_name = "ice_surface_temp"
def run(commands):
"""Run a list of commands (or one command given as a string)."""
if isinstance(commands, (list, tuple)):
for cmd in commands:
print "Running '%s'..." % cmd
subprocess.call(cmd.split(' '))
else:
run([commands])
def preprocess_ice_velocity():
"""
Download and preprocess the ~95Mb Antarctic ice velocity dataset from NASA MEASURES project
http://nsidc.org/data/nsidc-0484.html
"""
url = "ftp://[email protected]/pub/DATASETS/nsidc0484_MEASURES_antarc_vel_V01/"
input_filename = "Antarctica_ice_velocity.nc"
output_filename = os.path.splitext(input_filename)[0] + "_cutout.nc"
commands = ["wget -nc %s%s.gz" % (url, input_filename), # NSIDC supports compression on demand!
"gunzip %s.gz" % input_filename,
"ncrename -d nx,x -d ny,y -O %s %s" % (input_filename, input_filename)
]
if not os.path.exists(input_filename):
run(commands)
nc = NC.Dataset(input_filename, 'a')
# Create x and y coordinate variables and set projection parameters; cut
# out the Ross area.
# Metadata provided with the dataset describes the *full* grid, so it is a
# lot easier to modify this file instead of adding grid information to the
# "cutout" file.
if 'x' not in nc.variables and 'y' not in nc.variables:
nx = nc.nx
ny = nc.ny
x_min = float(nc.xmin.strip().split(' ')[0])
y_max = float(nc.ymax.strip().split(' ')[0])
x_max = y_max
y_min = x_min
x = np.linspace(x_min, x_max, nx)
y = np.linspace(y_max, y_min, ny)
nc.projection = "+proj=stere +ellps=WGS84 +datum=WGS84 +lon_0=0 +lat_0=-90 +lat_ts=-71 +units=m"
try:
x_var = nc.createVariable('x', 'f8', ('x',))
y_var = nc.createVariable('y', 'f8', ('y',))
except:
x_var = nc.variables['x']
y_var = nc.variables['y']
x_var[:] = x
y_var[:] = y
x_var.units = "meters"
x_var.standard_name = "projection_x_coordinate"
y_var.units = "meters"
y_var.standard_name = "projection_y_coordinate"
nc.close()
if not os.path.exists(output_filename):
cmd = "ncks -d x,2200,3700 -d y,3500,4700 -O %s %s" % (input_filename, output_filename)
run(cmd)
nc = NC.Dataset(output_filename, 'a')
# fix units of 'vx' and 'vy'
nc.variables['vx'].units = "m / year"
nc.variables['vy'].units = "m / year"
# Compute and save the velocity magnitude
if 'magnitude' not in nc.variables:
vx = nc.variables['vx'][:]
vy = nc.variables['vy'][:]
v_magnitude = np.zeros_like(vx)
v_magnitude = np.sqrt(vx**2 + vy**2)
magnitude = nc.createVariable('v_magnitude', 'f8', ('y', 'x'))
magnitude.units = "m / year"
magnitude[:] = v_magnitude
nc.close()
return output_filename
def preprocess_albmap():
"""
Download and preprocess the ~16Mb ALBMAP dataset from http://doi.pangaea.de/10.1594/PANGAEA.734145
"""
url = "http://store.pangaea.de/Publications/LeBrocq_et_al_2010/ALBMAPv1.nc.zip"
input_filename = "ALBMAPv1.nc"
output_filename = os.path.splitext(input_filename)[0] + "_cutout.nc"
commands = ["wget -nc %s" % url, # download
"unzip -n %s.zip" % input_filename, # unpack
"ncks -O -d x1,439,649 -d y1,250,460 %s %s" % (input_filename, output_filename), # cut out
"ncks -O -v usrf,lsrf,topg,temp,acca %s %s" % (output_filename, output_filename), # trim
"ncrename -O -d x1,x -d y1,y -v x1,x -v y1,y %s" % output_filename, # fix metadata
"ncrename -O -v temp,%s -v acca,%s %s" % (temp_name, smb_name, output_filename)]
run(commands)
nc = NC.Dataset(output_filename, 'a')
# fix acab
acab = nc.variables[smb_name]
acab.units = "m / year"
acab.standard_name = "land_ice_surface_specific_mass_balance"
SMB = acab[:]
SMB[SMB == -9999] = 0
acab[:] = SMB
# fix artm and topg
nc.variables[temp_name].units = "Celsius"
nc.variables["topg"].standard_name = "bedrock_altitude"
# compute ice thickness
if 'thk' not in nc.variables:
usrf = nc.variables['usrf'][:]
lsrf = nc.variables['lsrf'][:]
thk = nc.createVariable('thk', 'f8', ('y', 'x'))
thk.units = "meters"
thk.standard_name = "land_ice_thickness"
thk[:] = usrf - lsrf
nc.projection = "+proj=stere +ellps=WGS84 +datum=WGS84 +lon_0=0 +lat_0=-90 +lat_ts=-71 +units=m"
nc.close()
# Remove usrf and lsrf variables:
command = "ncks -x -v usrf,lsrf -O %s %s" % (output_filename, output_filename)
run(command)
return output_filename
def final_corrections(filename):
"""
* replaces missing values with zeros
* computes Dirichlet B.C. locations
"""
nc = NC.Dataset(filename, 'a')
# replace missing values with zeros
for var in ['u_ssa_bc', 'v_ssa_bc', 'magnitude']:
tmp = nc.variables[var][:]
tmp[tmp.mask == True] = 0
nc.variables[var][:] = tmp
thk = nc.variables['thk'][:]
topg = nc.variables['topg'][:]
# compute the grounded/floating mask:
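    # (added note) the test below is the standard flotation criterion: a column
    # is grounded where topg + thk > (1 - rho_ice/rho_seawater) * thk, i.e.
    # where the bed is high enough that ice of this thickness cannot float.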
mask = np.zeros(thk.shape, dtype='i')
rho_ice = 910.0
rho_seawater = 1028.0
ice_free = 0
grounded = 1
floating = 2
My, Mx = thk.shape
for j in xrange(My):
for i in xrange(Mx):
if topg[j,i] + thk[j,i] > 0 + (1 - rho_ice/rho_seawater) * thk[j,i]:
mask[j,i] = grounded
else:
if thk[j,i] < 1:
mask[j,i] = ice_free
else:
mask[j,i] = floating
# compute the B.C. locations:
bcflag_var = nc.createVariable('bcflag', 'i', ('y', 'x'))
bcflag_var[:] = mask == grounded
# mark floating cells next to grounded ones too:
row = np.array([-1, 0, 1, -1, 1, -1, 0, 1])
col = np.array([-1, -1, -1, 0, 0, 1, 1, 1])
for j in xrange(1, My-1):
for i in xrange(1, Mx-1):
nearest = mask[j + row, i + col]
if mask[j,i] == floating and np.any(nearest == grounded):
bcflag_var[j,i] = 1
topg[j,i]=-2000
#modifications for prognostic run
tempma = nc.variables[temp_name][:]
for j in xrange(My):
for i in xrange(Mx):
if bcflag_var[j,i] == 0:
topg[j,i]=-2000 # to avoid grounding
if tempma[j,i] > -20.0:
tempma[j,i]=-20.0 # to adjust open ocean temperatures
nc.variables[temp_name][:] = tempma
nc.variables['topg'][:] = topg
nc.close()
if __name__ == "__main__":
velocity = preprocess_ice_velocity()
albmap = preprocess_albmap()
albmap_velocity = os.path.splitext(albmap)[0] + "_velocity.nc" # ice velocity on the ALBMAP grid
output = "Ross_combined_prog.nc"
commands = ["nc2cdo.py %s" % velocity,
"nc2cdo.py %s" % albmap,
"cdo remapbil,%s %s %s" % (albmap, velocity, albmap_velocity),
"ncks -x -v mask -O %s %s" % (albmap, output),
"ncks -v vx,vy,v_magnitude -A %s %s" % (albmap_velocity, output),
"ncrename -v vx,u_ssa_bc -v vy,v_ssa_bc -v v_magnitude,magnitude -O %s" % output]
run(commands)
final_corrections(output)
| gpl-2.0 | 247,885,660,459,141,630 | 31.219008 | 106 | 0.559446 | false | 3.086698 | false | false | false |
karulont/combopt | project8/concord.py | 1 | 4102 | from sys import argv, stdout
import networkx as nx
import subprocess as proc
import os
INFTY = 999999
def read_rpp(fname):
with open(fname) as f:
lines = [s for s in [l.strip() for l in f] if s and not s.startswith('#')]
n, m = lines[0].split()
print('N:', n, 'M:', m)
edges = [(int(u), int(v)) for u, v in [p.split('-') for p in lines[2].split()]]
costs = {e: c for e, c in zip(edges, [int(c) for c in lines[3].split()])}
required = [e for e, r in zip(edges, [c == '1' for c in lines[4].split()]) if r]
return edges, costs, required
def rpp_to_atsp(rpp):
""" Converts an RPP problem into an asymmetric TSP problem.
See section 4.10 in http://logistik.bwl.uni-mainz.de/Dateien/LM-2012-03.pdf. """
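    # Sketch of the reduction (added comment): every *required* edge (u, v)
    # becomes one ATSP "city"; the cost from city (u, v) to city (s, t) is
    # cost(u, v) plus the cheapest path from v to s in the full graph, and the
    # diagonal is INFTY so a city cannot follow itself.  An ATSP tour then
    # gives an order in which to serve the required edges.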
edges, costs, required = rpp
g = nx.Graph()
g.add_edges_from([(u, v, {'cost': c}) for (u, v), c in costs.items()])
atsp = []
spaths = {}
for u, v in required:
lens = []
for s, t in required:
if u == s and v == t:
lens.append(INFTY)
else:
spath = nx.shortest_path(g, v, s, 'cost')
spath = [e if e in costs else e[::-1] for e in zip(spath[:-1], spath[1:])]
spaths[v, s] = spath
print('spaths[' + str(v) + ', ' + str(s) + '] = ' + str(spath))
lens.append(costs[u, v] + sum(costs[e] for e in spath))
atsp.append(lens)
return atsp, spaths
def save_atsp_as_tsp(atsp, fname):
""" Transforms an asymmetric TSP of size n into a symmetric TSP of size 2n and saves it in TSPLIB format.
See http://en.wikipedia.org/wiki/Travelling_salesman_problem#Solving_by_conversion_to_symmetric_TSP. """
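    # Layout of the 2n x 2n symmetric instance written below (added comment):
    # node i (0..n-1) gets a "ghost" node n+i; ghost row n+i carries the ATSP
    # row for i with a 0 entry pairing i with its own ghost, while original-
    # original and ghost-ghost distances are INFTY, so a cheap tour alternates
    # node/ghost and encodes a directed ATSP tour.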
n = len(atsp)
with open(fname, 'wt') as f:
f.write(
'NAME: ' + fname + '\n'
'TYPE: TSP\n'
'DIMENSION: ' + str(n * 2) + '\n'
'EDGE_WEIGHT_TYPE: EXPLICIT\n'
'EDGE_WEIGHT_FORMAT: LOWER_DIAG_ROW\n'
'EDGE_WEIGHT_SECTION\n')
for i in range(n):
f.write(' '.join([str(INFTY)] * (i + 1)) + '\n')
for i in range(n):
row = [str(d) for d in atsp[i]]
row[i] = '0'
f.write(' '.join(row + [str(INFTY)] * (i + 1)) + '\n')
def solve_atsp(atsp, name, concorde):
# Concorde cannot solve ATSP, so we need to transform to TSP first.
tsp_file = name + '.tsp'
save_atsp_as_tsp(atsp, tsp_file)
sol_file = name + '.tsp.sol'
if os.path.exists(sol_file):
os.remove(sol_file)
stdout.flush()
proc.call([concorde, '-x', '-o', sol_file, tsp_file])
with open(sol_file) as f:
tour = [int(s) for s in f.read().split()[1:]]
n = len(atsp)
if tour[1] - tour[0] != n:
tour = (tour[1:] + tour[0:1])[::-1]
for i, j in zip(tour[::2], tour[1::2]):
if j - i != n:
raise Exception('ERROR: Invalid ATSP tour produced by CONCORDE, (i, j) = ' + str((i, j)))
return tour[::2]
def atsp_sol_to_rpp_sol(rpp, atsp_tour, spaths):
edges, costs, required = rpp
rpp_tour = []
for i1, i2 in zip(atsp_tour, atsp_tour[1:] + atsp_tour[0:1]):
e1 = required[i1]
e2 = required[i2]
rpp_tour.append(e1)
rpp_tour += spaths[e1[1], e2[0]]
#rpp_tour = [(0, 1), (1, 2), (2, 3), (2, 3), (2, 4), (4, 5), (5, 6), (6, 7), (3, 7), (3, 4), (4, 10), (9, 10), (8, 9), (8, 9), (0, 9)]
#for e in rpp_tour:
#print(str(e) + ': ' + str(1 if e in required else 0))
print('RPP tour:', rpp_tour)
print('RPP tour cost:', sum(costs[e] for e in rpp_tour))
return [edges.index(e) for e in rpp_tour]
def main():
fname = argv[1] if len(argv) > 1 else 'I01.grp'
concorde = argv[2] if len(argv) > 2 else 'concorde'
print('Reading RPP instance...')
rpp = read_rpp(fname)
print('Costs:', rpp[1])
print('Required:', rpp[2])
print('Transforming to ATSP...')
atsp, aux = rpp_to_atsp(rpp)
print('ATSP:')
[print(r) for r in atsp]
print('Solving with CONCORDE...')
atsp_tour = solve_atsp(atsp, fname, concorde)
print('ATSP tour:', atsp_tour)
sol = atsp_sol_to_rpp_sol(rpp, atsp_tour, aux)
print(sol)
if __name__ == '__main__':
main() | mit | -5,280,329,697,371,602,000 | 26.5 | 136 | 0.547782 | false | 2.636247 | false | false | false |
rafaelolg/visiondataset | visiondataset/datasets/migrations/0002_auto__add_dataset__add_datum.py | 1 | 5921 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Dataset'
db.create_table('datasets_dataset', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_time', self.gf('django.db.models.fields.TimeField')(auto_now_add=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('datasets', ['Dataset'])
# Adding model 'Datum'
db.create_table('datasets_datum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dataset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['datasets.Dataset'])),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('creation_time', self.gf('django.db.models.fields.TimeField')(auto_now_add=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=256)),
))
db.send_create_signal('datasets', ['Datum'])
def backwards(self, orm):
# Deleting model 'Dataset'
db.delete_table('datasets_dataset')
# Deleting model 'Datum'
db.delete_table('datasets_datum')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'datasets.dataset': {
'Meta': {'object_name': 'Dataset'},
'creation_time': ('django.db.models.fields.TimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'datasets.datum': {
'Meta': {'object_name': 'Datum'},
'creation_time': ('django.db.models.fields.TimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['datasets.Dataset']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['datasets'] | gpl-3.0 | -3,515,394,332,634,719,700 | 61.336842 | 182 | 0.561561 | false | 3.738005 | false | false | false |
leasunhy/GalaxyOJ | manager.py | 1 | 1996 | import os
from flask.ext.script import Manager, Shell, Command
from flask.ext.migrate import Migrate, MigrateCommand
from server import app, db
from server.models import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
def make_shell_context():
return dict(app=app, db=db, **model_dict)
manager.add_command('shell', Shell(make_context=make_shell_context))
@manager.command
def add_data():
# add a mock user
u = User(login_name='leasunhy', email='[email protected]')
# add there problems
p1 = Problem()
p1.title = 'A+B'
p1.problem_desc = 'Given two numbers, calculate their sum.'
p1.input_desc = 'A single line containing two integers separated by a space.'
p1.output_desc = 'A single line containing the sum.'
p1.sample_input = '1 2'
p1.sample_output = '3'
p1.source = 'Classical'
p2 = Problem(title='A-B')
p3 = Problem(title='A*B')
# add a contest
import datetime
c = Contest(title='Newbie Corner')
c.start_time = datetime.datetime.now()
c.end_time = c.start_time + datetime.timedelta(1)
c.owner = u
c.problems.append(p2)
# add a submission
s = Submission()
s.owner = u
s.problem = p1
s.compiler_id = 1
s.verdict = 'Accepted'
s.time_usage = 100
s.memory_usage = 600
s.code_length = 233
# add posts
po1 = Tutorial(title='Introduction to Dynamic Programming', content='Abandon.')
po1.create_time = datetime.datetime.now()
po1.owner = u
po2 = Notification(title='Air pollution detected.', content='Evacuate. NOW!')
po2.create_time = datetime.datetime.now()
po2.owner = u
po2.importance = 233
po3 = Solution(title='How to attack A+B?', content='Hand calculate.')
po3.create_time = datetime.datetime.now()
po3.owner = u
po3.problem = p1
db.session.add_all([u, p1, p2, p3, c, s, po1, po2, po3])
db.session.commit()
if __name__ == '__main__':
manager.run()
| gpl-3.0 | -5,505,434,221,279,720,000 | 25.613333 | 83 | 0.649299 | false | 3.109034 | false | false | false |
hariharaselvam/djangotraining | ecommerce/settings.py | 1 | 3406 | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'il)j1ja&yz35pjspxh$kf5v)4g(37b-)egng#*x5%(yosvh26f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
BROKER_URL = 'redis://localhost:6379/0'
REST_FRAMEWORK = {'PAGE_SIZE':5}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products',
'tweets',
'rest_framework',
'debug_toolbar',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | -1,010,263,903,655,032,300 | 25.403101 | 91 | 0.688197 | false | 3.49692 | false | false | false |
openearth/PyWPS | pywps/utils.py | 1 | 2556 | import tempfile
import json
import os
import shapely.wkt
import shapely.geometry
def first_from_filename(filename):
"""read the first geometry from filename"""
import ogr
ds = ogr.Open(filename)
layer = ds.GetLayer(0)
feature = layer.GetFeature(0)
geometry = feature.geometry()
wkt = geometry.ExportToWkt()
return shapely.wkt.loads(wkt)
def first_from_bytes(bytes):
"""read the first geometry from bytes"""
import ogr
with tempfile.NamedTemporaryFile(prefix="pywpsInput",dir=os.curdir) as f:
open(f.name, 'w').write(bytes)
ds = ogr.Open(f.name)
layer = ds.GetLayer(0)
feature = layer.GetFeature(0)
geometry = feature.geometry()
wkt = geometry.ExportToWkt()
return shapely.wkt.loads(wkt)
def decode(file_or_text):
"""combine several decoders to read geo data
>>> location_wkt = "POINT(54 2)"
>>> location_json = '{ "type": "LineString", "coordinates": [[51.0, 3.0], [52.0, 3.1]] }'
>>> location_gml = '''<?xml version="1.0" encoding="utf-8" ?>
... <root
... xmlns:gml="http://www.opengis.net/gml"
... >
... <gml:featureMember>
... <gml:geometryProperty>
... <gml:Point >
... <gml:coordinates>54,3.1</gml:coordinates>
... </gml:Point>
... </gml:geometryProperty>
... </gml:featureMember>
... </root>
... '''
>>> for location in [location_wkt, location_json, location_gml]:
... decode(location).type
'Point'
'LineString'
'Point'
"""
# decoders for file or text
decoders = {
True: [
lambda x: shapely.geometry.shape(json.loads(open(x,'r').read())),
lambda x: shapely.wkt.loads(open(x, 'r').read()),
first_from_filename
],
False: [
lambda x: shapely.geometry.shape(json.loads(x)),
shapely.wkt.loads,
first_from_bytes
]
}
for decoder in decoders[os.path.isfile(file_or_text)]:
try:
# try all the decoders and stop if it works
geom = decoder(file_or_text)
break
except:
# no worries, keep trying
pass
else:
# we have not found a working decoder
if os.path.isfile(file_or_text):
raise ValueError("could not decode %r from %s" % (open(file_or_text).read(), file_or_text))
else:
raise ValueError("could not decode %s" % (file_or_text, ))
return geom
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-2.0 | -2,127,957,431,719,990,300 | 29.428571 | 103 | 0.571987 | false | 3.6 | false | false | false |
earlideal/jiandan | misc/untitled.py | 1 | 2220 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(178, 80)
self.horizontalLayout_2 = QtGui.QHBoxLayout(Form)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.verticalLayout.addWidget(self.pushButton)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.horizontalLayout.addWidget(self.pushButton_3)
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.horizontalLayout.addWidget(self.pushButton_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton.setText(_translate("Form", "PushButton", None))
self.pushButton_3.setText(_translate("Form", "PushButton", None))
self.pushButton_2.setText(_translate("Form", "PushButton", None))
| apache-2.0 | 4,534,596,966,765,934,600 | 37.947368 | 79 | 0.703153 | false | 4.228571 | false | false | false |
belokop/indico_bare | indico/modules/categories/models/legacy_mapping.py | 1 | 1531 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.string import return_ascii
class LegacyCategoryMapping(db.Model):
"""Legacy category ID mapping
Legacy categories have non-numeric IDs which are not supported by
any new code. This mapping maps them to proper integer IDs to
avoid breaking things.
"""
__tablename__ = 'legacy_id_map'
__table_args__ = {'schema': 'categories'}
legacy_category_id = db.Column(
db.String,
primary_key=True,
index=True
)
category_id = db.Column(
db.Integer,
primary_key=True,
autoincrement=False
)
@return_ascii
def __repr__(self):
return '<LegacyCategoryMapping({}, {})>'.format(self.legacy_category_id, self.category_id)
| gpl-3.0 | 4,220,421,249,217,486,300 | 31.574468 | 98 | 0.700196 | false | 3.997389 | false | false | false |
JohnLunzer/flexx | flexx/ui/widgets/_bokeh.py | 1 | 4204 | """
Simple example:
.. UIExample:: 300
import numpy as np
from bokeh.plotting import figure
from flexx import app, ui, event
x = np.linspace(0, 6, 50)
p1 = figure()
p1.line(x, np.sin(x))
p2 = figure()
p2.line(x, np.cos(x))
class Example(ui.Widget):
def init(self):
with ui.BoxPanel():
ui.BokehWidget(plot=p1)
ui.BokehWidget(plot=p2)
"""
import os
from ... import event, app
from ...pyscript.stubs import window, Bokeh
from . import Widget
def _load_bokeh(ext):
import bokeh.resources
dev = os.environ.get('BOKEH_RESOURCES', '') == 'relative-dev'
res = bokeh.resources.bokehjsdir()
if dev:
res = os.path.abspath(os.path.join(bokeh.__file__,
'..', '..', 'bokehjs', 'build'))
modname = 'bokeh' if dev else 'bokeh.min'
filename = os.path.join(res, ext, modname + '.' + ext)
return open(filename, 'rb').read().decode()
def _load_bokeh_js():
return _load_bokeh('js')
def _load_bokeh_css():
return _load_bokeh('css')
# Associate Bokeh asset, but in a "lazy" way, so that we don't attempt to
# import bokeh until the user actually instantiates a BokehWidget.
app.assets.associate_asset(__name__, 'bokeh.js', _load_bokeh_js)
app.assets.associate_asset(__name__, 'bokeh.css', _load_bokeh_css)
class BokehWidget(Widget):
""" A widget that shows a Bokeh plot object.
For Bokeh 0.12 and up. The plot's ``sizing_mode`` property is set to
``stretch_both`` unless it was set to something other than ``fixed``. Other
    responsive modes are 'scale_width', 'scale_height' and 'scale_both', which
all keep aspect ratio while being responsive in a certain direction.
"""
CSS = """
.flx-BokehWidget > .plotdiv {
overflow: hidden;
}
"""
@event.prop
def plot(self, plot=None):
""" The Bokeh plot object to display. In JS, this prop
provides the corresponding backbone model.
"""
try:
from bokeh.models import Plot
except ImportError:
from bokeh.models import PlotObject as Plot
if plot is None:
return None
if not isinstance(plot, Plot):
raise ValueError('%s.plot must be a Bokeh plot object.' % self.id)
# The sizing_mode is fixed by default, but that's silly in this context
if plot.sizing_mode == 'fixed':
plot.sizing_mode = 'stretch_both'
self._plot_components(plot)
return plot
@event.emitter
def _plot_components(self, plot):
from bokeh.embed import components
script, div = components(plot)
script = '\n'.join(script.strip().split('\n')[1:-1])
return dict(script=script, div=div, id=plot.ref['id'])
class JS:
@event.prop
def plot(self, plot=None):
return plot
@event.connect('_plot_components')
def __set_plot_components(self, *events):
ev = events[-1]
# Embed div
self.node.innerHTML = ev.div
# "exec" code
el = window.document.createElement('script')
el.innerHTML = ev.script
self.node.appendChild(el)
#eval(script)
# Get plot from id in next event-loop iter
def getplot():
self.plot = Bokeh.index[ev.id]
self.plot.model.document.resize()
#self.real_size._set(self.real_size) ???
window.setTimeout(getplot, 10)
@event.connect('size')
def __resize_plot(self, *events):
if self.plot and self.parent:
if self.plot.resize_width_height:
# Bokeh <= 0.11
cstyle = window.getComputedStyle(self.parent.node)
use_x = cstyle['overflow-x'] not in ('auto', 'scroll')
use_y = cstyle['overflow-y'] not in ('auto', 'scroll')
self.plot.resize_width_height(use_x, use_y)
else:
self.plot.model.document.resize()
| bsd-2-clause | 6,902,889,755,763,184,000 | 30.848485 | 79 | 0.558516 | false | 3.801085 | false | false | false |
Tafkas/fritzbox-munin | fritzbox_power_consumption.py | 1 | 3244 | #!/usr/bin/env python3
# coding=utf-8
"""
fritzbox_power_consumption - A munin plugin for Linux to monitor AVM Fritzbox
Copyright (C) 2015 Christian Stade-Schuldt
Author: Christian Stade-Schuldt
Like Munin, this plugin is licensed under the GNU GPL v2 license
http://www.opensource.org/licenses/GPL-2.0
Add the following section to your munin-node's plugin configuration:
[fritzbox_*]
env.fritzbox_ip [ip address of the fritzbox]
env.fritzbox_username [fritzbox username]
env.fritzbox_password [fritzbox password]
This plugin supports the following munin configuration parameters:
#%# family=auto contrib
#%# capabilities=autoconf
"""
import json
import os
import sys
import fritzbox_helper as fh
PAGE = "energy"
DEVICES = ["system", "cpu", "wifi", "dsl", "ab", "usb"]
def get_power_consumption():
"""get the current power consumption usage"""
server = os.environ["fritzbox_ip"]
username = os.environ["fritzbox_username"]
password = os.environ["fritzbox_password"]
session_id = fh.get_session_id(server, username, password)
xhr_data = fh.get_xhr_content(server, session_id, PAGE)
data = json.loads(xhr_data)
devices = data["data"]["drain"]
for i, device in enumerate(DEVICES):
print("%s.value %s" % (device, devices[i]["actPerc"]))
def print_config():
print("graph_title AVM Fritz!Box Power Consumption")
print("graph_vlabel %")
print("graph_category system")
print("graph_order system cpu wifi dsl ab usb")
print("system.label system")
print("system.type GAUGE")
print("system.graph LINE12")
print("system.min 0")
print("system.max 100")
print("system.info Fritzbox overall power consumption")
print("cpu.label cpu")
print("cpu.type GAUGE")
print("cpu.graph LINE1")
print("cpu.min 0")
print("cpu.max 100")
print("cpu.info Fritzbox central processor power consumption")
print("wifi.label wifi")
print("wifi.type GAUGE")
print("wifi.graph LINE1")
print("wifi.min 0")
print("wifi.max 100")
print("wifi.info Fritzbox wifi power consumption")
print("dsl.label dsl")
print("dsl.type GAUGE")
print("dsl.graph LINE1")
print("dsl.min 0")
print("dsl.max 100")
print("dsl.info Fritzbox dsl power consumption")
print("ab.label ab")
print("ab.type GAUGE")
print("ab.graph LINE1")
print("ab.min 0")
print("ab.max 100")
print("ab.info Fritzbox analog phone ports power consumption")
print("usb.label usb")
print("usb.type GAUGE")
print("usb.graph LINE1")
print("usb.min 0")
print("usb.max 100")
print("usb.info Fritzbox usb devices power consumption")
if os.environ.get("host_name"):
print("host_name " + os.environ["host_name"])
if __name__ == "__main__":
if len(sys.argv) == 2 and sys.argv[1] == "config":
print_config()
elif len(sys.argv) == 2 and sys.argv[1] == "autoconf":
print("yes")
elif len(sys.argv) == 1 or len(sys.argv) == 2 and sys.argv[1] == "fetch":
# Some docs say it'll be called with fetch, some say no arg at all
try:
get_power_consumption()
except:
sys.exit("Couldn't retrieve fritzbox power consumption")
| gpl-2.0 | -9,206,741,028,250,949,000 | 31.44 | 79 | 0.655055 | false | 3.300102 | false | false | false |
saschwarz/django-stw | stw/templatetags/shrinkthewebtags.py | 1 | 3582 | """
Django template tags for inserting Shrink The Web images into templates.
There is one templatetag:
- stwimage - supports all free and PRO features.
- shrinkthewebimage - the original image insertion templatetag that implements
the STW preview feature. This is DEPRECATED.
"""
from collections import OrderedDict
from six.moves.urllib import parse
from django.conf import settings
from django import template
class STWConfigError(template.TemplateSyntaxError):
pass
class FormatSTWImageNode(template.Node):
def __init__(self, url, alt, **kwargs):
self.url = url
self.alt = alt
params = OrderedDict()
# load defaults if any
params.update(settings.SHRINK_THE_WEB)
if 'stwembed' not in kwargs:
params['stwembed'] = 1 # default to image
# overwrite defaults for this tag instance
params.update(kwargs)
self.kwargs = params
self._validate()
@classmethod
def _resolve(cls, var, context):
"""if var is a string then return it otherwise use it to lookup a value in the current context"""
if var[0] == var[-1] and var[0] in ('"', "'"):
var = var[1:-1] # a string
else:
var = template.Variable(var).resolve(context)
return var
def _validate(self):
if 'stwaccesskeyid' not in self.kwargs:
raise STWConfigError("'stwaccesskeyid' must be defined in settings.SHRINK_THE_WEB")
def render(self, context):
url = self._resolve(self.url, context)
alt = self._resolve(self.alt, context)
encoded = parse.urlencode(self.kwargs)
if encoded:
encoded += '&'
result = '''<img src="https://images.shrinktheweb.com/xino.php?{0}stwurl={1}" alt="{2}"/>'''.format(encoded, url, alt)
return result
def do_stwimage(parser, token):
"""
Key value based templatetag supporting all STW features for Free and PRO accounts.
Usage::
{% load shrinkthewebtags %}
{% stwimage url alt key-value-pairs %}
Where:
``url``
is expected to be a variable instantiated from the context
or a quoted string to be used explicitly.
``key-value-pairs``
matching STW API values i.e. stwembed=0 stwinside=1
minimal validation of key value pairs is performed
Examples::
Given a template context variable "author" with attributes "url" and
"description" the following are valid entries in a template file:
{% load shrinkthewebtags %}
    get an image following the full url (not just the top level page), wait
    5 seconds, and return the image in large size (this requires a license
    with PRO features):
{% stwimage author.url author.description stwinside=1 stwdelay=5 stwsize=lrg %}
"""
bits = token.split_contents()
if len(bits) < 3:
raise template.TemplateSyntaxError("'{}' tag takes at least 2 arguments".format(bits[0]))
# process keyword args
kwargs = {}
for bit in bits[3:]:
key, value = bit.split("=")
        if value == '':
raise template.TemplateSyntaxError("'{0}' tag keyword: {1} has no argument".format(bits[0], key))
if key.startswith('stw'):
kwargs[str(key)] = value
else:
raise template.TemplateSyntaxError("'{0}' tag keyword: {1} is not a valid STW keyword".format(bits[0], key))
return FormatSTWImageNode(url=bits[1], alt=bits[2] , **kwargs)
register = template.Library()
register.tag('stwimage', do_stwimage)
| mit | 6,499,853,531,888,666,000 | 32.166667 | 127 | 0.635678 | false | 4.047458 | false | false | false |
jtwhite79/pyemu | examples/Freyberg_Truth/Process_output.py | 1 | 1603 | from __future__ import print_function
import flopy
from flopy import utils as fu
import platform
import numpy as np
if 'window' in platform.platform().lower():
newln = '\n'
else:
newln = '\r\n'
print ('Starting to read HYDMOD data')
obs = flopy.utils.HydmodObs('freyberg.hyd.bin')
times = obs.get_times()
read_obsnames = obs.get_obsnames()
with open('freyberg.heads', 'w') as ofp:
ofp.write('obsname value{0}'.format(newln))
for coutname in read_obsnames:
if coutname.startswith('HDI001o'):
cv = obs.get_data(obsname=coutname,totim=times[1])
ofp.write('{0:20s} {1:15.6E} {2}'.format(coutname+'c', cv[0][1], newln))
for coutname in read_obsnames:
cv = obs.get_data(obsname=coutname,totim=times[2])
ofp.write('{0:20s} {1:15.6E} {2}'.format(coutname+'f', cv[0][-1], newln))
print('Now read River flux from the LIST file')
lst = fu.MfListBudget('freyberg.list')
RIV_flux = lst.get_incremental()['RIVER_LEAKAGE_IN']-lst.get_incremental()['RIVER_LEAKAGE_OUT']
with open('freyberg.rivflux', 'w') as ofp:
ofp.write('obsname value{0}'.format(newln))
ofp.write('rivflux_cal {1:15.6E}{0}rivflux_fore {2:15.6E}{0}'.format(newln, RIV_flux[0], RIV_flux[1]))
print('Finally read endpoint file to get traveltime')
endpoint_file = 'freyberg.mpenpt'
lines = open(endpoint_file, 'r').readlines()
items = lines[-1].strip().split()
travel_time = float(items[4]) - float(items[3])
with open('freyberg.travel', 'w') as ofp:
ofp.write('travetime {0:15.6e}{1}'.format(travel_time, newln))
print('Completed processing model output')
| bsd-3-clause | -6,881,194,321,184,488,000 | 33.847826 | 108 | 0.666251 | false | 2.694118 | false | false | false |
skytoup/AppServer | app/blueprints/apps.py | 1 | 6419 | # -*- coding: utf-8 -*-
# Created by apple on 2017/2/5.
import os
from ..log import log
from ..config import Config
from sqlalchemy import func, desc
from sanic import Blueprint
from sanic.request import Request
from sanic.response import text
from ..exceptions import BadRequest
from ..utils import JsonResult, Regex, Date, DB
from sanic.views import HTTPMethodView
from ..db import Session, AppModel, AppVersionModel
apps_blueprint = Blueprint('apps', 'apps')
@apps_blueprint.route('/<app_type:iOS|android|all>/page/<page:int>', ['GET'])
async def get_apps(request: Request, app_type: str, page: int):
"""
    Get apps
    - uri[app type (all/iOS/android)-app_type: str, page number (from 1)-page: int], format[time in seconds-t: int]
:param request:
:return:
"""
time = Date.time2datetime(request.args.get('t'))
if not time:
raise BadRequest('')
if page <= 0:
        log.debug('page needs to be greater than zero')
raise BadRequest('')
kw = request.args.get('kw')
session = Session()
query = session.query(AppModel, AppVersionModel.version_code, AppVersionModel.version_name,
func.max(AppVersionModel.create_at).label('_update_at')) \
.join(AppVersionModel, AppModel.id == AppVersionModel.app_id) \
.filter(AppModel.create_at <= time)
    if app_type != 'all':  # filter by package type
query = query.filter(AppModel.type == app_type)
if kw:
query = query.filter(AppModel.name.like('%{}%'.format(kw)))
result = query.order_by(desc(AppModel.create_at)) \
.group_by(AppModel.short_chain_uri_) \
.offset((page - 1) * Config.apps_limit) \
.limit(Config.apps_limit) \
.all()
datas = []
for app, version_code, version_name, _ in result:
app.version_code = version_code
app.version_name = version_name
datas.append(app)
return JsonResult.ok(datas).response_json()
class AppsView(HTTPMethodView):
@staticmethod
async def options(request: Request, app_id: int):
return text('', headers={
'Access-Control-Allow-Methods': 'GET,PUT,DELETE,OPTIONS',
'Access-Control-Max-Age:': '62400',
})
@staticmethod
async def get(request: Request, app_id: int):
"""
        Get app details
- uri[app_id: int]
:param request:
:param app_id:
:return:
"""
session = Session()
query = DB.model_exists(session, AppModel, id=app_id)
if not query:
            raise BadRequest('could not find app id: {}'.format(app_id))
app = query.one()
return JsonResult.ok(app).response_json()
@staticmethod
async def delete(request: Request, app_id: int):
"""
        Delete an app
- uri[app_id: int]
:param request:
:param app_id:
:return:
"""
session = Session()
app_query = DB.model_exists(session, AppModel, id=app_id)
if not app_query:
            raise BadRequest('could not find app id: {}'.format(app_id))
        # delete the icon
app = app_query.one()
os.remove(app.icon_)
        # delete all versions of the app
app_version_query = session.query(AppVersionModel).filter(AppVersionModel.app_id == app_id)
for model in app_version_query.all():
os.remove(model.package_)
app_version_query.delete()
        # delete the app
app_query.delete()
session.commit()
log.info('did delete app id: {}'.format(app_id))
return JsonResult.ok().response_json()
@staticmethod
async def put(request: Request, app_id: int):
"""
        Modify app info
        - uri[app_id: int], json(at least one parameter required)[name: str, short_chain: str, detail: str]
:param request:
:param app_id:
:return:
"""
json = request.json
if not isinstance(json, dict):
            log.debug('json is not a dict')
raise BadRequest('')
name = json['name'].strip() if isinstance(json.get('name'), str) else None
short_chain = json['short_chain'].strip() if isinstance(json.get('short_chain'), str) else None
detail = json['detail'].strip() if isinstance(json.get('detail'), str) else None
if not (name or short_chain) and detail is None:
            log.debug('need at least one of name, short chain or detail')
raise BadRequest('')
session = Session()
query = DB.model_exists(session, AppModel, id=app_id)
if not query:
            raise BadRequest('could not find app id: {}'.format(app_id))
if short_chain:
if not Regex.ShortChina.match(short_chain):
log.debug(
                    'short chain length needs to be 5-15 characters, using only letters, numbers and underscores')
raise BadRequest(
                    'short chain length needs to be 5-15 characters, using only letters, numbers and underscores')
elif session.query(AppModel).filter(AppModel.short_chain_uri_ == short_chain,
AppModel.id != app_id).count() != 0:
                log.debug('short chain already exists')
                raise BadRequest('short chain already exists')
app = query.one()
if name:
app.name = name
if short_chain:
app.short_chain_uri_ = short_chain
if detail is not None:
app.detail = detail
session.commit()
log.debug('did modify app: {}, {} - {} - {}'.format(app.package_name, name, short_chain, detail))
return JsonResult.ok().response_json()
apps_blueprint.add_route(AppsView.as_view(), '/<app_id:int>')
# @apps_blueprint.route('/search', ['GET'])
# async def search(request: Request):
# time = Date.time2datetime(request.args.get('t'))
# if not time:
# raise BadRequest('')
#
# page = request.args.get('page')
# if page <= 0:
# log.debug('page need greater zero')
# raise BadRequest('')
#
# kw = request.args.get('kw')
# if not kw:
# raise BadRequest('')
#
# app_type = request.args.get('type')
#
# session = Session()
# session.query(AppModel).filter(AppModel.create_at <= time, AppModel.type == app_type) \
# .offset((page - 1) * Config.apps_limit) \
# .limit(Config.apps_limit) \
# .all()
# session.commit()
| mit | 5,114,168,623,798,925,000 | 31.592784 | 117 | 0.580104 | false | 3.568284 | true | false | false |
karolisr/krpy | krpy/tools/check-synonymy-for-ncbi-records.py | 2 | 5302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
# from __future__ import unicode_literals
if __name__ == '__main__':
import os
import argparse
from krpy import krseqsearch
from krpy import krio
from krpy import krbioio
from krpy import krseq
from krpy import krbionames
from krpy import krcl
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', type=unicode,
help='')
parser.add_argument('-o', '--output_file', type=unicode,
help='')
parser.add_argument('-l', '--log_dir', type=unicode,
help='')
parser.add_argument('-n', '--ncbi_names_file', type=unicode,
help='')
parser.add_argument('-s', '--synonymy_file', type=unicode,
help='')
parser.add_argument('-u', '--unresolvable_taxonomy_file', type=unicode,
help='')
parser.add_argument('-k', '--keeplist_taxonomy_file', type=unicode,
help='')
parser.add_argument('-t', '--taxa_mappings_file', type=unicode,
help='')
parser.add_argument('-a', '--authority_file', type=unicode,
help='')
parser.add_argument('-c', '--hacks', type=unicode,
help='')
parser.add_argument('-d', '--hacks_data_location', type=unicode,
help='')
# record, ncbi_names_table, synonymy_table, auth_file,
# hacks, hacks_data_location, unresolvable_taxonomy_list,
# keeplist_taxonomy_list, taxa_mappings_list, log_dir
args = parser.parse_args()
hacks = None
hacks_data_location = None
if args.hacks:
hacks = args.hacks.split(',')
if args.hacks_data_location:
hacks_data_location = dict()
for i, hack in enumerate(hacks):
hacks_data_location[hack] = args.hacks_data_location.split(',')[i]
ncbi_names_table = None
if args.ncbi_names_file:
ncbi_names_table = krio.read_table_file(
path=args.ncbi_names_file,
has_headers=False,
headers=('tax_id', 'name_txt', 'unique_name', 'name_class'),
delimiter='\t|',
quotechar=None,
stripchar='"',
rettype='dict')
synonymy_table = None
if args.synonymy_file:
synonymy_table = krio.read_table_file(
path=args.synonymy_file,
has_headers=True, headers=None, delimiter=',')
unresolvable_taxonomy_list = None
if args.unresolvable_taxonomy_file:
unresolvable_taxonomy_list = krio.read_table_file(
path=args.unresolvable_taxonomy_file,
has_headers=True,
headers=None,
delimiter=',',
quotechar=None,
stripchar='"',
rettype='dict')
keeplist_taxonomy_list = None
if args.keeplist_taxonomy_file:
keeplist_taxonomy_list = krio.read_table_file(
path=args.keeplist_taxonomy_file,
has_headers=False,
headers=None,
delimiter=',',
quotechar=None,
stripchar='"',
rettype='set')
taxa_mappings_list = None
if args.taxa_mappings_file:
taxa_mappings_list = krio.read_table_file(
path=args.taxa_mappings_file,
has_headers=False,
headers=('accession', 'taxon'),
delimiter='\t',
quotechar=None,
stripchar='"',
rettype='dict')
input_file = None
output_file = None
authority_file = None
log_dir = None
if args.input_file:
input_file = args.input_file
if args.output_file:
output_file = args.output_file
if args.authority_file:
authority_file = args.authority_file
if args.log_dir:
log_dir = args.log_dir
records = krbioio.read_sequence_file(input_file, 'gb', ret_type='list')
ps = os.path.sep
tax_log_handle = krseqsearch.__tax_log_open(log_dir, ps)
tax_log_html_handle = krseqsearch.__tax_log_html_open(log_dir, ps)
#########
krcl.hide_cursor()
for i, record in enumerate(records):
krcl.print_progress(i, len(records), 50, '')
name = krseqsearch.check_organism_name(
record,
ncbi_names_table,
synonymy_table,
authority_file,
hacks,
hacks_data_location,
unresolvable_taxonomy_list,
keeplist_taxonomy_list,
taxa_mappings_list,
tax_log_handle,
tax_log_html_handle)
# tn = name[0]
an = name[1]
an_flat = krbionames.flatten_organism_name(an, ' ')
record.annotations['organism_old'] = record.annotations['organism']
record.annotations['organism'] = an_flat
record.annotations['source'] = an_flat
record.description = record.description.replace(record.annotations['organism'], '')
record.description = record.description.strip()
krcl.show_cursor()
#########
krseqsearch.__tax_log_close(tax_log_handle)
krseqsearch.__tax_log_html_close(tax_log_html_handle)
krbioio.write_sequence_file(records, output_file, 'gb')
| gpl-3.0 | 5,662,015,182,590,594,000 | 30.372781 | 91 | 0.563372 | false | 3.651515 | false | false | false |
JasonLG1979/pithos | pithos/pandora/pandora.py | 1 | 23793 | # -*- coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
# Copyright (C) 2010 Kevin Mehall <[email protected]>
# Copyright (C) 2012 Christopher Eby <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pandora JSON v5 API
See http://6xq.net/playground/pandora-apidoc/json/ for API documentation.
"""
from .blowfish import Blowfish
# from Crypto.Cipher import Blowfish
from xml.dom import minidom
import re
import json
import logging
import time
import urllib.request, urllib.parse, urllib.error
import codecs
import ssl
import os
from enum import IntEnum
from socket import error as SocketError
from . import data
HTTP_TIMEOUT = 30
USER_AGENT = 'pithos'
RATE_BAN = 'ban'
RATE_LOVE = 'love'
RATE_NONE = None
class ApiError(IntEnum):
INTERNAL_ERROR = 0
MAINTENANCE_MODE = 1
URL_PARAM_MISSING_METHOD = 2
URL_PARAM_MISSING_AUTH_TOKEN = 3
URL_PARAM_MISSING_PARTNER_ID = 4
URL_PARAM_MISSING_USER_ID = 5
SECURE_PROTOCOL_REQUIRED = 6
CERTIFICATE_REQUIRED = 7
PARAMETER_TYPE_MISMATCH = 8
PARAMETER_MISSING = 9
PARAMETER_VALUE_INVALID = 10
API_VERSION_NOT_SUPPORTED = 11
COUNTRY_NOT_SUPPORTED = 12
INSUFFICIENT_CONNECTIVITY = 13
UNKNOWN_METHOD_NAME = 14
WRONG_PROTOCOL = 15
READ_ONLY_MODE = 1000
INVALID_AUTH_TOKEN = 1001
INVALID_LOGIN = 1002
LISTENER_NOT_AUTHORIZED = 1003
USER_NOT_AUTHORIZED = 1004
MAX_STATIONS_REACHED = 1005
STATION_DOES_NOT_EXIST = 1006
COMPLIMENTARY_PERIOD_ALREADY_IN_USE = 1007
CALL_NOT_ALLOWED = 1008
DEVICE_NOT_FOUND = 1009
PARTNER_NOT_AUTHORIZED = 1010
INVALID_USERNAME = 1011
INVALID_PASSWORD = 1012
USERNAME_ALREADY_EXISTS = 1013
DEVICE_ALREADY_ASSOCIATED_TO_ACCOUNT = 1014
UPGRADE_DEVICE_MODEL_INVALID = 1015
EXPLICIT_PIN_INCORRECT = 1018
EXPLICIT_PIN_MALFORMED = 1020
DEVICE_MODEL_INVALID = 1023
ZIP_CODE_INVALID = 1024
BIRTH_YEAR_INVALID = 1025
BIRTH_YEAR_TOO_YOUNG = 1026
# FIXME: They can't both be 1027?
# INVALID_COUNTRY_CODE = 1027
# INVALID_GENDER = 1027
DEVICE_DISABLED = 1034
DAILY_TRIAL_LIMIT_REACHED = 1035
INVALID_SPONSOR = 1036
USER_ALREADY_USED_TRIAL = 1037
PLAYLIST_EXCEEDED = 1039
# Catch all for undocumented error codes
UNKNOWN_ERROR = 100000
@property
def title(self):
# Turns RANDOM_ERROR into Pandora Error: Random Error
return 'Pandora Error: {}'.format(self.name.replace('_', ' ').title())
@property
def sub_message(self):
value = self.value
if value == 1:
return 'Pandora is performing maintenance.\nTry again later.'
elif value == 12:
return ('Pandora is not available in your country.\n'
'If you wish to use Pandora you must configure your system or Pithos proxy accordingly.')
elif value == 13:
return ('Out of sync. Correct your system\'s clock.\n'
'If the problem persists it may indicate a Pandora API change.\nA Pithos update may be required.')
if value == 1000:
return 'Pandora is in read-only mode.\nTry again later.'
elif value == 1002:
return 'Invalid username or password.'
elif value == 1003:
return 'A Pandora One account is required to access this feature.\nUncheck "Pandora One" in Settings.'
elif value == 1005:
return ('You have reached the maximum number of stations.\n'
'To add a new station you must first delete an existing station.')
elif value == 1010:
return 'Invalid Pandora partner keys.\nA Pithos update may be required.'
elif value == 1023:
return 'Invalid Pandora device model.\nA Pithos update may be required.'
elif value == 1039:
return 'You have requested too many playlists.\nTry again later.'
else:
return None
PLAYLIST_VALIDITY_TIME = 60*60
NAME_COMPARE_REGEX = re.compile(r'[^A-Za-z0-9]')
class PandoraError(IOError):
def __init__(self, message, status=None, submsg=None):
self.status = status
self.message = message
self.submsg = submsg
class PandoraAuthTokenInvalid(PandoraError): pass
class PandoraNetError(PandoraError): pass
class PandoraAPIVersionError(PandoraError): pass
class PandoraTimeout(PandoraNetError): pass
def pad(s, l):
return s + b'\0' * (l - len(s))
class Pandora:
"""Access the Pandora API
To use the Pandora class, make sure to call :py:meth:`set_audio_quality`
and :py:meth:`connect` methods.
Get information from Pandora using:
- :py:meth:`get_stations` which populates the :py:attr:`stations` attribute
- :py:meth:`search` to find songs to add to stations or create a new station with
- :py:meth:`json_call` call into the JSON API directly
"""
def __init__(self):
self.opener = self.build_opener()
self.connected = False
self.isSubscriber = False
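    # pandora_encrypt/pandora_decrypt implement the transport obfuscation used
    # by the JSON API: Blowfish in ECB mode over 8-byte blocks (zero-padded on
    # encrypt), hex-encoded on the wire; the keys are the partner encrypt and
    # decrypt keys installed by connect().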
def pandora_encrypt(self, s):
return b''.join([codecs.encode(self.blowfish_encode.encrypt(pad(s[i:i+8], 8)), 'hex_codec') for i in range(0, len(s), 8)])
def pandora_decrypt(self, s):
return b''.join([self.blowfish_decode.decrypt(pad(codecs.decode(s[i:i+16], 'hex_codec'), 8)) for i in range(0, len(s), 16)]).rstrip(b'\x08')
def json_call(self, method, args=None, https=False, blowfish=True):
if not args:
args = {}
url_arg_strings = []
if self.partnerId:
url_arg_strings.append('partner_id=%s'%self.partnerId)
if self.userId:
url_arg_strings.append('user_id=%s'%self.userId)
if self.userAuthToken:
url_arg_strings.append('auth_token=%s'%urllib.parse.quote_plus(self.userAuthToken))
elif self.partnerAuthToken:
url_arg_strings.append('auth_token=%s'%urllib.parse.quote_plus(self.partnerAuthToken))
url_arg_strings.append('method=%s'%method)
protocol = 'https' if https else 'http'
url = protocol + self.rpcUrl + '&'.join(url_arg_strings)
if self.time_offset:
args['syncTime'] = int(time.time()+self.time_offset)
if self.userAuthToken:
args['userAuthToken'] = self.userAuthToken
elif self.partnerAuthToken:
args['partnerAuthToken'] = self.partnerAuthToken
data = json.dumps(args).encode('utf-8')
logging.debug(url)
logging.debug(data)
if blowfish:
data = self.pandora_encrypt(data)
try:
req = urllib.request.Request(url, data, {'User-agent': USER_AGENT, 'Content-type': 'text/plain'})
with self.opener.open(req, timeout=HTTP_TIMEOUT) as response:
text = response.read().decode('utf-8')
except urllib.error.HTTPError as e:
logging.error("HTTP error: %s", e)
raise PandoraNetError(str(e))
except urllib.error.URLError as e:
logging.error("Network error: %s", e)
if e.reason.strerror == 'timed out':
raise PandoraTimeout("Network error", submsg="Timeout")
else:
raise PandoraNetError("Network error", submsg=e.reason.strerror)
except SocketError as e:
try:
error_string = os.strerror(e.errno)
except (TypeError, ValueError):
error_string = "Unknown Error"
logging.error("Network Socket Error: %s", error_string)
raise PandoraNetError("Network Socket Error", submsg=error_string)
logging.debug(text)
tree = json.loads(text)
if tree['stat'] == 'fail':
code = tree['code']
msg = tree['message']
try:
error_enum = ApiError(code)
except ValueError:
error_enum = ApiError.UNKNOWN_ERROR
logging.error('fault code: {} {} message: {}'.format(code, error_enum.name, msg))
if error_enum is ApiError.INVALID_AUTH_TOKEN:
raise PandoraAuthTokenInvalid(msg)
elif error_enum is ApiError.API_VERSION_NOT_SUPPORTED:
raise PandoraAPIVersionError(msg)
elif error_enum is ApiError.UNKNOWN_ERROR:
submsg = 'Undocumented Error Code: {}\n{}'.format(code, msg)
raise PandoraError(error_enum.title, code, submsg)
else:
submsg = error_enum.sub_message or 'Error Code: {}\n{}'.format(code, msg)
raise PandoraError(error_enum.title, code, submsg)
if 'result' in tree:
return tree['result']
def set_audio_quality(self, fmt):
"""Set the desired audio quality
Used by the :py:attr:`Song.audioUrl` property.
:param fmt: An audio quality format from :py:data:`pithos.pandora.data.valid_audio_formats`
"""
self.audio_quality = fmt
@staticmethod
def build_opener(*handlers):
"""Creates a new opener
Wrapper around urllib.request.build_opener() that adds
a custom ssl.SSLContext for use with internal-tuner.pandora.com
"""
ctx = ssl.create_default_context()
ctx.load_verify_locations(cadata=data.internal_cert)
https = urllib.request.HTTPSHandler(context=ctx)
return urllib.request.build_opener(https, *handlers)
def set_url_opener(self, opener):
self.opener = opener
def connect(self, client, user, password):
"""Connect to the Pandora API and log the user in
:param client: The client ID from :py:data:`pithos.pandora.data.client_keys`
:param user: The user's login email
:param password: The user's login password
"""
self.connected = False
self.partnerId = self.userId = self.partnerAuthToken = None
self.userAuthToken = self.time_offset = None
self.rpcUrl = client['rpcUrl']
self.blowfish_encode = Blowfish(client['encryptKey'].encode('utf-8'))
self.blowfish_decode = Blowfish(client['decryptKey'].encode('utf-8'))
partner = self.json_call('auth.partnerLogin', {
'deviceModel': client['deviceModel'],
'username': client['username'], # partner username
'password': client['password'], # partner password
'version': client['version']
},https=True, blowfish=False)
self.partnerId = partner['partnerId']
self.partnerAuthToken = partner['partnerAuthToken']
pandora_time = int(self.pandora_decrypt(partner['syncTime'].encode('utf-8'))[4:14])
self.time_offset = pandora_time - time.time()
logging.info("Time offset is %s", self.time_offset)
auth_args = {'username': user, 'password': password, 'loginType': 'user', 'returnIsSubscriber': True}
user = self.json_call('auth.userLogin', auth_args, https=True)
self.userId = user['userId']
self.userAuthToken = user['userAuthToken']
self.connected = True
self.isSubscriber = user['isSubscriber']
@property
def explicit_content_filter_state(self):
"""The User must already be authenticated before this is called.
returns the state of Explicit Content Filter and if the Explicit Content Filter is PIN protected
"""
get_filter_state = self.json_call('user.getSettings', https=True)
filter_state = get_filter_state['isExplicitContentFilterEnabled']
pin_protected = get_filter_state['isExplicitContentFilterPINProtected']
logging.info('Explicit Content Filter state: %s' %filter_state)
logging.info('PIN protected: %s' %pin_protected)
return filter_state, pin_protected
def set_explicit_content_filter(self, state):
"""The User must already be authenticated before this is called.
Does not take effect until the next playlist.
Valid desired states are True to enable and False to disable the Explicit Content Filter.
"""
self.json_call('user.setExplicitContentFilter', {'isExplicitContentFilterEnabled': state})
logging.info('Explicit Content Filter set to: %s' %(state))
def get_stations(self, *ignore):
stations = self.json_call('user.getStationList')['stations']
self.quickMixStationIds = None
self.stations = [Station(self, i) for i in stations]
if self.quickMixStationIds:
for i in self.stations:
if i.id in self.quickMixStationIds:
i.useQuickMix = True
return self.stations
def save_quick_mix(self):
stationIds = []
for i in self.stations:
if i.useQuickMix:
stationIds.append(i.id)
self.json_call('user.setQuickMix', {'quickMixStationIds': stationIds})
def search(self, query):
results = self.json_call(
'music.search',
{'includeGenreStations': True, 'includeNearMatches': True, 'searchText': query},
)
l = [SearchResult('artist', i) for i in results['artists'] if i['score'] >= 80]
l += [SearchResult('song', i) for i in results['songs'] if i['score'] >= 80]
l += [SearchResult('genre', i) for i in results['genreStations']]
l.sort(key=lambda i: i.score, reverse=True)
return l
def add_station_by_music_id(self, musicid):
d = self.json_call('station.createStation', {'musicToken': musicid})
station = Station(self, d)
if not self.get_station_by_id(station.id):
self.stations.append(station)
return station
def add_station_by_track_token(self, trackToken, musicType):
d = self.json_call('station.createStation', {'trackToken': trackToken, 'musicType': musicType})
station = Station(self, d)
if not self.get_station_by_id(station.id):
self.stations.append(station)
return station
def delete_station(self, station):
if self.get_station_by_id(station.id):
logging.info("pandora: Deleting Station")
self.json_call('station.deleteStation', {'stationToken': station.idToken})
self.stations.remove(station)
def get_station_by_id(self, id):
for i in self.stations:
if i.id == id:
return i
def add_feedback(self, trackToken, rating):
logging.info("pandora: addFeedback")
rating_bool = True if rating == RATE_LOVE else False
feedback = self.json_call('station.addFeedback', {'trackToken': trackToken, 'isPositive': rating_bool})
return feedback['feedbackId']
def delete_feedback(self, stationToken, feedbackId):
self.json_call('station.deleteFeedback', {'feedbackId': feedbackId, 'stationToken': stationToken})
class Station:
def __init__(self, pandora, d):
self.pandora = pandora
self.id = d['stationId']
self.idToken = d['stationToken']
self.isCreator = not d['isShared']
self.isQuickMix = d['isQuickMix']
self.isThumbprint = d.get('isThumbprint', False)
self.name = d['stationName']
self.useQuickMix = False
if self.isQuickMix:
self.pandora.quickMixStationIds = d.get('quickMixStationIds', [])
def transformIfShared(self):
if not self.isCreator:
logging.info("pandora: transforming station")
self.pandora.json_call('station.transformSharedStation', {'stationToken': self.idToken})
self.isCreator = True
def get_playlist(self):
logging.info("pandora: Get Playlist")
# Set the playlist time to the time we requested a playlist.
# It is better that a playlist be considered invalid a fraction
# of a sec early than be considered valid any longer than it actually is.
playlist_time = time.time()
playlist = self.pandora.json_call('station.getPlaylist', {
'stationToken': self.idToken,
'includeTrackLength': True,
'additionalAudioUrl': 'HTTP_32_AACPLUS,HTTP_128_MP3',
}, https=True)['items']
return [Song(self.pandora, i, playlist_time) for i in playlist if 'songName' in i]
@property
def info_url(self):
return 'http://www.pandora.com/stations/'+self.idToken
def rename(self, new_name):
if new_name != self.name:
self.transformIfShared()
logging.info("pandora: Renaming station")
self.pandora.json_call('station.renameStation', {'stationToken': self.idToken, 'stationName': new_name})
self.name = new_name
def delete(self):
self.pandora.delete_station(self)
def __repr__(self):
return '<{}.{} {} "{}">'.format(
__name__,
__class__.__name__,
self.id,
self.name,
)
class Song:
def __init__(self, pandora, d, playlist_time):
self.pandora = pandora
self.playlist_time = playlist_time
self.is_ad = None # None = we haven't checked, otherwise True/False
self.tired = False
self.message = ''
self.duration = None
self.position = None
self.bitrate = None
self.start_time = None
self.finished = False
self.feedbackId = None
self.bitrate = None
self.artUrl = None
self.album = d['albumName']
self.artist = d['artistName']
self.trackToken = d['trackToken']
self.rating = RATE_LOVE if d['songRating'] == 1 else RATE_NONE # banned songs won't play, so we don't care about them
self.stationId = d['stationId']
self.songName = d['songName']
self.songDetailURL = d['songDetailUrl']
self.songExplorerUrl = d['songExplorerUrl']
self.artRadio = d['albumArtUrl']
self.trackLength = d['trackLength']
self.trackGain = float(d.get('trackGain', '0.0'))
self.audioUrlMap = d['audioUrlMap']
# Optionally we requested more URLs
if len(d.get('additionalAudioUrl', [])) == 2:
if int(self.audioUrlMap['highQuality']['bitrate']) < 128:
# We can use the higher quality mp3 stream for non-one users
self.audioUrlMap['mediumQuality'] = self.audioUrlMap['highQuality']
self.audioUrlMap['highQuality'] = {
'encoding': 'mp3',
'bitrate': '128',
'audioUrl': d['additionalAudioUrl'][1],
}
else:
# And we can offer a lower bandwidth option for one users
self.audioUrlMap['lowQuality'] = {
'encoding': 'aacplus',
'bitrate': '32',
'audioUrl': d['additionalAudioUrl'][0],
}
# the actual name of the track, minus any special characters (except dashes) is stored
# as the last part of the songExplorerUrl, before the args.
explorer_name = self.songExplorerUrl.split('?')[0].split('/')[-1]
clean_expl_name = NAME_COMPARE_REGEX.sub('', explorer_name).lower()
clean_name = NAME_COMPARE_REGEX.sub('', self.songName).lower()
if clean_name == clean_expl_name:
self.title = self.songName
else:
try:
with urllib.request.urlopen(self.songExplorerUrl) as x, minidom.parseString(x.read()) as dom:
attr_value = dom.getElementsByTagName('songExplorer')[0].attributes['songTitle'].value
# Pandora stores their titles for film scores and the like as 'Score name: song name'
self.title = attr_value.replace('{0}: '.format(self.songName), '', 1)
except:
self.title = self.songName
@property
def audioUrl(self):
quality = self.pandora.audio_quality
try:
q = self.audioUrlMap[quality]
self.bitrate = q['bitrate']
logging.info("Using audio quality %s: %s %s", quality, q['bitrate'], q['encoding'])
return q['audioUrl']
except KeyError:
logging.warning("Unable to use audio format %s. Using %s",
quality, list(self.audioUrlMap.keys())[0])
self.bitrate = list(self.audioUrlMap.values())[0]['bitrate']
return list(self.audioUrlMap.values())[0]['audioUrl']
@property
def station(self):
return self.pandora.get_station_by_id(self.stationId)
def get_duration_sec(self):
if self.duration is not None:
return self.duration // 1000000000
else:
return self.trackLength
def get_position_sec(self):
if self.position is not None:
return self.position // 1000000000
else:
return 0
def rate(self, rating):
if self.rating != rating:
self.station.transformIfShared()
if rating == RATE_NONE:
if not self.feedbackId:
# We need a feedbackId, get one by re-rating the song. We
# could also get one by calling station.getStation, but
# that requires transferring a lot of data (all feedback,
# seeds, etc for the station).
opposite = RATE_BAN if self.rating == RATE_LOVE else RATE_LOVE
self.feedbackId = self.pandora.add_feedback(self.trackToken, opposite)
self.pandora.delete_feedback(self.station.idToken, self.feedbackId)
else:
self.feedbackId = self.pandora.add_feedback(self.trackToken, rating)
self.rating = rating
def set_tired(self):
if not self.tired:
self.pandora.json_call('user.sleepSong', {'trackToken': self.trackToken})
self.tired = True
def bookmark(self):
self.pandora.json_call('bookmark.addSongBookmark', {'trackToken': self.trackToken})
def bookmark_artist(self):
self.pandora.json_call('bookmark.addArtistBookmark', {'trackToken': self.trackToken})
@property
def rating_str(self):
return self.rating
def is_still_valid(self):
# Playlists are valid for 1 hour. A song is considered valid if there is enough time
# to play the remaining duration of the song before the playlist expires.
return ((time.time() + (self.get_duration_sec() - self.get_position_sec())) - self.playlist_time) < PLAYLIST_VALIDITY_TIME
def __repr__(self):
return '<{}.{} {} "{}" by "{}" from "{}">'.format(
__name__,
__class__.__name__,
self.trackToken,
self.title,
self.artist,
self.album,
)
class SearchResult:
def __init__(self, resultType, d):
self.resultType = resultType
self.score = d['score']
self.musicId = d['musicToken']
if resultType == 'song':
self.title = d['songName']
self.artist = d['artistName']
elif resultType == 'artist':
self.name = d['artistName']
elif resultType == 'genre':
self.stationName = d['stationName']
| gpl-3.0 | 5,187,091,006,426,525,000 | 38.262376 | 148 | 0.611819 | false | 3.825241 | false | false | false |
lablup/sorna-manager | tests/gateway/test_utils.py | 1 | 1688 | import asyncio
import pytest
from ai.backend.manager.models import verify_dotfile_name, verify_vfolder_name
from ai.backend.gateway.utils import (
call_non_bursty,
)
@pytest.mark.asyncio
async def test_call_non_bursty():
key = 'x'
execution_count = 0
async def execute():
nonlocal execution_count
await asyncio.sleep(0)
execution_count += 1
# ensure reset
await asyncio.sleep(0.11)
# check run as coroutine
execution_count = 0
with pytest.raises(TypeError):
await call_non_bursty(key, execute())
# check run as coroutinefunction
execution_count = 0
await call_non_bursty(key, execute)
assert execution_count == 1
await asyncio.sleep(0.11)
# check burstiness control
execution_count = 0
for _ in range(129):
await call_non_bursty(key, execute)
assert execution_count == 3
await asyncio.sleep(0.01)
await call_non_bursty(key, execute)
assert execution_count == 3
await asyncio.sleep(0.11)
await call_non_bursty(key, execute)
assert execution_count == 4
for _ in range(64):
await call_non_bursty(key, execute)
assert execution_count == 5
def test_vfolder_name_validator():
assert not verify_vfolder_name('.bashrc')
assert not verify_vfolder_name('.terminfo')
assert verify_vfolder_name('bashrc')
assert verify_vfolder_name('.config')
def test_dotfile_name_validator():
assert not verify_dotfile_name('.terminfo')
assert not verify_dotfile_name('.config')
assert not verify_dotfile_name('.ssh/authorized_keys')
assert verify_dotfile_name('.bashrc')
assert verify_dotfile_name('.ssh/id_rsa')
| lgpl-3.0 | 3,031,620,088,801,859,000 | 25.793651 | 78 | 0.675355 | false | 3.583864 | true | false | false |
virt-manager/virt-manager | virtManager/createconn.py | 2 | 9260 | # Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import glob
import os
import urllib.parse
from gi.repository import Gtk
from virtinst import log
from .lib import uiutil
from .baseclass import vmmGObjectUI
from .connmanager import vmmConnectionManager
(HV_QEMU,
HV_XEN,
HV_LXC,
HV_QEMU_SESSION,
HV_BHYVE,
HV_VZ,
HV_CUSTOM) = range(7)
def _default_uri(): # pragma: no cover
if os.path.exists('/var/lib/xen'):
if (os.path.exists('/dev/xen/evtchn') or
os.path.exists("/proc/xen")):
return 'xen:///'
if (os.path.exists("/usr/bin/qemu") or
os.path.exists("/usr/bin/qemu-kvm") or
os.path.exists("/usr/bin/kvm") or
os.path.exists("/usr/libexec/qemu-kvm") or
glob.glob("/usr/bin/qemu-system-*")):
return "qemu:///system"
if (os.path.exists("/usr/lib/libvirt/libvirt_lxc") or
os.path.exists("/usr/lib64/libvirt/libvirt_lxc")):
return "lxc:///"
return None
class vmmCreateConn(vmmGObjectUI):
@classmethod
def get_instance(cls, parentobj):
try:
if not cls._instance:
cls._instance = vmmCreateConn()
return cls._instance
except Exception as e: # pragma: no cover
parentobj.err.show_err(
_("Error launching connect dialog: %s") % str(e))
def __init__(self):
vmmGObjectUI.__init__(self, "createconn.ui", "vmm-open-connection")
self._cleanup_on_app_close()
self.builder.connect_signals({
"on_hypervisor_changed": self.hypervisor_changed,
"on_connect_remote_toggled": self.connect_remote_toggled,
"on_username_entry_changed": self.username_changed,
"on_hostname_changed": self.hostname_changed,
"on_cancel_clicked": self.cancel,
"on_connect_clicked": self.open_conn,
"on_vmm_open_connection_delete_event": self.cancel,
})
self.set_initial_state()
self.reset_state()
@staticmethod
def default_uri():
return _default_uri()
def cancel(self, ignore1=None, ignore2=None):
log.debug("Cancelling open connection")
self.close()
return 1
def close(self, ignore1=None, ignore2=None):
log.debug("Closing open connection")
self.topwin.hide()
def show(self, parent):
log.debug("Showing open connection")
if self.is_visible():
self.topwin.present()
return
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
def _cleanup(self):
pass
def set_initial_state(self):
self.widget("connect").grab_default()
combo = self.widget("hypervisor")
# [connection ID, label]
model = Gtk.ListStore(int, str)
def _add_hv_row(rowid, config_name, label):
if (not self.config.default_hvs or
not config_name or
config_name in self.config.default_hvs):
model.append([rowid, label])
_add_hv_row(HV_QEMU, "qemu", "QEMU/KVM")
_add_hv_row(HV_QEMU_SESSION, "qemu", "QEMU/KVM " + _("user session"))
_add_hv_row(HV_XEN, "xen", "Xen")
_add_hv_row(HV_LXC, "lxc", "Libvirt-LXC")
_add_hv_row(HV_BHYVE, "bhyve", "Bhyve")
_add_hv_row(HV_VZ, "vz", "Virtuozzo")
_add_hv_row(-1, None, "")
_add_hv_row(HV_CUSTOM, None, _("Custom URI..."))
combo.set_model(model)
uiutil.init_combo_text_column(combo, 1)
def sepfunc(model, it):
return model[it][0] == -1
combo.set_row_separator_func(sepfunc)
def reset_state(self):
self.set_default_hypervisor()
self.widget("autoconnect").set_sensitive(True)
self.widget("autoconnect").set_active(True)
self.widget("hostname").set_text("")
self.widget("connect-remote").set_active(False)
self.widget("username-entry").set_text("")
self.widget("uri-entry").set_text("")
self.connect_remote_toggled(self.widget("connect-remote"))
self.populate_uri()
def is_remote(self):
# Whether user is requesting a remote connection
return self.widget("connect-remote").get_active()
def set_default_hypervisor(self):
default = self.default_uri()
if not default or default.startswith("qemu"):
uiutil.set_list_selection(self.widget("hypervisor"), HV_QEMU)
elif default.startswith("xen"): # pragma: no cover
uiutil.set_list_selection(self.widget("hypervisor"), HV_XEN)
def hostname_changed(self, src_ignore):
self.populate_uri()
def hypervisor_changed(self, src):
ignore = src
hv = uiutil.get_list_selection(self.widget("hypervisor"))
is_session = hv == HV_QEMU_SESSION
is_custom = hv == HV_CUSTOM
show_remote = not is_session and not is_custom
uiutil.set_grid_row_visible(
self.widget("session-warning-box"), is_session)
uiutil.set_grid_row_visible(
self.widget("connect-remote"), show_remote)
uiutil.set_grid_row_visible(
self.widget("username-entry"), show_remote)
uiutil.set_grid_row_visible(
self.widget("hostname"), show_remote)
if not show_remote:
self.widget("connect-remote").set_active(False)
uiutil.set_grid_row_visible(self.widget("uri-label"), not is_custom)
uiutil.set_grid_row_visible(self.widget("uri-entry"), is_custom)
if is_custom:
label = self.widget("uri-label").get_text()
self.widget("uri-entry").set_text(label)
self.widget("uri-entry").grab_focus()
self.populate_uri()
def username_changed(self, src_ignore):
self.populate_uri()
def connect_remote_toggled(self, src_ignore):
is_remote = self.is_remote()
self.widget("hostname").set_sensitive(is_remote)
self.widget("autoconnect").set_active(not is_remote)
self.widget("username-entry").set_sensitive(is_remote)
if is_remote and not self.widget("username-entry").get_text():
self.widget("username-entry").set_text("root")
self.populate_uri()
def populate_uri(self):
uri = self.generate_uri()
self.widget("uri-label").set_text(uri)
def generate_uri(self):
hv = uiutil.get_list_selection(self.widget("hypervisor"))
host = self.widget("hostname").get_text().strip()
user = self.widget("username-entry").get_text()
is_remote = self.is_remote()
hvstr = ""
if hv == HV_XEN:
hvstr = "xen"
elif hv == HV_QEMU or hv == HV_QEMU_SESSION:
hvstr = "qemu"
elif hv == HV_BHYVE:
hvstr = "bhyve"
elif hv == HV_VZ:
hvstr = "vz"
else:
hvstr = "lxc"
addrstr = ""
if user:
addrstr += urllib.parse.quote(user) + "@"
if host.count(":") > 1:
host = "[%s]" % host
addrstr += host
if is_remote:
hoststr = "+ssh://" + addrstr + "/"
else:
hoststr = ":///"
uri = hvstr + hoststr
if hv in (HV_QEMU, HV_BHYVE, HV_VZ):
uri += "system"
elif hv == HV_QEMU_SESSION:
uri += "session"
return uri
def validate(self):
is_remote = self.is_remote()
host = self.widget("hostname").get_text()
if is_remote and not host:
msg = _("A hostname is required for remote connections.")
return self.err.val_err(msg)
return True
def _conn_open_completed(self, conn, ConnectError):
if not ConnectError:
self.close()
self.reset_finish_cursor()
return
msg, details, title = ConnectError
msg += "\n\n"
msg += _("Would you still like to remember this connection?")
remember = self.err.show_err(msg, details, title,
buttons=Gtk.ButtonsType.YES_NO,
dialog_type=Gtk.MessageType.QUESTION, modal=True)
self.reset_finish_cursor()
if remember:
self.close()
else:
vmmConnectionManager.get_instance().remove_conn(conn.get_uri())
def open_conn(self, ignore):
if not self.validate():
return
auto = False
if self.widget("autoconnect").get_sensitive():
auto = bool(self.widget("autoconnect").get_active())
if self.widget("uri-label").is_visible():
uri = self.generate_uri()
else:
uri = self.widget("uri-entry").get_text()
log.debug("Generate URI=%s, auto=%s", uri, auto)
conn = vmmConnectionManager.get_instance().add_conn(uri)
conn.set_autoconnect(auto)
if conn.is_active():
self._conn_open_completed(conn, None)
return
conn.connect_once("open-completed", self._conn_open_completed)
self.set_finish_cursor()
conn.open()
| gpl-2.0 | 4,736,887,359,726,736,000 | 31.264808 | 77 | 0.576242 | false | 3.56428 | false | false | false |
hildeth/chapel | util/test/send_email.py | 3 | 4970 | #!/usr/bin/env python
"""Portable email sender. Acts as replacement for mail, Mail, mailx,
email (cygwin). Message body is taken from stdin.
"""
from __future__ import print_function
import email.mime.text
import getpass
import logging
import optparse
import os
import smtplib
import socket
import sys
def main():
"""Parse command line arguments and send email!"""
args = _parse_args()
_setup_logging(args.verbose)
body = sys.stdin.read()
# Send the email!
send_email(args.recipients, body, args.subject, args.header, args.sender, args.smtp_host)
def send_email(recipients, body, subject=None, headers=None, sender=None, smtp_host=None):
"""Send email!
:arg recipients: list of recipients. If only one, may be a string.
:arg body: The email message body.
:arg subject: Optional subject. Defaults to ''.
:arg headers: Optional dict of headers to add.
:arg sender: Optional sender address. Defaults to <user>@<fqdn>
:arg smtp_host: Optional SMTP host. Defaults to 'localhost'.
"""
if isinstance(recipients, basestring):
recipients = [recipients]
sender = sender or _default_sender()
subject = subject or ''
smtp_host = smtp_host or _default_smtp_host()
msg = email.mime.text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ','.join(recipients)
if headers:
for key, value in headers.iteritems():
msg[key] = value
logging.debug('Opening connection to: {0}'.format(smtp_host))
smtp = smtplib.SMTP(smtp_host)
try:
logging.info('Sending email to: {0} from: {1} subject: {2}'.format(
','.join(recipients), sender, subject))
logging.debug('Email headers: {0}'.format(headers))
logging.debug('Email body length: {0}'.format(len(body)))
smtp.sendmail(sender, recipients, msg.as_string())
finally:
smtp.quit()
def _parse_headers(option, opt, value, parser, *args, **kwargs):
"""OptionParser callback function for parsing header values passed by user.
It takes values that have commas (e.g. the user specified
[email protected],Precedence=bulk), breaks them apart and adds the
individual name/value pairs to the dict of values.
"""
# Get the existing values the parser knows about for this particular
# option.
value_dict = getattr(parser.values, option.dest, None) or {}
# Split the value provided.
parsed_vals = value.split(',')
for v in parsed_vals:
key, value = v.split('=')
value_dict[key] = value
# Set the updated dict to the oiption value.
setattr(parser.values, option.dest, value_dict)
def _default_sender():
"""Return default sender address, which is <user>@<hostname>."""
return '{0}@{1}'.format(getpass.getuser(), socket.getfqdn())
def _default_smtp_host():
"""Return default smtp host, which is localhost unless CHPL_UTIL_SMTP_HOST is
set in environment.
"""
return os.environ.get('CHPL_UTIL_SMTP_HOST', 'localhost')
def _parse_args():
"""Parse and return command line arguments."""
class NoWrapHelpFormatter(optparse.IndentedHelpFormatter):
"""Help formatter that does not wrap the description text."""
def _format_text(self, text):
return text
parser = optparse.OptionParser(
usage='usage: %prog [options] recipient_email [...]',
description=__doc__,
formatter=NoWrapHelpFormatter()
)
parser.add_option(
'-v', '--verbose',
action='store_true',
help='Verbose output.'
)
mail_group = optparse.OptionGroup(parser, 'Mail Options')
mail_group.add_option(
'-s', '--subject',
default=None,
help='Email subject.'
)
mail_group.add_option(
'-H', '--header',
action='callback', type='string',
callback=_parse_headers,
help=('Email header(s) of form NAME=VALUE. '
'Specify more than one with comma delimited list.')
)
mail_group.add_option(
'-S', '--sender',
default=_default_sender(),
help='Sender email address. (default: %default)'
)
mail_group.add_option(
'--smtp-host',
default=_default_smtp_host(),
help='SMTP host to use when sending email. (default: %default)'
)
parser.add_option_group(mail_group)
opts, args = parser.parse_args()
# Add all positional arguments as recipients.
opts.recipients = args
return opts
def _setup_logging(verbose=False):
"""Initialize logging and set level based on verbose.
:type verbose: bool
:arg verbose: When True, set log level to DEBUG.
"""
log_level = logging.DEBUG if verbose else logging.WARN
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=log_level)
logging.debug('Verbose output enabled.')
if __name__ == '__main__':
main()
| apache-2.0 | 797,079,011,877,448,400 | 28.939759 | 93 | 0.635412 | false | 3.922652 | false | false | false |
davinwang/caffe2 | caffe2/python/layers/add_bias.py | 4 | 2212 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import math
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = self.create_param(
param_name='b',
shape=[input_dims, ],
initializer=bias_init,
optimizer=bias_optim,
)
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
| apache-2.0 | 6,581,035,748,500,131,000 | 36.491525 | 78 | 0.629747 | false | 3.942959 | false | false | false |
bsmithyman/pygeo | util/segy2fast.py | 1 | 7338 | #!/usr/bin/env python
# pygeo - a distribution of tools for managing geophysical data
# Copyright (C) 2011, 2012 Brendan Smithyman
# This file is part of pygeo.
# pygeo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# pygeo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with pygeo. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
import numpy as np
import sys
from optparse import OptionParser
from pygeo.segyread import SEGYFile
from pygeo.coord import reduceToLocal
# ------------------------------------------------------------------------
# Settings
AUTHORSHIP = 'Brendan Smithyman'
VERSION = '%prog v1.1\n'
DESCRIPTION = 'Exports a series of FAST pick datafiles based on SEG-Y headers.'
USAGE = '%prog [options] segy_file'
format_string = '%10.3f%10.3f%10.3f%10.3f%10.3f%3d\n'
# ------------------------------------------------------------------------
parser = OptionParser( usage = USAGE,
version = VERSION,
description = DESCRIPTION)
parser.add_option('-b', '--basis', action='store', dest='basis',
help='point to use as zero coordinate [%default]')
parser.add_option('-a', '--angle', action='store', dest='angle',
help='angle in degrees for coordinate rotation [%default]')
parser.add_option('-k', '--key', action='store', dest='key',
help='trace header id for first-arrival picks [%default]')
parser.add_option('-u', '--unit', action='store', dest='unit',
help='spatial unit [%default]')
parser.add_option('-z', '--zdir', action='store', dest='zdir',
help='coord. system z-scaling [%default]')
parser.add_option('-t', '--tfac', action='store', dest='tfac',
help='temporal unit [%default]')
parser.add_option('-s', '--shotout', action='store', dest='shotout',
help='filename for shot geometry information (for f.in) [%default]')
parser.add_option('-e', '--error', action='store', dest='error',
help='uniform data error [%default]')
parser.add_option('-o', '--omit', action='store_true', dest='omit',
help='omit shots without picks and renumber accordingly')
parser.add_option('-r', '--resamp', action='store', dest='resamp',
help='use a subset of shots, every nth [%default]')
parser.set_defaults( basis = '0.,0.,0.',
angle = '0.',
key = 'delrt',
unit = '1e3',
zdir = '-1',
tfac = '1e3',
shotout = 'shotout.dat',
error = '30.',
omit = False,
resamp = 1)
(options, args) = parser.parse_args()
if (len(args) < 1):
parser.error('Please specify a SEG-Y file!')
exit(1)
# Get input filename
infile = args[0]
# Convert rotation angle to radians
angle = np.float(options.angle)*np.pi/180.
# Convert basis to array
basis = np.array([np.float(item) for item in options.basis.strip().split(',')])
pickkey = options.key
unit = np.float(options.unit)
zantithesis = np.float(options.zdir)
tfac = np.float(options.tfac)
shotout = options.shotout
error = np.float(options.error)
omit = options.omit
resamp = np.int(options.resamp)
# Open SEG-Y file and get first trace header
sys.stdout.write('Reading "%s"...\n'%(infile,))
sys.stdout.flush()
sf = SEGYFile(infile, endian='Big')
trh0 = sf.trhead[0]
sys.stdout.write('Calculating scale factors...\n')
sys.stdout.flush()
# Determine coordinate and elevation scale factors from first trace header
# (assume same for all traces)
if (trh0['scalco'] < 0):
scalco = 1./abs(trh0['scalco'])
else:
scalco = trh0['scalco']
scalco = scalco / unit
if (trh0['scalel'] < 0):
scalel = 1./abs(trh0['scalel'])
else:
scalel = trh0['scalel']
scalel = scalel / unit
# Use SEGYFile internal to calculate shot-gather boundaries
sys.stdout.write('Calculating ensemble boundaries...\n')
sys.stdout.flush()
sf._calcEnsembles()
# Find the number of ensembles, and order them by occurrence in the SEG-Y file
ngathers = len(sf.ensembles)
ordering = np.argsort(sf.ensembles.values())
shotnums = np.array(sf.ensembles.keys())[ordering]
sys.stdout.write('Writing output files...\n')
sys.stdout.flush()
# Create some empty lists to hold upcoming values
shotlocs = [[],[],[]]
shotactive = []
shotnumber = 0
# Create bound thresholds (which will be updated)
bounds = [1e10,-1e10,1e10,-1e10,1e10,-1e10]
# Loop over each shot gather
for i in xrange(0, ngathers, resamp):
outlines = []
# Get the trace header for the first trace in this shot gather
trhl0 = sf.trhead[sf.ensembles[shotnums[i]]]
sx = trhl0['sx'] * scalco
sy = trhl0['sy'] * scalco
sz = trhl0['selev'] * scalel * zantithesis
(nsx, nsy, nsz) = reduceToLocal(np.array([sx,sy,sz],ndmin=2), angle, basis)[0]
# Append information about this shot to the running tally of all shot
# locations; this is used to construct f.in
shotlocs[0].append(nsx)
shotlocs[1].append(nsy)
shotlocs[2].append(nsz)
outlines.append(format_string % (nsx, nsy, nsz, 0., 0., -1))
tr0 = sf.ensembles[shotnums[i]]
if (i == ngathers - 1):
tr1 = sf.ntr - 1
else:
tr1 = sf.ensembles[shotnums[i+1]]
shotactive.append(0)
for j in xrange(tr0, tr1):
trhl = sf.trhead[j]
rx = trhl['gx'] * scalco
ry = trhl['gy'] * scalco
rz = trhl['gelev'] * scalel * zantithesis
(nrx, nry, nrz) = reduceToLocal(np.array([rx,ry,rz],ndmin=2), angle, basis)[0]
if (nrx < bounds[0]):
bounds[0] = nrx
if (nrx > bounds[1]):
bounds[1] = nrx
if (nry < bounds[2]):
bounds[2] = nry
if (nry > bounds[3]):
bounds[3] = nry
if (nrz < bounds[4]):
bounds[4] = nrz
if (nrz > bounds[5]):
bounds[5] = nrz
stime = trhl[pickkey]
if ((stime != 0) and (stime != 65535)):
outlines.append(format_string % (nrx, nry, nrz, stime/tfac, error/tfac, 1))
shotactive[-1] += 1
if (omit):
if (shotactive[-1] != 0):
shotnumber += 1
else:
shotactive.pop()
shotlocs[0].pop()
shotlocs[1].pop()
shotlocs[2].pop()
continue
else:
shotnumber += 1
# Create a FAST output file for this gather (using 4-digit filenames)
outfile = 'fd%04d.ascii'%(shotnumber,)
sys.stdout.write('%s <-- SHOTID %d\n'%(outfile, shotnums[i]))
sys.stdout.flush()
with open(outfile, 'w') as fp:
fp.writelines(outlines)
itrace = []
for i in xrange(shotnumber):
if (shotactive[i] > 0):
itrace.append(i + 1)
with open(shotout, 'w') as fp:
fp.write(' isource=')
fp.write(', '.join(['%d'%(item != 0) for item in shotactive]))
fp.write(',\n xsource=')
fp.write(', '.join(['%8.3f'%item for item in shotlocs[0]]))
fp.write(',\n ysource=')
fp.write(', '.join(['%8.3f'%item for item in shotlocs[1]]))
fp.write(',\n zsource=')
fp.write(', '.join(['%8.3f'%item for item in shotlocs[2]]))
fp.write(',\n itrace=')
fp.write(', '.join(['%d'%item for item in itrace]))
sys.stdout.write('\nBounds:\n\t%f < x < %f\n\t%f < y < %f\n\t%f < z < %f\n' % tuple(bounds))
sys.stdout.flush()
| gpl-3.0 | -5,089,923,384,913,584,000 | 28.119048 | 92 | 0.633688 | false | 3.006145 | false | false | false |
packdl/Relations | Relations.py | 1 | 36944 | #
# Gramps - a GTK+/GNOME based genealogy program - Family Sheet plugin
#
# Copyright (C) 2008,2009,2010 Reinhard Mueller
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Reports/Text Reports/Relations"""
from __future__ import unicode_literals
#------------------------------------------------------------------------
#
# Standard Python modules
#
#------------------------------------------------------------------------
import string
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer
from gramps.gen.lib import Date, Event, EventType, FamilyRelType, Name
from gramps.gen.lib import StyledText, StyledTextTag, StyledTextTagType
from gramps.gen.plug import docgen
from gramps.gen.plug.menu import BooleanOption, EnumeratedListOption, PersonOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
import gramps.gen.datehandler
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
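# Placeholder events: printed when a person has no recorded birth event or a
# married couple has no marriage event, so the report still shows empty slots.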
empty_birth = Event()
empty_birth.set_type(EventType.BIRTH)
empty_marriage = Event()
empty_marriage.set_type(EventType.MARRIAGE)
#------------------------------------------------------------------------
#
# Relations report
#
#------------------------------------------------------------------------
class Relations(Report):
"""
Relations is a page that contains all available info about the relationship
between two people.
"""
def __init__(self, database, options, user):
"""
Initialize the report.
@param database: the GRAMPS database instance
@param options: instance of the Options class for this report
@param user: a gramps.gen.user.User() instance
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.person_id = menu.get_option_by_name('pid').get_value()
self.person2_id = menu.get_option_by_name('pid2').get_value()
self.recurse = menu.get_option_by_name('recurse').get_value()
self.callname = menu.get_option_by_name('callname').get_value()
self.placeholder = menu.get_option_by_name('placeholder').get_value()
self.incl_sources = menu.get_option_by_name('incl_sources').get_value()
self.incl_notes = menu.get_option_by_name('incl_notes').get_value()
def write_report(self):
"""
Build the actual report.
"""
person1 = self.database.get_person_from_gramps_id(self.person_id)
person2 = self.database.get_person_from_gramps_id(self.person2_id)
self.__process_relationship(person1, person2)
#(rank, ahnentafel, person_key) = self.__calc_person_key(person)
#self.__process_person(person, rank, ahnentafel, person_key)
def __process_relationship(self, person1, person2):
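        """
        Write the relationship section: a summary of both people, the
        relationship of the second person to the first, and their common
        ancestors (if any can be found).
        """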
# --- Now let the party begin! ---
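        # The key paragraph below only prints a placeholder string for now.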
self.doc.start_paragraph('FSR-Key')
self.doc.write_text('starting')
self.doc.end_paragraph()
self.doc.start_table(None, 'FSR-Table')
# Main person
self.doc.start_row()
self.doc.start_cell('FSR-HeadCell', 3)
self.doc.start_paragraph('FSR-Name')
self.doc.write_text("First Person\n")
self.doc.end_paragraph()
self.__dump_person(person1, False, None)
self.doc.start_paragraph('FSR-Name')
self.doc.write_text("\nSecond Person\n")
self.doc.end_paragraph()
self.__dump_person(person2, False, None)
self.doc.start_paragraph('FSR-Name')
relationship = get_relationship_calculator()
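        # get_one_relationship returns a localized phrase (e.g. "father",
        # "first cousin") describing person2 relative to person1.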
relate = "\nSecond person is the first person's " + relationship.get_one_relationship(self.database, person1, person2)
self.doc.write_text(relate)
self.doc.end_paragraph()
self.doc.start_paragraph('FSR-Name')
self.doc.write_text("\nCommon Ancestor\n")
self.doc.write_text("The common ancestors for Person 1 and Person 2 are ")
#firstAncestor = self.database.get_person_from_handle();
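        # all_dist=True asks the calculator for every common-ancestor path it
        # can find (not just the closest one); only_birth=False also follows
        # non-birth (e.g. adoptive) relationships.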
info, msg = relationship.get_relationship_distance_new(
self.database, person1, person2, all_dist=True, only_birth=False)
        common_ancestors = self.__process_ancestor_string(info)
        self.doc.write_text(common_ancestors if common_ancestors else "unknown")
self.doc.end_paragraph()
#relationship = get_relationship_calculator()
#self.doc.start_paragraph('FSR-Name')
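        # Leftover debug output: dumps the raw relationship data to the console.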
        print('info:', info)
        print('message:', msg)
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def __process_ancestor_string(self, info):
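        """
        Build a readable string of common-ancestor names joined by 'and'
        from the relationship distance info, or return None if there is none.
        """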
        if isinstance(info, tuple):
            return None
        elif isinstance(info, list):
            ancestorlist = []
for relation in info:
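                # Each relation entry starts with (rank, ancestor_handle, ...);
                # a rank of -1 means no common ancestor was found.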
rank = relation[0]
person_handle = relation[1]
if rank == -1:
return None
ancestor = self.database.get_person_from_handle(person_handle)
name = ancestor.get_primary_name().get_regular_name()
ancestorlist.append(name)
            if ancestorlist:
return ' and '.join(ancestorlist)
else:
return None
def __process_person(self, person, rank, ahnentafel, person_key):
"""
Recursively build the Family Sheet for this person and all children
with spouses.
@param person: Person object for the key person of the Family Sheet.
@param rank: Numerical distance between the central person in the
database and the person in the parameter (the number of births
needed to connect them).
@param ahnentafel: "Ahnentafel" number of the common ancestor of the
central person in the database and the person in the parameter,
seen from the side of the central person in the database.
@param person_key: Family Sheet key to be printed on the top right of
the corner.
"""
# List of (person, rank, ahnentafel, person_key) tuples for persons to
# process recursively after this one.
more_sheets = []
# Numbering of spouses (integer, but printed in roman numbers).
spouse_index = 0
# Numbering of children (integer, but printed as lowercase letters).
child_index = 0
# Source references to print as footnotes.
self.__citation_index = 0
self.__citations = []
# Notes to print as footnotes.
self.__note_index = 0
self.__notes = []
# --- Now let the party begin! ---
self.doc.start_paragraph('FSR-Key')
self.doc.write_text(person_key)
self.doc.end_paragraph()
self.doc.start_table(None, 'FSR-Table')
# Main person
self.doc.start_row()
self.doc.start_cell('FSR-HeadCell', 3)
self.__dump_person(person, False, None)
self.doc.end_cell()
self.doc.end_row()
# Spouses
for family_handle in person.get_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
spouse_index += 1
spouse_handle = utils.find_spouse(person, family)
spouse = self.database.get_person_from_handle(spouse_handle)
# Determine relationship between the center person and the spouse.
# If the spouse has a closer blood relationship than the current
# person, we refer to the Family Sheet of the spouse instead of
# printing the child list, because all children are more closely
# related to the center person via the spouse than via the current
# person. The same happens if the relationship is on the same
# level, but the relationship via the spouse goes via a common
# ancestor with a lower Ahnentafel numbering (i.e. a relationship
# stronger father-sided). In these cases, refer_spouse will be set
# to True.
(spouse_rank, spouse_at, spouse_key) = \
self.__calc_person_key(spouse)
if self.recurse != RelationsOptions.RECURSE_ALL:
refer_spouse = (spouse_rank != -1 and \
(spouse_rank < rank or
(spouse_rank == rank and spouse_at < ahnentafel)))
else:
refer_spouse = False
self.doc.start_row()
self.doc.start_cell('FSR-NumberCell', 1)
self.doc.start_paragraph('FSR-Number')
self.doc.write_text(utils.roman(spouse_index))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FSR-DataCell', 2)
self.__dump_family(family, spouse)
if refer_spouse:
self.doc.start_paragraph('FSR-Normal')
self.doc.write_text(_("\u2192 %s") % spouse_key)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
if refer_spouse:
# Spouse with closer relationship than current person? Don't
# print children on this Family Sheet (but count them for the
# numbering).
child_index += len(family.get_child_ref_list())
continue
# Children
for child_ref in family.get_child_ref_list():
child = self.database.get_person_from_handle(child_ref.ref)
child_letter = string.ascii_lowercase[child_index]
self.doc.start_row()
self.doc.start_cell('FSR-EmptyCell', 1)
self.doc.end_cell()
self.doc.start_cell('FSR-NumberCell', 1)
self.doc.start_paragraph('FSR-Number')
self.doc.write_text(child_letter)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('FSR-DataCell', 1)
has_spouses = (child.get_family_handle_list() != [])
self.__dump_person(child, has_spouses, child_ref)
if has_spouses:
# We have to recalculate the key for this person, it could
# be closer related if it is a direct ancestor of the
# central person or one of its spouses.
(child_rank, child_at, child_key) = \
self.__calc_person_key(child)
self.doc.start_paragraph('FSR-Normal')
self.doc.write_text(_("\u2192 %s") % child_key)
self.doc.end_paragraph()
# We recursively print this child *only* if its
# relationship with the central person is closest via the
# current person. This way, we avoid that a person is
# printed recursively from more than one of its ancestors.
if child_key == person_key + child_letter or \
self.recurse == RelationsOptions.RECURSE_ALL:
more_sheets.append(
(child, child_rank, child_at, child_key))
self.doc.end_cell()
self.doc.end_row()
child_index += 1
self.doc.start_row()
self.doc.start_cell('FSR-FootCell', 3)
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
self.__dump_sources()
self.__dump_notes()
# Now print the sheets for the children.
if self.recurse != RelationsOptions.RECURSE_NONE:
for (child, child_rank, child_at, child_key) in more_sheets:
self.doc.page_break()
self.__process_person(child, child_rank, child_at, child_key)
def __dump_family(self, family, spouse):
"""
Output all data of a family the key person is a parent in, and all data
of the corresponding spouse.
"""
self.__dump_attributes(family)
# If this is a married couple, it must at least have a marriage event.
# If no marriage event is there, print placeholders for it
# nevertheless.
if family.get_relationship() == FamilyRelType.MARRIED and spouse:
for event_ref in family.get_event_ref_list():
event = self.database.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE:
break
else:
self.__dump_event(empty_marriage, None)
for event_ref in family.get_event_ref_list():
self.__dump_event_ref(event_ref)
if spouse:
self.__dump_person(spouse, False, family)
else:
self.doc.start_paragraph('FSR-Normal')
self.__write_sources(family)
self.__write_notes(family)
self.doc.end_paragraph()
def __dump_person(self, person, short, ref):
"""
Output all data of a person.
@param person: Person object to output.
@param short: If True, print only name and birth event.
@param ref: Reference through which this person is linked into the
Family Sheet. Can be a family object (for the spouses) or a
child_ref object (for the children). Source references and notes
for this reference object will also be output.
"""
name = person.get_primary_name()
name_text = _Name_get_styled(name, self.callname, self.placeholder)
self.doc.start_paragraph('FSR-Name')
mark = utils.get_person_mark(self.database, person)
self.doc.write_text("", mark)
self.doc.write_markup(str(name_text), name_text.get_tags())
self.__write_sources(name)
self.__write_notes(name)
self.__write_sources(person)
self.__write_notes(person)
if ref:
self.__write_sources(ref)
self.__write_notes(ref)
self.doc.end_paragraph()
if short:
event_ref = person.get_birth_ref()
if event_ref:
self.__dump_event_ref(event_ref)
else:
for alt_name in person.get_alternate_names():
name_type = str(alt_name.get_type())
name = _Name_get_styled(alt_name, self.callname,
self.placeholder)
self.__dump_line(name_type, name, alt_name)
self.__dump_attributes(person)
# Each person should have a birth event. If no birth event is
# there, print the placeholders for it nevertheless.
if not person.get_birth_ref():
self.__dump_event(empty_birth, None)
for event_ref in person.get_primary_event_ref_list():
self.__dump_event_ref(event_ref)
for addr in person.get_address_list():
location = utils.get_address_str(addr)
date = gramps.gen.datehandler.get_date(addr)
self.doc.start_paragraph('FSR-Normal')
if date:
self.doc.write_text(_("Address (%(date)s): %(location)s") % {
'date': date,
'location': location})
else:
self.doc.write_text(_("Address: %(location)s") % {
'location': location})
self.__write_sources(addr)
self.__write_notes(addr)
self.doc.end_paragraph()
def __dump_event_ref(self, event_ref):
"""
Output all data for an event given as a reference.
"""
event = self.database.get_event_from_handle(event_ref.ref)
self.__dump_event(event, event_ref)
def __dump_event(self, event, ref):
"""
Output all data for an event.
@param event: Event object
@param ref: Reference through which this event is linked to the
currently processed object. Source references and notes for this
reference object will also be output.
"""
description = event.get_description()
date_text = _Event_get_date_text(event, self.placeholder)
place_text = _Event_get_place_text(event, self.database,
self.placeholder)
self.doc.start_paragraph('FSR-Normal')
self.doc.write_text("%s:" % event.get_type())
if description:
self.doc.write_text(" ")
self.doc.write_text(description)
if date_text:
self.doc.write_text(" ")
self.doc.write_text(date_text)
if place_text:
self.doc.write_text(" ")
self.doc.write_text(place_text)
if event.get_place_handle():
place = self.database.get_place_from_handle(event.get_place_handle())
self.__write_sources(place)
self.__write_notes(place)
self.__write_sources(event)
self.__write_notes(event)
if ref:
self.__write_notes(ref)
for attr in event.get_attribute_list():
self.doc.write_text(_("; %(type)s: %(value)s") % {
'type' : attr.get_type(),
'value': attr.get_value()})
self.__write_sources(attr)
self.__write_notes(attr)
self.doc.end_paragraph()
def __dump_attributes(self, obj):
"""
Output all attributes of the given object
"""
for attr in obj.get_attribute_list():
self.__dump_line(str(attr.get_type()), attr.get_value(), obj)
def __dump_line(self, name, text, obj):
"""
Output a name/text pair (like an attribute) with its related source
references and notes.
"""
self.doc.start_paragraph('FSR-Normal')
self.doc.write_text("%s: " % name)
if isinstance (text, StyledText):
self.doc.write_markup(str(text), text.get_tags())
else:
self.doc.write_text(text)
self.__write_sources(obj)
self.__write_notes(obj)
self.doc.end_paragraph()
def __write_sources(self, obj):
"""
Output source reference numbers for the given object (numbers like [1]
in superscript) and collect the source references to be printed at the
end of the report.
"""
if not self.incl_sources:
return
for citation_handle in obj.get_citation_list():
# Citation already in list? If yes, use same number again.
if citation_handle in self.__citations:
index = self.__citations.index(citation_handle) + 1
else:
self.__citations.append(citation_handle)
self.__citation_index += 1
index = self.__citation_index
self.doc.start_superscript()
self.doc.write_text(" [%s]" % index)
self.doc.end_superscript()
def __write_notes(self, obj):
"""
Output note reference numbers for the given object (numbers like (1) in
superscript) and collect the note handles to be printed at the end of
the report.
"""
if not self.incl_notes:
return
for note_handle in obj.get_note_list():
# Note already in list? If yes, use same number again.
if note_handle in self.__notes:
index = self.__notes.index(note_handle) + 1
else:
self.__notes.append(note_handle)
self.__note_index += 1
index = self.__note_index
self.doc.start_superscript()
self.doc.write_text(" (%s)" % index)
self.doc.end_superscript()
def __dump_sources(self):
"""
Print the collected sources.
"""
if self.__citations:
self.doc.start_paragraph('FSR-Footnote')
self.doc.write_text("\n")
self.doc.write_text(_("Source references:"))
self.doc.end_paragraph()
index = 0
for citation_handle in self.__citations:
citation = self.database.get_citation_from_handle(citation_handle)
source = self.database.get_source_from_handle(citation.get_reference_handle())
index += 1
self.doc.start_paragraph('FSR-Footnote')
self.doc.write_text("[%s]: " % index)
if source.get_abbreviation():
self.doc.write_text(source.get_abbreviation())
else:
if source.get_author():
self.doc.write_text(_("%s: ") % source.get_author())
self.doc.write_text(source.get_title())
self.__write_notes(source)
if citation.get_page():
self.doc.write_text(_(", page %s") % citation.get_page())
self.__write_notes(citation)
self.doc.end_paragraph()
def __dump_notes(self):
"""
Print the collected notes.
"""
if self.__notes:
self.doc.start_paragraph('FSR-Footnote')
self.doc.write_text("\n")
self.doc.write_text(_("Notes:"))
self.doc.end_paragraph()
index = 0
for note_handle in self.__notes:
note = self.database.get_note_from_handle(note_handle)
index += 1
self.doc.start_paragraph('FSR-Footnote')
self.doc.write_text("(%s): " % index)
self.doc.write_text(note.get())
self.doc.end_paragraph()
def __calc_person_key(self, person):
"""
The person key is a unique identifier that is built from the
relationship to the default person. It consists of the "Ahnentafel"
number of the common ancestor of the person with the default person,
and then a letter representing the child number for each generation
from the common ancestor to the person.
If more than one common ancestor exists, the common ancestor with the
lowest "Ahnentafel" number has precedence.
For example, the second child of the third child of the father of the
mother of the central person gets the person key "6cb".
"""
relationship = get_relationship_calculator()
default_person = self.database.get_default_person()
# No home person set.
if default_person is None:
return (-1, 0, "")
# First try direct relationship.
spousestring = ""
info, msg = relationship.get_relationship_distance_new(
self.database, default_person, person, all_dist=True)
info = relationship.collapse_relations(info)[0]
(rank, ancestor_handle, default_rel, default_fam, person_rel,
person_fam) = info
# Then try relationship to any spouse.
if rank == -1:
index = 0
for family_handle in default_person.get_family_handle_list():
index += 1
family = self.database.get_family_from_handle(family_handle)
spouse_handle = utils.find_spouse(default_person, family)
spouse = self.database.get_person_from_handle(spouse_handle)
info, msg = relationship.get_relationship_distance_new(
self.database, spouse, person, all_dist=True)
info = relationship.collapse_relations(info)[0]
(rank, ancestor_handle, default_rel, default_fam, person_rel,
person_fam) = info
if rank != -1:
spousestring = utils.roman(index)
break
# If no relationship found at all, exit here.
if rank == -1:
return (rank, 0, "")
# Calculate Ahnentafel number of common ancestor.
ahnentafel = 1
for rel in default_rel:
ahnentafel *= 2
if rel in (relationship.REL_MOTHER,
relationship.REL_MOTHER_NOTBIRTH):
ahnentafel += 1
# Find out child letters.
child = person
childletters = ""
for rel in person_rel:
family_handle = child.get_main_parents_family_handle()
family = self.database.get_family_from_handle(family_handle)
if rel in (relationship.REL_MOTHER,
relationship.REL_MOTHER_NOTBIRTH):
parent_handle = family.get_mother_handle()
else:
parent_handle = family.get_father_handle()
parent = self.database.get_person_from_handle(parent_handle)
# Count *all* children from this parent
childletter = "?"
index = 0
for family_handle in parent.get_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
for child_ref in family.get_child_ref_list():
if child_ref.ref == child.get_handle():
childletter = string.ascii_lowercase[index]
break
index += 1
else:
continue
break
childletters = childletter + childletters
child = parent
return (rank, ahnentafel,
"%s%s%s" % (spousestring, ahnentafel, childletters))
#------------------------------------------------------------------------
#
# Reusable functions (could be methods of gramps.gen.lib.*)
#
#------------------------------------------------------------------------
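# Choices for how a call name is handled when formatting a name;
# see _Name_get_styled() below.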
_Name_CALLNAME_DONTUSE = 0
_Name_CALLNAME_REPLACE = 1
_Name_CALLNAME_UNDERLINE_ADD = 2
def _Name_get_styled(name, callname, placeholder=False):
"""
Return a StyledText object with the name formatted according to the
parameters:
@param callname: whether the callname should be used instead of the first
name (CALLNAME_REPLACE), underlined within the first name
(CALLNAME_UNDERLINE_ADD) or not used at all (CALLNAME_DONTUSE).
@param placeholder: whether a series of underscores should be inserted as a
placeholder if first name or surname are missing.
"""
# Make a copy of the name object so we don't mess around with the real
# data.
n = Name(source=name)
# Insert placeholders.
if placeholder:
if not n.first_name:
n.first_name = "____________"
if not n.get_surname():
n.get_primary_surname().set_surname("____________")
if n.call:
if callname == _Name_CALLNAME_REPLACE:
# Replace first name with call name.
n.first_name = n.call
elif callname == _Name_CALLNAME_UNDERLINE_ADD:
if n.call not in n.first_name:
# Add call name to first name.
n.first_name = "\"%(call)s\" (%(first)s)" % {
'call': n.call,
'first': n.first_name}
text = displayer.display_name(n)
tags = []
if n.call:
if callname == _Name_CALLNAME_UNDERLINE_ADD:
# "name" in next line is on purpose: only underline the call name
# if it was a part of the *original* first name
if n.call in name.first_name:
# Underline call name
callpos = text.find(n.call)
tags = [StyledTextTag(StyledTextTagType.UNDERLINE, True,
[(callpos, callpos + len(n.call))])]
return StyledText(text, tags)
def _Date_get_text(date, placeholder=False):
"""
Return a textual representation of the date to be used in textual context,
like "on 1 January 1980" or "in January 1980" or "after January 1980".
@param placeholder: whether a series of underscores should be inserted as a
placeholder if the date is missing or incomplete.
"""
text = gramps.gen.datehandler.displayer.display(date) # @UndefinedVariable
if date.get_modifier() == Date.MOD_NONE \
and date.get_quality() == Date.QUAL_NONE:
if date.get_day_valid():
text = _("on %(ymd_date)s") % {'ymd_date': text}
elif date.get_month_valid():
text = _("in %(ym_date)s") % {'ym_date': text}
elif date.get_year_valid():
text = _("in %(y_date)s") % {'y_date': text}
if placeholder:
if date.is_empty():
text = _("on %(placeholder)s") % { 'placeholder': "__________"}
elif not date.is_regular():
text = _("on %(placeholder)s (%(partial)s)") % {
'placeholder': "__________",
'partial': text}
return text
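# Illustrative calls (not part of the original plugin; assumes gramps.gen.lib.Date,
# which this module already imports):
#   d = Date()
#   d.set_yr_mon_day(1980, 1, 1)
#   _Date_get_text(d)                          # -> "on 1 January 1980" (locale dependent)
#   _Date_get_text(Date(), placeholder=True)   # -> "on __________"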
# Output placeholders for missing dates and places only for the
# following event types.
_Event_needs_date_place = [
EventType.BIRTH,
EventType.DEATH,
EventType.MARRIAGE,
EventType.DIVORCE]
def _Event_get_date_text(event, placeholder=False):
"""
Return a textual representation of the event's date to be used in textual
context, like "on 1 January 1980" or "in January 1980" or "after January
1980".
@param placeholder: whether a series of underscores should be inserted as a
placeholder if the date is missing or incomplete.
"""
return _Date_get_text(event.get_date_object(),
placeholder and event.get_type() in _Event_needs_date_place)
def _Event_get_place_text(event, database, placeholder=False):
"""
Return a textual representation of the event's place to be used in textual
context. This is basically "in " + the place title.
@param placeholder: whether a series of underscores should be inserted as a
placeholder if the place is missing.
"""
place_handle = event.get_place_handle()
if place_handle:
place = database.get_place_from_handle(place_handle)
text = _("in %(place)s") % {'place': place.get_title()}
elif placeholder and event.get_type() in _Event_needs_date_place:
text = _("in %(place)s") % {'place': "__________"}
else:
text = ""
return text
#------------------------------------------------------------------------
#
# MenuReportOptions
#
#------------------------------------------------------------------------
class RelationsOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
RECURSE_NONE = 0
RECURSE_SIDE = 1
RECURSE_ALL = 2
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
##########################
category_name = _("Report Options")
##########################
pid = PersonOption(_("First Relative"))
pid2 = PersonOption(_("Second Relative"))
pid.set_help(_("The first person for the relationship calculation."))
pid2.set_help(_("The second person for the relationship calculation."))
menu.add_option(category_name, "pid", pid)
menu.add_option(category_name, "pid2", pid2)
recurse = EnumeratedListOption(_("Print sheets for"), self.RECURSE_NONE)
recurse.set_items([
(self.RECURSE_NONE, _("Center person only")),
(self.RECURSE_SIDE, _("Center person and descendants in side branches")),
(self.RECURSE_ALL, _("Center person and all descendants"))])
menu.add_option(category_name, "recurse", recurse)
callname = EnumeratedListOption(_("Use call name"), _Name_CALLNAME_DONTUSE)
callname.set_items([
(_Name_CALLNAME_DONTUSE, _("Don't use call name")),
(_Name_CALLNAME_REPLACE, _("Replace first name with call name")),
(_Name_CALLNAME_UNDERLINE_ADD, _("Underline call name in first name / add call name to first name"))])
menu.add_option(category_name, "callname", callname)
placeholder = BooleanOption( _("Print placeholders for missing information"), True)
menu.add_option(category_name, "placeholder", placeholder)
incl_sources = BooleanOption( _("Include sources"), True)
menu.add_option(category_name, "incl_sources", incl_sources)
incl_notes = BooleanOption( _("Include notes"), True)
menu.add_option(category_name, "incl_notes", incl_notes)
def make_default_style(self, default_style):
"""Make default output style for the Family Sheet Report."""
#Paragraph Styles
font = docgen.FontStyle()
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_size(10)
font.set_bold(0)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_description(_('The basic style used for the text display'))
default_style.add_paragraph_style('FSR-Normal', para)
font = docgen.FontStyle()
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_size(10)
font.set_bold(0)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_alignment(docgen.PARA_ALIGN_RIGHT)
para.set_description(_('The style used for the page key on the top'))
default_style.add_paragraph_style('FSR-Key', para)
font = docgen.FontStyle()
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_size(12)
font.set_bold(1)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_description(_("The style used for names"))
default_style.add_paragraph_style('FSR-Name', para)
font = docgen.FontStyle()
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_size(12)
font.set_bold(1)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_alignment(docgen.PARA_ALIGN_CENTER)
para.set_description(_("The style used for numbers"))
default_style.add_paragraph_style('FSR-Number', para)
font = docgen.FontStyle()
font.set_type_face(docgen.FONT_SANS_SERIF)
font.set_size(8)
font.set_bold(0)
para = docgen.ParagraphStyle()
para.set_font(font)
para.set_description(_(
'The style used for footnotes (notes and source references)'))
default_style.add_paragraph_style('FSR-Footnote', para)
#Table Styles
cell = docgen.TableCellStyle()
cell.set_padding(0.1)
cell.set_top_border(1)
cell.set_left_border(1)
cell.set_right_border(1)
default_style.add_cell_style('FSR-HeadCell', cell)
cell = docgen.TableCellStyle()
cell.set_padding(0.1)
cell.set_left_border(1)
default_style.add_cell_style('FSR-EmptyCell', cell)
cell = docgen.TableCellStyle()
cell.set_padding(0.1)
cell.set_top_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FSR-NumberCell', cell)
cell = docgen.TableCellStyle()
cell.set_padding(0.1)
cell.set_top_border(1)
cell.set_right_border(1)
cell.set_left_border(1)
default_style.add_cell_style('FSR-DataCell', cell)
cell = docgen.TableCellStyle()
cell.set_padding(0.1)
cell.set_top_border(1)
default_style.add_cell_style('FSR-FootCell', cell)
table = docgen.TableStyle()
table.set_width(100)
table.set_columns(3)
table.set_column_width(0, 7)
table.set_column_width(1, 7)
table.set_column_width(2, 86)
default_style.add_table_style('FSR-Table', table)
| mit | 7,471,650,773,089,485,000 | 36.092369 | 126 | 0.565071 | false | 4.037154 | false | false | false |
vinnyspb/nexa-controller-rpi | datadog_stat.py | 1 | 1424 | import httplib
import time
class DataDogStat:
def __init__(self, config):
self._config = config
    def post_status(self, status):
        """Validate the API key against DataDog, then post the boolean status
        as a gauge metric stamped with the current time."""
        conn = httplib.HTTPSConnection("app.datadoghq.com", timeout=60)
conn.request("GET", "/api/v1/validate?api_key=" + self._config.DATADOG_API_KEY)
r1 = conn.getresponse()
if r1.status != 200:
raise Exception("Not 200 status in DataDog API login: " + str(r1.status))
current_timestamp = int(time.time())
if status:
datadog_metric_value = self._config.DATADOG_ON_VALUE
else:
datadog_metric_value = self._config.DATADOG_OFF_VALUE
headers = {"Content-type": "application/json"}
post_data = '{ "series" : [{"metric":"' + self._config.DATATOG_METRIC_NAME + \
'", "points":[[' + str(current_timestamp) + \
', ' + datadog_metric_value + ']], "type":"gauge", "host":"' + \
                    self._config.DATADOG_HOST_NAME + '", "tags":[""]}]}'
conn = httplib.HTTPSConnection("app.datadoghq.com", timeout=60)
conn.request("POST", "/api/v1/series?api_key=" + self._config.DATADOG_API_KEY,
post_data,
headers)
r1 = conn.getresponse()
if r1.status != 202:
raise Exception("Not 202 status in Datadog metric post: " + str(r1.status))
return True
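# --- Usage sketch (not part of the original module) ---
# The config class below is a stand-in: the attribute names mirror what
# post_status() reads, but every value is illustrative only.
if __name__ == "__main__":
    class ExampleConfig(object):
        DATADOG_API_KEY = "your-api-key"
        DATATOG_METRIC_NAME = "nexa.switch.state"  # attribute name kept as spelled above
        DATADOG_HOST_NAME = "raspberrypi"
        DATADOG_ON_VALUE = "1"
        DATADOG_OFF_VALUE = "0"
    DataDogStat(ExampleConfig()).post_status(True)  # posts the "on" gauge value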
| mit | -6,053,304,983,212,914,000 | 37.486486 | 87 | 0.554775 | false | 3.632653 | true | false | false |
PaddlePaddle/Paddle | python/paddle/distributed/fleet/runtime/parameter_server_runtime.py | 1 | 27324 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import Program
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.framework import Variable, Parameter
from .runtime_base import RuntimeBase
from ..base.private_helper_function import wait_server_ready
__all__ = []
class ParameterServerRuntime(RuntimeBase):
def __init__(self):
super(ParameterServerRuntime, self).__init__()
self._communicator = None
def _set_basic_info(self, context):
self.context = context
self.role_maker = context["role_maker"]
self.origin_main_program = context["origin_main_program"]
self.origin_startup_program = context["origin_startup_program"]
self.async_strategy = self._get_distributed_strategy()
self.compiled_strategy = self.build_compiled_startegy()
def _get_distributed_strategy(self):
strategy = None
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
dist_strategy = self.context["valid_strategy"]
k_steps = dist_strategy.a_sync_configs["k_steps"]
if not dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_sync_strategy()
if dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_async_strategy()
if dist_strategy.a_sync and k_steps > 0:
strategy = StrategyFactory.create_geo_strategy(k_steps)
if not strategy:
raise ValueError("k_steps must be invalid value, please check")
return strategy
def build_compiled_startegy(self):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy
compiled_config = CompileTimeStrategy(
self.origin_main_program, self.origin_main_program,
self.async_strategy, self.role_maker)
return compiled_config
def _load_sparse_params(self,
executor,
dirname,
varnames,
main_program=None):
        assert varnames is not None
check_vars = []
load_prog = Program()
load_block = load_prog.global_block()
def _in_varnames(var):
return var.name in varnames
load_vars = list(
filter(_in_varnames, fluid.default_main_program().list_vars()))
if main_program is None:
main_program = self.origin_main_program
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
for each_var in load_vars:
assert isinstance(each_var, Variable)
origin_varname, _, _ = _get_varname_parts(each_var.name)
new_var = fluid.io._clone_var_in_block_(load_block, each_var)
var_path = os.path.join(dirname, origin_varname)
if not os.path.exists(var_path):
raise ValueError("SelectedRows var {} can not find at {}".
format(new_var.name, var_path))
if os.path.isfile(var_path):
load_block.append_op(
type='sparse_tensor_load',
inputs={},
outputs={'Out': [new_var]},
attrs={
'file_path': os.path.join(dirname, origin_varname),
'node_index': self.role_maker._server_index(),
'node_num': self.role_maker._server_num(),
'shape': each_var.shape
})
check_vars.append(each_var)
executor.run(load_prog)
def _load_distributed_params(self, dirname, varnames):
from paddle.fluid.communicator import LargeScaleKV
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
scale_kv = LargeScaleKV()
for varname in varnames:
origin_varname, _, _ = _get_varname_parts(varname)
sparse_dir = os.path.join(dirname, origin_varname, varname)
scale_kv.load(varname, sparse_dir)
@staticmethod
def __exclude_vars(exclude_var_names=[]):
def is_valid(var):
if var.name in exclude_var_names:
return False
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
origin_varname, _, _ = _get_varname_parts(var.name)
if origin_varname.endswith("@GRAD"):
return False
if origin_varname == "learning_rate_0":
return False
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.READER:
return False
return var.persistable
return is_valid
def _init_worker(self):
def sync_strategy_envs():
kwargs = {}
kwargs[
"pserver_endpoints"] = self.role_maker._get_pserver_endpoints()
kwargs["trainer_id"] = self.role_maker._worker_index()
return kwargs
def geo_strategy_envs():
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
def get_sparse_attrs():
opt_init_map = {}
opt_init_map["gaussian_random"] = ["seed", "mean", "std"]
opt_init_map["fill_constant"] = ["value"]
opt_init_map["uniform_random"] = ["seed", "min", "max"]
opt_init_map[
"truncated_gaussian_random"] = ["seed", "mean", "std"]
dist_varnames = get_sparse_tablenames(self.origin_main_program,
True)
sparse_varnames = get_sparse_tablenames(
self.origin_main_program, False)
if len(dist_varnames) != 0:
raise ValueError(
"GeoStrategy can not support large scale embeding now, please use fluid.layers.embedding"
)
init_attrs = []
for value_name in sparse_varnames:
value_var = self.origin_main_program.global_block().vars[
value_name]
value_attr = [
value_name,
",".join([str(dim) for dim in value_var.shape])
]
for op in self.origin_startup_program.global_block().ops:
if op.type in opt_init_map.keys(
) and value_name == op.output("Out")[0]:
init_attr = [op.type]
for attr in opt_init_map[op.type]:
init_attr.append(str(op.attr(attr)))
value_attr.append("&".join(init_attr))
init_attrs.append(":".join(value_attr))
break
return "#".join(init_attrs)
kwargs = {}
kwargs["trainers"] = self.role_maker._worker_num()
kwargs["sparse_attrs"] = get_sparse_attrs()
return kwargs
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops, _has_global_step
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
SyncStrategy, GeoStrategy
trainer_config = self.async_strategy.get_trainer_runtime_config()
print(trainer_config)
dist_strategy = self.context["valid_strategy"]
launch_barrier = dist_strategy.a_sync_configs["launch_barrier"]
if launch_barrier:
# for trainer wait server ready
wait_server_ready(self.role_maker._get_pserver_endpoints())
# for ps-heter mode, wait heter worker ready
if self.role_maker._is_heter_parameter_server_mode and self.role_maker._is_worker(
):
wait_server_ready(self.role_maker._get_heter_worker_endpoints())
lrs = _has_global_step(_get_lr_ops(self.origin_main_program))
if lrs:
kwargs = {"need_global_step": "1"}
else:
kwargs = {"need_global_step": "0"}
if isinstance(self.async_strategy, GeoStrategy):
geo_kwargs = geo_strategy_envs()
kwargs.update(geo_kwargs)
if isinstance(self.async_strategy, SyncStrategy):
sync_kwargs = sync_strategy_envs()
kwargs.update(sync_kwargs)
kwargs = kwargs if kwargs else None
send_ctx = self.compiled_strategy.get_communicator_send_context()
if self.compiled_strategy.is_geo_mode():
recv_ctx = self.compiled_strategy.get_communicator_recv_context(
recv_type=4)
else:
recv_ctx = self.compiled_strategy.get_communicator_recv_context(
recv_type=1)
from paddle.fluid.communicator import Communicator
self._communicator = Communicator(
trainer_config.mode, kwargs,
trainer_config.get_communicator_flags())
self._communicator.init_with_ctx(send_ctx, recv_ctx)
if not self._communicator.is_running():
self._communicator.start()
else:
warnings.warn("communicator has been initialized, skip")
def _get_executor(self):
executor = fluid.Executor(fluid.CPUPlace())
if self.role_maker._is_heter_parameter_server_mode:
heter_worker_device_guard = self.context[
"valid_strategy"].a_sync_configs[
"heter_worker_device_guard"].upper()
if heter_worker_device_guard not in ["GPU", "XPU", "CPU"]:
raise ValueError("Heter Worker Not Support Device {}".format(
heter_worker_device_guard))
if self.role_maker._is_heter_worker():
if heter_worker_device_guard == "GPU":
executor = Executor(
fluid.CUDAPlace(
int(os.getenv("FLAGS_selected_gpus", "0"))))
elif heter_worker_device_guard == "XPU":
executor = Executor(
fluid.XPUPlace(
int(os.getenv("FLAGS_selected_xpus", "0"))))
return executor
def _init_server(self, *args, **kwargs):
if len(args) > 1:
raise ValueError("init server can only accept 1 args: `dirname`")
elif len(args) == 1:
model_dirname = args[0]
else:
model_dirname = None
executor = self._get_executor()
if self.role_maker._is_heter_worker() and self.context[
"valid_strategy"].a_sync_configs["launch_barrier"]:
# for heter trainer wait server ready
wait_server_ready(self.role_maker._get_pserver_endpoints())
executor.run(fluid.default_startup_program())
if self.role_maker._is_heter_worker():
self._init_worker()
return
sparse_varnames = self.compiled_strategy.get_sparse_varname_on_ps(False)
sparse_related_optimize_varnames = []
for var_name in sparse_varnames:
sparse_related_optimize_varnames += self.compiled_strategy.get_optimize_varname_on_ps(
var_name)
sparse_related_optimize_varnames = list(
set(sparse_related_optimize_varnames))
distribtued_varnames = self.compiled_strategy.get_sparse_varname_on_ps(
True)
distributed_related_optimize_varnames = []
for var_name in distribtued_varnames:
distributed_related_optimize_varnames += self.compiled_strategy.get_optimize_varname_on_ps(
var_name)
distributed_related_optimize_varnames = list(
set(distributed_related_optimize_varnames))
remaining_vars = list(
filter(
ParameterServerRuntime.__exclude_vars(
sparse_varnames + distribtued_varnames +
sparse_related_optimize_varnames +
distributed_related_optimize_varnames),
fluid.default_main_program().list_vars()))
if not model_dirname:
return
if not os.path.isdir(model_dirname):
raise ValueError("There is no directory named '%s'", model_dirname)
# load dense
fluid.io.load_vars(
executor,
main_program=fluid.default_main_program(),
dirname=model_dirname,
vars=remaining_vars)
# load sparse
self._load_sparse_params(
executor=executor,
dirname=model_dirname,
varnames=sparse_varnames + sparse_related_optimize_varnames)
# load large scale
self._load_distributed_params(
dirname=model_dirname,
varnames=distribtued_varnames +
distributed_related_optimize_varnames)
def _run_server(self):
executor = self._get_executor()
executor.run(fluid.default_main_program())
def _stop_worker(self):
self._communicator.stop()
executor = self._get_executor()
executor.close()
def _get_optimizer_status(self, op, param_name):
supported_opts = [
"sgd", "adam", "adagrad", "adamax", "momentum", "lars_momentum",
"rmsprop", "decayed_adagrad", "ftrl"
]
reshaped_val_map = {}
reshaped_val_map["sgd"] = []
reshaped_val_map["adam"] = ["moment1_0", "moment2_0"]
reshaped_val_map["adagrad"] = ["moment_0"]
reshaped_val_map["adamax"] = ["moment_0", "inf_norm_0"]
reshaped_val_map["momentum"] = ["velocity_0"]
reshaped_val_map["lars_momentum"] = ["velocity_0"]
reshaped_val_map[
"rmsprop"] = ["momentum_0", "mean_square_0", "mean_grad_0"]
reshaped_val_map["decayed_adagrad"] = ["moment_0"]
reshaped_val_map["ftrl"] = ["squared_0", "linear_0"]
orishaped_val_map = {}
orishaped_val_map["adam"] = ["beta1_pow_acc_0", "beta2_pow_acc_0"]
orishaped_val_map["adamax"] = ["beta1_pow_acc_0"]
if op not in supported_opts:
raise ValueError(
"fleet can not support optimizer: {}, only this can be supported: {}".
format(op, supported_opts))
reshaped_names = [
param_name + "_" + val for val in reshaped_val_map[op]
]
if op not in orishaped_val_map:
origin_names = []
else:
origin_names = [
param_name + "_" + val for val in orishaped_val_map[op]
]
return reshaped_names, origin_names
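    # Illustrative (not part of the original file): for op="adam" and
    # param_name="fc_0.w_0" this returns
    #   (["fc_0.w_0_moment1_0", "fc_0.w_0_moment2_0"],
    #    ["fc_0.w_0_beta1_pow_acc_0", "fc_0.w_0_beta2_pow_acc_0"])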
def _get_optimizer_op(self, param_name):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops
opts = _get_optimize_ops(self.origin_main_program)
for op in opts:
if "Param" in op.input_names and \
"LearningRate" in op.input_names and op.input("Param")[0] == param_name:
return op
def _save_dense_params(self, executor, dirname, context, main_program):
self._communicator.recv()
prog = Program()
block = prog.global_block()
local_vars = []
for name, var_ctx in context.items():
if len(var_ctx.origin_varnames()) != 1:
raise ValueError("Dense can not support split now.")
varname = var_ctx.origin_varnames()[0]
local_vars.append(varname)
optimizer = self._get_optimizer_op(varname)
reshaped_varnames, origin_varnames = self._get_optimizer_status(
optimizer.type, varname)
for var_name in [varname] + reshaped_varnames + origin_varnames:
var = self.origin_main_program.global_block().vars[var_name]
block.append_op(
type='recv_save',
attrs={
"trainer_id": self.role_maker._worker_index(),
"shape": var.shape,
"slice_shapes":
[",".join([str(i) for i in var.shape])],
"slice_varnames": [var.name],
"remote_varnames": [var.name],
"is_sparse": False,
"endpoints": var_ctx.split_endpoints(),
"file_path": os.path.join(dirname, var.name)
})
executor.run(prog)
return local_vars
def _save_sparse_params(self, executor, dirname, context, main_program):
prog = Program()
block = prog.global_block()
local_vars = []
for name, var_ctx in context.items():
if len(var_ctx.origin_varnames()) != 1:
raise ValueError("Dense can not support split now.")
varname = var_ctx.origin_varnames()[0]
local_vars.append(varname)
optimizer = self._get_optimizer_op(varname)
reshaped_varnames, origin_varnames = self._get_optimizer_status(
optimizer.type, varname)
var = self.origin_main_program.global_block().vars[varname]
slice_shapes = []
dims1 = ",".join([str(i) for i in var.shape[1:]])
for section in var_ctx.sections():
slice_shapes.append(str(section) + dims1)
block.append_op(
type='recv_save',
attrs={
"trainer_id": self.role_maker._worker_index(),
"shape": var.shape,
"slice_shapes": slice_shapes,
"slice_varnames": var_ctx.split_varnames(),
"remote_varnames": var_ctx.split_varnames(),
"is_sparse": True,
"endpoints": var_ctx.split_endpoints(),
"pserver_num":
len(self.role_maker._get_pserver_endpoints()),
"file_path": os.path.join(dirname, var.name)
})
for reshaped_varname in reshaped_varnames:
var = self.origin_main_program.global_block().vars[
reshaped_varname]
slice_varnames = []
remote_varnames = []
for i in range(len(var_ctx.split_varnames())):
slice_varnames.append("{}.block{}".format(reshaped_varname,
i))
remote_varnames.append(reshaped_varname)
block.append_op(
type='recv_save',
attrs={
"trainer_id": self.role_maker._worker_index(),
"shape": var.shape,
"slice_shapes": slice_shapes,
"slice_varnames": slice_varnames,
"remote_varnames": remote_varnames,
"is_sparse": True,
"endpoints": var_ctx.split_endpoints(),
"pserver_num":
len(self.role_maker._get_pserver_endpoints()),
"file_path": os.path.join(dirname, var.name)
})
for origin_varname in origin_varnames:
var = self.origin_main_program.global_block().vars[
origin_varname]
block.append_op(
type='recv_save',
attrs={
"trainer_id": self.role_maker._worker_index(),
"shape": var.shape,
"slice_shapes":
[",".join([str(i) for i in var.shape])],
"slice_varnames": [origin_varname],
"remote_varnames": [origin_varname],
"is_sparse": False,
"endpoints": var_ctx.split_endpoints()[:1],
"file_path": os.path.join(dirname, var.name)
})
executor.run(prog)
return context.keys()
def _save_distributed_params(self, executor, dirname, context, mode):
prog = Program()
block = prog.global_block()
for name, var_ctx in context.items():
block.append_op(
type='checkpoint_notify',
attrs={
"varname": name,
"mode": mode,
"slice_varnames": var_ctx.split_varnames(),
"remote_varnames": var_ctx.split_varnames(),
"endpoints": var_ctx.split_endpoints(),
"dirname": dirname
})
executor.run(prog)
return context.keys()
def _save_distributed_persistables(self, executor, dirname, main_program,
mode):
dense_ctx = self.compiled_strategy.get_communicator_recv_context(
recv_type=1, use_origin_program=True)
sparse_ctx = self.compiled_strategy.get_communicator_recv_context(
recv_type=2, use_origin_program=True)
distributed_ctx = self.compiled_strategy.get_communicator_recv_context(
recv_type=3, use_origin_program=True)
recv_dense_varnames = self._save_dense_params(executor, dirname,
dense_ctx, main_program)
recv_sparse_varnames = self._save_sparse_params(
executor, dirname, sparse_ctx, main_program)
recv_distributed_varnames = self._save_distributed_params(
executor, dirname, distributed_ctx, mode)
saved_varnames = recv_dense_varnames + list(
recv_sparse_varnames) + list(recv_distributed_varnames)
remaining_vars = list(
filter(
ParameterServerRuntime.__exclude_vars(saved_varnames),
main_program.list_vars()))
fluid.io.save_vars(
executor,
main_program=main_program,
dirname=dirname,
vars=remaining_vars)
def _ps_inference_save_persistables(self,
executor,
dirname,
main_program=None,
mode=0,
**kwargs):
"""
This function filters out all variables with `persistable==True` from the
give `main_program` and then saves these variables to the folder `dirname`
or file `filename`.
The `dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set `filename` None; if you would like to save all variables in a
single file, use `filename` to specify the file name.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save_persistables() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save_persistables() function, executor must be as Executor type"
)
if main_program is None:
main_program = self.compiled_strategy.get_origin_ps_main_program()
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed"
)
self._save_distributed_persistables(executor, dirname, main_program,
mode)
def _ps_inference_save_inference_model(self,
executor,
dirname,
feeded_var_names,
target_vars,
main_program=None,
export_for_deployment=True):
"""
Prune the given `main_program` to build a new program especially for inference,
        and then save it and all related parameters to the given `dirname` by the `executor`.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save_inference_model() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save_inference_model() function, executor must be as Executor type"
)
if main_program is not None:
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed"
)
fluid.io.save_inference_model(dirname, feeded_var_names,
target_vars, executor, main_program,
None, None, export_for_deployment)
else:
fluid.io.save_inference_model(dirname, feeded_var_names,
target_vars, executor,
self.origin_main_program, None, None,
export_for_deployment, True)
model_basename = "__model__"
model_filename = os.path.join(dirname, model_basename)
with open(model_filename, "rb") as f:
program_desc_str = f.read()
program = Program.parse_from_string(program_desc_str)
program._copy_dist_param_info_from(fluid.default_main_program())
self._ps_inference_save_persistables(
executor, dirname, program, mode=0)
def _save_inference_model(self, *args, **kwargs):
self._ps_inference_save_inference_model(*args, **kwargs)
def _save_persistables(self, *args, **kwargs):
self._ps_inference_save_persistables(*args, **kwargs)
| apache-2.0 | 1,028,175,440,913,156,700 | 39.843049 | 132 | 0.544576 | false | 4.318634 | true | false | false |
LLNL/spack | var/spack/repos/builtin/packages/r-genomicfeatures/package.py | 5 | 3183 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGenomicfeatures(RPackage):
"""Conveniently import and query gene models.
A set of tools and methods for making and manipulating transcript
centric annotations. With these tools the user can easily download the
genomic locations of the transcripts, exons and cds of a given organism,
from either the UCSC Genome Browser or a BioMart database (more sources
will be supported in the future). This information is then stored in a
local database that keeps track of the relationship between transcripts,
exons, cds and genes. Flexible methods are provided for extracting the
desired features in a convenient format."""
homepage = "https://bioconductor.org/packages/GenomicFeatures"
git = "https://git.bioconductor.org/packages/GenomicFeatures.git"
version('1.36.4', commit='28082ec465c91ccaec6881ff348b380edac1b555')
version('1.34.8', commit='c798b3bb111f4de30632303540074ec1875c1387')
version('1.32.3', commit='80807d88048858846de3750cecb9431a0e5e69e1')
version('1.30.3', commit='496bbf81beebd7c934b8d3dcea001e3e4a7d7dee')
version('1.28.5', commit='ba92381ae93cb1392dad5e6acfab8f6c1d744834')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rcurl', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.30.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.30.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.30.3:', type=('build', 'run'))
depends_on('r-rmysql', when='@1.30.3', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.3:', type=('build', 'run'))
| lgpl-2.1 | 8,884,826,130,342,280,000 | 54.842105 | 82 | 0.649387 | false | 2.844504 | false | false | false |
scottsilverlabs/raspberrystem | rstem/projects/led_matrix_games/game_of_life.py | 1 | 4277 | #!/usr/bin/env python3
#
# Copyright (c) 2014, Scott Silver Labs, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from rstem import led_matrix, button
import random
import time
# notify of progress
print("P50")
sys.stdout.flush()
# initialize led matrix
#led_matrix.init_grid(2,2)
led_matrix.init_matrices([(0,8),(8,8),(8,0),(0,0)])
# set up buttons
A = 4
B = 17
UP = 25
DOWN = 24
LEFT = 23
RIGHT = 18
START = 27
SELECT = 22
# setup exit and restart button
exit_button = button.Button(START)
restart_button = button.Button(A)
# notify of progress
print("P60")
sys.stdout.flush()
# initialize variables
num_rows, num_cols, curr_gen, next_gen = (None, None, None, None)
def get_num_neighbors(curr_gen, x, y):
"""Returns the number of (alive) neighbors of given pixel"""
count = 0
for j in range(y-1, y+2):
for i in range(x-1, x+2):
if not(i == x and j == y): # don't count itself
if i >= 0 and i < led_matrix.width() and j >= 0 and j < led_matrix.height():
if curr_gen[j][i] == 0xF:
count += 1
return count
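# Illustrative (not part of the original file): only the up-to-8 cells around
# (x, y) are inspected, clipped at the display edges, so a corner pixel can
# report at most 3 alive neighbors.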
def next_generation():
"""Creates next generation using Conway's Game of Life rules:
http://en.wikipedia.org/wiki/Conway's_Game_of_Life
"""
global next_gen
global curr_gen
for y in range(0,num_rows):
for x in range(0,num_cols):
num_neighbors = get_num_neighbors(curr_gen, x, y)
if curr_gen[y][x] == 0xF and num_neighbors < 2:
next_gen[y][x] = 0 # pixel died off, not enough neighbors
elif curr_gen[y][x] == 0xF and num_neighbors > 3:
next_gen[y][x] = 0 # pixel died off, too many neighbors
elif curr_gen[y][x] == 0 and num_neighbors == 3:
next_gen[y][x] = 0xF # birth of a new pixel
else:
next_gen[y][x] = curr_gen[y][x]
curr_gen, next_gen = next_gen, curr_gen # swap lists
def random_grid(width, height):
"""Creates a grid of random dead and alive pixels."""
grid = []
for y in range(height):
row = []
for x in range(width):
random_num = random.randint(0,3)
if random_num == 0: # make alive pixels less common
row.append(0xF) # add an alive pixel
else:
row.append(0x0) # add a dead pixel
grid.append(row)
return grid
def draw_grid():
"""Draws the current generation to led_matrix."""
for y in range(num_rows):
for x in range(num_cols):
led_matrix.point(x, y, curr_gen[y][x])
# whole game loop
while True:
# variables
num_rows = led_matrix.height()
num_cols = led_matrix.width()
# notify of progress
print("P80")
sys.stdout.flush()
curr_gen = random_grid(num_cols, num_rows)
# notify of progress
print("P90")
sys.stdout.flush()
next_gen = [[0 for i in range(num_cols)] for j in range(num_rows)]
# TODO allow sprite input instead of random grid?
# notify menu we are ready for the led matrix
print("READY")
sys.stdout.flush()
# single game loop
while True:
if exit_button.is_pressed():
# clean up stuff and exit the program
button.cleanup()
led_matrix.cleanup()
sys.exit(0)
elif restart_button.is_pressed():
break # break out of this inner loop (lets us restart generations)
else:
led_matrix.erase() # clear the display
draw_grid() # draw the current generation
led_matrix.show() # show on display
next_generation() # update generation to next generation
| apache-2.0 | -1,774,233,955,111,298,800 | 29.333333 | 92 | 0.594342 | false | 3.555278 | false | false | false |
DolphDev/PSV | psv/core/objects/rowobjects.py | 2 | 14722 | from ..utils import asciireplace, limit_text
from ..exceptions.messages import RowObjectMsg as msg
from functools import lru_cache
from tabulate import tabulate
from string import ascii_lowercase, digits
from types import FunctionType
import keyword
accepted_chars = (ascii_lowercase + "_" + digits)
class Row(dict):
"""This Class represents a row in a spreadsheet
This object is a highly specialized dict, meant to allow
extremely quick and easy access/manipulation to row data
at an acceptable memory cost.
"""
__slots__ = ["__delwhitelist__", "__output__", "__sawhitelist__"]
def __init__(self, data, columns_map, *args, **kwargs):
        # These are used to initialise the whitelists while bypassing Row's custom __setattr__
super(Row, self).__setattr__("__delwhitelist__",
RowDefaults.__delwhitelist__)
super(Row, self).__setattr__("__sawhitelist__",
RowDefaults.__sawhitelist__)
super(Row, self).__init__(data)
self[RowDefaults.__psvcolumns__] = columns_map
self._set_outputrow(True)
self.construct(*args, **kwargs)
def __call__(self, column, setvalue=None, delete=False):
"""Alais for .getcolumn() family of methods"""
if delete:
self.delcolumn(column, False)
elif setvalue is None:
return self.getcolumn(column, False)
else:
self.setcolumn(column, setvalue, False)
def __eq__(self, other):
        # Returns True if the other Row holds the same content as this one
if isinstance(other, self.__class__):
return self.__hashvalue__() == other.__hashvalue__()
return False
def __hashvalue__(self):
"""raw data that can be hashed if all contents are hashable
or can be used for comparison
"""
return (tuple((column, self[column])
for column in filter(lambda x: x != "__psvcolumnstracker__", sorted(self.keys()))))
def __repr__(self):
return "<{rowname}:{columnamount} object at {hexloc}>".format(
rowname=self.__class__.__name__,
columnamount=len(self.keys())-1,
hexloc=hex(id(self)).upper().replace("X", "x")
)
def __str__(self):
return "<{rowname}:{columnamount} object at {hexloc}>".format(
rowname=self.__class__.__name__,
columnamount=len(self.keys())-1,
hexloc=hex(id(self)).upper().replace("X", "x")
)
def __pos__(self):
self._set_outputrow(True)
return self
def __neg__(self):
self._set_outputrow(False)
return self
def __invert__(self):
self._set_outputrow(not (self.outputrow))
return self
def __getattribute__(self, attr):
if not self["__psvcolumnstracker__"].get(attr, False):
return super(dict, self).__getattribute__(attr)
else:
return self[self["__psvcolumnstracker__"][attr]]
def __getattr__(self, attr):
"""Handles all exception handeling when __getattribute__ fails"""
s = cleanup_name(attr)
if s in self["__psvcolumnstracker__"].keys():
raise AttributeError((
"{}{}"
.format(
'\'{}\' has no attribute \'{}\''.format(
type(self), attr),
". However, '{s}' is an existing condensed ".format(s=s) +
"column name. Only the condensed version is supported."
.format(s=s)
)))
else:
raise AttributeError(msg.attribute_missing.format(
type(self), attr))
def __setattr__(self, attr, v):
"""Allows setting of rows and attributes by using =
statement
        Note: Setting class attributes is not optimized; this dict is specialized around
dynamic attribute (from row data) access. Regular Attribute Setting may be much slower.
"""
s = cleanup_name(attr)
try:
self[self["__psvcolumnstracker__"][attr]] = v
except KeyError:
if attr in self.__sawhitelist__:
super(Row, self).__setattr__(attr, v)
else:
keys = self["__psvcolumnstracker__"].keys()
if s in keys:
raise AttributeError((
"{}{}"
.format(
'\'{}\' has no attribute \'{}\''.format(
type(self), attr),
". However, '{s}' is an existing condensed ".format(s=s) +
"column name. Only the condensed version is supported."
.format(s=s)
)))
else:
# A somewhat hacky implementation of Dict's restriction of editing it's
# Attributes.
if attr in dir(self):
raise AttributeError(
msg.attribute_readonly.format(classname=self.__class__, attr=attr))
else:
raise AttributeError(msg.attribute_missing.format(
type(self), attr))
def __delattr__(self, attr):
"""Allows deletion of rows and attributes (Makes a row an empty string) by using
del statement"""
s = cleanup_name(attr)
try:
self[self["__psvcolumnstracker__"][attr]] = ""
except KeyError:
if attr in self.__delwhitelist__:
super(Row, self).__delattr__(attr)
else:
keys = self["__psvcolumnstracker__"].keys()
if s in keys:
raise AttributeError((
"{}{}"
.format(
'\'{}\' has no attribute \'{}\''.format(
type(self), attr),
". However, '{s}' is an existing condensed ".format(s=s) +
"column name. Only the condensed version is supported."
.format(s=s)
)))
else:
if attr in dir(self):
raise AttributeError(
msg.attribute_readonly.format(classname=self.__class__, attr=attr))
else:
raise AttributeError(msg.attribute_missing.format(
type(self), attr))
def add_valid_attribute(self, attr, deletable=False):
"""Used by classes that inherit to add attributes to the whitelists
Note: Row should only be inherited if no other option is available.
These attributes being accessed will be notably slower due to the implementation.
Memory Usage may also be much higher, as the whitelists will no longer be a
static variable.
"""
if self.__class__ is Row:
raise TypeError(msg.inherited_rows)
super(Row, self).__setattr__(
"__sawhitelist__", set(self.__sawhitelist__ | set((attr,))))
if deletable:
super(Row, self).__setattr__(
"__delwhitelist__", set(self.__delwhitelist__ | set((attr,))))
def construct(self, *args, **kwargs):
"""This method can be used by inherited objects of :class:`Row` as if it was __init__
        Note: Row should only be inherited if no other option is available. It can cause
memory bloat issues and can be notably slower.
"""
pass
@property
def outputrow(self):
"""Returns a boolean of the current output flag for this row"""
return self.__output__
@outputrow.setter
def outputrow(self, v):
if not isinstance(v, bool):
raise TypeError(msg.outputrowmsg.format(bool, type(v)))
self.__output__ = v
def _set_outputrow(self, v):
"""Fast Internal way to set output flags
Doesn't check for bad input, meant for internal use only
Much faster than the setter
"""
super(Row, self).__setattr__("__output__", v)
def getcolumn(self, column, accept_small_names=True):
"""Get a cell by the orginal column name
:param column: The column name. Can only be long form if accept_small_names == False
:type column: :class:`str`
:returns: String of the data, or an int/float if a number/decimal.
:rtype: :class:`str`, :class:`int`, or :class:`float`
"""
if column in self.keys():
return (self[column])
elif accept_small_names:
if self["__psvcolumnstracker__"].get(column):
return getattr(self, column)
if not accept_small_names:
raise ValueError("'{}'".format(column))
else:
raise ValueError("'{}'. Make sure the shorterned columns name have no collisions".format(column))
def setcolumn(self, column, value, accept_small_names=True):
"""Set a cell by the orginal column name
:param column: The column name. Can be both long and short form.
:param value: The data to be set to the specified column
:type column: :class:`str`
"""
if column in self.keys():
self[column] = value
return
elif accept_small_names:
if self["__psvcolumnstracker__"].get(column):
self.__setattr__(column, value)
return
if not accept_small_names:
raise ValueError("'{}'".format(column))
else:
raise ValueError("'{}'. Make sure the shorterned columns name have no collisions".format(column))
def delcolumn(self, column, accept_small_names=True):
"""Delete a cell by the orginal column name
:param column: The column name. Can be both long and short form.
:type column: :class:`str`
"""
if column in self.keys():
self[column] = ""
return
elif accept_small_names:
if self["__psvcolumnstracker__"].get(column):
self.__delattr__(column)
return
if not accept_small_names:
raise ValueError("'{}'".format(column))
else:
raise ValueError("'{}'. Make sure the shorterned columns name have no collisions".format(column))
def _addcolumns(self, columnname, columndata=""):
"""Adds a column for this row only doesn't add to column tracker
Warning: Internal Method, API/Behavior may change without notice"""
self[columnname] = columndata
def _addcolumns_func(self, columnname, columnfunc):
self[columnname] = columnfunc(self)
def _delcolumns(self, columnname, columndata=""):
"""Adds a column for this row only
doesn't add to column tracker
Warning: Internal Method, API/Behavior may change without notice"""
del self[columnname]
def _rename_columns(self, old_columnname, new_columnname):
self[new_columnname] = self[old_columnname]
del self[old_columnname]
def longcolumn(self, columns=None):
"""
:params columns: A collection of columns, if supplied the method
will return only the specified columns.
:type columns: :class:`tuple`, :class:`list`
:returns: Generates a :class:`dict` that uses orginal names of
the column.
:rtype: :class:`dict`
"""
newdict = {}
for k in columns or self.keys():
if k == "__psvcolumnstracker__":
continue
newdict.update({
k: self[k]})
return newdict
def update_values(self, *arg, **kwargs):
"""Safe way to use a .update() like method on rows, checks header columns
"""
keys = set(self.keys())
if arg:
for x in arg:
xkeys = set(x.keys())
if xkeys.issubset(keys):
self.update(x)
else:
raise ValueError(
"'{}' contains columns not in this row currently"
.format(x)
)
if kwargs:
kwkeys = set(kwargs.keys())
if kwkeys.issubset(keys):
self.update(kwargs)
else:
raise ValueError(
"'{}' contains columns not in this row currently"
.format(kwargs)
)
def tabulate(self, format="grid", only_ascii=True, columns=None, text_limit=None):
"""Integrates tabulate library with psv
:param format: A valid format for :class:`tabulate` library.
        :param only_ascii: If :data:`True`, only return ascii characters.
:param columns: Collection of column names that will be included in the
tabulating.
:param text_limit: The number of characters to include per cell.
:type format: :class:`str`
"""
data = self.longcolumn()
sortedcolumns = sorted(data) if not columns else columns
result = tabulate(
[sortedcolumns] +
[[limit_text(data[c], text_limit) for c in sortedcolumns]],
headers="firstrow",
tablefmt=format)
if only_ascii:
return asciireplace(result)
else:
return result
class RowDefaults(object):
"""Contains Static Variables the Row uses
to prevent rampant memory waste.
"""
__delwhitelist__ = set()
__sawhitelist__ = {"__output__", "outputrow"}
# This is inlined in most of the library due to speed constraints.
__psvcolumns__ = '__psvcolumnstracker__'
# For backwards compability, will be removed in the future
# Refering Row as BaseRow is considered Depreciated
BaseRow = Row
#This block was in utils,
# but it relied on a circular reference that re-imported
# a variable everytime this core function was called.
#While less clean, this produces a decent speedup.
banned_columns = {RowDefaults.__psvcolumns__,}
non_accepted_key_names = set(tuple(dir(
Row)) + ("row_obj", RowDefaults.__psvcolumns__,
RowDefaults.__psvcolumns__) + tuple(keyword.kwlist))
bad_first_char = set(digits)
@lru_cache(1024)
def cleanup_name(s):
result = "".join(filter(lambda x: x in accepted_chars, s.lower()))
if not result:
raise ValueError(msg.non_valid.format(s))
if result in non_accepted_key_names or result[0] in bad_first_char:
result = "psv_" + result
return result
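# --- Usage sketch (not part of the original module) ---
# The column map below is a stand-in for what a psv loader would build by
# running cleanup_name over the header row.
if __name__ == "__main__":
    data = {"First Name": "Ada", "Age": 36}
    columns_map = {cleanup_name(header): header for header in data}
    row = Row(data, columns_map)
    print(row.firstname)         # attribute access via the condensed column name
    row.age = 37                 # writes through to the "Age" cell
    print(row.getcolumn("Age"))  # -> 37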
| mit | -8,322,092,544,493,355,000 | 36.365482 | 109 | 0.545238 | false | 4.503518 | false | false | false |
lmorillas/python_events | extract_calendars.py | 1 | 7866 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 @lmorillas. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Based on
https://github.com/google/google-api-python-client/blob/master/samples/service_account/tasks.py
by [email protected]
"""
__author__ = '[email protected] (Luis Miguel Morillas)'
import httplib2
import pprint
import sys
import datetime
from operator import itemgetter
from itertools import groupby
from googleapiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials, AccessTokenRefreshError
from geopy import geocoders
google = geocoders.GoogleV3(timeout=5)
yandex = geocoders.Yandex(timeout=5)
nom = geocoders.Nominatim(timeout=5)
import shelve
# Credentials for Service Accout
EMAIL_CLIENT = '696801545616-44i6o78jdoa7me4lr416n1d5rniidmns@developer.gserviceaccount.com'
FILE_KEY = 'pycal.p12'
def connect_calendar():
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = open(FILE_KEY, 'rb')
key = f.read()
f.close()
credentials = SignedJwtAssertionCredentials(EMAIL_CLIENT,
key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
return service
def get_month(date_str):
'''
returns start month str from event
'''
return datetime.datetime.strptime(date_str[:10], '%Y-%m-%d').strftime("%B")
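# Example (not part of the original script): get_month("2015-07-20") -> "July"
# (month name depends on the active locale)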
def calendar_events(service, cal_id, singleEvents="False"):
    # Today: only events present and future
timeMin = datetime.datetime.now().strftime('%Y-%m-%dT00:00:00.000Z')
if singleEvents != "False":
timeMax = '{}-12-31T23:00:00.000Z'.format(datetime.datetime.now().year)
else:
timeMax = None
#timeMin = datetime.datetime.now().isoformat()
events = []
try:
page_token = None
while True:
event_list = service.events().list(singleEvents=singleEvents,orderBy='startTime', calendarId=cal_id,
pageToken=page_token, timeMin=timeMin, timeMax=timeMax).execute()
events.extend([event for event in event_list['items']])
page_token = event_list.get('nextPageToken')
if not page_token:
break
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize.')
return events
def geolocate(address):
global geocache
#address = address.encode('utf-8') # for storing in shelve
loc = None
if address not in geocache.keys():
print ('Searching ', address)
try:
loc = google.geocode(address)
except:
pass
if not loc:
try:
loc = yandex.geocode(address)
except:
pass
if not loc:
try:
loc = google.geocode(','.join(address.split(',')[1:]))
except:
pass
if loc:
loc = loc.latitude, loc.longitude, loc.raw
geocache[address] = loc
else:
loc = geocache.get(address)[:2]
return loc
def loc_to_country(latlon):
global geocache
if latlon not in geocache.keys():
print ('Searching country of ', latlon)
try:
loc = nom.reverse(latlon)
if loc:
country = loc.raw.get('address').get('country')
geocache[latlon] = country
return country
except:
return ''
else:
return geocache.get(latlon)
def event_to_item(event, cal):
if event.get('summary'):
print (event.get('summary').encode('utf-8'), ' --> ' )
else:
print('No summary ? ', event)
item = {}
item['description'] = event.get('description')
item['id'] = event.get('id')
item['start'] = event.get('start').get('date')
if not item['start']:
item['start'] = event.get('start').get('dateTime')
item['end'] = event.get('end').get('date')
if not item['end']:
item['end'] = event.get('end').get('dateTime')
item['label'] = event.get('summary')
item['url'] = event.get('htmlLink')
item['cal'] = cal
item['month'] = get_month(item.get('start'))
address = event.get('location')
if address:
location = geolocate(address)
if location:
lat = location[0]
lon = location[1]
item['latlon'] = "{},{}".format(lat, lon)
print (item['latlon'])
country = loc_to_country(item['latlon'])
item['country'] = country
return item
def create_index(data="", schema = ""):
import pytz
#data = json.dumps(data)
data = json.JSONEncoderForHTML().encode(data)
schema = json.dumps(schema)
now = datetime.datetime.now(pytz.utc)
format = "%Y-%m-%d" # "%Y-%m-%d %H:%M %Z"
template = open('index.templ').read()
open('docs/index.html', 'w').write(template.format(datetime=now.strftime(format),
data=data, schema=schema ))
def select_first_event(eventlist):
    '''Select only the first event when there are repeated events'''
def sort_by_eventID(element):
return element.get('recurringEventId', element.get('summary'))
#recurring = itemgetter('recurringEventId') # keyerror ?
recurring = sort_by_eventID
def _date(x):
return x.get('start').get('dateTime')
eventlist.sort(key=recurring)
_non_repeated = []
for ev, recur in groupby(eventlist, key=recurring):
try:
recur = sorted(recur, key=_date)
_non_repeated.append(recur[0]) # only add the first
except:
print ('recur error -> ', [x for x in recur])
return _non_repeated
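# Illustrative (not part of the original script): events sharing a
# 'recurringEventId' are grouped and only the earliest-starting instance is
# kept, so a weekly meetup appears once in the output.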
if __name__ == '__main__':
import datetime
import simplejson as json
geocache = shelve.open('geocache.dat')
# Cals IDs from https://wiki.python.org/moin/PythonEventsCalendar
cal_id_python_events = '[email protected]'
cal_id_user_group = '[email protected]'
items = []
service = connect_calendar()
events = calendar_events(service, cal_id_python_events)
for event in events:
items.append(event_to_item(event, 'Larger'))
events = calendar_events(service, cal_id_user_group, singleEvents="True")
events = select_first_event(events)
for event in events:
items.append(event_to_item(event, 'Smaller'))
geocache.sync()
geocache.close()
schema = {"properties": {
"url": {
"valueType": "url"
},
"start": {
"valueType": "date"
},
"end": {
"valueType": "date"
},
"month": {
"valueType": "date"
},
},
"types": {
"Item": {
"pluralLabel": "events",
"label": "event"
}
}}
data = {'items': items}
#data.update(metadata)
#json.dump(data, open('docs/events_python.json', 'w'))
create_index(data, schema)
| apache-2.0 | -6,285,781,665,194,991,000 | 27.298561 | 112 | 0.606153 | false | 3.655204 | false | false | false |
bbc/kamaelia | Sketches/PT/BouncingCatGame/X-MPS-likesprite.py | 3 | 4359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Proper likefile control of a sprite handler
#
from likefile import LikeFile, schedulerThread
import time, Axon, os, random, pygame, math
from Sprites.BasicSprite import BasicSprite
from Sprites.SpriteScheduler import SpriteScheduler
from Kamaelia.UI.Pygame.EventHandler import EventHandler
from Simplegame import cat_location, screensize, border, background, screen_surface, randomFromRangeExcludingZero
import Axon
bg = schedulerThread(slowmo=0.01).start()
global spritescheduler
class MyGamesEvents(EventHandler):
def __init__(self, cat_args, trace=1, ):
self.trace = 0
self.cat_args = cat_args
def keydown(self, unicode, key, mod, where):
if key == 113: # "Q"
raise "QUIT"
class CatSprite(BasicSprite):
def main(self):
spritescheduler.allsprites.add(self)
while True:
self.pause()
yield 1
def make_cat(cat_location, screensize, border):
# Get the cat again!
files = list()
for x in os.listdir("pictures"):
if x not in ("README","CVS",".svn"):
files.append(x)
image_location = files[random.randint(0,len(files)-1)]
cat_surface = pygame.image.load("pictures/"+image_location)
cat = cat_surface.convert()
cat.set_colorkey((255,255,255), pygame.RLEACCEL)
newCat = CatSprite(image=cat)
return newCat
cat_args = (cat_location, screensize, border)
spritescheduler = SpriteScheduler(cat_args, [], background, screen_surface, MyGamesEvents).activate()
#newcat = make_cat(*cat_args)
class SolarSystem(Axon.ThreadedComponent.threadedcomponent):
def __init__(self, *cat_args):
super(SolarSystem, self).__init__()
self.the_sun = LikeFile(make_cat(*cat_args), extrainboxes = ("translation", "imaging"))
self.the_sun.activate()
self.planet = LikeFile(make_cat(*cat_args), extrainboxes = ("translation", "rotator", "imaging"))
self.planet.activate()
self.sun_position = tuple([x/2 for x in screensize])
self.planet_position = (screensize[0]/4.0, screensize[1]/2)
self.planet_velocity = (0.0, 10)
# ugh, I should be using numpy but it works, that's the important thing
# This is merely a test of likefile. Really, kamaelia components should be written for a physics simulation like this.
def acceleration(self, pos_planet, pos_sun):
g = 200 # fudge factor
# F = ma, but F is proportional to distance ** -2
# neatly removing the need to calculate a square root for the distance
direction = (pos_planet[0] - pos_sun[0], pos_planet[1] - pos_sun[1])
magnitude = direction[0] ** 2 + direction[1] ** 2
return tuple([g * x/magnitude for x in direction])
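    # Illustrative (not part of the original file): with g=200, pos_planet=(100, 0)
    # and pos_sun=(0, 0), direction=(100, 0) and magnitude=10000, so this
    # returns (2.0, 0.0).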
def apply_acceleration_to_velocity(self, velocity, accn):
return (velocity[0] + accn[0], velocity[1] + accn[1])
def apply_velocity_to_position(self,position, velocity):
return (position[0] + velocity[0], position[1] + velocity[1])
def main(self):
self.the_sun.put(self.sun_position, "translation")
while True:
time.sleep(0.01)
self.planet.put(self.planet_position, "translation")
accn = self.acceleration(self.sun_position, self.planet_position)
self.planet_velocity = self.apply_acceleration_to_velocity(self.planet_velocity, accn)
self.planet_position = self.apply_velocity_to_position(self.planet_position, self.planet_velocity)
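        # Note on the loop above (added commentary, not original logic):
        # acceleration() is called here with (sun_position, planet_position),
        # the reverse of its parameter names, so the resulting vector points
        # from the planet toward the sun and the orbit is attractive.
        # Updating velocity first, then position with the new velocity, is a
        # semi-implicit Euler step, which keeps this toy orbit reasonably stable.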
SolarSystem(*cat_args).activate()
while True:
time.sleep(100)
| apache-2.0 | 8,544,102,827,104,753,000 | 35.940678 | 122 | 0.681349 | false | 3.541024 | false | false | false |
miyyer/qb | qanta/buzzer/util.py | 2 | 7569 | import os
import pickle
import numpy as np
import chainer
from multiprocessing import Pool
from functools import partial
from chainer import Variable
from chainer.backends import cuda
from qanta.datasets.quiz_bowl import QuizBowlDataset
from qanta.guesser.abstract import AbstractGuesser
from qanta.util.constants import BUZZER_DEV_FOLD, BUZZER_TRAIN_FOLD
# constants
N_GUESSES = 10
os.makedirs("output/buzzer", exist_ok=True)
dataset_dir = "output/buzzer/{}_data.pkl"
def vector_converter_0(guesses_sequence):
"""vector converter / feature extractor with only prob
Args:
guesses_sequence: a sequence (length of question) of list of guesses
(n_guesses), each entry is (guess, prob)
Returns:
a sequence of vectors
"""
length = len(guesses_sequence)
prev_prob_vec = [0.0 for _ in range(N_GUESSES)]
prev_dict = dict()
vecs = []
for i in range(length):
prob_vec = []
prob_diff_vec = []
isnew_vec = []
guesses = guesses_sequence[i]
for guess, prob in guesses:
prob_vec.append(prob)
if i > 0 and guess in prev_dict:
prev_prob = prev_dict[guess]
prob_diff_vec.append(prob - prev_prob)
isnew_vec.append(0)
else:
prob_diff_vec.append(prob)
isnew_vec.append(1)
if len(guesses) < N_GUESSES:
for k in range(max(N_GUESSES - len(guesses), 0)):
prob_vec.append(0)
prob_diff_vec.append(0)
isnew_vec.append(0)
features = (
prob_vec[:3]
+ isnew_vec[:3]
+ prob_diff_vec[:3]
+ [prob_vec[0] - prob_vec[1], prob_vec[1] - prob_vec[2]]
+ [prob_vec[0] - prev_prob_vec[0], prob_vec[1] - prev_prob_vec[1]]
+ [sum(isnew_vec[:5])]
+ [np.average(prob_vec), np.average(prev_prob_vec)]
+ [np.average(prob_vec[:6]), np.average(prev_prob_vec[:5])]
+ [np.var(prob_vec), np.var(prev_prob_vec)]
+ [np.var(prob_vec[:5]), np.var(prev_prob_vec[:5])]
)
vecs.append(np.array(features, dtype=np.float32))
prev_prob_vec = prob_vec
prev_dict = {g: p for g, p in guesses}
return vecs
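# A minimal usage sketch (added for illustration; the sample data is made up and
# this helper is not called anywhere in the original module):
def _example_vector_converter_0():
    # Each position carries up to N_GUESSES (guess, prob) pairs, sorted by score.
    guesses_sequence = [
        [("Albert_Einstein", 0.50), ("Isaac_Newton", 0.30)],
        [("Albert_Einstein", 0.70), ("Niels_Bohr", 0.10)],
    ]
    vecs = vector_converter_0(guesses_sequence)
    # One fixed-length float32 vector per position; counting the terms of
    # `features` above gives 22 entries per vector.
    assert len(vecs) == 2 and vecs[0].shape == (22,)
    return vecs
# vector_converter_1 below follows the same pattern, but expects
# (guess, logit, prob) triples and emits a longer feature vector.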
def vector_converter_1(guesses_sequence):
"""vector converter / feature extractor with both logit and prob
Args:
guesses_sequence: a sequence (length of question) of list of guesses
(n_guesses), each entry is (guess, logit, prob)
Returns:
a sequence of vectors
"""
length = len(guesses_sequence)
prev_logit_vec = [0.0 for _ in range(N_GUESSES)]
prev_prob_vec = [0.0 for _ in range(N_GUESSES)]
prev_dict = dict()
vecs = []
for i in range(length):
logit_vec = []
prob_vec = []
logit_diff_vec = []
prob_diff_vec = []
isnew_vec = []
guesses = guesses_sequence[i]
for guess, logit, prob in guesses:
logit_vec.append(logit)
prob_vec.append(prob)
if i > 0 and guess in prev_dict:
prev_logit, prev_prob = prev_dict[guess]
logit_diff_vec.append(logit - prev_logit)
prob_diff_vec.append(prob - prev_prob)
isnew_vec.append(0)
else:
logit_diff_vec.append(logit)
prob_diff_vec.append(prob)
isnew_vec.append(1)
if len(guesses) < N_GUESSES:
for k in range(max(N_GUESSES - len(guesses), 0)):
logit_vec.append(0)
prob_vec.append(0)
logit_diff_vec.append(0)
prob_diff_vec.append(0)
isnew_vec.append(0)
features = (
logit_vec[:3]
+ prob_vec[:3]
+ isnew_vec[:3]
+ logit_diff_vec[:3]
+ prob_diff_vec[:3]
+ [logit_vec[0] - logit_vec[1], logit_vec[1] - logit_vec[2]]
+ [prob_vec[0] - prob_vec[1], prob_vec[1] - prob_vec[2]]
+ [logit_vec[0] - prev_logit_vec[0], logit_vec[1] - prev_logit_vec[1]]
+ [prob_vec[0] - prev_prob_vec[0], prob_vec[1] - prev_prob_vec[1]]
+ [sum(isnew_vec[:5])]
+ [np.average(logit_vec), np.average(prev_logit_vec)]
+ [np.average(prob_vec), np.average(prev_prob_vec)]
+ [np.average(logit_vec[:6]), np.average(prev_logit_vec[:5])]
+ [np.average(prob_vec[:6]), np.average(prev_prob_vec[:5])]
+ [np.var(logit_vec), np.var(prev_logit_vec)]
+ [np.var(prob_vec), np.var(prev_prob_vec)]
+ [np.var(logit_vec[:5]), np.var(prev_logit_vec[:5])]
+ [np.var(prob_vec[:5]), np.var(prev_prob_vec[:5])]
)
vecs.append(np.array(features, dtype=np.float32))
prev_logit_vec = logit_vec
prev_prob_vec = prob_vec
prev_dict = {x: (y, z) for x, y, z in guesses}
return vecs
def process_question(questions, vector_converter, item):
"""multiprocessing worker that converts the guesser output of a single
question into format used by the buzzer
"""
    _, q_rows = item
    qid = q_rows.qanta_id.tolist()[0]
answer = questions[qid].page
q_rows = q_rows.groupby("char_index")
char_indices = sorted(q_rows.groups.keys())
guesses_sequence = []
labels = []
for idx in char_indices:
p = q_rows.get_group(idx).sort_values("score", ascending=False)
guesses_sequence.append(list(zip(p.guess, p.score))[:N_GUESSES])
labels.append(int(p.guess.tolist()[0] == answer))
vectors = vector_converter(guesses_sequence)
return qid, vectors, labels, char_indices
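# For reference (inferred from read_data below, not a behaviour change): `item`
# is one (qanta_id, DataFrame) pair produced by df.groupby("qanta_id"), and the
# returned `labels` record, for each character index, whether the guesser's top
# guess matches the gold page; this is the signal the buzzer is trained to predict.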
def read_data(
fold,
output_type="char",
guesser_module="qanta.guesser.rnn",
guesser_class="RnnGuesser",
guesser_config_num=0,
vector_converter=vector_converter_0,
):
if os.path.isfile(dataset_dir.format(fold)):
with open(dataset_dir.format(fold), "rb") as f:
return pickle.load(f)
g_dir = AbstractGuesser.output_path(
guesser_module, guesser_class, guesser_config_num, ""
)
g_path = AbstractGuesser.guess_path(g_dir, fold, output_type)
with open(g_path, "rb") as f:
df = pickle.load(f)
df_groups = df.groupby("qanta_id")
questions = QuizBowlDataset(buzzer_train=True).questions_by_fold()
questions = {q.qanta_id: q for q in questions[fold]}
pool = Pool(8)
worker = partial(process_question, questions, vector_converter)
dataset = pool.map(worker, df_groups)
with open(dataset_dir.format(fold), "wb") as f:
pickle.dump(dataset, f)
return dataset
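# The dev fold can be loaded the same way, e.g. read_data(BUZZER_DEV_FOLD),
# assuming guesses have already been generated for that fold; this is presumably
# why BUZZER_DEV_FOLD is imported above even though only the train fold is used
# in the __main__ block below.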
def convert_seq(batch, device=None):
def to_device_batch(batch):
if device is None:
return batch
elif device < 0:
return [chainer.dataset.to_device(device, x) for x in batch]
else:
xp = cuda.cupy.get_array_module(*batch)
concat = xp.concatenate(batch, axis=0)
sections = np.cumsum([len(x) for x in batch[:-1]], dtype=np.int32)
concat_dev = chainer.dataset.to_device(device, concat)
batch_dev = cuda.cupy.split(concat_dev, sections)
return batch_dev
qids, vectors, labels, positions = list(map(list, zip(*batch)))
xs = [Variable(x) for x in to_device_batch(vectors)]
ys = to_device_batch(labels)
return {"xs": xs, "ys": ys}
if __name__ == "__main__":
data = read_data(BUZZER_TRAIN_FOLD)
print(data)
| mit | 5,055,332,657,816,431,000 | 34.535211 | 82 | 0.570485 | false | 3.182927 | false | false | false |